mirror of https://github.com/Fishwaldo/build.git, synced 2025-03-26 16:51:48 +00:00
diff --git a/Documentation/hwlat_detector.txt b/Documentation/hwlat_detector.txt
new file mode 100644
index 000000000000..cb61516483d3
--- /dev/null
+++ b/Documentation/hwlat_detector.txt
@@ -0,0 +1,64 @@
+Introduction:
+-------------
+
+The module hwlat_detector is a special purpose kernel module that is used to
+detect large system latencies induced by the behavior of certain underlying
+hardware or firmware, independent of Linux itself. The code was developed
+originally to detect SMIs (System Management Interrupts) on x86 systems,
+however there is nothing x86-specific about this patchset. It was
+originally written for use by the "RT" patch since the Real Time
+kernel is highly latency sensitive.
+
+SMIs are usually not serviced by the Linux kernel, which typically does not
+even know that they are occurring. SMIs are instead set up by BIOS code
+and are serviced by BIOS code, usually for "critical" events such as
+management of thermal sensors and fans. Sometimes though, SMIs are used for
+other tasks and those tasks can spend an inordinate amount of time in the
+handler (sometimes measured in milliseconds). Obviously this is a problem if
+you are trying to keep event service latencies down in the microsecond range.
+
+The hardware latency detector works by hogging all of the CPUs for configurable
+amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
+for some period, then looking for gaps in the TSC data. Any gap indicates a
+time when the polling was interrupted, and since the machine is stopped and
+interrupts are turned off, the only thing that could do that would be an SMI.
+
+Note that the SMI detector should *NEVER* be used in a production environment.
+It is intended to be run manually to determine if the hardware platform has a
+problem with long system firmware service routines.
+
+Usage:
+------
+
+Loading the module hwlat_detector with the parameter "enabled=1" (or toggling
+the "enable" entry in the "hwlat_detector" debugfs directory) is the only
+step required to start the hwlat_detector. It is possible to redefine the
+threshold in microseconds (us) above which latency spikes will be taken
+into account (parameter "threshold=").
+
+Example:
+
+	# modprobe hwlat_detector enabled=1 threshold=100
+
+After the module is loaded, it creates a directory named "hwlat_detector" under
+the debugfs mountpoint ("/debug/hwlat_detector" in this text). It is necessary
+to have debugfs mounted, which might be on /sys/debug on your system.
+
+The /debug/hwlat_detector interface contains the following files:
+
+count		- number of latency spikes observed since last reset
+enable		- a global enable/disable toggle (0/1), resets count
+max		- maximum hardware latency actually observed (usecs)
+sample		- a pipe from which to read current raw sample data
+		  in the format <timestamp> <latency observed usecs>
+		  (can be opened O_NONBLOCK for a single sample)
+threshold	- minimum latency value to be considered (usecs)
+width		- time period to sample with CPUs held (usecs)
+		  must be less than the total window size (enforced)
+window		- total period of sampling, width being inside (usecs)
+
+By default we will set width to 500,000 and window to 1,000,000, meaning that
+we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
+observe any latencies that exceed the threshold (initially 100 usecs),
+then we write to a global sample ring buffer of 8K samples, which is
+consumed by reading from the "sample" (pipe) debugfs file interface.
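For readers who want to try the interface described above, a minimal session could look like the sketch below. This is illustrative only: it assumes debugfs is mounted at /sys/kernel/debug (substitute /debug or wherever your system mounts it) and uses the file names listed in the new document.

	# mount -t debugfs nodev /sys/kernel/debug   (if not already mounted)
	# modprobe hwlat_detector enabled=1 threshold=100
	# cat /sys/kernel/debug/hwlat_detector/count (spikes seen since reset)
	# cat /sys/kernel/debug/hwlat_detector/max   (worst observed latency, usecs)
	# cat /sys/kernel/debug/hwlat_detector/sample
	                                             (blocks until a sample arrives,
	                                              printed as "<timestamp> <usecs>")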
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c7523c419c88..9834c3aecc0e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1152,6 +1152,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			See comment before ip2_setup() in
			drivers/char/ip2/ip2base.c.

+	irqaffinity=	[SMP] Set the default irq affinity mask
+			Format:
+			<cpu number>,...,<cpu number>
+			or
+			<cpu number>-<cpu number>
+			(must be a positive range in ascending order)
+			or a mixture
+			<cpu number>,...,<cpu number>-<cpu number>
+
	irqfixup	[HW]
			When an interrupt is not handled search all handlers
			for it. Intended to get systems with badly broken
@@ -2452,6 +2461,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

	sched_debug	[KNL] Enables verbose scheduler debug messages.

+	skew_tick=	[KNL] Offset the periodic timer tick per cpu to mitigate
+			xtime_lock contention on larger systems, and/or RCU lock
+			contention on all systems with CONFIG_MAXSMP set.
+			Format: { "0" | "1" }
+			0 -- disable. (may be set to 1 via CONFIG_CMDLINE="skew_tick=1")
+			1 -- enable.
+			Note: increases power consumption, thus should only be
+			enabled if running jitter-sensitive (HPC/RT) workloads.
+
	security=	[SECURITY] Choose a security module to enable at boot.
			If this boot parameter is not specified, only the first
			security module asking for security registration will be
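Since irqaffinity= and skew_tick= are ordinary boot parameters, they can be combined on the kernel command line like any others. The line below is a hypothetical example (root device and CPU numbers are invented for illustration): it confines the default irq affinity to CPUs 0 and 2-3 and enables the skewed tick.

	linux /vmlinuz root=/dev/sda1 irqaffinity=0,2-3 skew_tick=1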
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 642f84495b29..bd283ede08d3 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -57,10 +57,17 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
On other - If you know of the key combos for other architectures, please
           let me know so I can add them to this section.

-On all - write a character to /proc/sysrq-trigger. e.g.:
-
+On all - write a character to /proc/sysrq-trigger, e.g.:
		echo t > /proc/sysrq-trigger

+On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
+		echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
+	 Send an ICMP echo request with this pattern plus the particular
+	 SysRq command key. Example:
+		# ping -c1 -s57 -p0102030468
+	 will trigger the SysRq-H (help) command.
+
+
* What are the 'command' keys?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'b' - Will immediately reboot the system without syncing or unmounting
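The -p pattern in the example above is simply the four cookie bytes (01 02 03 04) followed by the ASCII code of the desired SysRq key; 0x68 is 'h'. A sketch for deriving the pattern for another key, assuming the cookie configured above (the target host name is a placeholder):

	# printf '%x\n' "'t"                 (prints 74, the ASCII code of 't')
	# ping -c1 -s57 -p0102030474 target  (triggers SysRq-T, show task state)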
diff --git a/Documentation/trace/histograms.txt b/Documentation/trace/histograms.txt
new file mode 100644
index 000000000000..6f2aeabf7faa
--- /dev/null
+++ b/Documentation/trace/histograms.txt
@@ -0,0 +1,186 @@
+		Using the Linux Kernel Latency Histograms
+
+
+This document gives a short explanation of how to enable, configure and use
+latency histograms. Latency histograms are primarily relevant in the
+context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
+and are used in the quality management of the Linux real-time
+capabilities.
+
+
+* Purpose of latency histograms
+
+A latency histogram continuously accumulates the frequencies of latency
+data. There are two types of histograms:
+- potential sources of latencies
+- effective latencies
+
+
+* Potential sources of latencies
+
+Potential sources of latencies are code segments where interrupts,
+preemption or both are disabled (aka critical sections). To create
+histograms of potential sources of latency, the kernel stores the time
+stamp at the start of a critical section, determines the time elapsed
+when the end of the section is reached, and increments the frequency
+counter of that latency value - irrespective of whether any concurrently
+running process is affected by that latency or not.
+- Configuration items (in the Kernel hacking/Tracers submenu)
+  CONFIG_INTERRUPT_OFF_LATENCY
+  CONFIG_PREEMPT_OFF_LATENCY
+
+
+* Effective latencies
+
+Effective latencies are those actually occurring during the wakeup of a
+process. To determine effective latencies, the kernel stores the time
+stamp when a process is scheduled to be woken up, and determines the
+duration of the wakeup time shortly before control is passed over to this
+process. Note that the apparent latency in user space may be somewhat
+longer, since the process may be interrupted after control is passed over
+to it but before the execution in user space takes place. Simply measuring
+the interval between enqueuing and wakeup may also not be appropriate in
+cases when a process is scheduled as a result of a timer expiration. The
+timer may have missed its deadline, e.g. due to disabled interrupts, but
+this latency would not be registered. Therefore, the offsets of missed
+timers are recorded in a separate histogram. If both wakeup latency and
+missed timer offsets are configured and enabled, a third histogram may be
+enabled that records the overall latency as a sum of the timer latency, if
+any, and the wakeup latency. This histogram is called "timerandwakeup".
+- Configuration items (in the Kernel hacking/Tracers submenu)
+  CONFIG_WAKEUP_LATENCY
+  CONFIG_MISSED_TIMER_OFFSETS
+
+
+* Usage
+
+The interface to the administration of the latency histograms is located
+in the debugfs file system. To mount it, either enter
+
+mount -t sysfs nodev /sys
+mount -t debugfs nodev /sys/kernel/debug
+
+from shell command line level, or add
+
+nodev	/sys			sysfs	defaults	0 0
+nodev	/sys/kernel/debug	debugfs	defaults	0 0
+
+to the file /etc/fstab. All latency histogram related files are then
+available in the directory /sys/kernel/debug/tracing/latency_hist. A
+particular histogram type is enabled by writing non-zero to the related
+variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
+Select "preemptirqsoff" for the histograms of potential sources of
+latencies and "wakeup" for histograms of effective latencies etc. The
+histogram data - one per CPU - are available in the files
+
+/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
+/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
+/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
+/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
+/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
+/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
+/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
+
+The histograms are reset by writing non-zero to the file "reset" in a
+particular latency directory. To reset all latency data, use
+
+#!/bin/sh
+
+TRACINGDIR=/sys/kernel/debug/tracing
+HISTDIR=$TRACINGDIR/latency_hist
+
+if test -d $HISTDIR
+then
+	cd $HISTDIR
+	for i in `find . | grep /reset$`
+	do
+		echo 1 >$i
+	done
+fi
+
+
+* Data format
+
+Latency data are stored with a resolution of one microsecond. The
+maximum latency is 10,240 microseconds. The data are only valid if the
+overflow register is empty. Every output line contains the latency in
+microseconds in the first column and the number of samples in the second
+column. To display only lines with a positive latency count, use, for
+example,
+
+grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
+
+#Minimum latency: 0 microseconds.
+#Average latency: 0 microseconds.
+#Maximum latency: 25 microseconds.
+#Total samples: 3104770694
+#There are 0 samples greater or equal than 10240 microseconds
+#usecs	samples
+    0	2984486876
+    1	49843506
+    2	58219047
+    3	5348126
+    4	2187960
+    5	3388262
+    6	959289
+    7	208294
+    8	40420
+    9	4485
+   10	14918
+   11	18340
+   12	25052
+   13	19455
+   14	5602
+   15	969
+   16	47
+   17	18
+   18	14
+   19	1
+   20	3
+   21	2
+   22	5
+   23	2
+   25	1
+
+
+* Wakeup latency of a selected process
+
+To only collect wakeup latency data of a particular process, write the
+PID of the requested process to
+
+/sys/kernel/debug/tracing/latency_hist/wakeup/pid
+
+PIDs are not considered if this variable is set to 0.
+
+
+* Details of the process with the highest wakeup latency so far
+
+Selected data of the process that suffered from the highest wakeup
+latency that occurred on a particular CPU are available in the file
+
+/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
+
+In addition, other relevant system data at the time when the
+latency occurred are given.
+
+The format of the data is (all in one line):
+<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
+<- <PID> <Priority> <Command> <Timestamp>
+
+The value of <Timeroffset> is only relevant in the combined timer
+and wakeup latency recording. In the wakeup recording, it is
+always 0; in the missed_timer_offsets recording, it is the same
+as <Latency>.
+
+When retrospectively searching for the origin of a latency and
+tracing was not enabled, it may be helpful to know the name and
+some basic data of the task that (finally) was switching to the
+late real-time task. In addition to the victim's data, the data
+of the possible culprit are therefore also displayed after the
+"<-" symbol.
+
+Finally, the timestamp of the time when the latency occurred
+in <seconds>.<microseconds> after the most recent system boot
+is provided.
+
+These data are also reset when the wakeup histogram is reset.
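As a concrete starting point, the commands below sketch one way to enable a histogram and read it back, using only paths named in the document above. The cyclictest PID is purely illustrative; any workload's PID works, and writing 0 restores system-wide accounting.

	# echo 1 >/sys/kernel/debug/tracing/latency_hist/enable/wakeup
	# echo $(pidof cyclictest) >/sys/kernel/debug/tracing/latency_hist/wakeup/pid
	# grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/wakeup/CPU0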
diff --git a/MAINTAINERS b/MAINTAINERS
index 5725829f6ebf..1b9477efb35b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3081,6 +3081,15 @@ L:	linuxppc-dev@lists.ozlabs.org
S:	Odd Fixes
F:	drivers/tty/hvc/

+HARDWARE LATENCY DETECTOR
+P:	Jon Masters
+M:	jcm@jonmasters.org
+W:	http://www.kernel.org/pub/linux/kernel/people/jcm/hwlat_detector/
+S:	Supported
+L:	linux-kernel@vger.kernel.org
+F:	Documentation/hwlat_detector.txt
+F:	drivers/misc/hwlat_detector.c
+
HARDWARE MONITORING
M:	Jean Delvare <khali@linux-fr.org>
M:	Guenter Roeck <guenter.roeck@ericsson.com>
diff --git a/arch/Kconfig b/arch/Kconfig
index 684eb5af439d..417ff4c6fc14 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -6,6 +6,7 @@ config OPROFILE
	tristate "OProfile system profiling"
	depends on PROFILING
	depends on HAVE_OPROFILE
+	depends on !PREEMPT_RT_FULL
	select RING_BUFFER
	select RING_BUFFER_ALLOW_SWAP
	help
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index f153733fb938..e6ea9da573e5 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -106,7 +106,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,

	/* If we're in an interrupt context, or have no user context,
	   we must not take the fault.  */
-	if (!mm || in_atomic())
+	if (!mm || pagefault_disabled())
		goto no_context;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index eeba63dede9c..f40b83fd91d8 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -30,6 +30,7 @@ config ARM
	select HAVE_C_RECORDMCOUNT
	select HAVE_GENERIC_HARDIRQS
	select GENERIC_IRQ_SHOW
+	select IRQ_FORCED_THREADING
	select CPU_PM if (SUSPEND || CPU_IDLE)
	select GENERIC_PCI_IOMAP
	select HAVE_BPF_JIT if NET
@@ -1734,7 +1735,7 @@ config HAVE_ARCH_PFN_VALID

config HIGHMEM
	bool "High Memory Support"
-	depends on MMU
+	depends on MMU && !PREEMPT_RT_FULL
	help
	  The address space of ARM processors is only 4 Gigabytes large
	  and it has to accommodate user address space, kernel address
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index d41d7cbf0ada..249dfc34a36f 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -127,6 +127,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size

#else	/* min ARCH >= ARMv6 */

+#define __HAVE_ARCH_CMPXCHG 1
+
extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 7be54690aeec..3d1ae210c4b5 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -94,6 +94,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

+	preempt_disable_rt();
+
	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
	"1:	" TUSER(ldr) "	%1, [%4]\n"
	"	teq	%1, %2\n"
@@ -105,6 +107,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
	: "cc", "memory");

	*uval = val;
+
+	preempt_enable_rt();
	return ret;
}

diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c
index 85aa2b292692..43076536965c 100644
--- a/arch/arm/kernel/early_printk.c
+++ b/arch/arm/kernel/early_printk.c
@@ -29,28 +29,17 @@ static void early_console_write(struct console *con, const char *s, unsigned n)
	early_write(s, n);
}

-static struct console early_console = {
+static struct console early_console_dev = {
	.name =		"earlycon",
	.write =	early_console_write,
	.flags =	CON_PRINTBUFFER | CON_BOOT,
	.index =	-1,
};

-asmlinkage void early_printk(const char *fmt, ...)
-{
-	char buf[512];
-	int n;
-	va_list ap;
-
-	va_start(ap, fmt);
-	n = vscnprintf(buf, sizeof(buf), fmt, ap);
-	early_write(buf, n);
-	va_end(ap);
-}
-
static int __init setup_early_printk(char *buf)
{
-	register_console(&early_console);
+	early_console = &early_console_dev;
+	register_console(&early_console_dev);
	return 0;
}

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index ac7306666418..b7a4871ff32f 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -444,7 +444,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
	}

	err = request_irq(irq, handle_irq,
-			  IRQF_DISABLED | IRQF_NOBALANCING,
+			  IRQF_NOBALANCING | IRQF_NO_THREAD,
			  "arm-pmu", armpmu);
	if (err) {
		pr_err("unable to request IRQ%d for ARM PMU counters\n",
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index e09e5211bace..177a1776c4bf 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -531,6 +531,31 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
}

#ifdef CONFIG_MMU
+
+/*
+ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
+ * initialized by pgtable_page_ctor() then a coredump of the vector page will
+ * fail.
+ */
+static int __init vectors_user_mapping_init_page(void)
+{
+	struct page *page;
+	unsigned long addr = 0xffff0000;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset_k(addr);
+	pud = pud_offset(pgd, addr);
+	pmd = pmd_offset(pud, addr);
+	page = pmd_page(*(pmd));
+
+	pgtable_page_ctor(page);
+
+	return 0;
+}
+late_initcall(vectors_user_mapping_init_page);
+
/*
 * The vectors page is always readable from user space for the
 * atomic helpers and the signal restart code. Insert it into the
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index d68d1b694680..13db45b9c135 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -617,6 +617,9 @@ static void do_signal(struct pt_regs *regs, int syscall)
	if (!user_mode(regs))
		return;

+	local_irq_enable();
+	preempt_check_resched();
+
	/*
	 * If we were from a system call, check for system call restarting...
	 */
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 00df012c4678..bbafc675c015 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -87,7 +87,7 @@ extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];

-static DEFINE_SPINLOCK(unwind_lock);
+static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);

/* Convert a prel31 symbol to an absolute address */
@@ -195,7 +195,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
		/* module unwind tables */
		struct unwind_table *table;

-		spin_lock_irqsave(&unwind_lock, flags);
+		raw_spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->begin_addr &&
			    addr < table->end_addr) {
@@ -207,7 +207,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
				break;
			}
		}
-		spin_unlock_irqrestore(&unwind_lock, flags);
+		raw_spin_unlock_irqrestore(&unwind_lock, flags);
	}

	pr_debug("%s: idx = %p\n", __func__, idx);
@@ -469,9 +469,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
	tab->begin_addr = text_addr;
	tab->end_addr = text_addr + text_size;

-	spin_lock_irqsave(&unwind_lock, flags);
+	raw_spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&tab->list, &unwind_tables);
-	spin_unlock_irqrestore(&unwind_lock, flags);
+	raw_spin_unlock_irqrestore(&unwind_lock, flags);

	return tab;
}
@@ -483,9 +483,9 @@ void unwind_table_del(struct unwind_table *tab)
	if (!tab)
		return;

-	spin_lock_irqsave(&unwind_lock, flags);
+	raw_spin_lock_irqsave(&unwind_lock, flags);
	list_del(&tab->list);
-	spin_unlock_irqrestore(&unwind_lock, flags);
+	raw_spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(tab);
}
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 104ca40d8d18..49aea48530e6 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -130,6 +130,7 @@ clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev)
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
+		remove_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
	case CLOCK_EVT_MODE_RESUME:
		irqmask = 0;
		break;
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index a94758b42737..dd300f35385f 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -67,7 +67,7 @@ static struct clocksource pit_clk = {
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

-
+static struct irqaction at91sam926x_pit_irq;
/*
 * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
 */
@@ -76,6 +76,8 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
+		/* Set up irq handler */
+		setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
		/* update clocksource counter */
		pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR));
		pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
@@ -88,6 +90,7 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
	case CLOCK_EVT_MODE_UNUSED:
		/* disable irq, leaving the clocksource active */
		pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
+		remove_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 36c3984aaa47..77499ea6b2a8 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -62,7 +62,7 @@ static void __iomem *scu_base_addr(void)
	return (void __iomem *)(S5P_VA_SCU);
}

-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);

void __cpuinit platform_secondary_init(unsigned int cpu)
{
@@ -82,8 +82,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
	/*
	 * Synchronise with the boot thread.
	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -94,7 +94,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
@@ -123,7 +123,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)

		if (timeout == 0) {
			printk(KERN_ERR "cpu1 power enable failed");
-			spin_unlock(&boot_lock);
+			raw_spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}
@@ -151,7 +151,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index db0117ec55f4..87daf5f78054 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -40,7 +40,7 @@ extern void msm_secondary_startup(void);
 */
volatile int pen_release = -1;

-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);

static inline int get_core_count(void)
{
@@ -70,8 +70,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
	/*
	 * Synchronise with the boot thread.
	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
}

static __cpuinit void prepare_cold_cpu(unsigned int cpu)
@@ -108,7 +108,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
@@ -142,7 +142,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index deffbf1c9627..81ca6762c063 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -34,7 +34,7 @@
/* SCU base address */
static void __iomem *scu_base;

-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);

void __iomem *omap4_get_scu_base(void)
{
@@ -65,8 +65,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
	/*
	 * Synchronise with the boot thread.
	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -77,7 +77,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);

	/*
	 * Update the AuxCoreBoot0 with boot state for secondary core.
@@ -117,7 +117,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);

	return 0;
}
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 42cd7fb52414..dbc29144c921 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -43,7 +43,7 @@

static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
-static DEFINE_SPINLOCK(wakeupgen_lock);
+static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[NR_IRQS];

/*
@@ -128,9 +128,9 @@ static void wakeupgen_mask(struct irq_data *d)
{
	unsigned long flags;

-	spin_lock_irqsave(&wakeupgen_lock, flags);
+	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_clear(d->irq, irq_target_cpu[d->irq]);
-	spin_unlock_irqrestore(&wakeupgen_lock, flags);
+	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}

/*
@@ -140,9 +140,9 @@ static void wakeupgen_unmask(struct irq_data *d)
{
	unsigned long flags;

-	spin_lock_irqsave(&wakeupgen_lock, flags);
+	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_set(d->irq, irq_target_cpu[d->irq]);
-	spin_unlock_irqrestore(&wakeupgen_lock, flags);
+	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
@@ -183,7 +183,7 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	unsigned long flags;

-	spin_lock_irqsave(&wakeupgen_lock, flags);
+	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	if (set) {
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
@@ -191,7 +191,7 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
-	spin_unlock_irqrestore(&wakeupgen_lock, flags);
+	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
#endif

diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index eff5842f6232..acc9da268bd0 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -58,7 +58,7 @@ static void __iomem *scu_base_addr(void)
	return NULL;
}

-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);

void __cpuinit platform_secondary_init(unsigned int cpu)
{
@@ -78,8 +78,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
	/*
	 * Synchronise with the boot thread.
	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -90,7 +90,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
@@ -111,7 +111,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5bb48356d217..d28a0fa4eddb 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -279,7 +279,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
	/*
	 * If we're in an interrupt, or have no irqs, or have no user
	 * context, we must not take the fault..
	 */
-	if (in_atomic() || irqs_disabled() || !mm)
+	if (irqs_disabled() || !mm || pagefault_disabled())
		goto no_context;

	/*
@@ -427,6 +427,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

+	if (interrupts_enabled(regs))
+		local_irq_enable();
+
	if (user_mode(regs))
		goto bad_area;

@@ -496,6 +499,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
+	if (interrupts_enabled(regs))
+		local_irq_enable();
+
	do_bad_area(addr, fsr, regs);
	return 0;
}
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index 49c7db48c7f1..1f7a3d26d0a3 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -38,7 +38,7 @@ static void __cpuinit write_pen_release(int val)
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}

-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);

void __cpuinit platform_secondary_init(unsigned int cpu)
{
@@ -58,8 +58,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
	/*
	 * Synchronise with the boot thread.
	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -70,7 +70,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
@@ -100,7 +100,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 632b649c7be0..fe16234be8af 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
	 * If we're in an interrupt or have no user context, we must
	 * not take the fault...
	 */
-	if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
+	if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled())
		goto no_context;

	local_irq_enable();
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index 84ed8375113c..61fbd2de993d 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -25,8 +25,6 @@ extern struct console *bfin_earlyserial_init(unsigned int port,
extern struct console *bfin_jc_early_init(void);
#endif

-static struct console *early_console;
-
/* Default console */
#define DEFAULT_PORT 0
#define DEFAULT_CFLAG CS8|B57600
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index b07c00c01696..e49db4916107 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -112,7 +112,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
	 * user context, we must not take the fault.
	 */

-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
		goto no_context;

	down_read(&mm->mmap_sem);
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 854549617dd2..5867e7d222e9 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
		goto no_context;

	down_read(&mm->mmap_sem);
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 3c820ea4212f..dd23e7ffe678 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -88,7 +88,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
	/*
	 * If we're in an interrupt or have no user context, we must not take the fault..
	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 06827fc92bbf..a2ab20703bef 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
	 * If we're in an interrupt or have no user context or are running in an
	 * atomic region then we must not take the fault..
	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 8646409adf5b..52335842e945 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -84,7 +84,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
		goto no_context;

	down_read(&mm->mmap_sem);
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index aba1f9a97d5d..b099a86571cb 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -21,7 +21,6 @@
#include <asm/setup.h>
#include <asm/prom.h>

-static u32 early_console_initialized;
static u32 base_addr;

#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
@@ -109,27 +108,11 @@ static struct console early_serial_uart16550_console = {
};
#endif /* CONFIG_SERIAL_8250_CONSOLE */

-static struct console *early_console;
-
-void early_printk(const char *fmt, ...)
-{
-	char buf[512];
-	int n;
-	va_list ap;
-
-	if (early_console_initialized) {
-		va_start(ap, fmt);
-		n = vscnprintf(buf, 512, fmt, ap);
-		early_console->write(early_console, buf, n);
-		va_end(ap);
-	}
-}
-
int __init setup_early_printk(char *opt)
{
	int version = 0;

-	if (early_console_initialized)
+	if (early_console)
		return 1;

	base_addr = of_early_console(&version);
@@ -159,7 +142,6 @@ int __init setup_early_printk(char *opt)
	}

	register_console(early_console);
-	early_console_initialized = 1;
	return 0;
}
	return 1;
@@ -169,7 +151,7 @@ int __init setup_early_printk(char *opt)
 * only for early console because of performance degression */
void __init remap_early_printk(void)
{
-	if (!early_console_initialized || !early_console)
+	if (!early_console)
		return;
	printk(KERN_INFO "early_printk_console remapping from 0x%x to ",
		base_addr);
@@ -195,9 +177,9 @@ void __init remap_early_printk(void)

void __init disable_early_printk(void)
{
-	if (!early_console_initialized || !early_console)
+	if (!early_console)
		return;
	printk(KERN_WARNING "disabling early console\n");
	unregister_console(early_console);
-	early_console_initialized = 0;
+	early_console = NULL;
}
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index c7fe3da48593..8e797540de8e 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
	if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
		is_write = 0;

-	if (unlikely(in_atomic() || !mm)) {
+	if (unlikely(!mm || pagefault_disabled())) {
		if (kernel_mode(regs))
			goto bad_area_nosemaphore;

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7d36f6efe1df..62090aa77199 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2082,7 +2082,7 @@ config CPU_R4400_WORKAROUNDS
#
config HIGHMEM
	bool "High Memory Support"
-	depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM
+	depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL

config CPU_SUPPORTS_HIGHMEM
	bool
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 97e7ce9b50ed..4b93048044eb 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -257,8 +257,6 @@ DEFINE_PER_CPU(int, cpu_state);

extern void fixup_irqs(void);

-static DEFINE_SPINLOCK(smp_reserve_lock);
-
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
@@ -266,8 +264,6 @@ static int octeon_cpu_disable(void)
	if (cpu == 0)
		return -EBUSY;

-	spin_lock(&smp_reserve_lock);
-
	set_cpu_online(cpu, false);
	cpu_clear(cpu, cpu_callin_map);
	local_irq_disable();
@@ -277,8 +273,6 @@ static int octeon_cpu_disable(void)
	flush_cache_all();
	local_flush_tlb_all();

-	spin_unlock(&smp_reserve_lock);
-
	return 0;
}

diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
index 9ae813eb782e..973c995c0a86 100644
--- a/arch/mips/kernel/early_printk.c
+++ b/arch/mips/kernel/early_printk.c
@@ -25,20 +25,18 @@ early_console_write(struct console *con, const char *s, unsigned n)
	}
}

-static struct console early_console __initdata = {
+static struct console early_console_prom = {
	.name	= "early",
	.write	= early_console_write,
	.flags	= CON_PRINTBUFFER | CON_BOOT,
	.index	= -1
};

-static int early_console_initialized __initdata;
-
void __init setup_early_printk(void)
{
-	if (early_console_initialized)
+	if (early_console)
		return;
-	early_console_initialized = 1;
+	early_console = &early_console_prom;

-	register_console(&early_console);
+	register_console(&early_console_prom);
}
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index d5a338a1739c..ab4e20a44be3 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -588,6 +588,9 @@ static void do_signal(struct pt_regs *regs)
	if (!user_mode(regs))
		return;

+	local_irq_enable();
+	preempt_check_resched();
+
	if (test_thread_flag(TIF_RESTORE_SIGMASK))
		oldset = &current->saved_sigmask;
	else
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 5c9ba6a536d1..27e46a1f0148 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -89,7 +89,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
		goto bad_area_nosemaphore;

retry:
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index eb411b3cd7e9..f02930ddb5fb 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -167,7 +167,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
		goto no_context;

	down_read(&mm->mmap_sem);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index a9b765a999ef..34a5c3713587 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
	unsigned long acc_type;
	int fault;

-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
		goto no_context;

	down_read(&mm->mmap_sem);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b17dd693f740..0f4ed359000d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -69,10 +69,11 @@ config LOCKDEP_SUPPORT

config RWSEM_GENERIC_SPINLOCK
	bool
+	default y if PREEMPT_RT_FULL

config RWSEM_XCHGADD_ALGORITHM
	bool
-	default y
+	default y if !PREEMPT_RT_FULL

config GENERIC_LOCKBREAK
	bool
@@ -282,7 +283,7 @@ menu "Kernel options"

config HIGHMEM
	bool "High memory support"
-	depends on PPC32
+	depends on PPC32 && !PREEMPT_RT_FULL

source kernel/time/Kconfig
source kernel/Kconfig.hz
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 071592bbe025..bdb837d982bc 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -584,6 +584,7 @@ void irq_ctx_init(void)
	}
}

+#ifndef CONFIG_PREEMPT_RT_FULL
static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
@@ -620,6 +621,7 @@ void do_softirq(void)

	local_irq_restore(flags);
}
+#endif

irq_hw_number_t virq_to_hw(unsigned int virq)
{
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 7cd07b42ca1a..46c607300b29 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -36,6 +36,7 @@

	.text

+#ifndef CONFIG_PREEMPT_RT_FULL
_GLOBAL(call_do_softirq)
	mflr	r0
	stw	r0,4(r1)
@@ -46,6 +47,7 @@ _GLOBAL(call_do_softirq)
	lwz	r0,4(r1)
	mtlr	r0
	blr
+#endif

_GLOBAL(call_handle_irq)
	mflr	r0
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 616921ef1439..2961d7521726 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -29,6 +29,7 @@

	.text

+#ifndef CONFIG_PREEMPT_RT_FULL
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)
@@ -39,6 +40,7 @@ _GLOBAL(call_do_softirq)
	ld	r0,16(r1)
	mtlr	r0
	blr
+#endif

_GLOBAL(call_handle_irq)
	ld	r8,0(r6)
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index c39c1ca77f46..8b00aab452bf 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -179,15 +179,13 @@ static struct console udbg_console = {
	.index	= 0,
};

-static int early_console_initialized;
-
/*
 * Called by setup_system after ppc_md->probe and ppc_md->early_init.
 * Call it again after setting udbg_putc in ppc_md->setup_arch.
 */
void __init register_early_udbg_console(void)
{
-	if (early_console_initialized)
+	if (early_console)
		return;

	if (!udbg_putc)
@@ -197,7 +195,7 @@ void __init register_early_udbg_console(void)
		printk(KERN_INFO "early console immortal !\n");
		udbg_console.flags &= ~CON_BOOT;
	}
-	early_console_initialized = 1;
+	early_console = &udbg_console;
	register_console(&udbg_console);
}

diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 3d30a4af627f..34c1b2b80ced 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

-	if (in_atomic() || mm == NULL) {
+	if (!mm || pagefault_disabled()) {
		if (!user_mode(regs))
			return SIGSEGV;
		/* in_atomic() in user mode is really bad,
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index 1e121088826f..806cbbd86ec6 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -43,6 +43,7 @@ static irqreturn_t timebase_interrupt(int irq, void *dev)

static struct irqaction tbint_irqaction = {
	.handler = timebase_interrupt,
+	.flags = IRQF_NO_THREAD,
	.name = "tbint",
};

diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index d4fa03f2b6ac..5e6ff38ea69f 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -120,6 +120,7 @@ static irqreturn_t cpm_error_interrupt(int irq, void *dev)

static struct irqaction cpm_error_irqaction = {
	.handler = cpm_error_interrupt,
+	.flags = IRQF_NO_THREAD,
	.name = "error",
};

diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 68ba1bb834bb..6bc1afa6afdc 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -289,7 +289,8 @@ static inline int do_exception(struct pt_regs *regs, int access)
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
-	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
+	if (unlikely(!user_space_fault(trans_exc_code) ||
+		     !mm || pagefault_disabled()))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
@@ -421,7 +422,8 @@ void __kprobes do_asce_exception(struct pt_regs *regs)
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long;
-	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
+	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm ||
+		     pagefault_disabled()))
		goto no_context;

	down_read(&mm->mmap_sem);
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index b3744ca70be6..7dce7429fd47 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index a3ee91971129..9127bc05d629 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu)
	hardirq_ctx[cpu] = NULL;
}

+#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void)
{
	unsigned long flags;
@@ -191,6 +192,7 @@ asmlinkage void do_softirq(void)

	local_irq_restore(flags);
}
+#endif
#else
static inline void handle_one_irq(unsigned int irq)
{
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index 47475cca068a..a5b51b9d7338 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -144,8 +144,6 @@ static struct console bios_console = {
	.index		= -1,
};

-static struct console *early_console;
-
static int __init setup_early_printk(char *buf)
{
	int keep_early = 0;
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index a4919631ce03..df6cc6e1d918 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -166,7 +166,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
		goto no_context;

	down_read(&mm->mmap_sem);
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index dff2c3d7d370..618d4c2553ba 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -698,6 +698,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
	set_irq_regs(old_regs);
}

+#ifndef CONFIG_PREEMPT_RT_FULL
void do_softirq(void)
{
	unsigned long flags;
@@ -723,6 +724,7 @@ void do_softirq(void)

	local_irq_restore(flags);
}
+#endif

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 741df916c124..ca73a288e179 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -65,7 +65,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
	err = -ENODEV;

	mutex_lock(&of_set_property_mutex);
-	write_lock(&devtree_lock);
+	raw_spin_lock(&devtree_lock);
	prevp = &dp->properties;
	while (*prevp) {
		struct property *prop = *prevp;
@@ -92,7 +92,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
		}
		prevp = &(*prevp)->next;
	}
-	write_unlock(&devtree_lock);
+	raw_spin_unlock(&devtree_lock);
	mutex_unlock(&of_set_property_mutex);

	/* XXX Upate procfs if necessary... */
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index d444468b27f6..a000aa5ca82e 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -221,6 +221,7 @@ void __init setup_arch(char **cmdline_p)

	boot_flags_init(*cmdline_p);

+	early_console = &prom_early_console;
	register_console(&prom_early_console);

	/* Set sparc_cpu_model */
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 1414d16712b2..8b37e5a5319d 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -487,6 +487,12 @@ static void __init init_sparc64_elf_hwcap(void)
		popc_patch();
}

+static inline void register_prom_console(void)
+{
+	early_console = &prom_early_console;
+	register_console(&prom_early_console);
+}
+
void __init setup_arch(char **cmdline_p)
{
	/* Initialize PROM console and command line. */
@@ -498,7 +504,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_EARLYFB
	if (btext_find_display())
#endif
-		register_console(&prom_early_console);
+		register_prom_console();

	if (tlb_type == hypervisor)
		printk("ARCH: SUN4V\n");
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 5c6238d72076..5f96e95e16bf 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -248,8 +248,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
-	if (in_atomic() || !mm)
-		goto no_context;
+	if (!mm || pagefault_disabled())
+		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 0dc7b9094a29..7550ba602e69 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -331,7 +331,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
		goto intr_or_no_mm;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
|
|
index afb9c9a0d887..ff252201e14a 100644
|
|
--- a/arch/tile/kernel/early_printk.c
|
|
+++ b/arch/tile/kernel/early_printk.c
|
|
@@ -33,25 +33,8 @@ static struct console early_hv_console = {
|
|
};
|
|
|
|
/* Direct interface for emergencies */
|
|
-static struct console *early_console = &early_hv_console;
|
|
-static int early_console_initialized;
|
|
static int early_console_complete;
|
|
|
|
-static void early_vprintk(const char *fmt, va_list ap)
|
|
-{
|
|
- char buf[512];
|
|
- int n = vscnprintf(buf, sizeof(buf), fmt, ap);
|
|
- early_console->write(early_console, buf, n);
|
|
-}
|
|
-
|
|
-void early_printk(const char *fmt, ...)
|
|
-{
|
|
- va_list ap;
|
|
- va_start(ap, fmt);
|
|
- early_vprintk(fmt, ap);
|
|
- va_end(ap);
|
|
-}
|
|
-
|
|
void early_panic(const char *fmt, ...)
|
|
{
|
|
va_list ap;
|
|
@@ -69,14 +52,13 @@ static int __initdata keep_early;
|
|
|
|
static int __init setup_early_printk(char *str)
|
|
{
|
|
- if (early_console_initialized)
|
|
+ if (early_console)
|
|
return 1;
|
|
|
|
if (str != NULL && strncmp(str, "keep", 4) == 0)
|
|
keep_early = 1;
|
|
|
|
early_console = &early_hv_console;
|
|
- early_console_initialized = 1;
|
|
register_console(early_console);
|
|
|
|
return 0;
|
|
@@ -85,12 +67,12 @@ static int __init setup_early_printk(char *str)
|
|
void __init disable_early_printk(void)
|
|
{
|
|
early_console_complete = 1;
|
|
- if (!early_console_initialized || !early_console)
|
|
+ if (!early_console)
|
|
return;
|
|
if (!keep_early) {
|
|
early_printk("disabling early console\n");
|
|
unregister_console(early_console);
|
|
- early_console_initialized = 0;
|
|
+ early_console = NULL;
|
|
} else {
|
|
early_printk("keeping early console\n");
|
|
}
|
|
@@ -98,7 +80,7 @@ void __init disable_early_printk(void)
|
|
|
|
void warn_early_printk(void)
|
|
{
|
|
- if (early_console_complete || early_console_initialized)
|
|
+ if (early_console_complete || early_console)
|
|
return;
|
|
early_printk("\
|
|
Machine shutting down before console output is fully initialized.\n\
|
|
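
The early-console hunks above and below all converge on one idiom: the shared early_console pointer doubles as the "already initialized" flag, replacing each arch's private early_console_initialized. A minimal sketch of that idiom, assuming early_console is the global this series declares in <linux/console.h> (the console name and write hook here are made up):

#include <linux/console.h>
#include <linux/init.h>

/* Sketch only: my_early_write() and early_my_console are hypothetical. */
static void my_early_write(struct console *con, const char *s, unsigned n)
{
	/* platform-specific polled output would go here */
}

static struct console early_my_console = {
	.name	= "earlymy",
	.write	= my_early_write,
	.flags	= CON_PRINTBUFFER | CON_BOOT,
	.index	= -1,
};

static int __init setup_my_early_printk(char *buf)
{
	if (early_console)	/* the pointer doubles as the init flag */
		return 0;

	early_console = &early_my_console;
	register_console(&early_my_console);
	return 0;
}
early_param("myearlyprintk", setup_my_early_printk);
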
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index a362926d513a..32cd58d3b851 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -355,7 +355,7 @@ static int handle_page_fault(struct pt_regs *regs,
* If we're in an interrupt, have no user context or are running in an
* atomic region then we must not take the fault.
*/
- if (in_atomic() || !mm) {
+ if (!mm || pagefault_disabled()) {
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
diff --git a/arch/um/kernel/early_printk.c b/arch/um/kernel/early_printk.c
index ec649bf72f68..183060f76a11 100644
--- a/arch/um/kernel/early_printk.c
+++ b/arch/um/kernel/early_printk.c
@@ -16,7 +16,7 @@ static void early_console_write(struct console *con, const char *s, unsigned int
um_early_printk(s, n);
}

-static struct console early_console = {
+static struct console early_console_dev = {
.name = "earlycon",
.write = early_console_write,
.flags = CON_BOOT,
@@ -25,8 +25,10 @@ static struct console early_console = {

static int __init setup_early_printk(char *buf)
{
- register_console(&early_console);
-
+ if (!early_console) {
+ early_console = &early_console_dev;
+ register_console(&early_console_dev);
+ }
return 0;
}

diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index f79ffc901d0e..846737e5dc05 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -37,7 +37,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
* If the fault was during atomic operation, don't take the fault, just
* fail.
*/
- if (in_atomic())
+ if (!mm || pagefault_disabled())
goto out_nosemaphore;

down_read(&mm->mmap_sem);
diff --git a/arch/unicore32/kernel/early_printk.c b/arch/unicore32/kernel/early_printk.c
index 3922255f1fa8..9be0d5d02a9a 100644
--- a/arch/unicore32/kernel/early_printk.c
+++ b/arch/unicore32/kernel/early_printk.c
@@ -33,21 +33,17 @@ static struct console early_ocd_console = {
.index = -1,
};

-/* Direct interface for emergencies */
-static struct console *early_console = &early_ocd_console;
-
-static int __initdata keep_early;
-
static int __init setup_early_printk(char *buf)
{
- if (!buf)
+ int keep_early;
+
+ if (!buf || early_console)
return 0;

if (strstr(buf, "keep"))
keep_early = 1;

- if (!strncmp(buf, "ocd", 3))
- early_console = &early_ocd_console;
+ early_console = &early_ocd_console;

if (keep_early)
early_console->flags &= ~CON_BOOT;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index eda8cb92702f..b440b04d39f9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -155,10 +155,10 @@ config ARCH_MAY_HAVE_PC_FDC
def_bool ISA_DMA_API

config RWSEM_GENERIC_SPINLOCK
- def_bool !X86_XADD
+ def_bool !X86_XADD || PREEMPT_RT_FULL

config RWSEM_XCHGADD_ALGORITHM
- def_bool X86_XADD
+ def_bool X86_XADD && !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL

config ARCH_HAS_CPU_IDLE_WAIT
def_bool y
@@ -750,7 +750,7 @@ config IOMMU_HELPER
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
- select CPUMASK_OFFSTACK
+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
---help---
Enable maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 51afabd65cbf..ee4d663af29f 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -290,14 +290,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK);
+ nbytes & AES_BLOCK_MASK);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- kernel_fpu_end();

return err;
}
@@ -314,14 +314,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- kernel_fpu_end();

return err;
}
@@ -360,14 +360,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- kernel_fpu_end();

return err;
}
@@ -384,14 +384,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- kernel_fpu_end();

return err;
}
@@ -446,18 +446,20 @@ static int ctr_crypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

- kernel_fpu_begin();
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ kernel_fpu_begin();
aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
if (walk.nbytes) {
+ kernel_fpu_begin();
ctr_crypt_final(ctx, &walk);
+ kernel_fpu_end();
err = blkcipher_walk_done(desc, &walk, 0);
}
- kernel_fpu_end();

return err;
}
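
The aesni hunks above all apply one transformation: kernel_fpu_begin()/kernel_fpu_end() moves inside the walk loop, so the preempt-disabled FPU section covers a single blkcipher step instead of the whole request. A reduced sketch of that shape, assuming a hypothetical cipher step process_chunk() and the usual blkcipher walk API:

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <asm/i387.h>		/* kernel_fpu_begin()/kernel_fpu_end() */

/* Sketch: one short FPU (and thus non-preemptible) section per chunk. */
static int walk_and_crypt(struct blkcipher_desc *desc,
			  struct blkcipher_walk *walk)
{
	unsigned int nbytes;
	int err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		kernel_fpu_begin();
		process_chunk(walk->dst.virt.addr, walk->src.virt.addr,
			      nbytes & ~(AES_BLOCK_SIZE - 1));	/* hypothetical */
		kernel_fpu_end();
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, walk, nbytes);
	}
	return err;
}

The trade-off is more begin/end transitions per request in exchange for a bounded preemption-off window, which is what RT cares about.
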
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 610001d385dd..c1c23d256ac6 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -51,8 +51,8 @@

#define ACPI_ASM_MACROS
#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS() local_irq_enable()
+#define ACPI_DISABLE_IRQS() local_irq_disable_nort()
+#define ACPI_ENABLE_IRQS() local_irq_enable_nort()
#define ACPI_FLUSH_CPU_CACHE() wbinvd()

int __acpi_acquire_global_lock(unsigned int *lock);
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 6cbc795b7c58..76475dcea30e 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -31,6 +31,19 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;

+/*
+ * Because some traps use the IST stack, we must keep
+ * preemption disabled while calling do_trap(), but do_trap()
+ * may call force_sig_info() which will grab the signal spin_locks
+ * for the task, which in PREEMPT_RT_FULL are mutexes.
+ * By defining ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will
+ * set TIF_NOTIFY_RESUME and set up the signal to be sent on exit
+ * of the trap.
+ */
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64)
+#define ARCH_RT_DELAYS_SIGNAL_SEND
+#endif
+
#else
/* Here we must cater to libcs that poke about in kernel headers. */

diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index b5d9533d2c38..0f3d7b1cf9fb 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -57,7 +57,7 @@
*/
static __always_inline void boot_init_stack_canary(void)
{
- u64 canary;
+ u64 uninitialized_var(canary);
u64 tsc;

#ifdef CONFIG_X86_64
@@ -68,8 +68,16 @@ static __always_inline void boot_init_stack_canary(void)
* of randomness. The TSC only matters for very early init,
* there it already has some randomness on most systems. Later
* on during the bootup the random pool has true entropy too.
+ *
+ * For preempt-rt we need to weaken the randomness a bit, as
+ * we can't call into the random generator from atomic context
+ * due to locking constraints. We just leave canary
+ * uninitialized and use the TSC based randomness on top of
+ * it.
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
get_random_bytes(&canary, sizeof(canary));
+#endif
tsc = __native_read_tsc();
canary += tsc + (tsc << 32UL);

diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 6149b476d9df..ca5cfc1a3c6e 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -602,8 +602,8 @@ struct bau_control {
unsigned short uvhub_quiesce;
short socket_acknowledge_count[DEST_Q_SIZE];
cycles_t send_message;
- spinlock_t uvhub_lock;
- spinlock_t queue_lock;
+ raw_spinlock_t uvhub_lock;
+ raw_spinlock_t queue_lock;
/* tunables */
int max_concurr;
int max_concurr_const;
@@ -760,15 +760,15 @@ static inline int atom_asr(short i, struct atomic_short *v)
* to be lowered below the current 'v'. atomic_add_unless can only stop
* on equal.
*/
-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
{
- spin_lock(lock);
+ raw_spin_lock(lock);
if (atomic_read(v) >= u) {
- spin_unlock(lock);
+ raw_spin_unlock(lock);
return 0;
}
atomic_inc(v);
- spin_unlock(lock);
+ raw_spin_unlock(lock);
return 1;
}
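
Since atomic_inc_unless_ge() must work in contexts where RT's sleeping spinlocks are forbidden, its lock parameter becomes a raw_spinlock_t. A small self-contained usage sketch of the same bounded-increment pattern (names here are illustrative):

#include <linux/spinlock.h>
#include <linux/atomic.h>

static DEFINE_RAW_SPINLOCK(count_lock);
static atomic_t in_flight = ATOMIC_INIT(0);

/* Sketch: admit a caller only while in_flight stays below max_concurr,
 * mirroring the helper rewritten above. Returns 1 on success.
 */
static int try_admit(int max_concurr)
{
	raw_spin_lock(&count_lock);
	if (atomic_read(&in_flight) >= max_concurr) {
		raw_spin_unlock(&count_lock);
		return 0;
	}
	atomic_inc(&in_flight);
	raw_spin_unlock(&count_lock);
	return 1;
}
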
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 21f7385badb8..953f5c666ee0 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -475,7 +475,7 @@ struct uv_blade_info {
unsigned short nr_online_cpus;
unsigned short pnode;
short memory_nid;
- spinlock_t nmi_lock;
+ raw_spinlock_t nmi_lock;
unsigned long nmi_count;
};
extern struct uv_blade_info *uv_blade_info;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index e88300d8e80a..01c76a0f2845 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2555,7 +2555,8 @@ atomic_t irq_mis_count;
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
/* If we are moving the irq we need to mask it */
- if (unlikely(irqd_is_setaffinity_pending(data))) {
+ if (unlikely(irqd_is_setaffinity_pending(data) &&
+ !irqd_irq_inprogress(data))) {
mask_ioapic(cfg);
return true;
}
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 87bfa69e216e..8e56e4f72694 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -56,7 +56,7 @@ int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
unsigned int uv_apicid_hibits;
EXPORT_SYMBOL_GPL(uv_apicid_hibits);
-static DEFINE_SPINLOCK(uv_nmi_lock);
+static DEFINE_RAW_SPINLOCK(uv_nmi_lock);

static struct apic apic_x2apic_uv_x;

@@ -695,13 +695,13 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);

if (unlikely(real_uv_nmi)) {
- spin_lock(&uv_blade_info[bid].nmi_lock);
+ raw_spin_lock(&uv_blade_info[bid].nmi_lock);
real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
if (real_uv_nmi) {
uv_blade_info[bid].nmi_count++;
uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
}
- spin_unlock(&uv_blade_info[bid].nmi_lock);
+ raw_spin_unlock(&uv_blade_info[bid].nmi_lock);
}

if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
@@ -713,10 +713,10 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
* Use a lock so only one cpu prints at a time.
* This prevents intermixed output.
*/
- spin_lock(&uv_nmi_lock);
+ raw_spin_lock(&uv_nmi_lock);
pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
dump_stack();
- spin_unlock(&uv_nmi_lock);
+ raw_spin_unlock(&uv_nmi_lock);

return NMI_HANDLED;
}
@@ -811,7 +811,7 @@ void __init uv_system_init(void)
uv_blade_info[blade].pnode = pnode;
uv_blade_info[blade].nr_possible_cpus = 0;
uv_blade_info[blade].nr_online_cpus = 0;
- spin_lock_init(&uv_blade_info[blade].nmi_lock);
+ raw_spin_lock_init(&uv_blade_info[blade].nmi_lock);
max_pnode = max(pnode, max_pnode);
blade++;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 0d2db0e7caf4..ab47f313db71 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -15,6 +15,7 @@
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
+#include <linux/kthread.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
@@ -38,6 +39,7 @@
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>
+#include <linux/jiffies.h>

#include <asm/processor.h>
#include <asm/mce.h>
@@ -1247,17 +1249,14 @@ void mce_log_therm_throt_event(__u64 status)
* poller finds an MCE, poll 2x faster. When the poller finds no more
* errors, poll 2x slower (up to check_interval seconds).
*/
-static int check_interval = 5 * 60; /* 5 minutes */
+static unsigned long check_interval = 5 * 60; /* 5 minutes */

-static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
-static DEFINE_PER_CPU(struct timer_list, mce_timer);
+static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
+static DEFINE_PER_CPU(struct hrtimer, mce_timer);

-static void mce_start_timer(unsigned long data)
+static enum hrtimer_restart mce_start_timer(struct hrtimer *timer)
{
- struct timer_list *t = &per_cpu(mce_timer, data);
- int *n;
-
- WARN_ON(smp_processor_id() != data);
+ unsigned long *n;

if (mce_available(__this_cpu_ptr(&cpu_info))) {
machine_check_poll(MCP_TIMESTAMP,
@@ -1270,21 +1269,22 @@ static void mce_start_timer(unsigned long data)
*/
n = &__get_cpu_var(mce_next_interval);
if (mce_notify_irq())
- *n = max(*n/2, HZ/100);
+ *n = max(*n/2, HZ/100UL);
else
- *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
+ *n = min(*n*2, round_jiffies_relative(check_interval*HZ));

- t->expires = jiffies + *n;
- add_timer_on(t, smp_processor_id());
+ hrtimer_forward(timer, timer->base->get_time(),
+ ns_to_ktime(jiffies_to_usecs(*n) * 1000ULL));
+ return HRTIMER_RESTART;
}

-/* Must not be called in IRQ context where del_timer_sync() can deadlock */
+/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */
static void mce_timer_delete_all(void)
{
int cpu;

for_each_online_cpu(cpu)
- del_timer_sync(&per_cpu(mce_timer, cpu));
+ hrtimer_cancel(&per_cpu(mce_timer, cpu));
}

static void mce_do_trigger(struct work_struct *work)
@@ -1294,6 +1294,63 @@ static void mce_do_trigger(struct work_struct *work)

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

+static void __mce_notify_work(void)
+{
+ /* Not more than two messages every minute */
+ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+
+ /* wake processes polling /dev/mcelog */
+ wake_up_interruptible(&mce_chrdev_wait);
+
+ /*
+ * There is no risk of missing notifications because
+ * work_pending is always cleared before the function is
+ * executed.
+ */
+ if (mce_helper[0] && !work_pending(&mce_trigger_work))
+ schedule_work(&mce_trigger_work);
+
+ if (__ratelimit(&ratelimit))
+ pr_info(HW_ERR "Machine check events logged\n");
+}
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+struct task_struct *mce_notify_helper;
+
+static int mce_notify_helper_thread(void *unused)
+{
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ if (kthread_should_stop())
+ break;
+ __mce_notify_work();
+ }
+ return 0;
+}
+
+static int mce_notify_work_init(void)
+{
+ mce_notify_helper = kthread_run(mce_notify_helper_thread, NULL,
+ "mce-notify");
+ if (!mce_notify_helper)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mce_notify_work(void)
+{
+ wake_up_process(mce_notify_helper);
+}
+#else
+static void mce_notify_work(void)
+{
+ __mce_notify_work();
+}
+static inline int mce_notify_work_init(void) { return 0; }
+#endif
+
/*
* Notify the user(s) about new machine check events.
* Can be called from interrupt context, but not from machine check/NMI
@@ -1301,24 +1358,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
*/
int mce_notify_irq(void)
{
- /* Not more than two messages every minute */
- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
-
if (test_and_clear_bit(0, &mce_need_notify)) {
- /* wake processes polling /dev/mcelog */
- wake_up_interruptible(&mce_chrdev_wait);
-
- /*
- * There is no risk of missing notifications because
- * work_pending is always cleared before the function is
- * executed.
- */
- if (mce_helper[0] && !work_pending(&mce_trigger_work))
- schedule_work(&mce_trigger_work);
-
- if (__ratelimit(&ratelimit))
- pr_info(HW_ERR "Machine check events logged\n");
-
+ mce_notify_work();
return 1;
}
return 0;
@@ -1514,10 +1555,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)

static void __mcheck_cpu_init_timer(void)
{
- struct timer_list *t = &__get_cpu_var(mce_timer);
- int *n = &__get_cpu_var(mce_next_interval);
+ struct hrtimer *t = &__get_cpu_var(mce_timer);
+ unsigned long *n = &__get_cpu_var(mce_next_interval);

- setup_timer(t, mce_start_timer, smp_processor_id());
+ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ t->function = mce_start_timer;

if (mce_ignore_ce)
return;
@@ -1525,8 +1567,9 @@ static void __mcheck_cpu_init_timer(void)
*n = check_interval * HZ;
if (!*n)
return;
- t->expires = round_jiffies(jiffies + *n);
- add_timer_on(t, smp_processor_id());
+
+ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(*n) * 1000ULL),
+ 0 , HRTIMER_MODE_REL_PINNED);
}

/* Handle unconfigured int18 (should never happen) */
@@ -2178,6 +2221,8 @@ static void __cpuinit mce_disable_cpu(void *h)
if (!mce_available(__this_cpu_ptr(&cpu_info)))
return;

+ hrtimer_cancel(&__get_cpu_var(mce_timer));
+
if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
for (i = 0; i < banks; i++) {
@@ -2204,6 +2249,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
if (b->init)
wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
}
+ __mcheck_cpu_init_timer();
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
@@ -2211,7 +2257,6 @@ static int __cpuinit
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct timer_list *t = &per_cpu(mce_timer, cpu);

switch (action) {
case CPU_ONLINE:
@@ -2228,16 +2273,10 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
- del_timer_sync(t);
smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
- if (!mce_ignore_ce && check_interval) {
- t->expires = round_jiffies(jiffies +
- __get_cpu_var(mce_next_interval));
- add_timer_on(t, cpu);
- }
smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
break;
case CPU_POST_DEAD:
@@ -2298,6 +2337,8 @@ static __init int mcheck_init_device(void)
/* register character device /dev/mcelog */
misc_register(&mce_chrdev_device);

+ err = mce_notify_work_init();
+
return err;
}
device_initcall(mcheck_init_device);
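
On RT, mce_notify_irq() can fire from contexts where wake_up_interruptible() and schedule_work() (which take sleeping locks there) are not safe, so the mce.c hunk above routes the work through a dedicated kthread and leaves only wake_up_process() on the hot path. The deferral skeleton, reduced to its shape (names are illustrative; note the sketch checks IS_ERR(), since kthread_run() reports failure with an ERR_PTR rather than NULL):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *notify_helper;

static void slow_notify_work(void)
{
	/* the part that may take sleeping locks on RT goes here */
}

static int notify_helper_thread(void *unused)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		if (kthread_should_stop())
			break;
		slow_notify_work();
	}
	return 0;
}

/* Safe from hard interrupt context: */
static void notify_from_irq(void)
{
	wake_up_process(notify_helper);
}

static int __init notify_init(void)
{
	notify_helper = kthread_run(notify_helper_thread, NULL, "demo-notify");
	return IS_ERR(notify_helper) ? PTR_ERR(notify_helper) : 0;
}
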
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 9b9f18b49918..d15f575a861b 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -169,25 +169,9 @@ static struct console early_serial_console = {
.index = -1,
};

-/* Direct interface for emergencies */
-static struct console *early_console = &early_vga_console;
-static int __initdata early_console_initialized;
-
-asmlinkage void early_printk(const char *fmt, ...)
-{
- char buf[512];
- int n;
- va_list ap;
-
- va_start(ap, fmt);
- n = vscnprintf(buf, sizeof(buf), fmt, ap);
- early_console->write(early_console, buf, n);
- va_end(ap);
-}
-
static inline void early_console_register(struct console *con, int keep_early)
{
- if (early_console->index != -1) {
+ if (con->index != -1) {
printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
con->name);
return;
@@ -207,9 +191,8 @@ static int __init setup_early_printk(char *buf)
if (!buf)
return 0;

- if (early_console_initialized)
+ if (early_console)
return 0;
- early_console_initialized = 1;

keep = (strstr(buf, "keep") != NULL);

diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 8b49131c7df6..ad82f7e3e54e 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1258,6 +1258,7 @@ ENTRY(kernel_execve)
CFI_ENDPROC
END(kernel_execve)

+#ifndef CONFIG_PREEMPT_RT_FULL
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
CFI_STARTPROC
@@ -1277,6 +1278,7 @@ ENTRY(call_softirq)
ret
CFI_ENDPROC
END(call_softirq)
+#endif

#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 2861b4cafd2e..ed91a2b0209f 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/hpet.h>
#include <linux/init.h>
+#include <linux/dmi.h>
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/io.h>
@@ -570,6 +571,30 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
#define RESERVE_TIMERS 0
#endif

+static int __init dmi_disable_hpet_msi(const struct dmi_system_id *d)
+{
+ hpet_msi_disable = 1;
+ return 0;
+}
+
+static struct dmi_system_id __initdata dmi_hpet_table[] = {
+ /*
+ * MSI based per cpu timers lose interrupts when intel_idle()
+ * is enabled - independent of the c-state. With idle=poll the
+ * problem cannot be observed. We have no idea yet, whether
+ * this is a W510 specific issue or a general chipset oddity.
+ */
+ {
+ .callback = dmi_disable_hpet_msi,
+ .ident = "Lenovo W510",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
+ },
+ },
+ {}
+};
+
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
unsigned int id;
@@ -577,6 +602,8 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
unsigned int num_timers_used = 0;
int i;

+ dmi_check_system(dmi_hpet_table);
+
if (hpet_msi_disable)
return;

diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 58b7f27cb3e9..a7b4e1b2e6eb 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -149,6 +149,7 @@ void __cpuinit irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}

+#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void)
{
unsigned long flags;
@@ -179,6 +180,7 @@ asmlinkage void do_softirq(void)

local_irq_restore(flags);
}
+#endif

bool handle_irq(unsigned irq, struct pt_regs *regs)
{
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index d04d3ecded62..831f247b5798 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -88,7 +88,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
return true;
}

-
+#ifndef CONFIG_PREEMPT_RT_FULL
extern void call_softirq(void);

asmlinkage void do_softirq(void)
@@ -108,3 +108,4 @@ asmlinkage void do_softirq(void)
}
local_irq_restore(flags);
}
+#endif
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index ca8f703a1e70..129b8bb73de2 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -18,6 +18,7 @@ void smp_irq_work_interrupt(struct pt_regs *regs)
irq_exit();
}

+#ifndef CONFIG_PREEMPT_RT_FULL
void arch_irq_work_raise(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
@@ -28,3 +29,4 @@ void arch_irq_work_raise(void)
apic_wait_icr_idle();
#endif
}
+#endif
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 15d3c454e5e3..1579b3a69559 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -486,7 +486,6 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
* stepping.
*/
regs->ip = (unsigned long)p->ainsn.insn;
- preempt_enable_no_resched();
return;
}
#endif
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index ae6847303e26..02ffbf5fa34e 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -36,6 +36,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
+#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/ldt.h>
@@ -285,6 +286,41 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
__switch_to_xtra(prev_p, next_p, tss);

+#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Save @prev's kmap_atomic stack
+ */
+ prev_p->kmap_idx = __this_cpu_read(__kmap_atomic_idx);
+ if (unlikely(prev_p->kmap_idx)) {
+ int i;
+
+ for (i = 0; i < prev_p->kmap_idx; i++) {
+ int idx = i + KM_TYPE_NR * smp_processor_id();
+
+ pte_t *ptep = kmap_pte - idx;
+ prev_p->kmap_pte[i] = *ptep;
+ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ }
+
+ __this_cpu_write(__kmap_atomic_idx, 0);
+ }
+
+ /*
+ * Restore @next_p's kmap_atomic stack
+ */
+ if (unlikely(next_p->kmap_idx)) {
+ int i;
+
+ __this_cpu_write(__kmap_atomic_idx, next_p->kmap_idx);
+
+ for (i = 0; i < next_p->kmap_idx; i++) {
+ int idx = i + KM_TYPE_NR * smp_processor_id();
+
+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
+ }
+ }
+#endif
+
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 115eac431483..c67d83fe0d00 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -824,6 +824,15 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
mce_notify_process();
#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */

+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
+ if (unlikely(current->forced_info.si_signo)) {
+ struct task_struct *t = current;
+ force_sig_info(t->forced_info.si_signo,
+ &t->forced_info, t);
+ t->forced_info.si_signo = 0;
+ }
+#endif
+
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index adc049fef8ca..2db25041ad49 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -87,9 +87,21 @@ static inline void conditional_sti(struct pt_regs *regs)
local_irq_enable();
}

-static inline void preempt_conditional_sti(struct pt_regs *regs)
+static inline void conditional_sti_ist(struct pt_regs *regs)
{
+#ifdef CONFIG_X86_64
+ /*
+ * X86_64 uses a per CPU stack on the IST for certain traps
+ * like int3. The task can not be preempted when using one
+ * of these stacks, thus preemption must be disabled, otherwise
+ * the stack can be corrupted if the task is scheduled out,
+ * and another task comes in and uses this stack.
+ *
+ * On x86_32 the task keeps its own stack and it is OK if the
+ * task schedules out.
+ */
inc_preempt_count();
+#endif
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
@@ -100,11 +112,13 @@ static inline void conditional_cli(struct pt_regs *regs)
local_irq_disable();
}

-static inline void preempt_conditional_cli(struct pt_regs *regs)
+static inline void conditional_cli_ist(struct pt_regs *regs)
{
if (regs->flags & X86_EFLAGS_IF)
local_irq_disable();
+#ifdef CONFIG_X86_64
dec_preempt_count();
+#endif
}

static void __kprobes
@@ -332,9 +346,9 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
* as we may switch to the interrupt stack.
*/
debug_stack_usage_inc();
- preempt_conditional_sti(regs);
+ conditional_sti_ist(regs);
do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
- preempt_conditional_cli(regs);
+ conditional_cli_ist(regs);
debug_stack_usage_dec();
}

@@ -464,12 +478,12 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
debug_stack_usage_inc();

/* It's safe to allow irq's after DR6 has been saved */
- preempt_conditional_sti(regs);
+ conditional_sti_ist(regs);

if (regs->flags & X86_VM_MASK) {
handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
X86_TRAP_DB);
- preempt_conditional_cli(regs);
+ conditional_cli_ist(regs);
debug_stack_usage_dec();
return;
}
@@ -489,7 +503,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
si_code = get_si_code(tsk->thread.debugreg6);
if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
send_sigtrap(tsk, regs, error_code, si_code);
- preempt_conditional_cli(regs);
+ conditional_cli_ist(regs);
debug_stack_usage_dec();

return;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 578613da251e..b4bdc60f171f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -683,8 +683,38 @@ static void update_divide_count(struct kvm_lapic *apic)
apic->divide_count);
}

+
+static enum hrtimer_restart apic_timer_fn(struct hrtimer *data);
+
+static void apic_timer_expired(struct hrtimer *data)
+{
+ int ret, i = 0;
+ enum hrtimer_restart r;
+ struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
+
+ r = apic_timer_fn(data);
+
+ if (r == HRTIMER_RESTART) {
+ do {
+ ret = hrtimer_start_expires(data, HRTIMER_MODE_ABS);
+ if (ret == -ETIME)
+ hrtimer_add_expires_ns(&ktimer->timer,
+ ktimer->period);
+ i++;
+ } while (ret == -ETIME && i < 10);
+
+ if (ret == -ETIME) {
+ printk_once(KERN_ERR "%s: failed to reprogram timer\n",
+ __func__);
+ WARN_ON_ONCE(1);
+ }
+ }
+}
+
+
static void start_apic_timer(struct kvm_lapic *apic)
{
+ int ret;
ktime_t now;
atomic_set(&apic->lapic_timer.pending, 0);

@@ -714,9 +744,11 @@ static void start_apic_timer(struct kvm_lapic *apic)
}
}

- hrtimer_start(&apic->lapic_timer.timer,
+ ret = hrtimer_start(&apic->lapic_timer.timer,
ktime_add_ns(now, apic->lapic_timer.period),
HRTIMER_MODE_ABS);
+ if (ret == -ETIME)
+ apic_timer_expired(&apic->lapic_timer.timer);

apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
PRIx64 ", "
@@ -746,8 +778,10 @@ static void start_apic_timer(struct kvm_lapic *apic)
ns = (tscdeadline - guest_tsc) * 1000000ULL;
do_div(ns, this_tsc_khz);
}
- hrtimer_start(&apic->lapic_timer.timer,
+ ret = hrtimer_start(&apic->lapic_timer.timer,
ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
+ if (ret == -ETIME)
+ apic_timer_expired(&apic->lapic_timer.timer);

local_irq_restore(flags);
}
@@ -1177,6 +1211,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
apic->lapic_timer.timer.function = kvm_timer_fn;
+ apic->lapic_timer.timer.irqsafe = 1;
apic->lapic_timer.t_ops = &lapic_timer_ops;
apic->lapic_timer.kvm = vcpu->kvm;
apic->lapic_timer.vcpu = vcpu;
@@ -1273,7 +1308,8 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)

timer = &apic->lapic_timer.timer;
if (hrtimer_cancel(timer))
- hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+ if (hrtimer_start_expires(timer, HRTIMER_MODE_ABS) == -ETIME)
+ apic_timer_expired(timer);
}

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
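
With the lapic timer marked irqsafe for RT, hrtimer_start()/hrtimer_start_expires() can report -ETIME when the requested expiry already lies in the past (a return convention of this series' hrtimer changes, not of mainline), and the caller must either run the handler or push the expiry forward. The retry loop from apic_timer_expired(), isolated from the kvm types:

#include <linux/hrtimer.h>
#include <linux/kernel.h>

/* Sketch: rearm a periodic timer whose deadline may already have passed;
 * period_ns is the timer period in nanoseconds.
 */
static void rearm_possibly_late_timer(struct hrtimer *timer, u64 period_ns)
{
	int ret, tries = 0;

	do {
		ret = hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
		if (ret == -ETIME)
			hrtimer_add_expires_ns(timer, period_ns);
		tries++;
	} while (ret == -ETIME && tries < 10);

	WARN_ONCE(ret == -ETIME, "failed to reprogram timer\n");
}
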
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 318a2454366f..dc5430c77132 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4902,6 +4902,13 @@ int kvm_arch_init(void *opaque)
goto out;
}

+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
+ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
+ return -EOPNOTSUPP;
+ }
+#endif
+
r = kvm_mmu_module_init();
if (r)
goto out;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b723f2e09f44..c97bee9dc47c 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1100,7 +1100,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
- if (unlikely(in_atomic() || !mm)) {
+ if (unlikely(!mm || pagefault_disabled())) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 6f31ee56c008..ab8683ade5c8 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -43,7 +43,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- BUG_ON(!pte_none(*(kmap_pte-idx)));
+ WARN_ON(!pte_none(*(kmap_pte-idx)));
set_pte(kmap_pte-idx, mk_pte(page, prot));
arch_flush_lazy_mmu_mode();

diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 59880afa851f..0664c9920e58 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -39,7 +39,7 @@ static int timeout_base_ns[] = {
static int timeout_us;
static int nobau;
static int baudisabled;
-static spinlock_t disable_lock;
+static raw_spinlock_t disable_lock;
static cycles_t congested_cycles;

/* tunables: */
@@ -545,7 +545,7 @@ int handle_uv2_busy(struct bau_control *bcp)
cycles_t ttm;

stat->s_uv2_wars++;
- spin_lock(&hmaster->uvhub_lock);
+ raw_spin_lock(&hmaster->uvhub_lock);
/* try for the original first */
if (busy_one != normal) {
if (!normal_busy(bcp))
@@ -595,12 +595,12 @@ int handle_uv2_busy(struct bau_control *bcp)
* free up.
*/
stat->s_uv2_war_waits++;
- spin_unlock(&hmaster->uvhub_lock);
+ raw_spin_unlock(&hmaster->uvhub_lock);
ttm = get_cycles();
do {
cpu_relax();
} while (normal_busy(bcp));
- spin_lock(&hmaster->uvhub_lock);
+ raw_spin_lock(&hmaster->uvhub_lock);
/* switch to the original descriptor */
bcp->using_desc = normal;
bau_desc_old = bcp->descriptor_base;
@@ -610,7 +610,7 @@ int handle_uv2_busy(struct bau_control *bcp)
bau_desc_new += (ITEMS_PER_DESC * normal);
*bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
}
- spin_unlock(&hmaster->uvhub_lock);
+ raw_spin_unlock(&hmaster->uvhub_lock);
return FLUSH_RETRY_BUSYBUG;
}

@@ -724,9 +724,9 @@ static void destination_plugged(struct bau_desc *bau_desc,

quiesce_local_uvhub(hmaster);

- spin_lock(&hmaster->queue_lock);
+ raw_spin_lock(&hmaster->queue_lock);
reset_with_ipi(&bau_desc->distribution, bcp);
- spin_unlock(&hmaster->queue_lock);
+ raw_spin_unlock(&hmaster->queue_lock);

end_uvhub_quiesce(hmaster);

@@ -746,9 +746,9 @@ static void destination_timeout(struct bau_desc *bau_desc,

quiesce_local_uvhub(hmaster);

- spin_lock(&hmaster->queue_lock);
+ raw_spin_lock(&hmaster->queue_lock);
reset_with_ipi(&bau_desc->distribution, bcp);
- spin_unlock(&hmaster->queue_lock);
+ raw_spin_unlock(&hmaster->queue_lock);

end_uvhub_quiesce(hmaster);

@@ -765,7 +765,7 @@ static void disable_for_congestion(struct bau_control *bcp,
struct ptc_stats *stat)
{
/* let only one cpu do this disabling */
- spin_lock(&disable_lock);
+ raw_spin_lock(&disable_lock);

if (!baudisabled && bcp->period_requests &&
((bcp->period_time / bcp->period_requests) > congested_cycles)) {
@@ -784,7 +784,7 @@ static void disable_for_congestion(struct bau_control *bcp,
}
}

- spin_unlock(&disable_lock);
+ raw_spin_unlock(&disable_lock);
}

static void count_max_concurr(int stat, struct bau_control *bcp,
@@ -833,7 +833,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
*/
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
- spinlock_t *lock = &hmaster->uvhub_lock;
+ raw_spinlock_t *lock = &hmaster->uvhub_lock;
atomic_t *v;

v = &hmaster->active_descriptor_count;
@@ -1850,8 +1850,8 @@ static void __init init_per_cpu_tunables(void)
bcp->cong_reps = congested_reps;
bcp->cong_period = congested_period;
bcp->clocks_per_100_usec = usec_2_cycles(100);
- spin_lock_init(&bcp->queue_lock);
- spin_lock_init(&bcp->uvhub_lock);
+ raw_spin_lock_init(&bcp->queue_lock);
+ raw_spin_lock_init(&bcp->uvhub_lock);
}
}

@@ -2078,7 +2078,7 @@ static int __init uv_bau_init(void)
}

nuvhubs = uv_num_possible_blades();
- spin_lock_init(&disable_lock);
+ raw_spin_lock_init(&disable_lock);
congested_cycles = usec_2_cycles(congested_respns_us);

uv_base_pnode = 0x7fffffff;
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 5032e0d19b86..eb55dd021c64 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);

/* There is one of these allocated per node */
struct uv_rtc_timer_head {
- spinlock_t lock;
+ raw_spinlock_t lock;
/* next cpu waiting for timer, local node relative: */
int next_cpu;
/* number of cpus on this node: */
@@ -178,7 +178,7 @@ static __init int uv_rtc_allocate_timers(void)
uv_rtc_deallocate_timers();
return -ENOMEM;
}
- spin_lock_init(&head->lock);
+ raw_spin_lock_init(&head->lock);
head->ncpus = uv_blade_nr_possible_cpus(bid);
head->next_cpu = -1;
blade_info[bid] = head;
@@ -232,7 +232,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
unsigned long flags;
int next_cpu;

- spin_lock_irqsave(&head->lock, flags);
+ raw_spin_lock_irqsave(&head->lock, flags);

next_cpu = head->next_cpu;
*t = expires;
@@ -244,12 +244,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
if (uv_setup_intr(cpu, expires)) {
*t = ULLONG_MAX;
uv_rtc_find_next_timer(head, pnode);
- spin_unlock_irqrestore(&head->lock, flags);
+ raw_spin_unlock_irqrestore(&head->lock, flags);
return -ETIME;
}
}

- spin_unlock_irqrestore(&head->lock, flags);
+ raw_spin_unlock_irqrestore(&head->lock, flags);
return 0;
}

@@ -268,7 +268,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
unsigned long flags;
int rc = 0;

- spin_lock_irqsave(&head->lock, flags);
+ raw_spin_lock_irqsave(&head->lock, flags);

if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
rc = 1;
@@ -280,7 +280,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
uv_rtc_find_next_timer(head, pnode);
}

- spin_unlock_irqrestore(&head->lock, flags);
+ raw_spin_unlock_irqrestore(&head->lock, flags);

return rc;
}
@@ -300,13 +300,18 @@ static int uv_rtc_unset_timer(int cpu, int force)
static cycle_t uv_read_rtc(struct clocksource *cs)
{
unsigned long offset;
+ cycle_t cycles;

+ preempt_disable();
if (uv_get_min_hub_revision_id() == 1)
offset = 0;
else
offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;

- return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+ cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+ preempt_enable();
+
+ return cycles;
}

/*
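
uv_read_rtc() derives a per-CPU MMR offset from the local blade processor id, so on a preemptible kernel the computation and the read must happen on the same CPU; hence the new preempt_disable()/preempt_enable() bracket. The same discipline in isolation (read_blade_mmr() is a placeholder):

#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/types.h>

/* Sketch: anything derived from smp_processor_id() must be computed and
 * consumed without the task migrating to another CPU in between.
 */
static u64 read_local_counter(void)
{
	u64 val;

	preempt_disable();
	val = read_blade_mmr(smp_processor_id());	/* hypothetical accessor */
	preempt_enable();

	return val;
}
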
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index b3877be25ab3..e793593861f6 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -56,7 +56,7 @@ void do_page_fault(struct pt_regs *regs)
/* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm) {
+ if (!mm || pagefault_disabled()) {
bad_page_fault(regs, address, SIGSEGV);
return;
}
diff --git a/block/blk-core.c b/block/blk-core.c
index 1175e57104cc..79eedf47a770 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(blk_delay_queue);
**/
void blk_start_queue(struct request_queue *q)
{
- WARN_ON(!irqs_disabled());
+ WARN_ON_NONRT(!irqs_disabled());

queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
@@ -302,7 +302,11 @@ void __blk_run_queue(struct request_queue *q)
{
if (unlikely(blk_queue_stopped(q)))
return;
-
+ /*
+ * q->request_fn() can drop q->queue_lock and reenable
+ * interrupts, but must return with q->queue_lock held and
+ * interrupts disabled.
+ */
q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);
@@ -2782,11 +2786,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
* this lock).
*/
if (from_schedule) {
- spin_unlock(q->queue_lock);
+ spin_unlock_irq(q->queue_lock);
blk_run_queue_async(q);
} else {
__blk_run_queue(q);
- spin_unlock(q->queue_lock);
+ spin_unlock_irq(q->queue_lock);
}

}
@@ -2812,7 +2816,6 @@ static void flush_plug_callbacks(struct blk_plug *plug)
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
- unsigned long flags;
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
@@ -2833,11 +2836,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
q = NULL;
depth = 0;

- /*
- * Save and disable interrupts here, to avoid doing it for every
- * queue lock we have to take.
- */
- local_irq_save(flags);
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
@@ -2850,7 +2848,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
- spin_lock(q->queue_lock);
+ spin_lock_irq(q->queue_lock);
}

/*
@@ -2877,8 +2875,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
*/
if (q)
queue_unplugged(q, depth, from_schedule);
-
- local_irq_restore(flags);
}

void blk_finish_plug(struct blk_plug *plug)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fb95dd2f889a..6b542018ed1e 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -8,6 +8,7 @@
#include <linux/blkdev.h>
#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
#include <linux/slab.h>
+#include <linux/delay.h>

#include "blk.h"

@@ -110,7 +111,7 @@ static void ioc_release_fn(struct work_struct *work)
spin_unlock(q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
+ cpu_chill();
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
}
}
@@ -188,7 +189,7 @@ retry:
spin_unlock(icq->q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
+ cpu_chill();
goto retry;
}
}
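
cpu_chill() here is this series' replacement for busy-wait retries: on RT the lock holder may itself be a preemptible task, so spinning with cpu_relax() can livelock, while cpu_chill() (provided by the series via <linux/delay.h>, a short sleep on RT and cpu_relax() otherwise) lets the holder run. The retry shape in isolation:

#include <linux/spinlock.h>
#include <linux/delay.h>	/* cpu_chill(), with this series applied */

/* Sketch: reverse-order trylock with a chill-and-retry fallback. */
static void lock_pair(spinlock_t *a, spinlock_t *b)
{
retry:
	spin_lock(a);
	if (!spin_trylock(b)) {
		spin_unlock(a);
		cpu_chill();
		goto retry;
	}
}
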
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
|
|
index 58916afbbda5..f7ca9b411d2a 100644
|
|
--- a/block/blk-iopoll.c
|
|
+++ b/block/blk-iopoll.c
|
|
@@ -38,6 +38,7 @@ void blk_iopoll_sched(struct blk_iopoll *iop)
|
|
list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
|
|
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
|
|
local_irq_restore(flags);
|
|
+ preempt_check_resched_rt();
|
|
}
|
|
EXPORT_SYMBOL(blk_iopoll_sched);
|
|
|
|
@@ -135,6 +136,7 @@ static void blk_iopoll_softirq(struct softirq_action *h)
|
|
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
|
|
|
|
local_irq_enable();
|
|
+ preempt_check_resched_rt();
|
|
}
|
|
|
|
/**
|
|
@@ -204,6 +206,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
|
|
&__get_cpu_var(blk_cpu_iopoll));
|
|
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
|
|
local_irq_enable();
|
|
+ preempt_check_resched_rt();
|
|
}
|
|
|
|
return NOTIFY_OK;
|
|
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
|
|
index 467c8de88642..3fe236829aeb 100644
|
|
--- a/block/blk-softirq.c
|
|
+++ b/block/blk-softirq.c
|
|
@@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
|
|
raise_softirq_irqoff(BLOCK_SOFTIRQ);
|
|
|
|
local_irq_restore(flags);
|
|
+ preempt_check_resched_rt();
|
|
}
|
|
|
|
/*
|
|
@@ -93,6 +94,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
|
|
&__get_cpu_var(blk_cpu_done));
|
|
raise_softirq_irqoff(BLOCK_SOFTIRQ);
|
|
local_irq_enable();
|
|
+ preempt_check_resched_rt();
|
|
}
|
|
|
|
return NOTIFY_OK;
|
|
@@ -150,6 +152,7 @@ do_local:
|
|
goto do_local;
|
|
|
|
local_irq_restore(flags);
|
|
+ preempt_check_resched_rt();
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
|
|
index 4f7d3f57d05c..16dddf2d665f 100644
|
|
--- a/drivers/acpi/acpica/acglobal.h
|
|
+++ b/drivers/acpi/acpica/acglobal.h
|
|
@@ -251,7 +251,7 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
|
|
* interrupt level
|
|
*/
|
|
ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
|
|
-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
|
|
+ACPI_EXTERN acpi_raw_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
|
|
|
|
/*****************************************************************************
|
|
*
|
|
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
|
|
index 6b6c83b87b52..fcc264f5b629 100644
|
|
--- a/drivers/acpi/acpica/hwregs.c
|
|
+++ b/drivers/acpi/acpica/hwregs.c
|
|
@@ -271,14 +271,14 @@ acpi_status acpi_hw_clear_acpi_status(void)
|
|
ACPI_BITMASK_ALL_FIXED_STATUS,
|
|
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
|
|
|
|
- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
|
|
+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
|
|
|
|
/* Clear the fixed events in PM1 A/B */
|
|
|
|
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
|
|
ACPI_BITMASK_ALL_FIXED_STATUS);
|
|
|
|
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
|
|
+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
|
|
|
|
if (ACPI_FAILURE(status))
|
|
goto exit;
|
|
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
|
|
index a716fede4f25..5c6494f1dc01 100644
|
|
--- a/drivers/acpi/acpica/hwxface.c
|
|
+++ b/drivers/acpi/acpica/hwxface.c
|
|
@@ -366,7 +366,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
|
|
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
|
}
|
|
|
|
- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
|
|
+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
|
|
|
|
/*
|
|
* At this point, we know that the parent register is one of the
|
|
@@ -427,7 +427,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
|
|
|
|
unlock_and_exit:
|
|
|
|
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
|
|
+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
|
|
return_ACPI_STATUS(status);
|
|
}
|
|
|
|
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
|
|
index 43174df33121..5aab30aa48a7 100644
|
|
--- a/drivers/acpi/acpica/utmutex.c
|
|
+++ b/drivers/acpi/acpica/utmutex.c
|
|
@@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(void)
|
|
return_ACPI_STATUS (status);
|
|
}
|
|
|
|
- status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
|
|
+ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
|
|
if (ACPI_FAILURE (status)) {
|
|
return_ACPI_STATUS (status);
|
|
}
|
|
@@ -135,7 +135,7 @@ void acpi_ut_mutex_terminate(void)
|
|
/* Delete the spinlocks */
|
|
|
|
acpi_os_delete_lock(acpi_gbl_gpe_lock);
|
|
- acpi_os_delete_lock(acpi_gbl_hardware_lock);
|
|
+ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
|
|
|
|
/* Delete the reader/writer lock */
|
|
|
|
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
|
|
index fad2734a46d0..5bfeea9522a5 100644
|
|
--- a/drivers/ata/libata-sff.c
|
|
+++ b/drivers/ata/libata-sff.c
|
|
@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
|
|
unsigned long flags;
|
|
unsigned int consumed;
|
|
|
|
- local_irq_save(flags);
|
|
+ local_irq_save_nort(flags);
|
|
consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
|
|
- local_irq_restore(flags);
|
|
+ local_irq_restore_nort(flags);
|
|
|
|
return consumed;
|
|
}
|
|
@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
|
|
unsigned long flags;
|
|
|
|
/* FIXME: use a bounce buffer */
|
|
- local_irq_save(flags);
|
|
+ local_irq_save_nort(flags);
|
|
buf = kmap_atomic(page);
|
|
|
|
/* do the actual data transfer */
|
|
@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
|
|
do_write);
|
|
|
|
kunmap_atomic(buf);
|
|
- local_irq_restore(flags);
|
|
+ local_irq_restore_nort(flags);
|
|
} else {
|
|
buf = page_address(page);
|
|
ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
|
|
@@ -864,7 +864,7 @@ next_sg:
|
|
unsigned long flags;
|
|
|
|
/* FIXME: use bounce buffer */
|
|
- local_irq_save(flags);
|
|
+ local_irq_save_nort(flags);
|
|
buf = kmap_atomic(page);
|
|
|
|
/* do the actual data transfer */
|
|
@@ -872,7 +872,7 @@ next_sg:
|
|
count, rw);
|
|
|
|
kunmap_atomic(buf);
|
|
- local_irq_restore(flags);
|
|
+ local_irq_restore_nort(flags);
|
|
} else {
|
|
buf = page_address(page);
|
|
consumed = ap->ops->sff_data_xfer(dev, buf + offset,
|
|
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 85172faa1569..0aac27740e00 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -448,7 +448,7 @@ static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
.name = "input",
.limit = 1,
- .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
.pool = input_pool_data
};

@@ -457,7 +457,7 @@ static struct entropy_store blocking_pool = {
.name = "blocking",
.limit = 1,
.pull = &input_pool,
- .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
.pool = blocking_pool_data
};

@@ -465,7 +465,7 @@ static struct entropy_store nonblocking_pool = {
.poolinfo = &poolinfo_table[1],
.name = "nonblocking",
.pull = &input_pool,
- .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
.pool = nonblocking_pool_data
};

@@ -679,8 +679,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
preempt_disable();
/* if over the trickle threshold, use only 1 in 4096 samples */
if (input_pool.entropy_count > trickle_thresh &&
- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
- goto out;
+ ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) {
+ preempt_enable();
+ return;
+ }
+ preempt_enable();

sample.jiffies = jiffies;
sample.cycles = get_cycles();
@@ -722,8 +725,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
credit_entropy_bits(&input_pool,
min_t(int, fls(delta>>1), 11));
}
-out:
- preempt_enable();
}

void add_input_randomness(unsigned int type, unsigned int code,
@@ -744,18 +745,16 @@ EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

-void add_interrupt_randomness(int irq, int irq_flags)
+void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
{
struct entropy_store *r;
struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
- struct pt_regs *regs = get_irq_regs();
unsigned long now = jiffies;
__u32 input[4], cycles = get_cycles();

input[0] = cycles ^ jiffies;
input[1] = irq;
- if (regs) {
- __u64 ip = instruction_pointer(regs);
+ if (ip) {
input[2] = ip;
input[3] = ip >> 32;
}
@@ -769,7 +768,11 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_pool->last = now;

r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+#ifndef CONFIG_PREEMPT_RT_FULL
__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+#else
+ mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+#endif
/*
* If we don't have a valid cycle counter, and we see
* back-to-back timer interrupts, then skip giving credit for
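
The new "ip" parameter exists because on RT the entropy hook can run from a threaded interrupt handler, where get_irq_regs() no longer describes the interrupted context. A sketch of the calling convention this implies; note_interrupt_randomness() is a made-up name, the real call sites live elsewhere in this series:

/*
 * Illustrative only: capture the interrupted instruction pointer while
 * still in hard interrupt context, where get_irq_regs() is valid, and
 * hand it on explicitly. Passing 0 simply skips input[2]/input[3].
 */
static inline void note_interrupt_randomness(int irq, int irq_flags)
{
	struct pt_regs *regs = get_irq_regs();
	__u64 ip = regs ? instruction_pointer(regs) : 0;

	add_interrupt_randomness(irq, irq_flags, ip);
}
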
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 32cb929b8eb6..ac0bb2e8a8ea 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -23,8 +23,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
* - The third channel may be used to provide a 16-bit clockevent
- * source, used in either periodic or oneshot mode. This runs
- * at 32 KiHZ, and can handle delays of up to two seconds.
+ * source, used in either periodic or oneshot mode.
*
* A boot clocksource and clockevent source are also currently needed,
* unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
@@ -74,6 +73,7 @@ static struct clocksource clksrc = {
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
+ u32 freq;
void __iomem *regs;
};

@@ -82,13 +82,6 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- * because using one of the divided clocks would usually mean the
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- *
- * A divided clock could be good for high resolution timers, since
- * 30.5 usec resolution can seem "low".
- */
static u32 timer_clock;

static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
@@ -111,11 +104,12 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
case CLOCK_EVT_MODE_PERIODIC:
clk_enable(tcd->clk);

- /* slow clock, count up to RC, then irq and restart */
+ /* count up to RC, then irq and restart */
__raw_writel(timer_clock
| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+ __raw_writel((tcd->freq + HZ/2)/HZ,
+ tcaddr + ATMEL_TC_REG(2, RC));

/* Enable clock and interrupts on RC compare */
__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -128,7 +122,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
case CLOCK_EVT_MODE_ONESHOT:
clk_enable(tcd->clk);

- /* slow clock, count up to RC, then irq and stop */
+ /* count up to RC, then irq and stop */
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
@@ -158,8 +152,12 @@ static struct tc_clkevt_device clkevt = {
.features = CLOCK_EVT_FEAT_PERIODIC
| CLOCK_EVT_FEAT_ONESHOT,
.shift = 32,
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
/* Should be lower than at91rm9200's system timer */
.rating = 125,
+#else
+ .rating = 200,
+#endif
.set_next_event = tc_next_event,
.set_mode = tc_mode,
},
@@ -185,8 +183,9 @@ static struct irqaction tc_irqaction = {
.handler = ch2_irq,
};

-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
+ unsigned divisor = atmel_tc_divisors[divisor_idx];
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];

@@ -194,11 +193,17 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
clkevt.clk = t2_clk;
tc_irqaction.dev_id = &clkevt;

- timer_clock = clk32k_divisor_idx;
+ timer_clock = divisor_idx;

- clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
- clkevt.clkevt.max_delta_ns
- = clockevent_delta2ns(0xffff, &clkevt.clkevt);
+ if (!divisor)
+ clkevt.freq = 32768;
+ else
+ clkevt.freq = clk_get_rate(t2_clk)/divisor;
+
+ clkevt.clkevt.mult = div_sc(clkevt.freq, NSEC_PER_SEC,
+ clkevt.clkevt.shift);
+ clkevt.clkevt.max_delta_ns =
+ clockevent_delta2ns(0xffff, &clkevt.clkevt);
clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
clkevt.clkevt.cpumask = cpumask_of(0);

@@ -327,8 +332,11 @@ static int __init tcb_clksrc_init(void)
clocksource_register_hz(&clksrc, divided_rate);

/* channel 2: periodic and oneshot timer support */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
setup_clkevents(tc, clk32k_divisor_idx);
-
+#else
+ setup_clkevents(tc, best_divisor_idx);
+#endif
return 0;
}
arch_initcall(tcb_clksrc_init);
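
The arithmetic behind the new "freq" field is worth making concrete. In periodic mode the RC register gets (freq + HZ/2) / HZ ticks per jiffy; in oneshot mode the 16-bit counter caps the delay at 0xffff / freq seconds. A standalone C sketch (HZ and the fast-clock rate below are illustrative assumptions, not values taken from a particular board):

#include <stdio.h>

#define HZ 100	/* assumed tick rate for illustration */

int main(void)
{
	unsigned freqs[] = { 32768, 2500000 };	/* slow clock vs. an assumed divided fast clock */

	for (int i = 0; i < 2; i++) {
		unsigned freq = freqs[i];
		unsigned rc = (freq + HZ / 2) / HZ;	/* periodic: ticks per jiffy, rounded */
		double max_s = 0xffff / (double)freq;	/* oneshot: 16-bit counter limit */

		printf("freq=%u Hz -> RC=%u, max oneshot delay=%.4f s\n",
		       freq, rc, max_s);
	}
	return 0;
}

At 32768 Hz this gives RC = 328 and a 2.0 s maximum, the "two seconds" the deleted comment referred to; a divided fast clock trades that range for much finer resolution, which is why the non-slow-clock clockevent above gets the higher rating of 200.
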
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 4461540653a8..58ba0595c9a2 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -67,7 +67,7 @@ struct gpio_bank {
u32 saved_risingdetect;
u32 level_mask;
u32 toggle_mask;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct gpio_chip chip;
struct clk *dbck;
u32 mod_usage;
@@ -395,9 +395,9 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
(type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
return -EINVAL;

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);

if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
__irq_set_handler_locked(d->irq, handle_level_irq);
@@ -514,14 +514,14 @@ static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
return -EINVAL;
}

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
if (enable)
bank->suspend_wakeup |= gpio_bit;
else
bank->suspend_wakeup &= ~gpio_bit;

__raw_writel(bank->suspend_wakeup, bank->base + bank->regs->wkup_en);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);

return 0;
}
@@ -555,7 +555,7 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
if (!bank->mod_usage)
pm_runtime_get_sync(bank->dev);

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
/* Set trigger to none. You need to enable the desired trigger with
* request_irq() or set_irq_type().
*/
@@ -581,7 +581,7 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)

bank->mod_usage |= 1 << offset;

- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);

return 0;
}
@@ -592,7 +592,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
void __iomem *base = bank->base;
unsigned long flags;

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);

if (bank->regs->wkup_en) {
/* Disable wake-up during idle for dynamic tick */
@@ -615,7 +615,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
}

_reset_gpio(bank, bank->chip.base + offset);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);

/*
* If this is the last gpio to be freed in the bank,
@@ -720,9 +720,9 @@ static void gpio_irq_shutdown(struct irq_data *d)
unsigned int gpio = irq_to_gpio(bank, d->irq);
unsigned long flags;

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
_reset_gpio(bank, gpio);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_ack_irq(struct irq_data *d)
@@ -739,10 +739,10 @@ static void gpio_mask_irq(struct irq_data *d)
unsigned int gpio = irq_to_gpio(bank, d->irq);
unsigned long flags;

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
_set_gpio_irqenable(bank, gpio, 0);
_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_unmask_irq(struct irq_data *d)
@@ -753,7 +753,7 @@ static void gpio_unmask_irq(struct irq_data *d)
u32 trigger = irqd_get_trigger_type(d);
unsigned long flags;

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
if (trigger)
_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);

@@ -765,7 +765,7 @@ static void gpio_unmask_irq(struct irq_data *d)
}

_set_gpio_irqenable(bank, gpio, 1);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static struct irq_chip gpio_irq_chip = {
@@ -788,10 +788,10 @@ static int omap_mpuio_suspend_noirq(struct device *dev)
OMAP_MPUIO_GPIO_MASKIT / bank->stride;
unsigned long flags;

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
bank->saved_wakeup = __raw_readl(mask_reg);
__raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);

return 0;
}
@@ -804,9 +804,9 @@ static int omap_mpuio_resume_noirq(struct device *dev)
OMAP_MPUIO_GPIO_MASKIT / bank->stride;
unsigned long flags;

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
__raw_writel(bank->saved_wakeup, mask_reg);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);

return 0;
}
@@ -849,9 +849,9 @@ static int gpio_input(struct gpio_chip *chip, unsigned offset)
unsigned long flags;

bank = container_of(chip, struct gpio_bank, chip);
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
_set_gpio_direction(bank, offset, 1);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}

@@ -882,10 +882,10 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
unsigned long flags;

bank = container_of(chip, struct gpio_bank, chip);
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
bank->set_dataout(bank, offset, value);
_set_gpio_direction(bank, offset, 0);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}

@@ -903,9 +903,9 @@ static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
dev_err(bank->dev, "Could not get gpio dbck\n");
}

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
_set_gpio_debounce(bank, offset, debounce);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);

return 0;
}
@@ -916,9 +916,9 @@ static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
unsigned long flags;

bank = container_of(chip, struct gpio_bank, chip);
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
bank->set_dataout(bank, offset, value);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
@@ -1109,7 +1109,7 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
else
bank->set_dataout = _set_gpio_dataout_mask;

- spin_lock_init(&bank->lock);
+ raw_spin_lock_init(&bank->lock);

/* Static mapping, never released */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1169,11 +1169,11 @@ static int omap_gpio_suspend(struct device *dev)

wakeup_enable = bank->base + bank->regs->wkup_en;

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
bank->saved_wakeup = __raw_readl(wakeup_enable);
_gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
_gpio_rmw(base, bank->regs->wkup_en, bank->suspend_wakeup, 1);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);

return 0;
}
@@ -1191,10 +1191,10 @@ static int omap_gpio_resume(struct device *dev)
if (!bank->regs->wkup_en || !bank->saved_wakeup)
return 0;

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
_gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
_gpio_rmw(base, bank->regs->wkup_en, bank->saved_wakeup, 1);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);

return 0;
}
@@ -1211,7 +1211,7 @@ static int omap_gpio_runtime_suspend(struct device *dev)
unsigned long flags;
u32 wake_low, wake_hi;

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);

/*
* Only edges can generate a wakeup event to the PRCM.
@@ -1263,7 +1263,7 @@ update_gpio_context_count:
bank->get_context_loss_count(bank->dev);

_gpio_dbck_disable(bank);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);

return 0;
}
@@ -1276,7 +1276,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
u32 l = 0, gen, gen0, gen1;
unsigned long flags;

- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
_gpio_dbck_enable(bank);

/*
@@ -1291,7 +1291,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
bank->base + bank->regs->risingdetect);

if (!bank->workaround_enabled) {
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}

@@ -1302,7 +1302,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
!context_lost_cnt_after) {
omap_gpio_restore_context(bank);
} else {
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
}
@@ -1361,7 +1361,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
}

bank->workaround_enabled = false;
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);

return 0;
}
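
The mechanical spin_lock to raw_spin_lock conversion above is driven by one rule: on PREEMPT_RT a spinlock_t becomes a sleeping rtmutex, and this bank lock is taken from irq_chip callbacks (mask/unmask/ack) that may run in hard interrupt context, where sleeping is forbidden. A minimal sketch of the pattern, not OMAP-specific code:

struct demo_bank {
	raw_spinlock_t lock;	/* stays a real spinning lock on -RT */
	u32 irq_mask;
};

/* Runs from an irq_chip callback, possibly in hard irq context. */
static void demo_mask_irq(struct demo_bank *bank, unsigned int bit)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->irq_mask &= ~(1U << bit);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}
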
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index dc33ba57e33f..07dfb998de5a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -619,11 +619,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* code gets preempted or delayed for some reason.
*/
for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
- /* Disable preemption to make it very likely to
- * succeed in the first iteration even on PREEMPT_RT kernel.
- */
- preempt_disable();
-
/* Get system timestamp before query. */
do_gettimeofday(&stime);

@@ -633,8 +628,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
/* Get system timestamp after query. */
do_gettimeofday(&raw_time);

- preempt_enable();
-
/* Return as no-op if scanout query unsupported or failed. */
if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
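
With preempt_disable() gone, the loop relies purely on bounded retries: bracket the query with two timestamps and accept the reading only when the bracket is narrow enough. A standalone C sketch of that pattern; every name here is an illustrative stand-in, not DRM API:

#include <stdint.h>
#include <time.h>

#define DEMO_MAX_RETRIES 3
#define DEMO_MAX_ERROR_NS 20000ULL	/* accept a 20 us bracket; made-up bound */

static uint64_t demo_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static int demo_query(void)	/* stand-in for the scanout position read */
{
	return 42;
}

static int demo_timestamped_query(uint64_t *when)
{
	for (int i = 0; i < DEMO_MAX_RETRIES; i++) {
		uint64_t before = demo_clock();
		int val = demo_query();
		uint64_t after = demo_clock();

		if (after - before < DEMO_MAX_ERROR_NS) {
			*when = before + (after - before) / 2;
			return val;
		}
		/* preempted mid-query: bracket too wide, try again */
	}
	return -1;
}
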
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e97ed611d8c0..4fd9b3f8be99 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1297,7 +1297,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}

+#ifndef CONFIG_PREEMPT_RT_BASE
trace_i915_gem_ring_dispatch(ring, seqno);
+#endif

exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index 2c8016ad0e26..6fd6037741b1 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)

isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);

- local_irq_save(flags);
+ local_irq_save_nort(flags);

if (m5229_revision < 0xC2) {
/*
@@ -325,7 +325,7 @@ out:
}
pci_dev_put(north);
pci_dev_put(isa_dev);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
return 0;
}

diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 58c51cddc100..d2a40595b334 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -1241,7 +1241,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,

dma_old = inb(base + 2);

- local_irq_save(flags);
+ local_irq_save_nort(flags);

dma_new = dma_old;
pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
@@ -1252,7 +1252,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
if (dma_new != dma_old)
outb(dma_new, base + 2);

- local_irq_restore(flags);
+ local_irq_restore_nort(flags);

printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
hwif->name, base, base + 7);
diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
index 19763977568c..4169433faab5 100644
--- a/drivers/ide/ide-io-std.c
+++ b/drivers/ide/ide-io-std.c
@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
unsigned long uninitialized_var(flags);

if ((io_32bit & 2) && !mmio) {
- local_irq_save(flags);
+ local_irq_save_nort(flags);
ata_vlb_sync(io_ports->nsect_addr);
}

@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
insl(data_addr, buf, words);

if ((io_32bit & 2) && !mmio)
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);

if (((len + 1) & 3) < 2)
return;
@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
unsigned long uninitialized_var(flags);

if ((io_32bit & 2) && !mmio) {
- local_irq_save(flags);
+ local_irq_save_nort(flags);
ata_vlb_sync(io_ports->nsect_addr);
}

@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
outsl(data_addr, buf, words);

if ((io_32bit & 2) && !mmio)
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);

if (((len + 1) & 3) < 2)
return;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 177db6d5b2f5..079ae6bebf18 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long data)
/* disable_irq_nosync ?? */
disable_irq(hwif->irq);
/* local CPU only, as if we were handling an interrupt */
- local_irq_disable();
+ local_irq_disable_nort();
if (hwif->polling) {
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 376f2dc410c5..f014dd1b73dc 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
if ((stat & ATA_BUSY) == 0)
break;

- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
*rstat = stat;
return -EBUSY;
}
}
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}
/*
* Allow status to settle, then read it again.
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 068cef0a987a..38e69e1793d8 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
int bswap = 1;

/* local CPU only; some systems need this */
- local_irq_save(flags);
+ local_irq_save_nort(flags);
/* read 512 bytes of id info */
hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);

drive->dev_flags |= IDE_DFLAG_ID_READ;
#ifdef DEBUG
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 729428edeba2..3a9a1fc297b8 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -251,7 +251,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,

page_is_high = PageHighMem(page);
if (page_is_high)
- local_irq_save(flags);
+ local_irq_save_nort(flags);

buf = kmap_atomic(page) + offset;

@@ -272,7 +272,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
kunmap_atomic(buf);

if (page_is_high)
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);

len -= nr_bytes;
}
@@ -415,7 +415,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
}

if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
- local_irq_disable();
+ local_irq_disable_nort();

ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);

diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index fa080ebd568f..ffeebc7e9f1c 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -75,7 +75,7 @@ static unsigned long past_skip;

static struct pci_dev *fbd_dev;

-static spinlock_t i7300_idle_lock;
+static raw_spinlock_t i7300_idle_lock;
static int i7300_idle_active;

static u8 i7300_idle_thrtctl_saved;
@@ -457,7 +457,7 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
idle_begin_time = ktime_get();
}

- spin_lock_irqsave(&i7300_idle_lock, flags);
+ raw_spin_lock_irqsave(&i7300_idle_lock, flags);
if (val == IDLE_START) {

cpumask_set_cpu(smp_processor_id(), idle_cpumask);
@@ -506,7 +506,7 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
}
}
end:
- spin_unlock_irqrestore(&i7300_idle_lock, flags);
+ raw_spin_unlock_irqrestore(&i7300_idle_lock, flags);
return 0;
}

@@ -548,7 +548,7 @@ struct debugfs_file_info {

static int __init i7300_idle_init(void)
{
- spin_lock_init(&i7300_idle_lock);
+ raw_spin_lock_init(&i7300_idle_lock);
total_us = 0;

if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 213965d5bc8b..d75a0d779391 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -799,7 +799,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)

ipoib_mcast_stop_thread(dev, 0);

- local_irq_save(flags);
+ local_irq_save_nort(flags);
netif_addr_lock(dev);
spin_lock(&priv->lock);

@@ -881,7 +881,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)

spin_unlock(&priv->lock);
netif_addr_unlock(dev);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);

/* We have to cancel outside of the spinlock */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index da739d9d1905..18fdafe051b9 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -87,12 +87,12 @@ static int gameport_measure_speed(struct gameport *gameport)
tx = 1 << 30;

for(i = 0; i < 50; i++) {
- local_irq_save(flags);
+ local_irq_save_nort(flags);
GET_TIME(t1);
for (t = 0; t < 50; t++) gameport_read(gameport);
GET_TIME(t2);
GET_TIME(t3);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
udelay(i * 10);
if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
}
@@ -111,11 +111,11 @@ static int gameport_measure_speed(struct gameport *gameport)
tx = 1 << 30;

for(i = 0; i < 50; i++) {
- local_irq_save(flags);
+ local_irq_save_nort(flags);
rdtscl(t1);
for (t = 0; t < 50; t++) gameport_read(gameport);
rdtscl(t2);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
udelay(i * 10);
if (t2 - t1 < tx) tx = t2 - t1;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3bfbccc3f0a7..a335e68aa1cd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1676,14 +1676,14 @@ static void dm_request_fn(struct request_queue *q)
if (map_request(ti, clone, md))
goto requeued;

- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
spin_lock(q->queue_lock);
}

goto out;

requeued:
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
spin_lock(q->queue_lock);

delay_and_out:
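
BUG_ON_NONRT() is provided elsewhere in this series; a sketch of its usual shape (the exact header is assumed, not shown in this excerpt). The assertion only holds on non-RT kernels, where the queue lock really does leave interrupts disabled; on PREEMPT_RT_BASE interrupts legitimately stay enabled, so the check must compile away:

#ifdef CONFIG_PREEMPT_RT_BASE
# define BUG_ON_NONRT(c)	do { } while (0)	/* irqs-on is normal on -RT */
#else
# define BUG_ON_NONRT(c)	BUG_ON(c)
#endif
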
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c276ad09ace9..91af61a89363 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1321,8 +1321,9 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
struct raid5_percpu *percpu;
unsigned long cpu;

- cpu = get_cpu();
+ cpu = get_cpu_light();
percpu = per_cpu_ptr(conf->percpu, cpu);
+ spin_lock(&percpu->lock);
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
@@ -1374,7 +1375,8 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
- put_cpu();
+ spin_unlock(&percpu->lock);
+ put_cpu_light();
}

#ifdef CONFIG_MULTICORE_RAID456
@@ -4692,8 +4694,10 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
{
if (conf->level == 6 && !percpu->spare_page)
percpu->spare_page = alloc_page(GFP_KERNEL);
- if (!percpu->scribble)
+ if (!percpu->scribble) {
percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+ spin_lock_init(&percpu->lock);
+ }

if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
free_scratch_buffer(conf, percpu);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index f1ed18e54db4..cf44cfcf089e 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -418,6 +418,7 @@ struct r5conf {
int recovery_disabled;
/* per cpu variables */
struct raid5_percpu {
+ spinlock_t lock; /* Protection for -RT */
struct page *spare_page; /* Used when checking P/Q in raid6 */
void *scribble; /* space for constructing buffer
* lists and performing address
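
get_cpu_light()/put_cpu_light() also come from elsewhere in this series; a sketch of the idea, with the RT-side definition assumed rather than quoted. Unlike get_cpu(), which disables preemption for the whole (possibly long) parity computation, the light variant only pins the task to its current CPU, and the new per-cpu lock serializes access to the scratch buffers:

#ifdef CONFIG_PREEMPT_RT_FULL
/* Pin to this CPU but stay preemptible. */
# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()	migrate_enable()
#else
# define get_cpu_light()	get_cpu()
# define put_cpu_light()	put_cpu()
#endif
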
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c7795096d43b..ecda1c4c75c7 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -72,6 +72,7 @@ config AB8500_PWM
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
depends on (AVR32 || ARCH_AT91)
+ default y if PREEMPT_RT_FULL
help
Select this if you want a library to allocate the Timer/Counter
blocks found on many Atmel processors. This facilitates using
@@ -87,8 +88,7 @@ config ATMEL_TCB_CLKSRC
are combined to make a single 32-bit timer.

When GENERIC_CLOCKEVENTS is defined, the third timer channel
- may be used as a clock event device supporting oneshot mode
- (delays of up to two seconds) based on the 32 KiHz clock.
+ may be used as a clock event device supporting oneshot mode.

config ATMEL_TCB_CLKSRC_BLOCK
int
@@ -102,6 +102,14 @@ config ATMEL_TCB_CLKSRC_BLOCK
TC can be used for other purposes, such as PWM generation and
interval timing.

+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ bool "TC Block use 32 KiHz clock"
+ depends on ATMEL_TCB_CLKSRC
+ default y if !PREEMPT_RT_FULL
+ help
+ Select this to use 32 KiHz base clock rate as TC block clock
+ source for clock events.
+
config IBM_ASM
tristate "Device driver for IBM RSA service processor"
depends on X86 && PCI && INPUT && EXPERIMENTAL
@@ -123,6 +131,35 @@ config IBM_ASM
for information on the specific driver level and support statement
for your IBM server.

+config HWLAT_DETECTOR
+ tristate "Testing module to detect hardware-induced latencies"
+ depends on DEBUG_FS
+ depends on RING_BUFFER
+ default m
+ ---help---
+ A simple hardware latency detector. Use this module to detect
+ large latencies introduced by the behavior of the underlying
+ system firmware external to Linux. We do this using periodic
+ use of stop_machine to grab all available CPUs and measure
+ for unexplainable gaps in the CPU timestamp counter(s). By
+ default, the module is not enabled until the "enable" file
+ within the "hwlat_detector" debugfs directory is toggled.
+
+ This module is often used to detect SMI (System Management
+ Interrupts) on x86 systems, though it is not x86 specific. To
+ this end, we default to using a sample window of 1 second,
+ during which we will sample for 0.5 seconds. If an SMI or
+ similar event occurs during that time, it is recorded
+ into an 8K samples global ring buffer until retrieved.
+
+ WARNING: This software should never be enabled (it can be built
+ but should not be turned on after it is loaded) in a production
+ environment where high latencies are a concern since the
+ sampling mechanism actually introduces latencies for
+ regular tasks while the CPU(s) are being held.
+
+ If unsure, say N
+
config PHANTOM
tristate "Sensable PHANToM (PCI)"
depends on PCI
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 3e1d80106f04..c7c058ab3be4 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -50,6 +50,7 @@
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
+obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o
obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o
obj-$(CONFIG_SENSORS_AK8975) += akm8975.o
obj-$(CONFIG_SUN4I_VIBRATOR) += sun4i-vibrator.o
diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c
new file mode 100644
index 000000000000..d2676b867e41
--- /dev/null
+++ b/drivers/misc/hwlat_detector.c
@@ -0,0 +1,1239 @@
+/*
+ * hwlat_detector.c - A simple Hardware Latency detector.
+ *
+ * Use this module to detect large system latencies induced by the behavior of
+ * certain underlying system hardware or firmware, independent of Linux itself.
+ * The code was developed originally to detect the presence of SMIs on Intel
+ * and AMD systems, although there is no dependency upon x86 herein.
+ *
+ * The classical example usage of this module is in detecting the presence of
+ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
+ * somewhat special form of hardware interrupt spawned from earlier CPU debug
+ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
+ * LPC (or other device) to generate a special interrupt under certain
+ * circumstances, for example, upon expiration of a special SMI timer device,
+ * due to certain external thermal readings, on certain I/O address accesses,
+ * and other situations. An SMI hits a special CPU pin, triggers a special
+ * SMI mode (complete with special memory map), and the OS is unaware.
+ *
+ * Although certain hardware-inducing latencies are necessary (for example,
+ * a modern system often requires an SMI handler for correct thermal control
+ * and remote management) they can wreak havoc upon any OS-level performance
+ * guarantees toward low-latency, especially when the OS is not even made
+ * aware of the presence of these interrupts. For this reason, we need a
+ * somewhat brute force mechanism to detect these interrupts. In this case,
+ * we do it by hogging all of the CPU(s) for configurable timer intervals,
+ * sampling the built-in CPU timer, looking for discontiguous readings.
+ *
+ * WARNING: This implementation necessarily introduces latencies. Therefore,
+ * you should NEVER use this module in a production environment
+ * requiring any kind of low-latency performance guarantee(s).
+ *
+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
+ *
+ * Includes useful feedback from Clark Williams <clark@redhat.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ring_buffer.h>
+#include <linux/time.h>
+#include <linux/hrtimer.h>
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/trace_clock.h>
+
+#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
+#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
+#define U64STR_SIZE 22 /* 20 digits max */
+
+#define VERSION "1.0.0"
+#define BANNER "hwlat_detector: "
+#define DRVNAME "hwlat_detector"
+#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */
+#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
+#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
+
+/* Module metadata */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jon Masters <jcm@redhat.com>");
+MODULE_DESCRIPTION("A simple hardware latency detector");
+MODULE_VERSION(VERSION);
+
+/* Module parameters */
+
+static int debug;
+static int enabled;
+static int threshold;
+
+module_param(debug, int, 0); /* enable debug */
+module_param(enabled, int, 0); /* enable detector */
+module_param(threshold, int, 0); /* latency threshold */
+
+/* Buffering and sampling */
+
+static struct ring_buffer *ring_buffer; /* sample buffer */
+static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */
+static unsigned long buf_size = BUF_SIZE_DEFAULT;
+static struct task_struct *kthread; /* sampling thread */
+
+/* DebugFS filesystem entries */
+
+static struct dentry *debug_dir; /* debugfs directory */
+static struct dentry *debug_max; /* maximum TSC delta */
+static struct dentry *debug_count; /* total detect count */
+static struct dentry *debug_sample_width; /* sample width us */
+static struct dentry *debug_sample_window; /* sample window us */
+static struct dentry *debug_sample; /* raw samples us */
+static struct dentry *debug_threshold; /* threshold us */
+static struct dentry *debug_enable; /* enable/disable */
+
+/* Individual samples and global state */
+
+struct sample; /* latency sample */
+struct data; /* Global state */
+
+/* Sampling functions */
+static int __buffer_add_sample(struct sample *sample);
+static struct sample *buffer_get_sample(struct sample *sample);
+
+/* Threading and state */
+static int kthread_fn(void *unused);
+static int start_kthread(void);
+static int stop_kthread(void);
+static void __reset_stats(void);
+static int init_stats(void);
+
+/* Debugfs interface */
+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos, const u64 *entry);
+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos, u64 *entry);
+static int debug_sample_fopen(struct inode *inode, struct file *filp);
+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos);
+static int debug_sample_release(struct inode *inode, struct file *filp);
+static int debug_enable_fopen(struct inode *inode, struct file *filp);
+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos);
+static ssize_t debug_enable_fwrite(struct file *file,
+ const char __user *user_buffer,
+ size_t user_size, loff_t *offset);
+
+/* Initialization functions */
+static int init_debugfs(void);
+static void free_debugfs(void);
+static int detector_init(void);
+static void detector_exit(void);
+
+/* Individual latency samples are stored here when detected and packed into
+ * the ring_buffer circular buffer, where they are overwritten when
+ * more than buf_size/sizeof(sample) samples are received. */
+struct sample {
+ u64 seqnum; /* unique sequence */
+ u64 duration; /* ktime delta */
+ u64 outer_duration; /* ktime delta (outer loop) */
+ struct timespec timestamp; /* wall time */
+ unsigned long lost;
+};
+
+/* keep the global state somewhere. */
+static struct data {
+
+ struct mutex lock; /* protect changes */
+
+ u64 count; /* total since reset */
+ u64 max_sample; /* max hardware latency */
+ u64 threshold; /* sample threshold level */
+
+ u64 sample_window; /* total sampling window (on+off) */
+ u64 sample_width; /* active sampling portion of window */
+
+ atomic_t sample_open; /* whether the sample file is open */
+
+ wait_queue_head_t wq; /* waitqueue for new sample values */
+
+} data;
+
+/**
+ * __buffer_add_sample - add a new latency sample recording to the ring buffer
+ * @sample: The new latency sample value
+ *
+ * This receives a new latency sample and records it in a global ring buffer.
+ * No additional locking is used in this case.
+ */
+static int __buffer_add_sample(struct sample *sample)
+{
+ return ring_buffer_write(ring_buffer,
+ sizeof(struct sample), sample);
+}
+
+/**
+ * buffer_get_sample - remove a hardware latency sample from the ring buffer
+ * @sample: Pre-allocated storage for the sample
+ *
+ * This retrieves a hardware latency sample from the global circular buffer
+ */
+static struct sample *buffer_get_sample(struct sample *sample)
+{
+ struct ring_buffer_event *e = NULL;
+ struct sample *s = NULL;
+ unsigned int cpu = 0;
+
+ if (!sample)
+ return NULL;
+
+ mutex_lock(&ring_buffer_mutex);
+ for_each_online_cpu(cpu) {
+ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost);
+ if (e)
+ break;
+ }
+
+ if (e) {
+ s = ring_buffer_event_data(e);
+ memcpy(sample, s, sizeof(struct sample));
+ } else
+ sample = NULL;
+ mutex_unlock(&ring_buffer_mutex);
+
+ return sample;
+}
+
+#ifndef CONFIG_TRACING
+#define time_type ktime_t
+#define time_get() ktime_get()
+#define time_to_us(x) ktime_to_us(x)
+#define time_sub(a, b) ktime_sub(a, b)
+#define init_time(a, b) (a).tv64 = b
+#define time_u64(a) (a).tv64
+#else
+#define time_type u64
+#define time_get() trace_clock_local()
+#define time_to_us(x) div_u64(x, 1000)
+#define time_sub(a, b) ((a) - (b))
+#define init_time(a, b) a = b
+#define time_u64(a) a
+#endif
+/**
+ * get_sample - sample the CPU TSC and look for likely hardware latencies
+ *
+ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
+ * hardware-induced latency. Called with interrupts disabled and with data.lock held.
+ */
+static int get_sample(void)
+{
+ time_type start, t1, t2, last_t2;
+ s64 diff, total = 0;
+ u64 sample = 0;
+ u64 outer_sample = 0;
+ int ret = -1;
+
+ init_time(last_t2, 0);
+ start = time_get(); /* start timestamp */
+
+ do {
+
+ t1 = time_get(); /* we'll look for a discontinuity */
+ t2 = time_get();
+
+ if (time_u64(last_t2)) {
+ /* Check the delta from the outer loop (t2 to next t1) */
+ diff = time_to_us(time_sub(t1, last_t2));
+ /* This shouldn't happen */
+ if (diff < 0) {
+ printk(KERN_ERR BANNER "time running backwards\n");
+ goto out;
+ }
+ if (diff > outer_sample)
+ outer_sample = diff;
+ }
+ last_t2 = t2;
+
+ total = time_to_us(time_sub(t2, start)); /* sample width */
+
+ /* This checks the inner loop (t1 to t2) */
+ diff = time_to_us(time_sub(t2, t1)); /* current diff */
+
+ /* This shouldn't happen */
+ if (diff < 0) {
+ printk(KERN_ERR BANNER "time running backwards\n");
+ goto out;
+ }
+
+ if (diff > sample)
+ sample = diff; /* only want highest value */
+
+ } while (total <= data.sample_width);
+
+ ret = 0;
+
+ /* If we exceed the threshold value, we have found a hardware latency */
+ if (sample > data.threshold || outer_sample > data.threshold) {
+ struct sample s;
+
+ ret = 1;
+
+ data.count++;
+ s.seqnum = data.count;
+ s.duration = sample;
+ s.outer_duration = outer_sample;
+ s.timestamp = CURRENT_TIME;
+ __buffer_add_sample(&s);
+
+ /* Keep a running maximum ever recorded hardware latency */
+ if (sample > data.max_sample)
+ data.max_sample = sample;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
+ * @unused: A required part of the kthread API.
+ *
+ * Used to periodically sample the CPU TSC via a call to get_sample. We
+ * disable interrupts, which does (intentionally) introduce latency since we
+ * need to ensure nothing else might be running (and thus pre-empting).
+ * Obviously this should never be used in production environments.
+ *
+ * Currently this runs on whichever CPU it was scheduled on, but most
+ * real-world hardware latency situations occur across several CPUs,
+ * and we might later generalize this if we find there are any actual
+ * systems with alternate SMI delivery or other hardware latencies.
+ */
+static int kthread_fn(void *unused)
+{
+ int ret;
+ u64 interval;
+
+ while (!kthread_should_stop()) {
+
+ mutex_lock(&data.lock);
+
+ local_irq_disable();
+ ret = get_sample();
+ local_irq_enable();
+
+ if (ret > 0)
+ wake_up(&data.wq); /* wake up reader(s) */
+
+ interval = data.sample_window - data.sample_width;
+ do_div(interval, USEC_PER_MSEC); /* modifies interval value */
+
+ mutex_unlock(&data.lock);
+
+ if (msleep_interruptible(interval))
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * start_kthread - Kick off the hardware latency sampling/detector kthread
+ *
+ * This starts a kernel thread that will sit and sample the CPU timestamp
+ * counter (TSC or similar) and look for potential hardware latencies.
+ */
+static int start_kthread(void)
+{
+ kthread = kthread_run(kthread_fn, NULL,
+ DRVNAME);
+ if (IS_ERR(kthread)) {
+ printk(KERN_ERR BANNER "could not start sampling thread\n");
+ enabled = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
+ *
|
|
+ * This kicks the running hardware latency sampling/detector kernel thread and
|
|
+ * tells it to stop sampling now. Use this on unload and at system shutdown.
|
|
+ */
|
|
+static int stop_kthread(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = kthread_stop(kthread);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * __reset_stats - Reset statistics for the hardware latency detector
|
|
+ *
|
|
+ * We use data to store various statistics and global state. We call this
|
|
+ * function in order to reset those when "enable" is toggled on or off, and
|
|
+ * also at initialization. Should be called with data.lock held.
|
|
+ */
|
|
+static void __reset_stats(void)
|
|
+{
|
|
+ data.count = 0;
|
|
+ data.max_sample = 0;
|
|
+ ring_buffer_reset(ring_buffer); /* flush out old sample entries */
|
|
+}
|
|
+
|
|
+/**
|
|
+ * init_stats - Setup global state statistics for the hardware latency detector
|
|
+ *
|
|
+ * We use data to store various statistics and global state. We also use
|
|
+ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
|
|
+ * induced system latencies. This function initializes these structures and
|
|
+ * allocates the global ring buffer also.
|
|
+ */
|
|
+static int init_stats(void)
|
|
+{
|
|
+ int ret = -ENOMEM;
|
|
+
|
|
+ mutex_init(&data.lock);
|
|
+ init_waitqueue_head(&data.wq);
|
|
+ atomic_set(&data.sample_open, 0);
|
|
+
|
|
+ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS);
|
|
+
|
|
+ if (WARN(!ring_buffer, KERN_ERR BANNER
|
|
+ "failed to allocate ring buffer!\n"))
|
|
+ goto out;
|
|
+
|
|
+ __reset_stats();
|
|
+ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */
|
|
+ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
|
|
+ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */
|
|
+
|
|
+ ret = 0;
|
|
+
|
|
+out:
|
|
+ return ret;
|
|
+
|
|
+}
|
|
+
|
|
+/*
|
|
+ * simple_data_read - Wrapper read function for global state debugfs entries
|
|
+ * @filp: The active open file structure for the debugfs "file"
|
|
+ * @ubuf: The userspace provided buffer to read value into
|
|
+ * @cnt: The maximum number of bytes to read
|
|
+ * @ppos: The current "file" position
|
|
+ * @entry: The entry to read from
|
|
+ *
|
|
+ * This function provides a generic read implementation for the global state
|
|
+ * "data" structure debugfs filesystem entries. It would be nice to use
|
|
+ * simple_attr_read directly, but we need to make sure that the data.lock
|
|
+ * is held during the actual read.
|
|
+ */
|
|
+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
|
|
+ size_t cnt, loff_t *ppos, const u64 *entry)
|
|
+{
|
|
+ char buf[U64STR_SIZE];
|
|
+ u64 val = 0;
|
|
+ int len = 0;
|
|
+
|
|
+ memset(buf, 0, sizeof(buf));
|
|
+
|
|
+ if (!entry)
|
|
+ return -EFAULT;
|
|
+
|
|
+ mutex_lock(&data.lock);
|
|
+ val = *entry;
|
|
+ mutex_unlock(&data.lock);
|
|
+
|
|
+ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val);
|
|
+
|
|
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
|
|
+
|
|
+}
|
|
+
|
|
+/*
|
|
+ * simple_data_write - Wrapper write function for global state debugfs entries
|
|
+ * @filp: The active open file structure for the debugfs "file"
|
|
+ * @ubuf: The userspace provided buffer to write value from
|
|
+ * @cnt: The maximum number of bytes to write
|
|
+ * @ppos: The current "file" position
|
|
+ * @entry: The entry to write to
|
|
+ *
|
|
+ * This function provides a generic write implementation for the global state
|
|
+ * "data" structure debugfs filesystem entries. It would be nice to use
|
|
+ * simple_attr_write directly, but we need to make sure that the data.lock
|
|
+ * is held during the actual write.
|
|
+ */
|
|
+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
|
|
+ size_t cnt, loff_t *ppos, u64 *entry)
|
|
+{
|
|
+ char buf[U64STR_SIZE];
|
|
+ int csize = min(cnt, sizeof(buf));
|
|
+ u64 val = 0;
|
|
+ int err = 0;
|
|
+
|
|
+ memset(buf, '\0', sizeof(buf));
|
|
+ if (copy_from_user(buf, ubuf, csize))
|
|
+ return -EFAULT;
|
|
+
|
|
+ buf[U64STR_SIZE-1] = '\0'; /* just in case */
|
|
+ err = strict_strtoull(buf, 10, &val);
|
|
+ if (err)
|
|
+ return -EINVAL;
|
|
+
|
|
+ mutex_lock(&data.lock);
|
|
+ *entry = val;
|
|
+ mutex_unlock(&data.lock);
|
|
+
|
|
+ return csize;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * debug_count_fopen - Open function for "count" debugfs entry
|
|
+ * @inode: The in-kernel inode representation of the debugfs "file"
|
|
+ * @filp: The active open file structure for the debugfs "file"
|
|
+ *
|
|
+ * This function provides an open implementation for the "count" debugfs
|
|
+ * interface to the hardware latency detector.
|
|
+ */
|
|
+static int debug_count_fopen(struct inode *inode, struct file *filp)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * debug_count_fread - Read function for "count" debugfs entry
|
|
+ * @filp: The active open file structure for the debugfs "file"
|
|
+ * @ubuf: The userspace provided buffer to read value into
|
|
+ * @cnt: The maximum number of bytes to read
|
|
+ * @ppos: The current "file" position
|
|
+ *
|
|
+ * This function provides a read implementation for the "count" debugfs
|
|
+ * interface to the hardware latency detector. Can be used to read the
|
|
+ * number of latency readings exceeding the configured threshold since
|
|
+ * the detector was last reset (e.g. by writing a zero into "count").
|
|
+ */
|
|
+static ssize_t debug_count_fread(struct file *filp, char __user *ubuf,
|
|
+ size_t cnt, loff_t *ppos)
|
|
+{
|
|
+ return simple_data_read(filp, ubuf, cnt, ppos, &data.count);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * debug_count_fwrite - Write function for "count" debugfs entry
|
|
+ * @filp: The active open file structure for the debugfs "file"
|
|
+ * @ubuf: The user buffer that contains the value to write
|
|
+ * @cnt: The maximum number of bytes to write to "file"
|
|
+ * @ppos: The current position in the debugfs "file"
|
|
+ *
|
|
+ * This function provides a write implementation for the "count" debugfs
|
|
+ * interface to the hardware latency detector. Can be used to write a
|
|
+ * desired value, especially to zero the total count.
|
|
+ */
|
|
+static ssize_t debug_count_fwrite(struct file *filp,
|
|
+ const char __user *ubuf,
|
|
+ size_t cnt,
|
|
+ loff_t *ppos)
|
|
+{
|
|
+ return simple_data_write(filp, ubuf, cnt, ppos, &data.count);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * debug_enable_fopen - Dummy open function for "enable" debugfs interface
|
|
+ * @inode: The in-kernel inode representation of the debugfs "file"
|
|
+ * @filp: The active open file structure for the debugfs "file"
|
|
+ *
|
|
+ * This function provides an open implementation for the "enable" debugfs
|
|
+ * interface to the hardware latency detector.
|
|
+ */
|
|
+static int debug_enable_fopen(struct inode *inode, struct file *filp)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * debug_enable_fread - Read function for "enable" debugfs interface
|
|
+ * @filp: The active open file structure for the debugfs "file"
|
|
+ * @ubuf: The userspace provided buffer to read value into
|
|
+ * @cnt: The maximum number of bytes to read
|
|
+ * @ppos: The current "file" position
|
|
+ *
|
|
+ * This function provides a read implementation for the "enable" debugfs
|
|
+ * interface to the hardware latency detector. Can be used to determine
|
|
+ * whether the detector is currently enabled ("0\n" or "1\n" returned).
|
|
+ */
|
|
+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
|
|
+ size_t cnt, loff_t *ppos)
|
|
+{
|
|
+ char buf[4];
|
|
+
|
|
+ if ((cnt < sizeof(buf)) || (*ppos))
|
|
+ return 0;
|
|
+
|
|
+ buf[0] = enabled ? '1' : '0';
|
|
+ buf[1] = '\n';
|
|
+ buf[2] = '\0';
|
|
+ if (copy_to_user(ubuf, buf, strlen(buf)))
|
|
+ return -EFAULT;
|
|
+ return *ppos = strlen(buf);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * debug_enable_fwrite - Write function for "enable" debugfs interface
|
|
+ * @filp: The active open file structure for the debugfs "file"
|
|
+ * @ubuf: The user buffer that contains the value to write
|
|
+ * @cnt: The maximum number of bytes to write to "file"
|
|
+ * @ppos: The current position in the debugfs "file"
|
|
+ *
|
|
+ * This function provides a write implementation for the "enable" debugfs
|
|
+ * interface to the hardware latency detector. Can be used to enable or
|
|
+ * disable the detector, which will have the side-effect of possibly
|
|
+ * also resetting the global stats and kicking off the measuring
|
|
+ * kthread (on an enable) or the converse (upon a disable).
|
|
+ */
|
|
+static ssize_t debug_enable_fwrite(struct file *filp,
|
|
+ const char __user *ubuf,
|
|
+ size_t cnt,
|
|
+ loff_t *ppos)
|
|
+{
|
|
+ char buf[4];
|
|
+ int csize = min(cnt, sizeof(buf));
|
|
+ long val = 0;
|
|
+ int err = 0;
|
|
+
|
|
+ memset(buf, '\0', sizeof(buf));
|
|
+ if (copy_from_user(buf, ubuf, csize))
|
|
+ return -EFAULT;
|
|
+
|
|
+ buf[sizeof(buf)-1] = '\0'; /* just in case */
|
|
+ err = strict_strtoul(buf, 10, &val);
|
|
+ if (0 != err)
|
|
+ return -EINVAL;
|
|
+
|
|
+	if (val) {
+		if (enabled)
+			goto unlock;
+		enabled = 1;
+		__reset_stats();
+		if (start_kthread())
+			return -EFAULT;
+	} else {
+		if (!enabled)
+			goto unlock;
+		enabled = 0;
+		err = stop_kthread();
+		if (err) {
+			printk(KERN_ERR BANNER "cannot stop kthread\n");
+			return -EFAULT;
+		}
+		wake_up(&data.wq);	/* reader(s) should return */
+	}
+unlock:
+	return csize;
+}
+
+/**
+ * debug_max_fopen - Open function for "max" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "max" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_max_fopen(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/**
+ * debug_max_fread - Read function for "max" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "max" debugfs
+ * interface to the hardware latency detector. Can be used to determine
+ * the maximum latency value observed since it was last reset.
+ */
+static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
+			       size_t cnt, loff_t *ppos)
+{
+	return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
+}
+
+/**
+ * debug_max_fwrite - Write function for "max" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "max" debugfs
+ * interface to the hardware latency detector. Can be used to reset the
+ * maximum or set it to some other desired value - if, then, subsequent
+ * measurements exceed this value, the maximum will be updated.
+ */
+static ssize_t debug_max_fwrite(struct file *filp,
+				const char __user *ubuf,
+				size_t cnt,
+				loff_t *ppos)
+{
+	return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
+}
+
+
+/**
+ * debug_sample_fopen - An open function for "sample" debugfs interface
+ * @inode: The in-kernel inode representation of this debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function handles opening the "sample" file within the hardware
+ * latency detector debugfs directory interface. This file is used to read
+ * raw samples from the global ring_buffer and allows the user to see a
+ * running latency history. Can be opened blocking or non-blocking,
+ * affecting whether it behaves as a buffer read pipe, or does not.
+ * Implements simple locking to prevent multiple simultaneous use.
+ */
+static int debug_sample_fopen(struct inode *inode, struct file *filp)
+{
+	if (!atomic_add_unless(&data.sample_open, 1, 1))
+		return -EBUSY;
+	else
+		return 0;
+}
+
+/**
+ * debug_sample_fread - A read function for "sample" debugfs interface
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that will contain the samples read
+ * @cnt: The maximum bytes to read from the debugfs "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function handles reading from the "sample" file within the hardware
+ * latency detector debugfs directory interface. This file is used to read
+ * raw samples from the global ring_buffer and allows the user to see a
+ * running latency history. By default this will block pending a new
+ * value written into the sample buffer, unless there are already a
+ * number of value(s) waiting in the buffer, or the sample file was
+ * previously opened in a non-blocking mode of operation.
+ */
+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
+				  size_t cnt, loff_t *ppos)
+{
+	int len = 0;
+	char buf[64];
+	struct sample *sample = NULL;
+
+	if (!enabled)
+		return 0;
+
+	sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
+	if (!sample)
+		return -ENOMEM;
+
+	while (!buffer_get_sample(sample)) {
+
+		DEFINE_WAIT(wait);
+
+		if (filp->f_flags & O_NONBLOCK) {
+			len = -EAGAIN;
+			goto out;
+		}
+
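+		/* block (interruptibly) until the sampling kthread queues a new sample */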
+		prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
+		schedule();
+		finish_wait(&data.wq, &wait);
+
+		if (signal_pending(current)) {
+			len = -EINTR;
+			goto out;
+		}
+
+		if (!enabled) {	/* enable was toggled */
+			len = 0;
+			goto out;
+		}
+	}
+
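+	/* one line per sample: "<tv_sec>.<tv_nsec>\t<duration>\t<outer duration>" */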
+	len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
+		       sample->timestamp.tv_sec,
+		       sample->timestamp.tv_nsec,
+		       sample->duration,
+		       sample->outer_duration);
+
+
+	/* handling partial reads is more trouble than it's worth */
+	if (len > cnt)
+		goto out;
+
+	if (copy_to_user(ubuf, buf, len))
+		len = -EFAULT;
+
+out:
+	kfree(sample);
+	return len;
+}
+
+/**
+ * debug_sample_release - Release function for "sample" debugfs interface
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function completes the close of the debugfs interface "sample" file.
+ * Frees the sample_open "lock" so that other users may open the interface.
+ */
+static int debug_sample_release(struct inode *inode, struct file *filp)
+{
+	atomic_dec(&data.sample_open);
+
+	return 0;
+}
+
+/**
+ * debug_threshold_fopen - Open function for "threshold" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "threshold" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_threshold_fopen(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/**
+ * debug_threshold_fread - Read function for "threshold" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "threshold" debugfs
+ * interface to the hardware latency detector. It can be used to determine
+ * the current threshold level at which a latency will be recorded in the
+ * global ring buffer, typically on the order of 10us.
+ */
+static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
+				     size_t cnt, loff_t *ppos)
+{
+	return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
+}
+
+/**
+ * debug_threshold_fwrite - Write function for "threshold" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "threshold" debugfs
+ * interface to the hardware latency detector. It can be used to configure
+ * the threshold level at which any subsequently detected latencies will
+ * be recorded into the global ring buffer.
+ */
+static ssize_t debug_threshold_fwrite(struct file *filp,
+				      const char __user *ubuf,
+				      size_t cnt,
+				      loff_t *ppos)
+{
+	int ret;
+
+	ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
+
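+	/* kick the sampling kthread so the new threshold takes effect promptly */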
+	if (enabled)
+		wake_up_process(kthread);
+
+	return ret;
+}
+
+/**
+ * debug_width_fopen - Open function for "width" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "width" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_width_fopen(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/**
+ * debug_width_fread - Read function for "width" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "width" debugfs
+ * interface to the hardware latency detector. It can be used to determine
+ * for how many us of the total window us we will actively sample for any
+ * hardware-induced latency periods. Obviously, it is not possible to
+ * sample constantly and have the system respond to a sample reader, or,
+ * worse, without having the system appear to have gone out to lunch.
+ */
+static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
+				 size_t cnt, loff_t *ppos)
+{
+	return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
+}
+
+/**
+ * debug_width_fwrite - Write function for "width" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "width" debugfs
+ * interface to the hardware latency detector. It can be used to configure
+ * for how many us of the total window us we will actively sample for any
+ * hardware-induced latency periods. Obviously, it is not possible to
+ * sample constantly and have the system respond to a sample reader, or,
+ * worse, without having the system appear to have gone out to lunch. It
+ * is enforced that width is less than the total window size.
+ */
+static ssize_t debug_width_fwrite(struct file *filp,
+				  const char __user *ubuf,
+				  size_t cnt,
+				  loff_t *ppos)
+{
+	char buf[U64STR_SIZE];
+	int csize = min(cnt, sizeof(buf));
+	u64 val = 0;
+	int err = 0;
+
+	memset(buf, '\0', sizeof(buf));
+	if (copy_from_user(buf, ubuf, csize))
+		return -EFAULT;
+
+	buf[U64STR_SIZE-1] = '\0';	/* just in case */
+	err = strict_strtoull(buf, 10, &val);
+	if (0 != err)
+		return -EINVAL;
+
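+	/* a valid width must stay strictly smaller than the sampling window */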
+	mutex_lock(&data.lock);
+	if (val < data.sample_window)
+		data.sample_width = val;
+	else {
+		mutex_unlock(&data.lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&data.lock);
+
+	if (enabled)
+		wake_up_process(kthread);
+
+	return csize;
+}
+
+/**
+ * debug_window_fopen - Open function for "window" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "window" debugfs
+ * interface to the hardware latency detector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs.
+ */
+static int debug_window_fopen(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/**
+ * debug_window_fread - Read function for "window" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "window" debugfs
+ * interface to the hardware latency detector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs. Can be used to read the total window size.
+ */
+static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
+				  size_t cnt, loff_t *ppos)
+{
+	return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
+}
+
+/**
+ * debug_window_fwrite - Write function for "window" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "window" debugfs
+ * interface to the hardware latency detector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs. Can be used to write a new total window size. It
+ * is enforced that any value written must be greater than the sample width
+ * size, or an error results.
+ */
+static ssize_t debug_window_fwrite(struct file *filp,
+				   const char __user *ubuf,
+				   size_t cnt,
+				   loff_t *ppos)
+{
+	char buf[U64STR_SIZE];
+	int csize = min(cnt, sizeof(buf));
+	u64 val = 0;
+	int err = 0;
+
+	memset(buf, '\0', sizeof(buf));
+	if (copy_from_user(buf, ubuf, csize))
+		return -EFAULT;
+
+	buf[U64STR_SIZE-1] = '\0';	/* just in case */
+	err = strict_strtoull(buf, 10, &val);
+	if (0 != err)
+		return -EINVAL;
+
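+	/* a valid window must stay strictly larger than the sample width */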
+	mutex_lock(&data.lock);
+	if (data.sample_width < val)
+		data.sample_window = val;
+	else {
+		mutex_unlock(&data.lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&data.lock);
+
+	return csize;
+}
+
+/*
+ * Function pointers for the "count" debugfs file operations
+ */
+static const struct file_operations count_fops = {
+	.open		= debug_count_fopen,
+	.read		= debug_count_fread,
+	.write		= debug_count_fwrite,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "enable" debugfs file operations
+ */
+static const struct file_operations enable_fops = {
+	.open		= debug_enable_fopen,
+	.read		= debug_enable_fread,
+	.write		= debug_enable_fwrite,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "max" debugfs file operations
+ */
+static const struct file_operations max_fops = {
+	.open		= debug_max_fopen,
+	.read		= debug_max_fread,
+	.write		= debug_max_fwrite,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "sample" debugfs file operations
+ */
+static const struct file_operations sample_fops = {
+	.open		= debug_sample_fopen,
+	.read		= debug_sample_fread,
+	.release	= debug_sample_release,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "threshold" debugfs file operations
+ */
+static const struct file_operations threshold_fops = {
+	.open		= debug_threshold_fopen,
+	.read		= debug_threshold_fread,
+	.write		= debug_threshold_fwrite,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "width" debugfs file operations
+ */
+static const struct file_operations width_fops = {
+	.open		= debug_width_fopen,
+	.read		= debug_width_fread,
+	.write		= debug_width_fwrite,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "window" debugfs file operations
+ */
+static const struct file_operations window_fops = {
+	.open		= debug_window_fopen,
+	.read		= debug_window_fread,
+	.write		= debug_window_fwrite,
+	.owner		= THIS_MODULE,
+};
+
+/**
+ * init_debugfs - A function to initialize the debugfs interface files
+ *
+ * This function creates entries in debugfs for "hwlat_detector", including
+ * files to read values from the detector, current samples, and the
+ * maximum sample that has been captured since the hardware latency
+ * detector was started.
+ */
+static int init_debugfs(void)
+{
+	int ret = -ENOMEM;
+
+	debug_dir = debugfs_create_dir(DRVNAME, NULL);
+	if (!debug_dir)
+		goto err_debug_dir;
+
+	debug_sample = debugfs_create_file("sample", 0444,
+					   debug_dir, NULL,
+					   &sample_fops);
+	if (!debug_sample)
+		goto err_sample;
+
+	debug_count = debugfs_create_file("count", 0444,
+					  debug_dir, NULL,
+					  &count_fops);
+	if (!debug_count)
+		goto err_count;
+
+	debug_max = debugfs_create_file("max", 0444,
+					debug_dir, NULL,
+					&max_fops);
+	if (!debug_max)
+		goto err_max;
+
+	debug_sample_window = debugfs_create_file("window", 0644,
+						  debug_dir, NULL,
+						  &window_fops);
+	if (!debug_sample_window)
+		goto err_window;
+
+	debug_sample_width = debugfs_create_file("width", 0644,
+						 debug_dir, NULL,
+						 &width_fops);
+	if (!debug_sample_width)
+		goto err_width;
+
+	debug_threshold = debugfs_create_file("threshold", 0644,
+					      debug_dir, NULL,
+					      &threshold_fops);
+	if (!debug_threshold)
+		goto err_threshold;
+
+	debug_enable = debugfs_create_file("enable", 0644,
+					   debug_dir, &enabled,
+					   &enable_fops);
+	if (!debug_enable)
+		goto err_enable;
+
+	else {
+		ret = 0;
+		goto out;
+	}
+
+err_enable:
+	debugfs_remove(debug_threshold);
+err_threshold:
+	debugfs_remove(debug_sample_width);
+err_width:
+	debugfs_remove(debug_sample_window);
+err_window:
+	debugfs_remove(debug_max);
+err_max:
+	debugfs_remove(debug_count);
+err_count:
+	debugfs_remove(debug_sample);
+err_sample:
+	debugfs_remove(debug_dir);
+err_debug_dir:
+out:
+	return ret;
+}
+
+/**
+ * free_debugfs - A function to cleanup the debugfs file interface
+ */
+static void free_debugfs(void)
+{
+	/* could also use a debugfs_remove_recursive */
+	debugfs_remove(debug_enable);
+	debugfs_remove(debug_threshold);
+	debugfs_remove(debug_sample_width);
+	debugfs_remove(debug_sample_window);
+	debugfs_remove(debug_max);
+	debugfs_remove(debug_count);
+	debugfs_remove(debug_sample);
+	debugfs_remove(debug_dir);
+}
+
+/**
+ * detector_init - Standard module initialization code
+ */
+static int detector_init(void)
+{
+	int ret = -ENOMEM;
+
+	printk(KERN_INFO BANNER "version %s\n", VERSION);
+
+	ret = init_stats();
+	if (0 != ret)
+		goto out;
+
+	ret = init_debugfs();
+	if (0 != ret)
+		goto err_stats;
+
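+	/* honor the "enabled" module parameter at load time */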
+	if (enabled)
+		ret = start_kthread();
+
+	goto out;
+
+err_stats:
+	ring_buffer_free(ring_buffer);
+out:
+	return ret;
+
+}
+
+/**
+ * detector_exit - Standard module cleanup code
+ */
+static void detector_exit(void)
+{
+	int err;
+
+	if (enabled) {
+		enabled = 0;
+		err = stop_kthread();
+		if (err)
+			printk(KERN_ERR BANNER "cannot stop kthread\n");
+	}
+
+	free_debugfs();
+	ring_buffer_free(ring_buffer);	/* free up the ring buffer */
+
+}
+
+module_init(detector_init);
+module_exit(detector_exit);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 032b84791a16..6f8a1a718f74 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -910,15 +910,12 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
 	struct variant_data *variant = host->variant;
 	void __iomem *base = host->base;
-	unsigned long flags;
 	u32 status;

 	status = readl(base + MMCISTATUS);

 	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

-	local_irq_save(flags);
-
 	do {
 		unsigned int remain, len;
 		char *buffer;
@@ -958,8 +955,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)

 	sg_miter_stop(sg_miter);

-	local_irq_restore(flags);
-
 	/*
 	 * If we have less than the fifo 'half-full' threshold to transfer,
 	 * trigger a PIO interrupt as soon as any data is available.
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b98285446a5a..8b53d9287b8a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -156,6 +156,7 @@ config MACVTAP

 config NETCONSOLE
 	tristate "Network console logging support"
+	depends on !PREEMPT_RT_FULL
 	---help---
 	  If you want to log kernel messages over the network, enable this.
 	  See <file:Documentation/networking/netconsole.txt> for details.
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 5673d6e68da2..4d0112bbf188 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -842,9 +842,9 @@ static void poll_vortex(struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
 	unsigned long flags;
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 }
 #endif

@@ -1920,12 +1920,12 @@ static void vortex_tx_timeout(struct net_device *dev)
 	 * Block interrupts because vortex_interrupt does a bare spin_lock()
 	 */
 	unsigned long flags;
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	if (vp->full_bus_master_tx)
 		boomerang_interrupt(dev->irq, dev);
 	else
 		vortex_interrupt(dev->irq, dev);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 	}
 }

diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 65fe6324d817..47520ad0fb6d 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2239,11 +2239,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
 	}

 	tpd_req = atl1c_cal_tpd_req(skb);
-	if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
-		if (netif_msg_pktdata(adapter))
-			dev_info(&adapter->pdev->dev, "tx locked\n");
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&adapter->tx_lock, flags);

 	if (atl1c_tpd_avail(adapter, type) < tpd_req) {
 		/* no enough descriptor, just stop queue */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index d4a747a12037..b7520f17f476 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1861,8 +1861,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 	tpd_req = atl1e_cal_tdp_req(skb);
-	if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
-		return NETDEV_TX_LOCKED;
+	spin_lock_irqsave(&adapter->tx_lock, flags);

 	if (atl1e_tpd_avail(adapter) < tpd_req) {
 		/* no enough descriptor, just stop queue */
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 906117016fc4..6b9e00634c67 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -201,7 +201,9 @@ static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
 	struct net_device *dev = (struct net_device *) dev_id;
 	struct at91_private *lp = netdev_priv(dev);
 	unsigned int phy;
+	unsigned long flags;

+	spin_lock_irqsave(&lp->lock, flags);
 	/*
 	 * This hander is triggered on both edges, but the PHY chips expect
 	 * level-triggering. We therefore have to check if the PHY actually has
@@ -243,6 +245,7 @@ static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)

 done:
 	disable_mdi();
+	spin_unlock_irqrestore(&lp->lock, flags);

 	return IRQ_HANDLED;
 }
@@ -399,9 +402,11 @@ static void at91ether_check_link(unsigned long dev_id)
 	struct net_device *dev = (struct net_device *) dev_id;
 	struct at91_private *lp = netdev_priv(dev);

+	spin_lock_irq(&lp->lock);
 	enable_mdi();
 	update_linkspeed(dev, 1);
 	disable_mdi();
+	spin_unlock_irq(&lp->lock);

 	mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 47a84359d4e4..279c04e339ff 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1678,8 +1678,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
 	struct cmdQ *q = &sge->cmdQ[qid];
 	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

-	if (!spin_trylock(&q->lock))
-		return NETDEV_TX_LOCKED;
+	spin_lock(&q->lock);

 	reclaim_completed_tx(sge, q);

diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index fea3641d9398..d9a5fe09d992 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1946,6 +1946,7 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev)
 	pci_iounmap(pdev, tp->base_addr);
 	free_netdev (dev);
 	pci_release_regions (pdev);
+	pci_disable_device (pdev);
 	pci_set_drvdata (pdev, NULL);

 	/* pci_power_off (pdev, -1); */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d5de9a474aea..922a8176e90c 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1257,7 +1257,7 @@ static int gfar_suspend(struct device *dev)

 	if (netif_running(ndev)) {

-		local_irq_save(flags);
+		local_irq_save_nort(flags);
 		lock_tx_qs(priv);
 		lock_rx_qs(priv);

@@ -1275,7 +1275,7 @@ static int gfar_suspend(struct device *dev)

 		unlock_rx_qs(priv);
 		unlock_tx_qs(priv);
-		local_irq_restore(flags);
+		local_irq_restore_nort(flags);

 		disable_napi(priv);

@@ -1316,7 +1316,7 @@ static int gfar_resume(struct device *dev)
 	/* Disable Magic Packet mode, in case something
 	 * else woke us up.
 	 */
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	lock_tx_qs(priv);
 	lock_rx_qs(priv);

@@ -1328,7 +1328,7 @@ static int gfar_resume(struct device *dev)

 	unlock_rx_qs(priv);
 	unlock_tx_qs(priv);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);

 	netif_device_attach(ndev);

@@ -1652,7 +1652,7 @@ void stop_gfar(struct net_device *dev)


 	/* Lock it down */
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	lock_tx_qs(priv);
 	lock_rx_qs(priv);

@@ -1660,7 +1660,7 @@ void stop_gfar(struct net_device *dev)

 	unlock_rx_qs(priv);
 	unlock_tx_qs(priv);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);

 	/* Free the IRQs */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -2312,7 +2312,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
 	u32 tempval;

 	regs = priv->gfargrp[0].regs;
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	lock_rx_qs(priv);

 	if (features & NETIF_F_HW_VLAN_TX) {
@@ -2344,7 +2344,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
 	gfar_change_mtu(dev, dev->mtu);

 	unlock_rx_qs(priv);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 }

 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
@@ -2956,7 +2956,7 @@ static void adjust_link(struct net_device *dev)
 	struct phy_device *phydev = priv->phydev;
 	int new_state = 0;

-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	lock_tx_qs(priv);

 	if (phydev->link) {
@@ -3023,7 +3023,7 @@ static void adjust_link(struct net_device *dev)
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 	unlock_tx_qs(priv);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 }

 /* Update the hash table based on the current list of multicast
@@ -3223,14 +3223,14 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
 			dev->stats.tx_dropped++;
 			priv->extra_stats.tx_underrun++;

-			local_irq_save(flags);
+			local_irq_save_nort(flags);
 			lock_tx_qs(priv);

 			/* Reactivate the Tx Queues */
 			gfar_write(&regs->tstat, gfargrp->tstat);

 			unlock_tx_qs(priv);
-			local_irq_restore(flags);
+			local_irq_restore_nort(flags);
 		}
 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
 	}
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8d74efd04bb9..a49e50cbcfb8 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -486,7 +486,7 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva

 	/* Halt TX and RX, and process the frames which
 	 * have already been received */
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	lock_tx_qs(priv);
 	lock_rx_qs(priv);

@@ -494,7 +494,7 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva

 	unlock_rx_qs(priv);
 	unlock_tx_qs(priv);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);

 	for (i = 0; i < priv->num_rx_queues; i++)
 		gfar_clean_rx_ring(priv->rx_queue[i],
@@ -535,7 +535,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
 	if (dev->flags & IFF_UP) {
 		/* Halt TX and RX, and process the frames which
 		 * have already been received */
-		local_irq_save(flags);
+		local_irq_save_nort(flags);
 		lock_tx_qs(priv);
 		lock_rx_qs(priv);

@@ -543,7 +543,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)

 		unlock_tx_qs(priv);
 		unlock_rx_qs(priv);
-		local_irq_restore(flags);
+		local_irq_restore_nort(flags);

 		for (i = 0; i < priv->num_rx_queues; i++)
 			gfar_clean_rx_ring(priv->rx_queue[i],
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
index cd14a4d449c2..da304d24bb84 100644
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c
@@ -68,7 +68,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 		return count;


-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	lock_rx_qs(priv);

 	/* Set the new stashing value */
@@ -84,7 +84,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 	gfar_write(&regs->attr, temp);

 	unlock_rx_qs(priv);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);

 	return count;
 }
@@ -112,7 +112,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
 		return count;

-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	lock_rx_qs(priv);

 	if (length > priv->rx_buffer_size)
@@ -140,7 +140,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,

 out:
 	unlock_rx_qs(priv);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);

 	return count;
 }
@@ -171,7 +171,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
 		return count;

-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	lock_rx_qs(priv);

 	if (index > priv->rx_stash_size)
@@ -189,7 +189,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,

 out:
 	unlock_rx_qs(priv);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);

 	return count;
 }
@@ -219,7 +219,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
 	if (length > GFAR_MAX_FIFO_THRESHOLD)
 		return count;

-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	lock_tx_qs(priv);

 	priv->fifo_threshold = length;
@@ -230,7 +230,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
 	gfar_write(&regs->fifo_tx_thr, temp);

 	unlock_tx_qs(priv);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);

 	return count;
 }
@@ -259,7 +259,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
 	if (num > GFAR_MAX_FIFO_STARVE)
 		return count;

-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	lock_tx_qs(priv);

 	priv->fifo_starve = num;
@@ -270,7 +270,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
 	gfar_write(&regs->fifo_tx_starve, temp);

 	unlock_tx_qs(priv);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);

 	return count;
 }
@@ -300,7 +300,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
 	if (num > GFAR_MAX_FIFO_STARVE_OFF)
 		return count;

-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	lock_tx_qs(priv);

 	priv->fifo_starve_off = num;
@@ -311,7 +311,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
 	gfar_write(&regs->fifo_tx_starve_shutoff, temp);

 	unlock_tx_qs(priv);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);

 	return count;
 }
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 8b3da6eb5bc4..15fcc9fd622c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1308,7 +1308,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
 			 "%s-queue%d", dev->name, i);
 		ret = ibmebus_request_irq(pr->eq->attr.ist1,
 					  ehea_recv_irq_handler,
-					  IRQF_DISABLED, pr->int_send_name,
+					  IRQF_NO_THREAD, pr->int_send_name,
 					  pr);
 		if (ret) {
 			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 6338ef8606ae..ad2f094afaae 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4089,12 +4089,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 			[skb->priority & (MAX_TX_FIFOS - 1)];
 	fifo = &mac_control->fifos[queue];

-	if (do_spin_lock)
-		spin_lock_irqsave(&fifo->tx_lock, flags);
-	else {
-		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
-			return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&fifo->tx_lock, flags);

 	if (sp->config.multiq) {
 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 8b1c2eb4c118..84be5d0d031c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2128,10 +2128,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		adapter->stats.tx_length_errors++;
 		return NETDEV_TX_OK;
 	}
-	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
-		/* Collision - tell upper layer to requeue */
-		return NETDEV_TX_LOCKED;
-	}
+
+	spin_lock_irqsave(&tx_ring->tx_lock, flags);
+
 	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index df7fd8d083dc..7ce74f6437b0 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2240,7 +2240,7 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
  */
 static void rtl8139_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
+	disable_irq_nosync(dev->irq);
 	rtl8139_interrupt(dev->irq, dev);
 	enable_irq(dev->irq);
 }
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 32f0bcd5e30d..ccaf5809d810 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1606,13 +1606,8 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
 	unsigned long flags;

 	ENTER;
-	local_irq_save(flags);
-	if (!spin_trylock(&priv->tx_lock)) {
-		local_irq_restore(flags);
-		DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
-		    BDX_DRV_NAME, ndev->name);
-		return NETDEV_TX_LOCKED;
-	}
+
+	spin_lock_irqsave(&priv->tx_lock, flags);

 	/* build tx descriptor */
 	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* started with valid wptr */
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 1470d3e86e3c..8873a3bf6d95 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -178,11 +178,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	unsigned long flags;
 	int add_num = 1;

-	local_irq_save(flags);
-	if (!spin_trylock(&rnet->tx_lock)) {
-		local_irq_restore(flags);
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&rnet->tx_lock, flags);

 	if (is_multicast_ether_addr(eth->h_dest))
 		add_num = nact;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index d439d0611559..12172327a4e4 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -54,7 +54,7 @@ static DEFINE_MUTEX(of_aliases_mutex);
 /* use when traversing tree through the allnext, child, sibling,
  * or parent members of struct device_node.
  */
-DEFINE_RWLOCK(devtree_lock);
+DEFINE_RAW_SPINLOCK(devtree_lock);

 int of_n_addr_cells(struct device_node *np)
 {
@@ -163,16 +163,14 @@ void of_node_put(struct device_node *node)
 EXPORT_SYMBOL(of_node_put);
 #endif /* CONFIG_OF_DYNAMIC */

-struct property *of_find_property(const struct device_node *np,
-				  const char *name,
-				  int *lenp)
+static struct property *__of_find_property(const struct device_node *np,
+					   const char *name, int *lenp)
 {
 	struct property *pp;

 	if (!np)
 		return NULL;

-	read_lock(&devtree_lock);
 	for (pp = np->properties; pp != 0; pp = pp->next) {
 		if (of_prop_cmp(pp->name, name) == 0) {
 			if (lenp != 0)
@@ -180,7 +178,20 @@ struct property *of_find_property(const struct device_node *np,
 			break;
 		}
 	}
-	read_unlock(&devtree_lock);
+
+	return pp;
+}
+
+struct property *of_find_property(const struct device_node *np,
+				  const char *name,
+				  int *lenp)
+{
+	struct property *pp;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+	pp = __of_find_property(np, name, lenp);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);

 	return pp;
 }
@@ -198,13 +209,13 @@ struct device_node *of_find_all_nodes(struct device_node *prev)
 {
 	struct device_node *np;

-	read_lock(&devtree_lock);
+	raw_spin_lock(&devtree_lock);
 	np = prev ? prev->allnext : allnodes;
 	for (; np != NULL; np = np->allnext)
 		if (of_node_get(np))
 			break;
 	of_node_put(prev);
-	read_unlock(&devtree_lock);
+	raw_spin_unlock(&devtree_lock);
 	return np;
 }
 EXPORT_SYMBOL(of_find_all_nodes);
@@ -213,8 +224,20 @@ EXPORT_SYMBOL(of_find_all_nodes);
  * Find a property with a given name for a given node
  * and return the value.
  */
+static const void *__of_get_property(const struct device_node *np,
+				     const char *name, int *lenp)
+{
+	struct property *pp = __of_find_property(np, name, lenp);
+
+	return pp ? pp->value : NULL;
+}
+
+/*
+ * Find a property with a given name for a given node
+ * and return the value.
+ */
 const void *of_get_property(const struct device_node *np, const char *name,
-			    int *lenp)
+			    int *lenp)
 {
 	struct property *pp = of_find_property(np, name, lenp);

@@ -225,13 +248,13 @@ EXPORT_SYMBOL(of_get_property);
 /** Checks if the given "compat" string matches one of the strings in
  * the device's "compatible" property
  */
-int of_device_is_compatible(const struct device_node *device,
-			    const char *compat)
+static int __of_device_is_compatible(const struct device_node *device,
+				     const char *compat)
 {
 	const char* cp;
-	int cplen, l;
+	int uninitialized_var(cplen), l;

-	cp = of_get_property(device, "compatible", &cplen);
+	cp = __of_get_property(device, "compatible", &cplen);
 	if (cp == NULL)
 		return 0;
 	while (cplen > 0) {
@@ -244,6 +267,21 @@ int of_device_is_compatible(const struct device_node *device,

 	return 0;
 }
+
+/** Checks if the given "compat" string matches one of the strings in
+ * the device's "compatible" property
+ */
+int of_device_is_compatible(const struct device_node *device,
+			    const char *compat)
+{
+	unsigned long flags;
+	int res;
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+	res = __of_device_is_compatible(device, compat);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	return res;
+}
 EXPORT_SYMBOL(of_device_is_compatible);

 /**
@@ -303,13 +341,14 @@ EXPORT_SYMBOL(of_device_is_available);
 struct device_node *of_get_parent(const struct device_node *node)
 {
 	struct device_node *np;
+	unsigned long flags;

 	if (!node)
 		return NULL;

-	read_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	np = of_node_get(node->parent);
-	read_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return np;
 }
 EXPORT_SYMBOL(of_get_parent);
@@ -328,14 +367,15 @@ EXPORT_SYMBOL(of_get_parent);
 struct device_node *of_get_next_parent(struct device_node *node)
 {
 	struct device_node *parent;
+	unsigned long flags;

 	if (!node)
 		return NULL;

-	read_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	parent = of_node_get(node->parent);
 	of_node_put(node);
-	read_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return parent;
 }

@@ -351,14 +391,15 @@ struct device_node *of_get_next_child(const struct device_node *node,
 	struct device_node *prev)
 {
 	struct device_node *next;
+	unsigned long flags;

-	read_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	next = prev ? prev->sibling : node->child;
 	for (; next; next = next->sibling)
 		if (of_node_get(next))
 			break;
 	of_node_put(prev);
-	read_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return next;
 }
 EXPORT_SYMBOL(of_get_next_child);
@@ -373,14 +414,15 @@ EXPORT_SYMBOL(of_get_next_child);
 struct device_node *of_find_node_by_path(const char *path)
 {
 	struct device_node *np = allnodes;
+	unsigned long flags;

-	read_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	for (; np; np = np->allnext) {
 		if (np->full_name && (of_node_cmp(np->full_name, path) == 0)
 		    && of_node_get(np))
 			break;
 	}
-	read_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return np;
 }
 EXPORT_SYMBOL(of_find_node_by_path);
@@ -400,15 +442,16 @@ struct device_node *of_find_node_by_name(struct device_node *from,
 	const char *name)
 {
 	struct device_node *np;
+	unsigned long flags;

-	read_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	np = from ? from->allnext : allnodes;
 	for (; np; np = np->allnext)
 		if (np->name && (of_node_cmp(np->name, name) == 0)
 		    && of_node_get(np))
 			break;
 	of_node_put(from);
-	read_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return np;
 }
 EXPORT_SYMBOL(of_find_node_by_name);
@@ -429,15 +472,16 @@ struct device_node *of_find_node_by_type(struct device_node *from,
 	const char *type)
 {
 	struct device_node *np;
+	unsigned long flags;

-	read_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	np = from ? from->allnext : allnodes;
 	for (; np; np = np->allnext)
 		if (np->type && (of_node_cmp(np->type, type) == 0)
 		    && of_node_get(np))
 			break;
 	of_node_put(from);
-	read_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return np;
 }
 EXPORT_SYMBOL(of_find_node_by_type);
@@ -460,18 +504,20 @@ struct device_node *of_find_compatible_node(struct device_node *from,
 	const char *type, const char *compatible)
 {
 	struct device_node *np;
+	unsigned long flags;

-	read_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	np = from ? from->allnext : allnodes;
 	for (; np; np = np->allnext) {
 		if (type
 		    && !(np->type && (of_node_cmp(np->type, type) == 0)))
 			continue;
-		if (of_device_is_compatible(np, compatible) && of_node_get(np))
+		if (__of_device_is_compatible(np, compatible) &&
+		    of_node_get(np))
 			break;
 	}
 	of_node_put(from);
-	read_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return np;
 }
 EXPORT_SYMBOL(of_find_compatible_node);
@@ -493,8 +539,9 @@ struct device_node *of_find_node_with_property(struct device_node *from,
 {
 	struct device_node *np;
 	struct property *pp;
+	unsigned long flags;

-	read_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	np = from ? from->allnext : allnodes;
 	for (; np; np = np->allnext) {
 		for (pp = np->properties; pp != 0; pp = pp->next) {
@@ -506,20 +553,14 @@ struct device_node *of_find_node_with_property(struct device_node *from,
 	}
 out:
 	of_node_put(from);
-	read_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return np;
 }
 EXPORT_SYMBOL(of_find_node_with_property);

-/**
- * of_match_node - Tell if an device_node has a matching of_match structure
- * @matches: array of of device match structures to search in
- * @node: the of device structure to match against
- *
- * Low level utility function used by device matching.
- */
-const struct of_device_id *of_match_node(const struct of_device_id *matches,
-					 const struct device_node *node)
+static
+const struct of_device_id *__of_match_node(const struct of_device_id *matches,
+					   const struct device_node *node)
 {
 	if (!matches)
 		return NULL;
@@ -533,14 +574,33 @@ const struct of_device_id *of_match_node(const struct of_device_id *matches,
 		match &= node->type
 			&& !strcmp(matches->type, node->type);
 		if (matches->compatible[0])
-			match &= of_device_is_compatible(node,
-							 matches->compatible);
+			match &= __of_device_is_compatible(node,
+							   matches->compatible);
 		if (match)
 			return matches;
 		matches++;
 	}
 	return NULL;
 }
+
+/**
+ * of_match_node - Tell if a device_node has a matching of_match structure
+ * @matches: array of of device match structures to search in
+ * @node: the of device structure to match against
+ *
+ * Low level utility function used by device matching.
+ */
+const struct of_device_id *of_match_node(const struct of_device_id *matches,
+					 const struct device_node *node)
+{
+	const struct of_device_id *match;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+	match = __of_match_node(matches, node);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	return match;
+}
 EXPORT_SYMBOL(of_match_node);

 /**
@@ -559,15 +619,16 @@ struct device_node *of_find_matching_node(struct device_node *from,
 	const struct of_device_id *matches)
 {
 	struct device_node *np;
+	unsigned long flags;

-	read_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	np = from ? from->allnext : allnodes;
 	for (; np; np = np->allnext) {
-		if (of_match_node(matches, np) && of_node_get(np))
+		if (__of_match_node(matches, np) && of_node_get(np))
 			break;
 	}
 	of_node_put(from);
-	read_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return np;
 }
 EXPORT_SYMBOL(of_find_matching_node);
@@ -610,12 +671,12 @@ struct device_node *of_find_node_by_phandle(phandle handle)
 {
 	struct device_node *np;

-	read_lock(&devtree_lock);
+	raw_spin_lock(&devtree_lock);
 	for (np = allnodes; np; np = np->allnext)
 		if (np->phandle == handle)
 			break;
 	of_node_get(np);
-	read_unlock(&devtree_lock);
+	raw_spin_unlock(&devtree_lock);
 	return np;
 }
 EXPORT_SYMBOL(of_find_node_by_phandle);
@@ -943,18 +1004,18 @@ int prom_add_property(struct device_node *np, struct property *prop)
 	unsigned long flags;

 	prop->next = NULL;
-	write_lock_irqsave(&devtree_lock, flags);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	next = &np->properties;
 	while (*next) {
 		if (strcmp(prop->name, (*next)->name) == 0) {
 			/* duplicate ! don't insert it */
-			write_unlock_irqrestore(&devtree_lock, flags);
+			raw_spin_unlock_irqrestore(&devtree_lock, flags);
 			return -1;
 		}
 		next = &(*next)->next;
 	}
 	*next = prop;
-	write_unlock_irqrestore(&devtree_lock, flags);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);

 #ifdef CONFIG_PROC_DEVICETREE
 	/* try to add to proc as well if it was initialized */
@@ -979,7 +1040,7 @@ int prom_remove_property(struct device_node *np, struct property *prop)
 	unsigned long flags;
 	int found = 0;

-	write_lock_irqsave(&devtree_lock, flags);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	next = &np->properties;
 	while (*next) {
 		if (*next == prop) {
@@ -992,7 +1053,7 @@ int prom_remove_property(struct device_node *np, struct property *prop)
 		}
 		next = &(*next)->next;
 	}
-	write_unlock_irqrestore(&devtree_lock, flags);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);

 	if (!found)
 		return -ENODEV;
@@ -1022,7 +1083,7 @@ int prom_update_property(struct device_node *np,
 	unsigned long flags;
 	int found = 0;

-	write_lock_irqsave(&devtree_lock, flags);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	next = &np->properties;
 	while (*next) {
 		if (*next == oldprop) {
@@ -1036,7 +1097,7 @@ int prom_update_property(struct device_node *np,
 		}
 		next = &(*next)->next;
 	}
-	write_unlock_irqrestore(&devtree_lock, flags);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);

 	if (!found)
 		return -ENODEV;
@@ -1066,12 +1127,12 @@ void of_attach_node(struct device_node *np)
 {
 	unsigned long flags;

-	write_lock_irqsave(&devtree_lock, flags);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	np->sibling = np->parent->child;
 	np->allnext = allnodes;
 	np->parent->child = np;
 	allnodes = np;
-	write_unlock_irqrestore(&devtree_lock, flags);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }

 /**
@@ -1085,7 +1146,7 @@ void of_detach_node(struct device_node *np)
 	struct device_node *parent;
 	unsigned long flags;

-	write_lock_irqsave(&devtree_lock, flags);
+	raw_spin_lock_irqsave(&devtree_lock, flags);

 	parent = np->parent;
 	if (!parent)
@@ -1116,7 +1177,7 @@ void of_detach_node(struct device_node *np)
 	of_node_set_flag(np, OF_DETACHED);

 out_unlock:
-	write_unlock_irqrestore(&devtree_lock, flags);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 #endif /* defined(CONFIG_OF_DYNAMIC) */

diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 2a581642c237..8e6a88ea3916 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -463,7 +463,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
 	WARN_ON(!dev->block_cfg_access);

 	dev->block_cfg_access = 0;
-	wake_up_all(&pci_cfg_wait);
+	wake_up_all_locked(&pci_cfg_wait);
 	raw_spin_unlock_irqrestore(&pci_lock, flags);
 }
 EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 335e85192807..7f791b975257 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1222,7 +1222,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
 	struct sk_buff *skb;
 #ifdef CONFIG_SMP
 	struct fcoe_percpu_s *p0;
-	unsigned targ_cpu = get_cpu();
+	unsigned targ_cpu = get_cpu_light();
 #endif /* CONFIG_SMP */

 	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
@@ -1278,7 +1278,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
 			kfree_skb(skb);
 		spin_unlock_bh(&p->fcoe_rx_list.lock);
 	}
-	put_cpu();
+	put_cpu_light();
 #else
 	/*
 	 * This a non-SMP scenario where the singular Rx thread is
@@ -1494,11 +1494,11 @@ err2:
 static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
 {
 	struct fcoe_percpu_s *fps;
-	int rc;
+	int rc, cpu = get_cpu_light();

-	fps = &get_cpu_var(fcoe_percpu);
+	fps = &per_cpu(fcoe_percpu, cpu);
 	rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
-	put_cpu_var(fcoe_percpu);
+	put_cpu_light();

 	return rc;
 }
@@ -1738,7 +1738,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 	 */
 	hp = (struct fcoe_hdr *) skb_network_header(skb);

-	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+	stats = per_cpu_ptr(lport->dev_stats, get_cpu_light());
 	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
 		if (stats->ErrorFrames < 5)
 			printk(KERN_WARNING "fcoe: FCoE version "
@@ -1770,13 +1770,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 		goto drop;

 	if (!fcoe_filter_frames(lport, fp)) {
-		put_cpu();
+		put_cpu_light();
 		fc_exch_recv(lport, fp);
 		return;
 	}
 drop:
 	stats->ErrorFrames++;
-	put_cpu();
+	put_cpu_light();
 	kfree_skb(skb);
 }

diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 249a106888d9..753fcb94d8c2 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -719,7 +719,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 	unsigned long sel_time = 0;
 	struct fcoe_dev_stats *stats;

-	stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
+	stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu_light());

 	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
 		deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
@@ -752,7 +752,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 			sel_time = fcf->time;
 		}
 	}
-	put_cpu();
+	put_cpu_light();
 	if (sel_time && !fip->sel_fcf && !fip->sel_time) {
 		sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
 		fip->sel_time = sel_time;
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index aceffadb21c7..fb4e6cec4be7 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -724,10 +724,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 	}
 	memset(ep, 0, sizeof(*ep));

-	cpu = get_cpu();
+	cpu = get_cpu_light();
 	pool = per_cpu_ptr(mp->pool, cpu);
 	spin_lock_bh(&pool->lock);
-	put_cpu();
+	put_cpu_light();

 	/* peek cache of free slot */
 	if (pool->left != FC_XID_UNKNOWN) {
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 51ee663c1310..38c9f30d1064 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -191,7 +191,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
 	/* TODO: audit callers to ensure they are ready for qc_issue to
 	 * unconditionally re-enable interrupts
 	 */
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	spin_unlock(ap->lock);

 	/* If the device fell off, no sense in issuing commands */
@@ -261,7 +261,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)

 out:
 	spin_lock(ap->lock);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 	return ret;
 }

diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 6e457643c639..28d9a8db8d07 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -36,12 +36,12 @@ qla2x00_poll(struct rsp_que *rsp)
 {
 	unsigned long flags;
 	struct qla_hw_data *ha = rsp->hw;
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	if (IS_QLA82XX(ha))
 		qla82xx_poll(0, rsp);
 	else
 		ha->isp_ops->intr_handler(0, rsp);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 }

 static inline uint8_t *
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 5fe219142d41..2d5c0cb6aa87 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -38,6 +38,7 @@
 #include <linux/nmi.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/kdb.h>
 #ifdef CONFIG_SPARC
 #include <linux/sunserialcore.h>
 #endif
@@ -80,7 +81,16 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
 #define DEBUG_INTR(fmt...)	do { } while (0)
 #endif

-#define PASS_LIMIT	512
+/*
+ * On -rt we can have more delays, and legitimately
+ * so - so don't drop work spuriously and spam the
+ * syslog:
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define PASS_LIMIT 1000000
+#else
+# define PASS_LIMIT 512
+#endif

 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)

@@ -2840,14 +2850,10 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)

 touch_nmi_watchdog();

- local_irq_save(flags);
- if (port->sysrq) {
- /* serial8250_handle_irq() already took the lock */
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
- } else
- spin_lock(&port->lock);
+ if (port->sysrq || oops_in_progress || in_kdb_printk())
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);

 /*
 * First save the IER then disable the interrupts
@@ -2879,8 +2885,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
 serial8250_modem_status(up);

 if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
 }

 static int __init serial8250_console_setup(struct console *co, char *options)
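
All the serial console writers touched here (8250 above, amba-pl011, imx and omap-serial below) follow the same pattern: the irq disable is folded into the port lock, and the lock is only trylock'd when the path may already hold it (sysrq, oops, kdb). A minimal sketch of that pattern; my_console_write() and my_port are hypothetical stand-ins, not any concrete driver:

    static void my_console_write(struct console *co, const char *s, unsigned int n)
    {
            struct uart_port *port = &my_port;      /* hypothetical port */
            unsigned long flags;
            int locked = 1;

            if (port->sysrq || oops_in_progress)
                    /* may recurse on a lock we already hold: never block here */
                    locked = spin_trylock_irqsave(&port->lock, flags);
            else
                    spin_lock_irqsave(&port->lock, flags);

            /* ... push the characters to the hardware ... */

            if (locked)
                    spin_unlock_irqrestore(&port->lock, flags);
    }

On RT the port lock is a sleeping lock, so a bare local_irq_save() around it would be illegal anyway; folding the irq handling into the lock keeps both flavors correct.
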
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index b69356c227c4..7b29c9224e0a 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1788,13 +1788,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)

 clk_enable(uap->clk);

- local_irq_save(flags);
+ /*
+ * local_irq_save(flags);
+ *
+ * This local_irq_save() is nonsense. If we come in via sysrq
+ * handling then interrupts are already disabled. Aside of
+ * that the port.sysrq check is racy on SMP regardless.
+ */
 if (uap->port.sysrq)
 locked = 0;
 else if (oops_in_progress)
- locked = spin_trylock(&uap->port.lock);
+ locked = spin_trylock_irqsave(&uap->port.lock, flags);
 else
- spin_lock(&uap->port.lock);
+ spin_lock_irqsave(&uap->port.lock, flags);

 /*
 * First save the CR then disable the interrupts
@@ -1816,8 +1822,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
 writew(old_cr, uap->port.membase + UART011_CR);

 if (locked)
- spin_unlock(&uap->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&uap->port.lock, flags);

 clk_disable(uap->clk);
 }
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 88620e1ad5fc..2fdd52dfa91f 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1215,8 +1215,14 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
 struct imx_port_ucrs old_ucr;
 unsigned int ucr1;
 unsigned long flags;
+ int locked = 1;

- spin_lock_irqsave(&sport->port.lock, flags);
+ if (sport->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ else
+ spin_lock_irqsave(&sport->port.lock, flags);

 /*
 * First, save UCR1/2/3 and then disable interrupts
@@ -1243,7 +1249,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)

 imx_port_ucrs_restore(&sport->port, &old_ucr);

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ if (locked)
+ spin_unlock_irqrestore(&sport->port.lock, flags);
 }

 /*
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index d00b38eb268e..f6974929c5bb 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1064,13 +1064,10 @@ serial_omap_console_write(struct console *co, const char *s,

 pm_runtime_get_sync(&up->pdev->dev);

- local_irq_save(flags);
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = spin_trylock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
 else
- spin_lock(&up->port.lock);
+ spin_lock_irqsave(&up->port.lock, flags);

 /*
 * First save the IER then disable the interrupts
@@ -1099,8 +1096,7 @@ serial_omap_console_write(struct console *co, const char *s,
 pm_runtime_mark_last_busy(&up->pdev->dev);
 pm_runtime_put_autosuspend(&up->pdev->dev);
 if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
 }

 static int __init
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 4f02f9ce05c5..7c8fe9b570ef 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -496,10 +496,14 @@ void tty_flip_buffer_push(struct tty_struct *tty)
 tty->buf.tail->commit = tty->buf.tail->used;
 spin_unlock_irqrestore(&tty->buf.lock, flags);

+#ifndef CONFIG_PREEMPT_RT_FULL
 if (tty->low_latency)
 flush_to_ldisc(&tty->buf.work);
 else
 schedule_work(&tty->buf.work);
+#else
+ flush_to_ldisc(&tty->buf.work);
+#endif
 }
 EXPORT_SYMBOL(tty_flip_buffer_push);

diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 24b95db75d84..7894759ce77c 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -53,7 +53,7 @@ static void put_ldisc(struct tty_ldisc *ld)
 * We really want an "atomic_dec_and_lock_irqsave()",
 * but we don't have it, so this does it by hand.
 */
- local_irq_save(flags);
+ local_irq_save_nort(flags);
 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
 struct tty_ldisc_ops *ldo = ld->ops;

@@ -64,7 +64,7 @@ static void put_ldisc(struct tty_ldisc *ld)
 kfree(ld);
 return;
 }
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
 wake_up(&tty_ldisc_idle);
 }

diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index f1d0e3ca46b5..c1347e4a3de6 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2143,7 +2143,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
 * when the first handler doesn't use it. So let's just
 * assume it's never used.
 */
- local_irq_save(flags);
+ local_irq_save_nort(flags);

 if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
 rc = IRQ_NONE;
@@ -2152,7 +2152,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
 else
 rc = IRQ_HANDLED;

- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
 return rc;
 }
 EXPORT_SYMBOL_GPL(usb_hcd_irq);
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 243ef1adf969..238372e7e39d 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -834,7 +834,7 @@ static struct {
 } dbg_data = {
 .idx = 0,
 .tty = 0,
- .lck = __RW_LOCK_UNLOCKED(lck)
+ .lck = __RW_LOCK_UNLOCKED(dbg_data.lck)
 };

 /**
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 235171f29460..0157357f5533 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -829,9 +829,13 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
 }

 if (ints & OHCI_INTR_WDH) {
- spin_lock (&ohci->lock);
- dl_done_list (ohci);
- spin_unlock (&ohci->lock);
+ if (ohci->hcca->done_head == 0) {
+ ints &= ~OHCI_INTR_WDH;
+ } else {
+ spin_lock (&ohci->lock);
+ dl_done_list (ohci);
+ spin_unlock (&ohci->lock);
+ }
 }

 if (quirk_zfmicro(ohci) && (ints & OHCI_INTR_SF)) {
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 908e18455413..bdd178862e7c 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -34,6 +34,7 @@
 #include <linux/sched.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
+#include <linux/delay.h>
 #include <asm/current.h>
 #include <asm/uaccess.h>

diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 54effed1ce59..43df7e68bb26 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -162,7 +162,7 @@ again:
 parent = p->d_parent;
 if (!spin_trylock(&parent->d_lock)) {
 spin_unlock(&p->d_lock);
- cpu_relax();
+ cpu_chill();
 goto relock;
 }
 spin_unlock(&p->d_lock);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 4106264fbc65..ae47cc2c9f2f 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -77,7 +77,7 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 {
 int i;

-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if (defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_PREEMPT_RT_BASE))
 /* lockdep really cares that we take all of these spinlocks
 * in the right order. If any of the locks in the path are not
 * currently blocking, it is going to complain. So, make really
@@ -104,7 +104,7 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 }
 }

-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if (defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_PREEMPT_RT_BASE))
 if (held)
 btrfs_clear_lock_blocking_rw(held, held_rw);
 #endif
diff --git a/fs/buffer.c b/fs/buffer.c
index ed2dc709883a..ddcfe59eacba 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -281,8 +281,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 * decide that the page is now completely done.
 */
 first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
 clear_buffer_async_read(bh);
 unlock_buffer(bh);
 tmp = bh;
@@ -295,8 +294,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 }
 tmp = tmp->b_this_page;
 } while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);

 /*
 * If none of the buffers had errors and they are all
@@ -308,9 +306,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 return;

 still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
 }

 /*
@@ -344,8 +340,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 }

 first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);

 clear_buffer_async_write(bh);
 unlock_buffer(bh);
@@ -357,15 +352,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 }
 tmp = tmp->b_this_page;
 }
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
 end_page_writeback(page);
 return;

 still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
 }
 EXPORT_SYMBOL(end_buffer_async_write);

@@ -3197,6 +3189,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
 if (ret) {
 INIT_LIST_HEAD(&ret->b_assoc_buffers);
+ buffer_head_init_locks(ret);
 preempt_disable();
 __this_cpu_inc(bh_accounting.nr);
 recalc_bh_state();
diff --git a/fs/dcache.c b/fs/dcache.c
index 8038a780696f..2275905be142 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -37,6 +37,7 @@
 #include <linux/rculist_bl.h>
 #include <linux/prefetch.h>
 #include <linux/ratelimit.h>
+#include <linux/delay.h>
 #include "internal.h"
 #include "mount.h"

@@ -472,7 +473,7 @@ static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
 if (inode && !spin_trylock(&inode->i_lock)) {
 relock:
 spin_unlock(&dentry->d_lock);
- cpu_relax();
+ cpu_chill();
 return dentry; /* try again with same dentry */
 }
 if (IS_ROOT(dentry))
@@ -858,7 +859,7 @@ relock:

 if (!spin_trylock(&dentry->d_lock)) {
 spin_unlock(&dcache_lru_lock);
- cpu_relax();
+ cpu_chill();
 goto relock;
 }

@@ -2054,7 +2055,7 @@ again:
 if (dentry->d_count == 1) {
 if (inode && !spin_trylock(&inode->i_lock)) {
 spin_unlock(&dentry->d_lock);
- cpu_relax();
+ cpu_chill();
 goto again;
 }
 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 33c9599c14e7..05bc433dcca8 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -488,12 +488,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
 */
 static void ep_poll_safewake(wait_queue_head_t *wq)
 {
- int this_cpu = get_cpu();
+ int this_cpu = get_cpu_light();

 ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
 ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);

- put_cpu();
+ put_cpu_light();
 }

 static void ep_remove_wait_queue(struct eppoll_entry *pwq)
diff --git a/fs/exec.c b/fs/exec.c
index 91d238fd1ea2..25399a54076c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -840,10 +840,12 @@ static int exec_mmap(struct mm_struct *mm)
 }
 }
 task_lock(tsk);
+ preempt_disable_rt();
 active_mm = tsk->active_mm;
 tsk->mm = mm;
 tsk->active_mm = mm;
 activate_mm(active_mm, mm);
+ preempt_enable_rt();
 task_unlock(tsk);
 arch_pick_mmap_layout(mm);
 if (old_mm) {
diff --git a/fs/file.c b/fs/file.c
index d512ca5ea28f..8664f921a1cb 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -105,14 +105,14 @@ void free_fdtable_rcu(struct rcu_head *rcu)
 kfree(fdt->open_fds);
 kfree(fdt);
 } else {
- fddef = &get_cpu_var(fdtable_defer_list);
+ fddef = &per_cpu(fdtable_defer_list, get_cpu_light());
 spin_lock(&fddef->lock);
 fdt->next = fddef->next;
 fddef->next = fdt;
 /* vmallocs are handled from the workqueue context */
 schedule_work(&fddef->wq);
 spin_unlock(&fddef->lock);
- put_cpu_var(fdtable_defer_list);
+ put_cpu_light();
 }
 }

@@ -421,7 +421,7 @@ struct files_struct init_files = {
 .close_on_exec = init_files.close_on_exec_init,
 .open_fds = init_files.open_fds_init,
 },
- .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
+ .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
 };

 /*
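
The get_cpu()/put_cpu() replacements in the epoll and fdtable hunks above rely on get_cpu_light(), which on RT only disables migration instead of preemption: the code stays on one CPU but remains preemptible, so any serialization against other tasks on that CPU must come from a real lock (the fdtable code keeps fddef->lock for exactly that reason). A hedged sketch of the intended usage; my_counters is a hypothetical per-CPU variable, and the _light() primitives themselves are defined elsewhere in this patch set:

    static void count_event(struct my_counters __percpu *my_counters)
    {
            int cpu = get_cpu_light();      /* migration off; preemption stays on (RT) */
            struct my_counters *c = per_cpu_ptr(my_counters, cpu);

            spin_lock(&c->lock);            /* still needed: we can be preempted */
            c->events++;
            spin_unlock(&c->lock);
            put_cpu_light();
    }
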
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 05f0754f2b46..d8efcbc7b97a 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *journal)
 if (journal->j_flags & JFS_ABORT)
 return;
 spin_unlock(&journal->j_state_lock);
+ if (current->plug)
+ io_schedule();
 mutex_lock(&journal->j_checkpoint_mutex);

 /*
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index c78841ee81cf..a4d273b61596 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -125,6 +125,8 @@ void __jbd2_log_wait_for_space(journal_t *journal)
 if (journal->j_flags & JBD2_ABORT)
 return;
 write_unlock(&journal->j_state_lock);
+ if (current->plug)
+ io_schedule();
 mutex_lock(&journal->j_checkpoint_mutex);

 /*
diff --git a/fs/namespace.c b/fs/namespace.c
index f7be8d9c1cd6..7ce90052faac 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -20,6 +20,7 @@
 #include <linux/fs_struct.h> /* get_fs_root et.al. */
 #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
 #include <linux/uaccess.h>
+#include <linux/delay.h>
 #include "pnode.h"
 #include "internal.h"

@@ -313,8 +314,11 @@ int mnt_want_write(struct vfsmount *m)
 * incremented count after it has set MNT_WRITE_HOLD.
 */
 smp_mb();
- while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
- cpu_relax();
+ while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) {
+ preempt_enable();
+ cpu_chill();
+ preempt_disable();
+ }
 /*
 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
 * be set to match its requirements. So we must not load that until
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index fa9c05f97af4..f5d45653bd27 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 "0x%llx.", (unsigned long long)bh->b_blocknr);
 }
 first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
 clear_buffer_async_read(bh);
 unlock_buffer(bh);
 tmp = bh;
@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 }
 tmp = tmp->b_this_page;
 } while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
 /*
 * If none of the buffers had errors then we can set the page uptodate,
 * but we first have to perform the post read mst fixups, if the
@@ -146,13 +144,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 recs = PAGE_CACHE_SIZE / rec_size;
 /* Should have been verified before we got here... */
 BUG_ON(!recs);
- local_irq_save(flags);
+ local_irq_save_nort(flags);
 kaddr = kmap_atomic(page);
 for (i = 0; i < recs; i++)
 post_read_mst_fixup((NTFS_RECORD*)(kaddr +
 i * rec_size), rec_size);
 kunmap_atomic(kaddr);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
 flush_dcache_page(page);
 if (likely(page_uptodate && !PageError(page)))
 SetPageUptodate(page);
@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 unlock_page(page);
 return;
 still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
 }

 /**
diff --git a/fs/timerfd.c b/fs/timerfd.c
index dffeb3795af1..57f0e4e86f0a 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -313,7 +313,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
 if (hrtimer_try_to_cancel(&ctx->t.tmr) >= 0)
 break;
 }
 spin_unlock_irq(&ctx->wqh.lock);
- cpu_relax();
+ hrtimer_wait_for_timer(&ctx->t.tmr);
 }

 /*
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 828662f70d64..13d86a8dae43 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -97,7 +97,7 @@
 /*
 * Feature macros (disable/enable)
 */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT_FULL)
 #define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
 #else
 #undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 6fbc4cab5834..6fee3092805c 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -72,6 +72,7 @@

 #define acpi_cache_t struct kmem_cache
 #define acpi_spinlock spinlock_t *
+#define acpi_raw_spinlock raw_spinlock_t *
 #define acpi_cpu_flags unsigned long

 #else /* !__KERNEL__ */
@@ -175,6 +176,19 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
 lock ? AE_OK : AE_NO_MEMORY; \
 })

+#define acpi_os_create_raw_lock(__handle) \
+({ \
+ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
+ \
+ if (lock) { \
+ *(__handle) = lock; \
+ raw_spin_lock_init(*(__handle)); \
+ } \
+ lock ? AE_OK : AE_NO_MEMORY; \
+})
+
+#define acpi_os_delete_raw_lock(__handle) kfree(__handle)
+
 #endif /* __KERNEL__ */

 #endif /* __ACLINUX_H__ */
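
acpi_os_create_raw_lock() deliberately mirrors the existing acpi_os_create_lock() helper one hunk above, but hands back a raw_spinlock_t, which remains a true spinning lock under RT. A small usage sketch, with error handling trimmed; my_handle is a hypothetical variable of the acpi_raw_spinlock type added above:

    acpi_raw_spinlock my_handle;        /* i.e. raw_spinlock_t *, hypothetical */

    if (acpi_os_create_raw_lock(&my_handle) == AE_OK) {
            raw_spin_lock(my_handle);
            /* short, non-sleeping critical section */
            raw_spin_unlock(my_handle);
            acpi_os_delete_raw_lock(my_handle);
    }
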
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 2520a6e241dc..0e41ade43796 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -3,6 +3,10 @@

 #include <linux/compiler.h>

+#ifndef __ASSEMBLY__
+extern void __WARN_ON(const char *func, const char *file, const int line);
+#endif /* __ASSEMBLY__ */
+
 #ifdef CONFIG_BUG

 #ifdef CONFIG_GENERIC_BUG
@@ -202,4 +206,18 @@ extern void warn_slowpath_null(const char *file, const int line);
 # define WARN_ON_SMP(x) ({0;})
 #endif

+#ifdef CONFIG_PREEMPT_RT_BASE
+# define BUG_ON_RT(c) BUG_ON(c)
+# define BUG_ON_NONRT(c) do { } while (0)
+# define WARN_ON_RT(condition) WARN_ON(condition)
+# define WARN_ON_NONRT(condition) do { } while (0)
+# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
+#else
+# define BUG_ON_RT(c) do { } while (0)
+# define BUG_ON_NONRT(c) BUG_ON(c)
+# define WARN_ON_RT(condition) do { } while (0)
+# define WARN_ON_NONRT(condition) WARN_ON(condition)
+# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
+#endif
+
 #endif
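
The _RT/_NONRT assertion variants let code keep invariant checks that are only valid on one kernel flavor. A sketch with a hypothetical completion path: on a stock kernel it runs with hard interrupts off, while on RT it runs threaded, so the irq check must compile away there:

    static void my_complete(struct my_dev *dev)     /* hypothetical */
    {
            /* invariant holds on !RT only; the path is threaded on RT */
            WARN_ON_NONRT(!irqs_disabled());
            /* conversely, an RT-only sanity check */
            WARN_ON_RT(in_atomic());
            /* ... */
    }
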
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index 2533fddd34a6..d8d4c898c1bb 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
 if (size == 8 && sizeof(unsigned long) != 8)
 wrong_size_cmpxchg(ptr);

- local_irq_save(flags);
+ raw_local_irq_save(flags);
 switch (size) {
 case 1: prev = *(u8 *)ptr;
 if (prev == old)
@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
 default:
 wrong_size_cmpxchg(ptr);
 }
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
 return prev;
 }

@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
 u64 prev;
 unsigned long flags;

- local_irq_save(flags);
+ raw_local_irq_save(flags);
 prev = *(u64 *)ptr;
 if (prev == old)
 *(u64 *)ptr = new;
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
 return prev;
 }

diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 458f497738a4..3f8e27b16ed5 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -72,8 +72,52 @@ struct buffer_head {
 struct address_space *b_assoc_map; /* mapping this buffer is
 associated with */
 atomic_t b_count; /* users using this buffer_head */
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spinlock_t b_uptodate_lock;
+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
+ spinlock_t b_state_lock;
+ spinlock_t b_journal_head_lock;
+#endif
+#endif
 };

+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
+{
+ unsigned long flags;
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
+#else
+ spin_lock_irqsave(&bh->b_uptodate_lock, flags);
+#endif
+ return flags;
+}
+
+static inline void
+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
+ local_irq_restore(flags);
+#else
+ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
+#endif
+}
+
+static inline void buffer_head_init_locks(struct buffer_head *bh)
+{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spin_lock_init(&bh->b_uptodate_lock);
+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
+ spin_lock_init(&bh->b_state_lock);
+ spin_lock_init(&bh->b_journal_head_lock);
+#endif
+#endif
+}
+
 /*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
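
bh_uptodate_lock_irqsave()/bh_uptodate_unlock_irqrestore() fold the open-coded local_irq_save() + bit_spin_lock(BH_Uptodate_Lock) pairs that the fs/buffer.c and fs/ntfs/aops.c hunks above removed; on RT they map onto the new b_uptodate_lock spinlock instead of disabling interrupts. Sketch of a completion handler using them (my_end_io() is hypothetical; first is normally page_buffers(page)):

    static void my_end_io(struct buffer_head *bh, int uptodate)
    {
            struct buffer_head *first = bh;         /* normally page_buffers(page) */
            unsigned long flags;

            flags = bh_uptodate_lock_irqsave(first);
            /* walk the buffer ring, clear the async bits */
            bh_uptodate_unlock_irqrestore(first, flags);
    }
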
diff --git a/include/linux/console.h b/include/linux/console.h
index f59e9428d06b..773835d81cd7 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -135,6 +135,7 @@ struct console {
 for (con = console_drivers; con != NULL; con = con->next)

 extern int console_set_on_cmdline;
+extern struct console *early_console;

 extern int add_preferred_console(char *name, int idx, char *options);
 extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 25fd7410791c..b5bdd206a47b 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -177,6 +177,8 @@ extern struct bus_type cpu_subsys;

 extern void get_online_cpus(void);
 extern void put_online_cpus(void);
+extern void pin_current_cpu(void);
+extern void unpin_current_cpu(void);
 extern void cpu_hotplug_disable(void);
 extern void cpu_hotplug_enable(void);
 #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
@@ -201,6 +203,8 @@ static inline void cpu_hotplug_driver_unlock(void)

 #define get_online_cpus() do { } while (0)
 #define put_online_cpus() do { } while (0)
+static inline void pin_current_cpu(void) { }
+static inline void unpin_current_cpu(void) { }
 #define cpu_hotplug_disable() do { } while (0)
 #define cpu_hotplug_enable() do { } while (0)
 #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
diff --git a/include/linux/delay.h b/include/linux/delay.h
index a6ecb34cf547..37caab306336 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -52,4 +52,10 @@ static inline void ssleep(unsigned int seconds)
 msleep(seconds * 1000);
 }

+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void cpu_chill(void);
+#else
+# define cpu_chill() cpu_relax()
+#endif
+
 #endif /* defined(_LINUX_DELAY_H) */
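
cpu_chill() is what the autofs4, dcache and namespace hunks above substitute for cpu_relax() in trylock-retry loops. On a stock kernel it still spins; on RT spinning could livelock against a preempted lock holder, so the RT implementation sleeps briefly instead. The canonical loop, sketched with a hypothetical pair of objects:

    static void lock_both(struct foo *a, struct foo *b)     /* hypothetical */
    {
    retry:
            spin_lock(&a->lock);
            if (!spin_trylock(&b->lock)) {
                    spin_unlock(&a->lock);
                    cpu_chill();    /* cpu_relax() on !RT, a short sleep on RT */
                    goto retry;
            }
            /* ... work with both locks held ... */
            spin_unlock(&b->lock);
            spin_unlock(&a->lock);
    }
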
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 16cad53020d8..c3eb01d57c1d 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -49,7 +49,8 @@ struct trace_entry {
 unsigned char flags;
 unsigned char preempt_count;
 int pid;
- int padding;
+ unsigned short migrate_disable;
+ unsigned short padding;
 };

 #define FTRACE_MAX_EVENT \
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index bb7f30971858..318d91ecc667 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -60,7 +60,11 @@
 #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET (1UL << NMI_SHIFT)

-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#else
+# define SOFTIRQ_DISABLE_OFFSET (0)
+#endif

 #ifndef PREEMPT_ACTIVE
 #define PREEMPT_ACTIVE_BITS 1
@@ -73,10 +77,17 @@
 #endif

 #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
 #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
 | NMI_MASK))

+#ifndef CONFIG_PREEMPT_RT_FULL
+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+#else
+# define softirq_count() (0UL)
+extern int in_serving_softirq(void);
+#endif
+
 /*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
@@ -86,7 +97,6 @@
 #define in_irq() (hardirq_count())
 #define in_softirq() (softirq_count())
 #define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)

 /*
 * Are we in NMI context?
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index cc07d2777bbe..7259cd308281 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -111,6 +111,11 @@ struct hrtimer {
 enum hrtimer_restart (*function)(struct hrtimer *);
 struct hrtimer_clock_base *base;
 unsigned long state;
+ struct list_head cb_entry;
+ int irqsafe;
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ ktime_t praecox;
+#endif
 #ifdef CONFIG_TIMER_STATS
 int start_pid;
 void *start_site;
@@ -147,6 +152,7 @@ struct hrtimer_clock_base {
 int index;
 clockid_t clockid;
 struct timerqueue_head active;
+ struct list_head expired;
 ktime_t resolution;
 ktime_t (*get_time)(void);
 ktime_t softirq_time;
@@ -189,6 +195,9 @@ struct hrtimer_cpu_base {
 unsigned long nr_hangs;
 ktime_t max_hang_time;
 #endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+ wait_queue_head_t wait;
+#endif
 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
 };

@@ -382,6 +391,13 @@ static inline int hrtimer_restart(struct hrtimer *timer)
 return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }

+/* Softirq preemption could deadlock timer removal */
+#ifdef CONFIG_PREEMPT_RT_BASE
+ extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
+#else
+# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
+#endif
+
 /* Query timers: */
 extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
 extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
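
hrtimer_wait_for_timer() exists because on RT the timer callback may run in a preemptible softirq thread: spinning with cpu_relax() until a running callback finishes (as the fs/timerfd.c code above used to) could spin against a preempted thread indefinitely. The cancel-and-wait idiom, mirroring the timerfd change; my_timer is a hypothetical structure embedding an hrtimer:

    static void my_cancel_sync(struct my_timer *t)  /* hypothetical */
    {
            for (;;) {
                    if (hrtimer_try_to_cancel(&t->tmr) >= 0)
                            break;
                    /* callback is running: sleep until it completes (RT) */
                    hrtimer_wait_for_timer(&t->tmr);
            }
    }
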
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 52a9da295296..dffed8f9b073 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -136,7 +136,7 @@ struct ida {
 struct ida_bitmap *free_bitmap;
 };

-#define IDA_INIT(name) { .idr = IDR_INIT(name), .free_bitmap = NULL, }
+#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
 #define DEFINE_IDA(name) struct ida name = IDA_INIT(name)

 int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index b11e298e989f..670b809e2333 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -142,6 +142,12 @@ extern struct task_group root_task_group;
 # define INIT_PERF_EVENTS(tsk)
 #endif

+#ifdef CONFIG_PREEMPT_RT_BASE
+# define INIT_TIMER_LIST .posix_timer_list = NULL,
+#else
+# define INIT_TIMER_LIST
+#endif
+
 #define INIT_TASK_COMM "swapper"

 /*
@@ -197,6 +203,7 @@ extern struct task_group root_task_group;
 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
 .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
 .timer_slack_ns = 50000, /* 50 usec default slack */ \
+ INIT_TIMER_LIST \
 .pids = { \
 [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
 [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 2aea5d22db07..fb0576196171 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -59,6 +59,7 @@
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 * resume time.
+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
 */
 #define IRQF_DISABLED 0x00000020
 #define IRQF_SAMPLE_RANDOM 0x00000040
@@ -73,6 +74,7 @@
 #define IRQF_FORCE_RESUME 0x00008000
 #define IRQF_NO_THREAD 0x00010000
 #define IRQF_EARLY_RESUME 0x00020000
+#define IRQF_NO_SOFTIRQ_CALL 0x00040000

 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

@@ -217,7 +219,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
 #ifdef CONFIG_LOCKDEP
 # define local_irq_enable_in_hardirq() do { } while (0)
 #else
-# define local_irq_enable_in_hardirq() local_irq_enable()
+# define local_irq_enable_in_hardirq() local_irq_enable_nort()
 #endif

 extern void disable_irq_nosync(unsigned int irq);
@@ -267,6 +269,7 @@ struct irq_affinity_notify {
 unsigned int irq;
 struct kref kref;
 struct work_struct work;
+ struct list_head list;
 void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
 void (*release)(struct kref *ref);
 };
@@ -394,9 +397,13 @@ static inline int disable_irq_wake(unsigned int irq)


 #ifdef CONFIG_IRQ_FORCED_THREADING
-extern bool force_irqthreads;
+# ifndef CONFIG_PREEMPT_RT_BASE
+ extern bool force_irqthreads;
+# else
+# define force_irqthreads (true)
+# endif
 #else
-#define force_irqthreads (0)
+#define force_irqthreads (false)
 #endif

 #ifndef __ARCH_SET_SOFTIRQ_PENDING
@@ -450,8 +457,14 @@ struct softirq_action
 void (*action)(struct softirq_action *);
 };

+#ifndef CONFIG_PREEMPT_RT_FULL
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
+static inline void thread_do_softirq(void) { do_softirq(); }
+#else
+extern void thread_do_softirq(void);
+#endif
+
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
 extern void __raise_softirq_irqoff(unsigned int nr);
@@ -459,6 +472,8 @@ extern void __raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);

+extern void softirq_check_pending_idle(void);
+
 /* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_sendirq() adds work to these lists, and
@@ -499,8 +514,9 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
 to be executed on some cpu at least once after this.
 * If the tasklet is already scheduled, but its execution is still not
 started, it will be executed only once.
- * If this tasklet is already running on another CPU (or schedule is called
- from tasklet itself), it is rescheduled for later.
+ * If this tasklet is already running on another CPU, it is rescheduled
+ for later.
+ * Schedule must not be called from the tasklet itself (a lockup occurs)
 * Tasklet is strictly serialized wrt itself, but not
 wrt another tasklets. If client needs some intertask synchronization,
 he makes it with spinlocks.
@@ -525,27 +541,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
 enum
 {
 TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
+ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
+ TASKLET_STATE_PENDING /* Tasklet is pending */
 };

-#ifdef CONFIG_SMP
+#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
+#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
+#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
 return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
 }

+static inline int tasklet_tryunlock(struct tasklet_struct *t)
+{
+ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
+}
+
 static inline void tasklet_unlock(struct tasklet_struct *t)
 {
 smp_mb__before_clear_bit();
 clear_bit(TASKLET_STATE_RUN, &(t)->state);
 }

-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
+extern void tasklet_unlock_wait(struct tasklet_struct *t);
+
 #else
 #define tasklet_trylock(t) 1
+#define tasklet_tryunlock(t) 1
 #define tasklet_unlock_wait(t) do { } while (0)
 #define tasklet_unlock(t) do { } while (0)
 #endif
@@ -594,17 +619,8 @@ static inline void tasklet_disable(struct tasklet_struct *t)
 smp_mb();
 }

-static inline void tasklet_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic_dec();
- atomic_dec(&t->count);
-}
-
-static inline void tasklet_hi_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic_dec();
- atomic_dec(&t->count);
-}
+extern void tasklet_enable(struct tasklet_struct *t);
+extern void tasklet_hi_enable(struct tasklet_struct *t);

 extern void tasklet_kill(struct tasklet_struct *t);
 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
@@ -636,6 +652,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
 tasklet_kill(&ttimer->tasklet);
 }

+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void softirq_early_init(void);
+#else
+static inline void softirq_early_init(void) { }
+#endif
+
 /*
 * Autoprobing for irqs:
 *
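
tasklet_tryunlock() turns the RUN-bit release into a cmpxchg: it succeeds only when the state is exactly TASKLET_STATEF_RUN, i.e. when no SCHED or PENDING bit raced in while the handler ran. A heavily simplified sketch of how an execution loop can consume that return value (the real loop lives in the softirq rework elsewhere in this patch set and also handles PENDING and disabled tasklets):

    static void run_one(struct tasklet_struct *t)   /* hypothetical, simplified */
    {
            if (!tasklet_trylock(t))
                    return;                         /* running elsewhere */
    again:
            t->func(t->data);
            while (!tasklet_tryunlock(t)) {
                    /* a new schedule raced in while we ran: handle it */
                    if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                            goto again;
            }
    }
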
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b27cfcfd3a59..496fa99c0c19 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -73,6 +73,7 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
 * IRQ_NESTED_TRHEAD - Interrupt nests into another thread
 * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
 */
 enum {
 IRQ_TYPE_NONE = 0x00000000,
@@ -97,12 +98,14 @@ enum {
 IRQ_NESTED_THREAD = (1 << 15),
 IRQ_NOTHREAD = (1 << 16),
 IRQ_PER_CPU_DEVID = (1 << 17),
+ IRQ_NO_SOFTIRQ_CALL = (1 << 18),
 };

 #define IRQF_MODIFY_MASK \
 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
+ IRQ_NO_SOFTIRQ_CALL)

 #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)

diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 1e1e41787cd9..970930b7b821 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -56,6 +56,7 @@ struct irq_desc {
 unsigned int irqs_unhandled;
 atomic_t threads_handled;
 int threads_handled_last;
+ u64 random_ip;
 raw_spinlock_t lock;
 struct cpumask *percpu_enabled;
 #ifdef CONFIG_SMP
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index d176d658fe25..a52b35d4229f 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -25,8 +25,6 @@
 # define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
 # define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
 # define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
 # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
 #else
 # define trace_hardirqs_on() do { } while (0)
@@ -39,9 +37,15 @@
 # define trace_softirqs_enabled(p) 0
 # define trace_hardirq_enter() do { } while (0)
 # define trace_hardirq_exit() do { } while (0)
+# define INIT_TRACE_IRQFLAGS
+#endif
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
+#else
 # define lockdep_softirq_enter() do { } while (0)
 # define lockdep_softirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
 #endif

 #if defined(CONFIG_IRQSOFF_TRACER) || \
@@ -147,4 +151,23 @@

 #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */

+/*
+ * local_irq* variants depending on RT/!RT
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define local_irq_disable_nort() do { } while (0)
+# define local_irq_enable_nort() do { } while (0)
+# define local_irq_save_nort(flags) do { local_save_flags(flags); } while (0)
+# define local_irq_restore_nort(flags) do { (void)(flags); } while (0)
+# define local_irq_disable_rt() local_irq_disable()
+# define local_irq_enable_rt() local_irq_enable()
+#else
+# define local_irq_disable_nort() local_irq_disable()
+# define local_irq_enable_nort() local_irq_enable()
+# define local_irq_save_nort(flags) local_irq_save(flags)
+# define local_irq_restore_nort(flags) local_irq_restore(flags)
+# define local_irq_disable_rt() do { } while (0)
+# define local_irq_enable_rt() do { } while (0)
+#endif
+
 #endif
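
The local_irq_*_nort() family is the workhorse of most driver hunks earlier in this patch (libsas, qla2xxx, tty_ldisc, usb hcd, ntfs): on a stock kernel they are the ordinary irq operations, while on RT they become (nearly) no-ops, because the code they guard runs in threaded context there and the irq disabling only protected a !RT-specific assumption. Usage sketch with a hypothetical polling routine:

    static void my_poll(struct my_dev *dev)         /* hypothetical */
    {
            unsigned long flags;

            local_irq_save_nort(flags);     /* hard irqs off on !RT, no-op on RT */
            dev->ops->handler(0, dev);      /* the handler is threaded on RT anyway */
            local_irq_restore_nort(flags);
    }
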
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
index 6230f8556a4e..11c313e8561d 100644
--- a/include/linux/jbd_common.h
+++ b/include/linux/jbd_common.h
@@ -37,32 +37,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)

 static inline void jbd_lock_bh_state(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 bit_spin_lock(BH_State, &bh->b_state);
+#else
+ spin_lock(&bh->b_state_lock);
+#endif
 }

 static inline int jbd_trylock_bh_state(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 return bit_spin_trylock(BH_State, &bh->b_state);
+#else
+ return spin_trylock(&bh->b_state_lock);
+#endif
 }

 static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 return bit_spin_is_locked(BH_State, &bh->b_state);
+#else
+ return spin_is_locked(&bh->b_state_lock);
+#endif
 }

 static inline void jbd_unlock_bh_state(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 bit_spin_unlock(BH_State, &bh->b_state);
+#else
+ spin_unlock(&bh->b_state_lock);
+#endif
 }

 static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 bit_spin_lock(BH_JournalHead, &bh->b_state);
+#else
+ spin_lock(&bh->b_journal_head_lock);
+#endif
 }

 static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 bit_spin_unlock(BH_JournalHead, &bh->b_state);
+#else
+ spin_unlock(&bh->b_journal_head_lock);
+#endif
 }

 #endif
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index c513a40510f5..f47f3e0675a3 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -51,7 +51,8 @@
 #include <linux/compiler.h>
 #include <linux/workqueue.h>

-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \
+ !defined(CONFIG_PREEMPT_BASE)

 struct static_key {
 atomic_t enabled;
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 064725854db8..0d1ebfc9ff43 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -150,12 +150,14 @@ extern int kdb_register(char *, kdb_func_t, char *, char *, short);
 extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
 short, kdb_repeat_t);
 extern int kdb_unregister(char *);
+#define in_kdb_printk() (kdb_trap_printk)
 #else /* ! CONFIG_KGDB_KDB */
 #define kdb_printf(...)
 #define kdb_init(x)
 #define kdb_register(...)
 #define kdb_register_repeat(...)
 #define kdb_uregister(x)
+#define in_kdb_printk() (0)
 #endif /* CONFIG_KGDB_KDB */
 enum {
 KDB_NOT_INITIALIZED,
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index b795ee5cd208..85f00b34ef15 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -375,7 +375,7 @@ extern enum system_states {
 SYSTEM_HALT,
 SYSTEM_POWER_OFF,
 SYSTEM_RESTART,
- SYSTEM_SUSPEND_DISK,
+ SYSTEM_SUSPEND,
 } system_state;

 #define TAINT_PROPRIETARY_MODULE 0
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 87f402ccec55..cdfcef312c79 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -71,6 +71,8 @@
 extern void name##_global_lock_online(void); \
 extern void name##_global_unlock_online(void); \

+#ifndef CONFIG_PREEMPT_RT_FULL
+
 #define DEFINE_LGLOCK(name) \
 \
 DEFINE_SPINLOCK(name##_cpu_lock); \
@@ -197,4 +199,130 @@
 preempt_enable(); \
 } \
 EXPORT_SYMBOL(name##_global_unlock);
+
+#else /* !PREEMPT_RT_FULL */
+#define DEFINE_LGLOCK(name) \
+ \
+ DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
+ DEFINE_SPINLOCK(name##_cpu_lock); \
+ cpumask_t name##_cpus __read_mostly; \
+ DEFINE_LGLOCK_LOCKDEP(name); \
+ \
+ static int \
+ name##_lg_cpu_callback(struct notifier_block *nb, \
+ unsigned long action, void *hcpu) \
+ { \
+ switch (action & ~CPU_TASKS_FROZEN) { \
+ case CPU_UP_PREPARE: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_set((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ break; \
+ case CPU_UP_CANCELED: case CPU_DEAD: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_clear((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ } \
+ return NOTIFY_OK; \
+ } \
+ static struct notifier_block name##_lg_cpu_notifier = { \
+ .notifier_call = name##_lg_cpu_callback, \
+ }; \
+ void name##_lock_init(void) { \
+ int i; \
+ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+ for_each_possible_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ rt_mutex_init(lock); \
+ } \
+ register_hotcpu_notifier(&name##_lg_cpu_notifier); \
+ get_online_cpus(); \
+ for_each_online_cpu(i) \
+ cpu_set(i, name##_cpus); \
+ put_online_cpus(); \
+ } \
+ EXPORT_SYMBOL(name##_lock_init); \
+ \
+ void name##_local_lock(void) { \
+ struct rt_mutex *lock; \
+ migrate_disable(); \
+ rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
+ lock = &__get_cpu_var(name##_lock); \
+ __rt_spin_lock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_lock); \
+ \
+ void name##_local_unlock(void) { \
+ struct rt_mutex *lock; \
+ rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
+ lock = &__get_cpu_var(name##_lock); \
+ __rt_spin_unlock(lock); \
+ migrate_enable(); \
+ } \
+ EXPORT_SYMBOL(name##_local_unlock); \
+ \
+ void name##_local_lock_cpu(int cpu) { \
+ struct rt_mutex *lock; \
+ rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
+ lock = &per_cpu(name##_lock, cpu); \
+ __rt_spin_lock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_lock_cpu); \
+ \
+ void name##_local_unlock_cpu(int cpu) { \
+ struct rt_mutex *lock; \
+ rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
+ lock = &per_cpu(name##_lock, cpu); \
+ __rt_spin_unlock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_unlock_cpu); \
+ \
+ void name##_global_lock_online(void) { \
+ int i; \
+ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+ spin_lock(&name##_cpu_lock); \
+ for_each_cpu(i, &name##_cpus) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_lock(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_global_lock_online); \
+ \
+ void name##_global_unlock_online(void) { \
+ int i; \
+ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+ for_each_cpu(i, &name##_cpus) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_unlock(lock); \
+ } \
+ spin_unlock(&name##_cpu_lock); \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock_online); \
+ \
+ void name##_global_lock(void) { \
+ int i; \
+ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+ for_each_possible_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_lock(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_global_lock); \
+ \
+ void name##_global_unlock(void) { \
+ int i; \
+ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+ for_each_possible_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_unlock(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock);
+#endif /* PREEMPT_RT_FULL */
+
 #endif
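
On RT the DEFINE_LGLOCK() expansion above swaps the per-CPU spinlocks for per-CPU rt_mutexes and pins the caller with migrate_disable(), but the generated function names are unchanged, so callers stay source-compatible. Usage sketch with a hypothetical lock name:

    DEFINE_LGLOCK(my_lglock);               /* hypothetical */

    static void touch_local_state(void)
    {
            my_lglock_local_lock();         /* rt_mutex + migrate_disable() on RT */
            /* ... operate on this CPU's share of the data ... */
            my_lglock_local_unlock();
    }
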
diff --git a/include/linux/list.h b/include/linux/list.h
index 1712c7e1f56b..004af788d152 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -362,6 +362,17 @@ static inline void list_splice_tail_init(struct list_head *list,
 list_entry((ptr)->next, type, member)

 /**
+ * list_last_entry - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Note, that list is expected to be not empty.
+ */
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+/**
 * list_next_entry - get the next element in list
 * @pos: the type * to cursor
 * @member: the name of the list_struct within the struct.
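
list_last_entry() is the tail-end counterpart of list_first_entry() directly above; as its kerneldoc says, the list must not be empty. A small usage sketch with a hypothetical item type:

    static struct my_item *newest(struct list_head *head)   /* hypothetical */
    {
            if (list_empty(head))
                    return NULL;
            return list_last_entry(head, struct my_item, node);
    }
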
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index 31f9d75adc5b..becd7a61263f 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -2,6 +2,7 @@
 #define _LINUX_LIST_BL_H

 #include <linux/list.h>
+#include <linux/spinlock.h>
 #include <linux/bit_spinlock.h>

 /*
@@ -32,13 +33,22 @@

 struct hlist_bl_head {
 struct hlist_bl_node *first;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ raw_spinlock_t lock;
+#endif
 };

 struct hlist_bl_node {
 struct hlist_bl_node *next, **pprev;
 };
-#define INIT_HLIST_BL_HEAD(ptr) \
- ((ptr)->first = NULL)
+
+static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
+{
+ h->first = NULL;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ raw_spin_lock_init(&h->lock);
+#endif
+}

 static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
 {
@@ -117,12 +127,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)

 static inline void hlist_bl_lock(struct hlist_bl_head *b)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 bit_spin_lock(0, (unsigned long *)b);
+#else
+ raw_spin_lock(&b->lock);
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ __set_bit(0, (unsigned long *)b);
+#endif
+#endif
 }

 static inline void hlist_bl_unlock(struct hlist_bl_head *b)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 __bit_spin_unlock(0, (unsigned long *)b);
+#else
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ __clear_bit(0, (unsigned long *)b);
+#endif
+ raw_spin_unlock(&b->lock);
+#endif
 }

 /**
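
Callers keep using hlist_bl_lock()/hlist_bl_unlock() unchanged: on RT the serialization moves into the new raw spinlock, while bit 0 is still mirrored on SMP/debug builds so users that mask the low pointer bit keep seeing a consistent value. Sketch:

    static void bl_insert(struct hlist_bl_head *h, struct hlist_bl_node *n)
    {
            hlist_bl_lock(h);       /* bit spinlock on !RT, raw spinlock on RT */
            hlist_bl_add_head(n, h);
            hlist_bl_unlock(h);
    }
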
diff --git a/include/linux/locallock.h b/include/linux/locallock.h
|
|
new file mode 100644
|
|
index 000000000000..a5eea5df7cc2
|
|
--- /dev/null
|
|
+++ b/include/linux/locallock.h
|
|
@@ -0,0 +1,253 @@
|
|
+#ifndef _LINUX_LOCALLOCK_H
|
|
+#define _LINUX_LOCALLOCK_H
|
|
+
|
|
+#include <linux/spinlock.h>
|
|
+
|
|
+#ifdef CONFIG_PREEMPT_RT_BASE
|
|
+
|
|
+#ifdef CONFIG_DEBUG_SPINLOCK
|
|
+# define LL_WARN(cond) WARN_ON(cond)
|
|
+#else
|
|
+# define LL_WARN(cond) do { } while (0)
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * per cpu lock based substitute for local_irq_*()
|
|
+ */
|
|
+struct local_irq_lock {
|
|
+ spinlock_t lock;
|
|
+ struct task_struct *owner;
|
|
+ int nestcnt;
|
|
+ unsigned long flags;
|
|
+};
|
|
+
|
|
+#define DEFINE_LOCAL_IRQ_LOCK(lvar) \
|
|
+ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
|
|
+ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
|
|
+
|
|
+#define DECLARE_LOCAL_IRQ_LOCK(lvar) \
|
|
+ DECLARE_PER_CPU(struct local_irq_lock, lvar)
|
|
+
|
|
+#define local_irq_lock_init(lvar) \
|
|
+ do { \
|
|
+ int __cpu; \
|
|
+ for_each_possible_cpu(__cpu) \
|
|
+ spin_lock_init(&per_cpu(lvar, __cpu).lock); \
|
|
+ } while (0)
|
|
+
|
|
+static inline void __local_lock(struct local_irq_lock *lv)
+{
+	if (lv->owner != current) {
+		spin_lock(&lv->lock);
+		LL_WARN(lv->owner);
+		LL_WARN(lv->nestcnt);
+		lv->owner = current;
+	}
+	lv->nestcnt++;
+}
+
+#define local_lock(lvar)					\
+	do { __local_lock(&get_local_var(lvar)); } while (0)
+
+static inline int __local_trylock(struct local_irq_lock *lv)
+{
+	if (lv->owner != current && spin_trylock(&lv->lock)) {
+		LL_WARN(lv->owner);
+		LL_WARN(lv->nestcnt);
+		lv->owner = current;
+		lv->nestcnt = 1;
+		return 1;
+	}
+	return 0;
+}
+
+#define local_trylock(lvar)						\
+	({								\
+		int __locked;						\
+		__locked = __local_trylock(&get_local_var(lvar));	\
+		if (!__locked)						\
+			put_local_var(lvar);				\
+		__locked;						\
+	})
+
+static inline void __local_unlock(struct local_irq_lock *lv)
+{
+	LL_WARN(lv->nestcnt == 0);
+	LL_WARN(lv->owner != current);
+	if (--lv->nestcnt)
+		return;
+
+	lv->owner = NULL;
+	spin_unlock(&lv->lock);
+}
+
+#define local_unlock(lvar)					\
+	do {							\
+		__local_unlock(&__get_cpu_var(lvar));		\
+		put_local_var(lvar);				\
+	} while (0)
+
+static inline void __local_lock_irq(struct local_irq_lock *lv)
+{
+	spin_lock_irqsave(&lv->lock, lv->flags);
+	LL_WARN(lv->owner);
+	LL_WARN(lv->nestcnt);
+	lv->owner = current;
+	lv->nestcnt = 1;
+}
+
+#define local_lock_irq(lvar)					\
+	do { __local_lock_irq(&get_local_var(lvar)); } while (0)
+
+#define local_lock_irq_on(lvar, cpu)				\
+	do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
+
+static inline void __local_unlock_irq(struct local_irq_lock *lv)
+{
+	LL_WARN(!lv->nestcnt);
+	LL_WARN(lv->owner != current);
+	lv->owner = NULL;
+	lv->nestcnt = 0;
+	spin_unlock_irq(&lv->lock);
+}
+
+#define local_unlock_irq(lvar)					\
+	do {							\
+		__local_unlock_irq(&__get_cpu_var(lvar));	\
+		put_local_var(lvar);				\
+	} while (0)
+
+#define local_unlock_irq_on(lvar, cpu)				\
+	do {							\
+		__local_unlock_irq(&per_cpu(lvar, cpu));	\
+	} while (0)
+
+static inline int __local_lock_irqsave(struct local_irq_lock *lv)
+{
+	if (lv->owner != current) {
+		__local_lock_irq(lv);
+		return 0;
+	} else {
+		lv->nestcnt++;
+		return 1;
+	}
+}
+
+#define local_lock_irqsave(lvar, _flags)			\
+	do {							\
+		if (__local_lock_irqsave(&get_local_var(lvar)))	\
+			put_local_var(lvar);			\
+		_flags = __get_cpu_var(lvar).flags;		\
+	} while (0)
+
+#define local_lock_irqsave_on(lvar, _flags, cpu)		\
+	do {							\
+		__local_lock_irqsave(&per_cpu(lvar, cpu));	\
+		_flags = per_cpu(lvar, cpu).flags;		\
+	} while (0)
+
+static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
+					    unsigned long flags)
+{
+	LL_WARN(!lv->nestcnt);
+	LL_WARN(lv->owner != current);
+	if (--lv->nestcnt)
+		return 0;
+
+	lv->owner = NULL;
+	spin_unlock_irqrestore(&lv->lock, lv->flags);
+	return 1;
+}
+
+#define local_unlock_irqrestore(lvar, flags)			\
+	do {							\
+		if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \
+			put_local_var(lvar);			\
+	} while (0)
+
+#define local_unlock_irqrestore_on(lvar, flags, cpu)		\
+	do {							\
+		__local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \
+	} while (0)
+
+#define local_spin_trylock_irq(lvar, lock)			\
+	({							\
+		int __locked;					\
+		local_lock_irq(lvar);				\
+		__locked = spin_trylock(lock);			\
+		if (!__locked)					\
+			local_unlock_irq(lvar);			\
+		__locked;					\
+	})
+
+#define local_spin_lock_irq(lvar, lock)				\
+	do {							\
+		local_lock_irq(lvar);				\
+		spin_lock(lock);				\
+	} while (0)
+
+#define local_spin_unlock_irq(lvar, lock)			\
+	do {							\
+		spin_unlock(lock);				\
+		local_unlock_irq(lvar);				\
+	} while (0)
+
+#define local_spin_lock_irqsave(lvar, lock, flags)		\
+	do {							\
+		local_lock_irqsave(lvar, flags);		\
+		spin_lock(lock);				\
+	} while (0)
+
+#define local_spin_unlock_irqrestore(lvar, lock, flags)		\
+	do {							\
+		spin_unlock(lock);				\
+		local_unlock_irqrestore(lvar, flags);		\
+	} while (0)
+
+#define get_locked_var(lvar, var)				\
+	(*({							\
+		local_lock(lvar);				\
+		&__get_cpu_var(var);				\
+	}))
+
+#define put_locked_var(lvar, var)	local_unlock(lvar)
+
+#define local_lock_cpu(lvar)					\
+	({							\
+		local_lock(lvar);				\
+		smp_processor_id();				\
+	})
+
+#define local_unlock_cpu(lvar)	local_unlock(lvar)
+
+#else /* PREEMPT_RT_BASE */
+
+#define DEFINE_LOCAL_IRQ_LOCK(lvar)		__typeof__(const int) lvar
+#define DECLARE_LOCAL_IRQ_LOCK(lvar)		extern __typeof__(const int) lvar
+
+static inline void local_irq_lock_init(int lvar) { }
+
+#define local_lock(lvar)			preempt_disable()
+#define local_unlock(lvar)			preempt_enable()
+#define local_lock_irq(lvar)			local_irq_disable()
+#define local_unlock_irq(lvar)			local_irq_enable()
+#define local_lock_irqsave(lvar, flags)		local_irq_save(flags)
+#define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)
+
+#define local_spin_trylock_irq(lvar, lock)	spin_trylock_irq(lock)
+#define local_spin_lock_irq(lvar, lock)		spin_lock_irq(lock)
+#define local_spin_unlock_irq(lvar, lock)	spin_unlock_irq(lock)
+#define local_spin_lock_irqsave(lvar, lock, flags)	\
+	spin_lock_irqsave(lock, flags)
+#define local_spin_unlock_irqrestore(lvar, lock, flags)	\
+	spin_unlock_irqrestore(lock, flags)
+
+#define get_locked_var(lvar, var)		get_cpu_var(var)
+#define put_locked_var(lvar, var)		put_cpu_var(var)
+
+#define local_lock_cpu(lvar)			get_cpu()
+#define local_unlock_cpu(lvar)			put_cpu()
+
+#endif
+
+#endif
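Usage sketch (illustrative, not part of the patch): protecting per-CPU data
with the local lock primitives above. The struct, per-CPU variable and lock
names are made up for this example.

	struct my_counter { unsigned long count; };	/* hypothetical */

	static DEFINE_PER_CPU(struct my_counter, my_counter);
	static DEFINE_LOCAL_IRQ_LOCK(my_lock);		/* hypothetical lock */

	static void my_counter_inc(void)
	{
		struct my_counter *c;

		/* !RT: preempt_disable(); RT: per-CPU spinlock, section
		 * stays preemptible and priority-inheriting. */
		c = &get_locked_var(my_lock, my_counter);
		c->count++;
		put_locked_var(my_lock, my_counter);
	}

On mainline the sequence degrades to plain get_cpu_var()/put_cpu_var(), so
!RT builds pay no extra cost.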
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ceebf63ef5da..ff9d780666eb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1203,27 +1203,59 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
  * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
  * When freeing, reset page->mapping so free_pages_check won't complain.
  */
+#ifndef CONFIG_PREEMPT_RT_FULL
+
 #define __pte_lockptr(page)	&((page)->ptl)
-#define pte_lock_init(_page)	do {					\
-	spin_lock_init(__pte_lockptr(_page));				\
-} while (0)
+
+static inline struct page *pte_lock_init(struct page *page)
+{
+	spin_lock_init(__pte_lockptr(page));
+	return page;
+}
+
 #define pte_lock_deinit(page)	((page)->mapping = NULL)
+
+#else /* !PREEMPT_RT_FULL */
+
+/*
+ * On PREEMPT_RT_FULL the spinlock_t's are too large to embed in the
+ * page frame, hence it only has a pointer and we need to dynamically
+ * allocate the lock when we allocate PTE-pages.
+ *
+ * This is an overall win, since only a small fraction of the pages
+ * will be PTE pages under normal circumstances.
+ */
+
+#define __pte_lockptr(page)	((page)->ptl)
+
+extern struct page *pte_lock_init(struct page *page);
+extern void pte_lock_deinit(struct page *page);
+
+#endif /* PREEMPT_RT_FULL */
+
 #define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
 #else	/* !USE_SPLIT_PTLOCKS */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
-#define pte_lock_init(page)	do {} while (0)
+static inline struct page *pte_lock_init(struct page *page) { return page; }
 #define pte_lock_deinit(page)	do {} while (0)
 #define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
 #endif /* USE_SPLIT_PTLOCKS */
 
-static inline void pgtable_page_ctor(struct page *page)
+static inline struct page *__pgtable_page_ctor(struct page *page)
 {
-	pte_lock_init(page);
-	inc_zone_page_state(page, NR_PAGETABLE);
+	page = pte_lock_init(page);
+	if (page)
+		inc_zone_page_state(page, NR_PAGETABLE);
+	return page;
 }
 
+#define pgtable_page_ctor(page)				\
+do {							\
+	page = __pgtable_page_ctor(page);		\
+} while (0)
+
 static inline void pgtable_page_dtor(struct page *page)
 {
 	pte_lock_deinit(page);
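Because the PREEMPT_RT_FULL variant allocates the page-table lock
dynamically, pgtable_page_ctor() can now fail. A hedged sketch of the
caller pattern this implies (the surrounding allocation code is
illustrative, not taken from the patch):

	struct page *pte = alloc_page(GFP_KERNEL | __GFP_ZERO);	/* illustrative */

	if (pte) {
		pgtable_page_ctor(pte);	/* may replace pte with NULL on RT */
		if (!pte)
			return NULL;	/* ptl allocation failed; caller frees */
	}

Note how the macro reassigns its argument, so existing callers only need
to add the NULL check.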
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 16cb641bb89f..465f37bb2168 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -12,6 +12,7 @@
 #include <linux/completion.h>
 #include <linux/cpumask.h>
 #include <linux/page-debug-flags.h>
+#include <linux/rcupdate.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
 
@@ -128,7 +129,11 @@ struct page {
 	 * system if PG_buddy is set.
 	 */
 #if USE_SPLIT_PTLOCKS
-	spinlock_t ptl;
+# ifndef CONFIG_PREEMPT_RT_FULL
+	spinlock_t ptl;
+# else
+	spinlock_t *ptl;
+# endif
 #endif
 	struct kmem_cache *slab;	/* SLUB: Pointer to slab */
 	struct page *first_page;	/* Compound tail pages */
@@ -399,6 +404,9 @@ struct mm_struct {
 #ifdef CONFIG_CPUMASK_OFFSTACK
 	struct cpumask cpumask_allocation;
 #endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct rcu_head delayed_drop;
+#endif
 };
 
 static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 9121595a8ebf..bdf1da2cf4b6 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,6 +17,17 @@
 
 #include <linux/atomic.h>
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+		, .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/mutex_rt.h>
+#else
+
 /*
  * Simple, straightforward mutexes with strict semantics:
  *
@@ -95,13 +106,6 @@ do { \
 static inline void mutex_destroy(struct mutex *lock) {}
 #endif
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
-		, .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
 #define __MUTEX_INITIALIZER(lockname) \
 		{ .count = ATOMIC_INIT(1) \
 		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -167,6 +171,9 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
  */
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
+
+#endif /* !PREEMPT_RT_FULL */
+
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
 #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
new file mode 100644
index 000000000000..c38a44b14da5
--- /dev/null
+++ b/include/linux/mutex_rt.h
@@ -0,0 +1,84 @@
+#ifndef __LINUX_MUTEX_RT_H
+#define __LINUX_MUTEX_RT_H
+
+#ifndef __LINUX_MUTEX_H
+#error "Please include mutex.h"
+#endif
+
+#include <linux/rtmutex.h>
+
+/* FIXME: Just for __lockfunc */
+#include <linux/spinlock.h>
+
+struct mutex {
+	struct rt_mutex		lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
+};
+
+#define __MUTEX_INITIALIZER(mutexname)					\
+	{								\
+		.lock = __RT_MUTEX_INITIALIZER(mutexname.lock)		\
+		__DEP_MAP_MUTEX_INITIALIZER(mutexname)			\
+	}
+
+#define DEFINE_MUTEX(mutexname)						\
+	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
+extern void __lockfunc _mutex_lock(struct mutex *lock);
+extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
+extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
+extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
+extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
+extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
+extern int __lockfunc _mutex_trylock(struct mutex *lock);
+extern void __lockfunc _mutex_unlock(struct mutex *lock);
+
+#define mutex_is_locked(l)		rt_mutex_is_locked(&(l)->lock)
+#define mutex_lock(l)			_mutex_lock(l)
+#define mutex_lock_interruptible(l)	_mutex_lock_interruptible(l)
+#define mutex_lock_killable(l)		_mutex_lock_killable(l)
+#define mutex_trylock(l)		_mutex_trylock(l)
+#define mutex_unlock(l)			_mutex_unlock(l)
+#define mutex_destroy(l)		rt_mutex_destroy(&(l)->lock)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define mutex_lock_nested(l, s)	_mutex_lock_nested(l, s)
+# define mutex_lock_interruptible_nested(l, s) \
+					_mutex_lock_interruptible_nested(l, s)
+# define mutex_lock_killable_nested(l, s) \
+					_mutex_lock_killable_nested(l, s)
+
+# define mutex_lock_nest_lock(lock, nest_lock)				\
+do {									\
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);		\
+	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
+} while (0)
+
+#else
+# define mutex_lock_nested(l, s)	_mutex_lock(l)
+# define mutex_lock_interruptible_nested(l, s) \
+					_mutex_lock_interruptible(l)
+# define mutex_lock_killable_nested(l, s) \
+					_mutex_lock_killable(l)
+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+#endif
+
+# define mutex_init(mutex)				\
+do {							\
+	static struct lock_class_key __key;		\
+							\
+	rt_mutex_init(&(mutex)->lock);			\
+	__mutex_do_init((mutex), #mutex, &__key);	\
+} while (0)
+
+# define __mutex_init(mutex, name, key)			\
+do {							\
+	rt_mutex_init(&(mutex)->lock);			\
+	__mutex_do_init((mutex), name, key);		\
+} while (0)
+
+#endif
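The net effect of this header is that existing mutex users build unchanged
while every mutex becomes a priority-inheriting rtmutex. A sketch of an
unmodified caller (mutex name hypothetical):

	static DEFINE_MUTEX(my_mutex);	/* hypothetical */

	static void my_critical_section(void)
	{
		mutex_lock(&my_mutex);	/* redirected to _mutex_lock() -> rt_mutex */
		/* ... preemptible, priority-inheriting critical section ... */
		mutex_unlock(&my_mutex);
	}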
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5b465010f102..551b2cefc9b8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1109,7 +1109,7 @@ struct net_device {
 	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
 	unsigned char		addr_assign_type; /* hw address assignment type */
 	unsigned char		addr_len;	/* hardware address length */
-	unsigned char		neigh_priv_len;
+	unsigned short		neigh_priv_len;
 	unsigned short		dev_id;		/* for shared network cards */
 
 	spinlock_t		addr_list_lock;
@@ -1747,6 +1747,7 @@ struct softnet_data {
 	unsigned		dropped;
 	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
+	struct sk_buff_head	tofree_queue;
 };
 
 static inline void input_queue_head_incr(struct softnet_data *sd)
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 8d674a786744..3f2bc5755a96 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -186,6 +186,7 @@ struct xt_counters_info {
 #ifdef __KERNEL__
 
 #include <linux/netdevice.h>
+#include <linux/locallock.h>
 
 /**
  * struct xt_action_param - parameters for matches/targets
@@ -466,6 +467,8 @@ extern void xt_free_table_info(struct xt_table_info *info);
  */
 DECLARE_PER_CPU(seqcount_t, xt_recseq);
 
+DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
+
 /**
  * xt_write_recseq_begin - start of a write section
  *
@@ -480,6 +483,9 @@ static inline unsigned int xt_write_recseq_begin(void)
 {
 	unsigned int addend;
 
+	/* RT protection */
+	local_lock(xt_write_lock);
+
 	/*
 	 * Low order bit of sequence is set if we already
 	 * called xt_write_recseq_begin().
@@ -510,6 +516,7 @@ static inline void xt_write_recseq_end(unsigned int addend)
 	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
 	smp_wmb();
 	__this_cpu_add(xt_recseq.sequence, addend);
+	local_unlock(xt_write_lock);
 }
 
 /*
diff --git a/include/linux/of.h b/include/linux/of.h
index ac58796df055..18af81b35914 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -90,7 +90,7 @@ static inline void of_node_put(struct device_node *node) { }
 extern struct device_node *allnodes;
 extern struct device_node *of_chosen;
 extern struct device_node *of_aliases;
-extern rwlock_t devtree_lock;
+extern raw_spinlock_t devtree_lock;
 
 static inline bool of_have_populated_dt(void)
 {
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index a88cdba27809..5f0fe2d0fc42 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -24,6 +24,9 @@ enum {
  */
 struct page_cgroup {
 	unsigned long flags;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	spinlock_t pcg_lock;
+#endif
 	struct mem_cgroup *mem_cgroup;
 };
 
@@ -74,12 +77,20 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
 	 * Don't take this lock in IRQ context.
 	 * This lock is for pc->mem_cgroup, USED, MIGRATION
	 */
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_lock(PCG_LOCK, &pc->flags);
+#else
+	spin_lock(&pc->pcg_lock);
+#endif
 }
 
 static inline void unlock_page_cgroup(struct page_cgroup *pc)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_unlock(PCG_LOCK, &pc->flags);
+#else
+	spin_unlock(&pc->pcg_lock);
+#endif
 }
 
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
@@ -102,6 +113,10 @@ static inline void __init page_cgroup_init_flatmem(void)
 {
 }
 
+static inline void page_cgroup_lock_init(struct page_cgroup *pc)
+{
+}
+
 #endif /* CONFIG_CGROUP_MEM_RES_CTLR */
 
 #include <linux/swap.h>
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 21638ae14e07..5a56f18345df 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,6 +48,31 @@
 	preempt_enable();				\
 } while (0)
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define get_local_var(var)	get_cpu_var(var)
+# define put_local_var(var)	put_cpu_var(var)
+# define get_local_ptr(var)	get_cpu_ptr(var)
+# define put_local_ptr(var)	put_cpu_ptr(var)
+#else
+# define get_local_var(var) (*({			\
+	migrate_disable();				\
+	&__get_cpu_var(var); }))
+
+# define put_local_var(var) do {			\
+	(void)&(var);					\
+	migrate_enable();				\
+} while (0)
+
+# define get_local_ptr(var) ({				\
+	migrate_disable();				\
+	this_cpu_ptr(var); })
+
+# define put_local_ptr(var) do {			\
+	(void)(var);					\
+	migrate_enable();				\
+} while (0)
+#endif
+
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
 
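A short sketch of the intended difference between the classic and the new
accessors (variable name illustrative):

	static DEFINE_PER_CPU(int, my_var);	/* hypothetical */

	int v;

	v = get_cpu_var(my_var);	/* disables preemption */
	put_cpu_var(my_var);

	v = get_local_var(my_var);	/* RT: only disables migration,
					 * so the section may sleep */
	put_local_var(my_var);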
diff --git a/include/linux/pid.h b/include/linux/pid.h
index b152d44fb181..7f336833f2c5 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -2,6 +2,7 @@
 #define _LINUX_PID_H
 
 #include <linux/rcupdate.h>
+#include <linux/atomic.h>
 
 enum pid_type
 {
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 87a03c746f17..8a42fdceed1e 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -54,11 +54,17 @@ do { \
 	dec_preempt_count(); \
 } while (0)
 
-#define preempt_enable_no_resched()	sched_preempt_enable_no_resched()
+#ifndef CONFIG_PREEMPT_RT_BASE
+# define preempt_enable_no_resched()	sched_preempt_enable_no_resched()
+# define preempt_check_resched_rt()	do { } while (0)
+#else
+# define preempt_enable_no_resched()	preempt_enable()
+# define preempt_check_resched_rt()	preempt_check_resched()
+#endif
 
 #define preempt_enable() \
 do { \
-	preempt_enable_no_resched(); \
+	sched_preempt_enable_no_resched(); \
 	barrier(); \
 	preempt_check_resched(); \
 } while (0)
@@ -107,9 +113,31 @@ do { \
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
 #define preempt_enable_notrace()		barrier()
+#define preempt_check_resched_rt()		barrier()
 
 #endif /* CONFIG_PREEMPT_COUNT */
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define preempt_disable_rt()		preempt_disable()
+# define preempt_enable_rt()		preempt_enable()
+# define preempt_disable_nort()		do { } while (0)
+# define preempt_enable_nort()		do { } while (0)
+# ifdef CONFIG_SMP
+   extern void migrate_disable(void);
+   extern void migrate_enable(void);
+# else /* CONFIG_SMP */
+#  define migrate_disable()		do { } while (0)
+#  define migrate_enable()		do { } while (0)
+# endif /* CONFIG_SMP */
+#else
+# define preempt_disable_rt()		do { } while (0)
+# define preempt_enable_rt()		do { } while (0)
+# define preempt_disable_nort()		preempt_disable()
+# define preempt_enable_nort()		preempt_enable()
+# define migrate_disable()		preempt_disable()
+# define migrate_enable()		preempt_enable()
+#endif
+
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
 struct preempt_notifier;
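Usage sketch (illustrative): code that must stay on its current CPU but,
on RT, may still be preempted and may take sleeping locks.

	migrate_disable();		/* RT: pinned to this CPU, preemptible */
	this_cpu = smp_processor_id();	/* stable across the section */
	/* ... per-CPU work, may take sleeping spinlocks on RT ... */
	migrate_enable();

On !RT builds migrate_disable()/migrate_enable() fall back to plain
preempt_disable()/preempt_enable(), so behavior there is unchanged.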
diff --git a/include/linux/printk.h b/include/linux/printk.h
index d633115489d1..84533089df58 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -88,8 +88,15 @@ int no_printk(const char *fmt, ...)
 	return 0;
 }
 
+#ifdef CONFIG_EARLY_PRINTK
 extern asmlinkage __printf(1, 2)
 void early_printk(const char *fmt, ...);
+extern void printk_kill(void);
+#else
+static inline __printf(1, 2) __cold
+void early_printk(const char *s, ...) { }
+static inline void printk_kill(void) { }
+#endif
 
 extern int printk_needs_cpu(int cpu);
 extern void printk_tick(void);
@@ -114,7 +121,6 @@ extern int __printk_ratelimit(const char *func);
 #define printk_ratelimit() __printk_ratelimit(__func__)
 extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
 				   unsigned int interval_msec);
-
 extern int printk_delay_msec;
 extern int dmesg_restrict;
 extern int kptr_restrict;
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index ffc444c38b0a..7ddfbf97e32d 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -230,7 +230,13 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 				unsigned long index, unsigned long max_scan);
 unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
 				unsigned long index, unsigned long max_scan);
+
+#ifndef CONFIG_PREEMPT_RT_FULL
 int radix_tree_preload(gfp_t gfp_mask);
+#else
+static inline int radix_tree_preload(gfp_t gm) { return 0; }
+#endif
+
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
 			unsigned long index, unsigned int tag);
@@ -255,7 +261,7 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
 
 static inline void radix_tree_preload_end(void)
 {
-	preempt_enable();
+	preempt_enable_nort();
 }
 
 /**
diff --git a/include/linux/random.h b/include/linux/random.h
index 54b1fd3efdfa..34a63324477f 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -51,7 +51,7 @@ struct rnd_state {
 extern void add_device_randomness(const void *, unsigned int);
 extern void add_input_randomness(unsigned int type, unsigned int code,
 				 unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags);
+extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
 
 extern void get_random_bytes(void *buf, int nbytes);
 extern void get_random_bytes_arch(void *buf, int nbytes);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 20fb776a1d4a..aaf8b7dc360c 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -101,6 +101,9 @@ extern void call_rcu(struct rcu_head *head,
 
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define call_rcu_bh	call_rcu
+#else
 /**
  * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -121,6 +124,7 @@ extern void call_rcu(struct rcu_head *head,
  */
 extern void call_rcu_bh(struct rcu_head *head,
 			void (*func)(struct rcu_head *head));
+#endif
 
 /**
  * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@@ -156,6 +160,11 @@ void synchronize_rcu(void);
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#ifndef CONFIG_PREEMPT_RT_FULL
+#define sched_rcu_preempt_depth()	rcu_preempt_depth()
+#else
+static inline int sched_rcu_preempt_depth(void) { return 0; }
+#endif
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
@@ -179,6 +188,8 @@ static inline int rcu_preempt_depth(void)
 	return 0;
 }
 
+#define sched_rcu_preempt_depth()	rcu_preempt_depth()
+
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 /* Internal to kernel */
@@ -324,7 +335,14 @@ static inline int rcu_read_lock_held(void)
 * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
 * hell.
 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int rcu_read_lock_bh_held(void)
+{
+	return rcu_read_lock_held();
+}
+#else
 extern int rcu_read_lock_bh_held(void);
+#endif
 
 /**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
@@ -773,10 +791,14 @@ static inline void rcu_read_unlock(void)
 static inline void rcu_read_lock_bh(void)
 {
 	local_bh_disable();
+#ifdef CONFIG_PREEMPT_RT_FULL
+	rcu_read_lock();
+#else
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
 	rcu_lockdep_assert(!rcu_is_cpu_idle(),
 			   "rcu_read_lock_bh() used illegally while idle");
+#endif
 }
 
 /*
@@ -786,10 +808,14 @@ static inline void rcu_read_lock_bh(void)
 */
 static inline void rcu_read_unlock_bh(void)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	rcu_read_unlock();
+#else
 	rcu_lockdep_assert(!rcu_is_cpu_idle(),
 			   "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
+#endif
 	local_bh_enable();
 }
 
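On RT the BH flavor of RCU collapses into the normal flavor, so a classic
networking-style reader and updater keep working unmodified. Sketch
(struct and callback names hypothetical):

	rcu_read_lock_bh();	/* RT: local_bh_disable() + rcu_read_lock() */
	/* ... lookup under RCU-bh protection ... */
	rcu_read_unlock_bh();

	call_rcu_bh(&obj->rcu, free_obj);	/* RT: plain call_rcu() */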
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index e8ee5dd0854c..ba517b57c371 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -57,7 +57,11 @@ static inline void exit_rcu(void)
 
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 extern void synchronize_rcu_bh(void);
+#else
+# define synchronize_rcu_bh	synchronize_rcu
+#endif
 extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
 
@@ -85,19 +89,29 @@ static inline void synchronize_rcu_bh_expedited(void)
 }
 
 extern void rcu_barrier(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define rcu_barrier_bh	rcu_barrier
+#else
 extern void rcu_barrier_bh(void);
+#endif
 extern void rcu_barrier_sched(void);
 
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
 extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
 extern long rcu_batches_completed_sched(void);
 
 extern void rcu_force_quiescent_state(void);
-extern void rcu_bh_force_quiescent_state(void);
 extern void rcu_sched_force_quiescent_state(void);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+extern void rcu_bh_force_quiescent_state(void);
+extern long rcu_batches_completed_bh(void);
+#else
+# define rcu_bh_force_quiescent_state	rcu_force_quiescent_state
+# define rcu_batches_completed_bh	rcu_batches_completed
+#endif
+
 /* A context switch is a grace period for RCU-sched and RCU-bh. */
 static inline int rcu_blocking_is_gp(void)
 {
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index de17134244f3..9340b866ce21 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -14,7 +14,7 @@
 
 #include <linux/linkage.h>
 #include <linux/plist.h>
-#include <linux/spinlock_types.h>
+#include <linux/spinlock_types_raw.h>
 
 extern int max_lock_depth; /* for sysctl */
 
@@ -29,9 +29,10 @@ struct rt_mutex {
 	raw_spinlock_t		wait_lock;
 	struct plist_head	wait_list;
 	struct task_struct	*owner;
-#ifdef CONFIG_DEBUG_RT_MUTEXES
 	int			save_state;
-	const char		*name, *file;
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+	const char		*file;
+	const char		*name;
 	int			line;
 	void			*magic;
 #endif
@@ -56,19 +57,39 @@ struct hrtimer_sleeper;
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
 	, .name = #mutexname, .file = __FILE__, .line = __LINE__
-# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, __func__)
+
+# define rt_mutex_init(mutex)					\
+	do {							\
+		raw_spin_lock_init(&(mutex)->wait_lock);	\
+		__rt_mutex_init(mutex, #mutex);			\
+	} while (0)
+
 extern void rt_mutex_debug_task_free(struct task_struct *tsk);
 #else
 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, NULL)
+
+# define rt_mutex_init(mutex)					\
+	do {							\
+		raw_spin_lock_init(&(mutex)->wait_lock);	\
+		__rt_mutex_init(mutex, #mutex);			\
+	} while (0)
+
 # define rt_mutex_debug_task_free(t)		do { } while (0)
 #endif
 
-#define __RT_MUTEX_INITIALIZER(mutexname) \
-	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
+	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
 	, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
 	, .owner = NULL \
-	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
+	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+
+
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+	{ __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
+
+#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
+	{ __RT_MUTEX_INITIALIZER_PLAIN(mutexname)	\
+	, .save_state = 1 }
 
 #define DEFINE_RT_MUTEX(mutexname) \
 	struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
@@ -88,11 +109,10 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
 extern void rt_mutex_destroy(struct rt_mutex *lock);
 
 extern void rt_mutex_lock(struct rt_mutex *lock);
-extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
-						int detect_deadlock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
-					struct hrtimer_sleeper *timeout,
-					int detect_deadlock);
+				struct hrtimer_sleeper *timeout);
 
 extern int rt_mutex_trylock(struct rt_mutex *lock);
 
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
new file mode 100644
index 000000000000..853ee367fef4
--- /dev/null
+++ b/include/linux/rwlock_rt.h
@@ -0,0 +1,123 @@
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#define rwlock_init(rwl)				\
+do {							\
+	static struct lock_class_key __key;		\
+							\
+	rt_mutex_init(&(rwl)->lock);			\
+	__rt_rwlock_init(rwl, #rwl, &__key);		\
+} while (0)
+
+extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
+extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
+extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
+extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
+extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
+extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
+extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
+extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
+extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
+extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
+
+#define read_trylock(lock)	__cond_lock(lock, rt_read_trylock(lock))
+#define write_trylock(lock)	__cond_lock(lock, rt_write_trylock(lock))
+
+#define write_trylock_irqsave(lock, flags)	\
+	__cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
+
+#define read_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		migrate_disable();			\
+		flags = rt_read_lock_irqsave(lock);	\
+	} while (0)
+
+#define write_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		migrate_disable();			\
+		flags = rt_write_lock_irqsave(lock);	\
+	} while (0)
+
+#define read_lock(lock)					\
+	do {						\
+		migrate_disable();			\
+		rt_read_lock(lock);			\
+	} while (0)
+
+#define read_lock_bh(lock)				\
+	do {						\
+		local_bh_disable();			\
+		migrate_disable();			\
+		rt_read_lock(lock);			\
+	} while (0)
+
+#define read_lock_irq(lock)	read_lock(lock)
+
+#define write_lock(lock)				\
+	do {						\
+		migrate_disable();			\
+		rt_write_lock(lock);			\
+	} while (0)
+
+#define write_lock_bh(lock)				\
+	do {						\
+		local_bh_disable();			\
+		migrate_disable();			\
+		rt_write_lock(lock);			\
+	} while (0)
+
+#define write_lock_irq(lock)	write_lock(lock)
+
+#define read_unlock(lock)				\
+	do {						\
+		rt_read_unlock(lock);			\
+		migrate_enable();			\
+	} while (0)
+
+#define read_unlock_bh(lock)				\
+	do {						\
+		rt_read_unlock(lock);			\
+		migrate_enable();			\
+		local_bh_enable();			\
+	} while (0)
+
+#define read_unlock_irq(lock)	read_unlock(lock)
+
+#define write_unlock(lock)				\
+	do {						\
+		rt_write_unlock(lock);			\
+		migrate_enable();			\
+	} while (0)
+
+#define write_unlock_bh(lock)				\
+	do {						\
+		rt_write_unlock(lock);			\
+		migrate_enable();			\
+		local_bh_enable();			\
+	} while (0)
+
+#define write_unlock_irq(lock)	write_unlock(lock)
+
+#define read_unlock_irqrestore(lock, flags)		\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		(void) flags;				\
+		rt_read_unlock(lock);			\
+		migrate_enable();			\
+	} while (0)
+
+#define write_unlock_irqrestore(lock, flags)		\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		(void) flags;				\
+		rt_write_unlock(lock);			\
+		migrate_enable();			\
+	} while (0)
+
+#endif
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index cc0072e93e36..d0da966ad7a0 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -1,6 +1,10 @@
 #ifndef __LINUX_RWLOCK_TYPES_H
 #define __LINUX_RWLOCK_TYPES_H
 
+#if !defined(__LINUX_SPINLOCK_TYPES_H)
+# error "Do not include directly, include spinlock_types.h"
+#endif
+
 /*
  * include/linux/rwlock_types.h - generic rwlock type definitions
  * and initializers
@@ -43,6 +47,7 @@ typedef struct {
 			RW_DEP_MAP_INIT(lockname) }
 #endif
 
-#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)
+#define DEFINE_RWLOCK(name) \
+	rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
 
 #endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
new file mode 100644
index 000000000000..b13832119591
--- /dev/null
+++ b/include/linux/rwlock_types_rt.h
@@ -0,0 +1,33 @@
+#ifndef __LINUX_RWLOCK_TYPES_RT_H
+#define __LINUX_RWLOCK_TYPES_RT_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#error "Do not include directly. Include spinlock_types.h instead"
+#endif
+
+/*
+ * rwlocks - rtmutex which allows single reader recursion
+ */
+typedef struct {
+	struct rt_mutex		lock;
+	int			read_depth;
+	unsigned int		break_lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
+} rwlock_t;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RW_LOCK_UNLOCKED(name) \
+	{ .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock),	\
+	  RW_DEP_MAP_INIT(name) }
+
+#define DEFINE_RWLOCK(name) \
+	rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+
+#endif
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 54bd7cd7ecbd..3f7df4a2668d 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -16,6 +16,10 @@
 
 #include <linux/atomic.h>
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+#include <linux/rwsem_rt.h>
+#else /* PREEMPT_RT_FULL */
+
 struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -130,4 +134,6 @@ extern void down_write_nested(struct rw_semaphore *sem, int subclass);
 # define down_write_nested(sem, subclass)	down_write(sem)
 #endif
 
+#endif /* !PREEMPT_RT_FULL */
+
 #endif /* _LINUX_RWSEM_H */
diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
new file mode 100644
index 000000000000..802c6909900a
--- /dev/null
+++ b/include/linux/rwsem_rt.h
@@ -0,0 +1,105 @@
+#ifndef _LINUX_RWSEM_RT_H
+#define _LINUX_RWSEM_RT_H
+
+#ifndef _LINUX_RWSEM_H
+#error "Include rwsem.h"
+#endif
+
+/*
+ * RW-semaphores are a spinlock plus a reader-depth count.
+ *
+ * Note that the semantics are different from the usual
+ * Linux rw-sems: in PREEMPT_RT mode we do not allow
+ * multiple readers to hold the lock at once; we only allow
+ * a read-lock owner to read-lock recursively. This is
+ * better for latency, makes the implementation inherently
+ * fair and makes it simpler as well.
+ */
+
+#include <linux/rtmutex.h>
+
+struct rw_semaphore {
+	struct rt_mutex		lock;
+	int			read_depth;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
+};
+
+#define __RWSEM_INITIALIZER(name) \
+	{ .lock = __RT_MUTEX_INITIALIZER(name.lock), \
+	  RW_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(lockname) \
+	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
+
+extern void  __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
+				     struct lock_class_key *key);
+
+# define rt_init_rwsem(sem)				\
+do {							\
+	static struct lock_class_key __key;		\
+							\
+	rt_mutex_init(&(sem)->lock);			\
+	__rt_rwsem_init((sem), #sem, &__key);		\
+} while (0)
+
+extern void  rt_down_write(struct rw_semaphore *rwsem);
+extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
+extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
+extern void  rt_down_read(struct rw_semaphore *rwsem);
+extern int  rt_down_write_trylock(struct rw_semaphore *rwsem);
+extern int  rt_down_read_trylock(struct rw_semaphore *rwsem);
+extern void  rt_up_read(struct rw_semaphore *rwsem);
+extern void  rt_up_write(struct rw_semaphore *rwsem);
+extern void  rt_downgrade_write(struct rw_semaphore *rwsem);
+
+#define init_rwsem(sem)		rt_init_rwsem(sem)
+#define rwsem_is_locked(s)	rt_mutex_is_locked(&(s)->lock)
+
+static inline void down_read(struct rw_semaphore *sem)
+{
+	rt_down_read(sem);
+}
+
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+	return rt_down_read_trylock(sem);
+}
+
+static inline void down_write(struct rw_semaphore *sem)
+{
+	rt_down_write(sem);
+}
+
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+	return rt_down_write_trylock(sem);
+}
+
+static inline void up_read(struct rw_semaphore *sem)
+{
+	rt_up_read(sem);
+}
+
+static inline void up_write(struct rw_semaphore *sem)
+{
+	rt_up_write(sem);
+}
+
+static inline void downgrade_write(struct rw_semaphore *sem)
+{
+	rt_downgrade_write(sem);
+}
+
+static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
+{
+	return rt_down_read_nested(sem, subclass);
+}
+
+static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+	rt_down_write_nested(sem, subclass);
+}
+
+#endif
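Sketch of what remains legal under the RT rwsem (task-recursive read
locking) versus what now serializes (concurrent readers); the semaphore
name is hypothetical:

	down_read(&my_sem);	/* first read lock */
	down_read(&my_sem);	/* same task: recursion OK, read_depth == 2 */
	up_read(&my_sem);
	up_read(&my_sem);
	/* a second task calling down_read() while we hold the lock
	 * would block instead of sharing it, in exchange for
	 * priority inheritance and bounded latency. */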
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d529bd9e6680..daf469059630 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -63,6 +63,7 @@ struct sched_param {
 #include <linux/nodemask.h>
 #include <linux/mm_types.h>
 
+#include <asm/kmap_types.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/cputime.h>
@@ -90,6 +91,7 @@ struct sched_param {
 #include <linux/latencytop.h>
 #include <linux/cred.h>
 #include <linux/llist.h>
+#include <linux/hardirq.h>
 
 #include <asm/processor.h>
 
@@ -1114,6 +1116,7 @@ struct sched_domain;
 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
 #define WF_FORK		0x02		/* child wakeup after fork */
 #define WF_MIGRATED	0x04		/* internal use, task got migrated */
+#define WF_LOCK_SLEEPER	0x08		/* wakeup spinlock "sleeper" */
 
 #define ENQUEUE_WAKEUP		1
 #define ENQUEUE_HEAD		2
@@ -1270,6 +1273,7 @@ enum perf_event_task_context {
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
+	volatile long saved_state;	/* saved state for "spinlock sleepers" */
 	void *stack;
 	atomic_t usage;
 	unsigned int flags;	/* per process flags, defined below */
@@ -1309,6 +1313,12 @@ struct task_struct {
 #endif
 
 	unsigned int policy;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int migrate_disable;
+#ifdef CONFIG_SCHED_DEBUG
+	int migrate_disable_atomic;
+#endif
+#endif
 	cpumask_t cpus_allowed;
 
 #ifdef CONFIG_PREEMPT_RCU
@@ -1414,6 +1424,9 @@ struct task_struct {
 
 	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct task_struct *posix_timer_list;
+#endif
 
 /* process credentials */
 	const struct cred __rcu *real_cred; /* objective and real subjective task
@@ -1447,10 +1460,15 @@ struct task_struct {
 /* signal handlers */
 	struct signal_struct *signal;
 	struct sighand_struct *sighand;
+	struct sigqueue	*sigqueue_cache;
 
 	sigset_t blocked, real_blocked;
 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
 	struct sigpending pending;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/* TODO: move me into ->restart_block ? */
+	struct siginfo forced_info;
+#endif
 
 	unsigned long sas_ss_sp;
 	size_t sas_ss_size;
@@ -1485,6 +1503,9 @@ struct task_struct {
 /* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int pagefault_disabled;
+#endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int irq_events;
 	unsigned long hardirq_enable_ip;
@@ -1617,6 +1638,12 @@ struct task_struct {
 	unsigned long trace;
 	/* bitmask and counter of trace recursion */
 	unsigned long trace_recursion;
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+	u64 preempt_timestamp_hist;
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+	long timer_offset;
+#endif
+#endif
 #endif /* CONFIG_TRACING */
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
 	struct memcg_batch_info {
@@ -1629,10 +1656,32 @@ struct task_struct {
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	atomic_t ptrace_bp_refcnt;
 #endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct rcu_head put_rcu;
+	int softirq_nestcnt;
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
+	int kmap_idx;
+	pte_t kmap_pte[KM_TYPE_NR];
+# endif
+#endif
+
+#ifdef CONFIG_DEBUG_PREEMPT
+	unsigned long preempt_disable_ip;
+#endif
 };
 
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
+#else
+static inline bool cur_pf_disabled(void) { return false; }
+#endif
+
+static inline bool pagefault_disabled(void)
+{
+	return in_atomic() || cur_pf_disabled();
+}
 
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
@@ -1802,6 +1851,15 @@ extern struct pid *cad_pid;
 extern void free_task(struct task_struct *tsk);
 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __put_task_struct_cb(struct rcu_head *rhp);
+
+static inline void put_task_struct(struct task_struct *t)
+{
+	if (atomic_dec_and_test(&t->usage))
+		call_rcu(&t->put_rcu, __put_task_struct_cb);
+}
+#else
 extern void __put_task_struct(struct task_struct *t);
 
 static inline void put_task_struct(struct task_struct *t)
@@ -1809,6 +1867,7 @@ static inline void put_task_struct(struct task_struct *t)
 	if (atomic_dec_and_test(&t->usage))
 		__put_task_struct(t);
 }
+#endif
 
 extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
@@ -1832,6 +1891,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
+#define PF_STOMPER	0x00080000	/* I am a stomp machine thread */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
@@ -1943,6 +2003,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
 
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
+int migrate_me(void);
+void tell_sched_cpu_down_begin(int cpu);
+void tell_sched_cpu_down_done(int cpu);
+
 #else
 static inline void do_set_cpus_allowed(struct task_struct *p,
 				      const struct cpumask *new_mask)
@@ -1955,6 +2019,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 		return -EINVAL;
 	return 0;
 }
+static inline int migrate_me(void) { return 0; }
+static inline void tell_sched_cpu_down_begin(int cpu) { }
+static inline void tell_sched_cpu_down_done(int cpu) { }
 #endif
 
 #ifdef CONFIG_NO_HZ
@@ -2130,6 +2197,7 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
 extern void rt_mutex_adjust_pi(struct task_struct *p);
 static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
 {
@@ -2140,6 +2208,10 @@ static inline int rt_mutex_getprio(struct task_struct *p)
 {
 	return p->normal_prio;
 }
+static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
+{
+	return 0;
+}
 # define rt_mutex_adjust_pi(p)		do { } while (0)
 static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
 {
@@ -2232,6 +2304,7 @@ extern void xtime_update(unsigned long ticks);
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_lock_sleeper(struct task_struct * tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 #ifdef CONFIG_SMP
  extern void kick_process(struct task_struct *tsk);
@@ -2322,12 +2395,24 @@ extern struct mm_struct * mm_alloc(void);
 
 /* mmdrop drops the mm and the page tables */
 extern void __mmdrop(struct mm_struct *);
+
 static inline void mmdrop(struct mm_struct * mm)
 {
 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
 		__mmdrop(mm);
 }
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __mmdrop_delayed(struct rcu_head *rhp);
+static inline void mmdrop_delayed(struct mm_struct *mm)
+{
+	if (atomic_dec_and_test(&mm->mm_count))
+		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+# define mmdrop_delayed(mm)	mmdrop(mm)
+#endif
+
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
 /* Grab a reference to a task's mm, if it is not already going away */
@@ -2672,7 +2757,7 @@ extern int _cond_resched(void);
 
 extern int __cond_resched_lock(spinlock_t *lock);
 
-#ifdef CONFIG_PREEMPT_COUNT
+#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
 #else
 #define PREEMPT_LOCK_OFFSET	0
@@ -2683,12 +2768,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
 	__cond_resched_lock(lock);				\
 })
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 extern int __cond_resched_softirq(void);
 
 #define cond_resched_softirq() ({					\
 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
 	__cond_resched_softirq();					\
 })
+#else
+# define cond_resched_softirq()		cond_resched()
+#endif
 
 /*
  * Does a critical section need to be broken due to another
@@ -2760,6 +2849,26 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
+static inline int __migrate_disabled(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	return p->migrate_disable;
+#else
+	return 0;
+#endif
+}
+
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (p->migrate_disable)
+		return cpumask_of(task_cpu(p));
+#endif
+
+	return &p->cpus_allowed;
+}
+
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
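Illustrative consequence of the accessor change above: scheduler-side code
that consults a task's affinity now automatically sees a single-CPU mask
while the task has migration disabled (caller shape hypothetical):

	const struct cpumask *mask = tsk_cpus_allowed(p);

	if (cpumask_test_cpu(target_cpu, mask))
		;	/* p may currently run on target_cpu */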
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 600060e25ec6..c2dcae449b59 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -30,92 +30,12 @@
 #include <linux/preempt.h>
 #include <asm/processor.h>
 
-typedef struct {
-	unsigned sequence;
-	spinlock_t lock;
-} seqlock_t;
-
-/*
- * These macros triggered gcc-3.x compile-time problems.  We think these are
- * OK now.  Be cautious.
- */
-#define __SEQLOCK_UNLOCKED(lockname) \
-	{ 0, __SPIN_LOCK_UNLOCKED(lockname) }
-
-#define seqlock_init(x)					\
-	do {						\
-		(x)->sequence = 0;			\
-		spin_lock_init(&(x)->lock);		\
-	} while (0)
-
-#define DEFINE_SEQLOCK(x) \
-		seqlock_t x = __SEQLOCK_UNLOCKED(x)
-
-/* Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
- */
-static inline void write_seqlock(seqlock_t *sl)
-{
-	spin_lock(&sl->lock);
-	++sl->sequence;
-	smp_wmb();
-}
-
-static inline void write_sequnlock(seqlock_t *sl)
-{
-	smp_wmb();
-	sl->sequence++;
-	spin_unlock(&sl->lock);
-}
-
-static inline int write_tryseqlock(seqlock_t *sl)
-{
-	int ret = spin_trylock(&sl->lock);
-
-	if (ret) {
-		++sl->sequence;
-		smp_wmb();
-	}
-	return ret;
-}
-
-/* Start of read calculation -- fetch last complete writer token */
-static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
-{
-	unsigned ret;
-
-repeat:
-	ret = ACCESS_ONCE(sl->sequence);
-	if (unlikely(ret & 1)) {
-		cpu_relax();
-		goto repeat;
-	}
-	smp_rmb();
-
-	return ret;
-}
-
-/*
- * Test if reader processed invalid data.
- *
- * If sequence value changed then writer changed data while in section.
- */
-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
-{
-	smp_rmb();
-
-	return unlikely(sl->sequence != start);
-}
-
-
 /*
  * Version using sequence counter only.
  * This can be used when code has its own mutex protecting the
  * update, starting before the write_seqcount_begin() and ending
  * after the write_seqcount_end().
  */
-
 typedef struct seqcount {
 	unsigned sequence;
 } seqcount_t;
@@ -218,7 +138,6 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
 static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
-
 	return __read_seqcount_retry(s, start);
 }
 
@@ -227,18 +146,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
  * Sequence counter only version assumes that callers are using their
  * own mutexing.
  */
-static inline void write_seqcount_begin(seqcount_t *s)
+static inline void __write_seqcount_begin(seqcount_t *s)
 {
 	s->sequence++;
 	smp_wmb();
 }
 
-static inline void write_seqcount_end(seqcount_t *s)
+static inline void write_seqcount_begin(seqcount_t *s)
+{
+	preempt_disable_rt();
+	__write_seqcount_begin(s);
+}
+
+static inline void __write_seqcount_end(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence++;
 }
 
+static inline void write_seqcount_end(seqcount_t *s)
+{
+	__write_seqcount_end(s);
+	preempt_enable_rt();
+}
+
 /**
  * write_seqcount_barrier - invalidate in-progress read-side seq operations
  * @s: pointer to seqcount_t
@@ -252,31 +183,124 @@ static inline void write_seqcount_barrier(seqcount_t *s)
 	s->sequence+=2;
 }
 
+typedef struct {
+	struct seqcount seqcount;
+	spinlock_t lock;
+} seqlock_t;
+
+/*
+ * These macros triggered gcc-3.x compile-time problems.  We think these are
+ * OK now.  Be cautious.
+ */
+#define __SEQLOCK_UNLOCKED(lockname)			\
+	{						\
+		.seqcount = SEQCNT_ZERO,		\
+		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
+	}
+
+#define seqlock_init(x)					\
+	do {						\
+		seqcount_init(&(x)->seqcount);		\
+		spin_lock_init(&(x)->lock);		\
+	} while (0)
+
+#define DEFINE_SEQLOCK(x) \
+		seqlock_t x = __SEQLOCK_UNLOCKED(x)
+
+/*
+ * Read side functions for starting and finalizing a read side section.
+ */
+#ifndef CONFIG_PREEMPT_RT_FULL
+static inline unsigned read_seqbegin(const seqlock_t *sl)
+{
+	return read_seqcount_begin(&sl->seqcount);
+}
+#else
+/*
+ * Starvation safe read side for RT
+ */
+static inline unsigned read_seqbegin(seqlock_t *sl)
+{
+	unsigned ret;
+
+repeat:
+	ret = sl->seqcount.sequence;
+	if (unlikely(ret & 1)) {
+		/*
+		 * Take the lock and let the writer proceed (i.e. possibly
+		 * boost it), otherwise we could loop here forever.
+		 */
+		spin_lock(&sl->lock);
+		spin_unlock(&sl->lock);
+		goto repeat;
+	}
+	return ret;
+}
+#endif
+
+static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+{
+	return read_seqcount_retry(&sl->seqcount, start);
+}
+
 /*
- * Possible sw/hw IRQ protected versions of the interfaces.
+ * Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
  */
+static inline void write_seqlock(seqlock_t *sl)
+{
+	spin_lock(&sl->lock);
+	__write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock(seqlock_t *sl)
+{
+	__write_seqcount_end(&sl->seqcount);
+	spin_unlock(&sl->lock);
+}
+
+static inline void write_seqlock_bh(seqlock_t *sl)
+{
+	spin_lock_bh(&sl->lock);
+	__write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_bh(seqlock_t *sl)
+{
+	__write_seqcount_end(&sl->seqcount);
+	spin_unlock_bh(&sl->lock);
+}
+
+static inline void write_seqlock_irq(seqlock_t *sl)
+{
+	spin_lock_irq(&sl->lock);
+	__write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_irq(seqlock_t *sl)
+{
+	__write_seqcount_end(&sl->seqcount);
+	spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sl->lock, flags);
+	__write_seqcount_begin(&sl->seqcount);
+	return flags;
+}
+
 #define write_seqlock_irqsave(lock, flags)			\
-	do { local_irq_save(flags); write_seqlock(lock); } while (0)
-#define write_seqlock_irq(lock)					\
-	do { local_irq_disable();   write_seqlock(lock); } while (0)
-#define write_seqlock_bh(lock)					\
-	do { local_bh_disable();    write_seqlock(lock); } while (0)
-
-#define write_sequnlock_irqrestore(lock, flags)			\
-	do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
-#define write_sequnlock_irq(lock)				\
-	do { write_sequnlock(lock); local_irq_enable(); } while(0)
-#define write_sequnlock_bh(lock)				\
-	do { write_sequnlock(lock); local_bh_enable(); } while(0)
-
-#define read_seqbegin_irqsave(lock, flags)			\
-	({ local_irq_save(flags);   read_seqbegin(lock); })
-
-#define read_seqretry_irqrestore(lock, iv, flags)		\
-	({							\
-		int ret = read_seqretry(lock, iv);		\
-		local_irq_restore(flags);			\
-		ret;						\
-	})
+	do { flags = __write_seqlock_irqsave(lock); } while (0)
+
+static inline void
+write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+	__write_seqcount_end(&sl->seqcount);
+	spin_unlock_irqrestore(&sl->lock, flags);
+}
 
 #endif /* __LINUX_SEQLOCK_H */
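The reader-side API is unchanged, so the standard retry loop still applies;
on RT a reader that observes an odd (in-progress) sequence briefly takes
and drops sl->lock, which priority-boosts the writer instead of spinning.
Sketch (seqlock name hypothetical):

	unsigned seq;

	do {
		seq = read_seqbegin(&my_seqlock);
		/* ... copy out the protected data ... */
	} while (read_seqretry(&my_seqlock, seq));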
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 7987ce74874b..24cc7a4459bb 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -229,6 +229,7 @@ static inline void init_sigpending(struct sigpending *sig)
 }
 
 extern void flush_sigqueue(struct sigpending *queue);
+extern void flush_task_sigqueue(struct task_struct *tsk);
 
 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
 static inline int valid_signal(unsigned long sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4424db24396d..c6c346ec5c10 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -132,6 +132,7 @@ struct sk_buff_head {
 
 	__u32		qlen;
 	spinlock_t	lock;
+	raw_spinlock_t	raw_lock;
 };
 
 struct sk_buff;
@@ -984,6 +985,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
 	__skb_queue_head_init(list);
 }
 
+static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
+{
+	raw_spin_lock_init(&list->raw_lock);
+	__skb_queue_head_init(list);
+}
+
 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 		struct lock_class_key *class)
 {
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 10530d92c04b..3001ba51376b 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -80,7 +80,6 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data,
 
 int smp_call_function_any(const struct cpumask *mask,
 			  smp_call_func_t func, void *info, int wait);
-
 /*
  * Generic and arch helpers
 */
@@ -219,6 +218,9 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
 #define get_cpu()		({ preempt_disable(); smp_processor_id(); })
 #define put_cpu()		preempt_enable()
 
+#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
+#define put_cpu_light()		migrate_enable()
+
 /*
  * Callback to arch code if there's nosmp or maxcpus=0 on the
  * boot command line:
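A hypothetical sketch of the difference: get_cpu_light() only disables
migration, not preemption, so on RT the section stays preemptible but
smp_processor_id() remains valid throughout. The function name below is
invented for this example.

	#include <linux/smp.h>
	#include <linux/printk.h>

	static void log_current_cpu(void)
	{
		int cpu = get_cpu_light();	/* migrate_disable() + smp_processor_id() */

		/* May be preempted here, but cannot migrate: cpu stays correct */
		pr_info("running on CPU %d\n", cpu);
		put_cpu_light();		/* migrate_enable() */
	}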
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ced949a..0c11a7c43374 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -254,7 +254,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
 
 /* Include rwlock functions */
-#include <linux/rwlock.h>
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_rt.h>
+#else
+# include <linux/rwlock.h>
+#endif
 
 /*
  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -265,6 +269,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 # include <linux/spinlock_api_up.h>
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/spinlock_rt.h>
+#else /* PREEMPT_RT_FULL */
+
 /*
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */
@@ -394,4 +402,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+#endif /* !PREEMPT_RT_FULL */
+
 #endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 51df117abe46..3f68f5018fff 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -191,6 +191,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
 	return 0;
 }
 
-#include <linux/rwlock_api_smp.h>
+#ifndef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_api_smp.h>
+#endif
 
 #endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
new file mode 100644
index 000000000000..b3c504bb9852
--- /dev/null
+++ b/include/linux/spinlock_rt.h
@@ -0,0 +1,169 @@
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#include <linux/bug.h>
+
+extern void
+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
+
+#define spin_lock_init(slock)				\
+do {							\
+	static struct lock_class_key __key;		\
+							\
+	rt_mutex_init(&(slock)->lock);			\
+	__rt_spin_lock_init(slock, #slock, &__key);	\
+} while (0)
+
+extern void __lockfunc rt_spin_lock(spinlock_t *lock);
+extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
+extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
+extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
+extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
+extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
+extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
+extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
+
+/*
+ * lockdep-less calls, for derived types like rwlock:
+ * (for trylock they can use rt_mutex_trylock() directly.
+ */
+extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
+extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+
+#define spin_lock_local(lock)			rt_spin_lock(lock)
+#define spin_unlock_local(lock)			rt_spin_unlock(lock)
+
+#define spin_lock(lock)				\
+	do {					\
+		migrate_disable();		\
+		rt_spin_lock(lock);		\
+	} while (0)
+
+#define spin_lock_bh(lock)			\
+	do {					\
+		local_bh_disable();		\
+		migrate_disable();		\
+		rt_spin_lock(lock);		\
+	} while (0)
+
+#define spin_lock_irq(lock)		spin_lock(lock)
+
+#define spin_do_trylock(lock)		__cond_lock(lock, rt_spin_trylock(lock))
+
+#define spin_trylock(lock)			\
+({						\
+	int __locked;				\
+	migrate_disable();			\
+	__locked = spin_do_trylock(lock);	\
+	if (!__locked)				\
+		migrate_enable();		\
+	__locked;				\
+})
+
+#ifdef CONFIG_LOCKDEP
+# define spin_lock_nested(lock, subclass)		\
+	do {						\
+		migrate_disable();			\
+		rt_spin_lock_nested(lock, subclass);	\
+	} while (0)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = 0;				\
+		migrate_disable();			\
+		rt_spin_lock_nested(lock, subclass);	\
+	} while (0)
+#else
+# define spin_lock_nested(lock, subclass)	spin_lock(lock)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = 0;				\
+		spin_lock(lock);			\
+	} while (0)
+#endif
+
+#define spin_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = 0;				\
+		spin_lock(lock);			\
+	} while (0)
+
+static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
+{
+	unsigned long flags = 0;
+#ifdef CONFIG_TRACE_IRQFLAGS
+	flags = rt_spin_lock_trace_flags(lock);
+#else
+	spin_lock(lock); /* lock_local */
+#endif
+	return flags;
+}
+
+/* FIXME: we need rt_spin_lock_nest_lock */
+#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
+
+#define spin_unlock(lock)			\
+	do {					\
+		rt_spin_unlock(lock);		\
+		migrate_enable();		\
+	} while (0)
+
+#define spin_unlock_bh(lock)			\
+	do {					\
+		rt_spin_unlock(lock);		\
+		migrate_enable();		\
+		local_bh_enable();		\
+	} while (0)
+
+#define spin_unlock_irq(lock)		spin_unlock(lock)
+
+#define spin_unlock_irqrestore(lock, flags)		\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		(void) flags;				\
+		spin_unlock(lock);			\
+	} while (0)
+
+#define spin_trylock_bh(lock)	__cond_lock(lock, rt_spin_trylock_bh(lock))
+#define spin_trylock_irq(lock)	spin_trylock(lock)
+
+#define spin_trylock_irqsave(lock, flags)	\
+	rt_spin_trylock_irqsave(lock, &(flags))
+
+#define spin_unlock_wait(lock)		rt_spin_unlock_wait(lock)
+
+#ifdef CONFIG_GENERIC_LOCKBREAK
+# define spin_is_contended(lock)	((lock)->break_lock)
+#else
+# define spin_is_contended(lock)	(((void)(lock), 0))
+#endif
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+	return !rt_mutex_is_locked(&lock->lock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+	return rt_mutex_is_locked(&lock->lock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+	BUG_ON(!spin_is_locked(lock));
+}
+
+#define atomic_dec_and_lock(atomic, lock) \
+	atomic_dec_and_spin_lock(atomic, lock)
+
+#endif
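A hypothetical sketch of what these substitutions mean for existing callers:
the same driver-style code builds for both configurations. On
PREEMPT_RT_FULL the lock is an rtmutex, flags is forced to 0 and interrupts
stay enabled; on !RT the usual raw behavior applies. Names are invented.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(dev_lock);
	static int dev_count;

	static void dev_add(int n)
	{
		unsigned long flags;

		/* RT: migrate_disable() + rt_spin_lock(), flags set to 0 */
		spin_lock_irqsave(&dev_lock, flags);
		dev_count += n;
		spin_unlock_irqrestore(&dev_lock, flags);
	}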
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 73548eb13a5d..10bac715ea96 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,80 +9,15 @@
 * Released under the General Public License (GPL).
 */
 
-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-#else
-# include <linux/spinlock_types_up.h>
-#endif
-
-#include <linux/lockdep.h>
-
-typedef struct raw_spinlock {
-	arch_spinlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-	unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned int magic, owner_cpu;
-	void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
-
-#define SPINLOCK_MAGIC		0xdead4ead
-
-#define SPINLOCK_OWNER_INIT	((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SPIN_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
-#else
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
+#include <linux/spinlock_types_raw.h>
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname)		\
-	.magic = SPINLOCK_MAGIC,		\
-	.owner_cpu = -1,			\
-	.owner = SPINLOCK_OWNER_INIT,
+#ifndef CONFIG_PREEMPT_RT_FULL
+# include <linux/spinlock_types_nort.h>
+# include <linux/rwlock_types.h>
 #else
-# define SPIN_DEBUG_INIT(lockname)
+# include <linux/rtmutex.h>
+# include <linux/spinlock_types_rt.h>
+# include <linux/rwlock_types_rt.h>
 #endif
 
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname)	\
-	{					\
-	.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
-	SPIN_DEBUG_INIT(lockname)		\
-	SPIN_DEP_MAP_INIT(lockname) }
-
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname)	\
-	(raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x)	raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
-
-typedef struct spinlock {
-	union {
-		struct raw_spinlock rlock;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
-		struct {
-			u8 __padding[LOCK_PADSIZE];
-			struct lockdep_map dep_map;
-		};
-#endif
-	};
-} spinlock_t;
-
-#define __SPIN_LOCK_INITIALIZER(lockname) \
-	{ { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
-
-#define __SPIN_LOCK_UNLOCKED(lockname) \
-	(spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-
-#include <linux/rwlock_types.h>
-
 #endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
new file mode 100644
index 000000000000..f1dac1fb1d6a
--- /dev/null
+++ b/include/linux/spinlock_types_nort.h
@@ -0,0 +1,33 @@
+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
+#define __LINUX_SPINLOCK_TYPES_NORT_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#error "Do not include directly. Include spinlock_types.h instead"
+#endif
+
+/*
+ * The non RT version maps spinlocks to raw_spinlocks
+ */
+typedef struct spinlock {
+	union {
+		struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+		struct {
+			u8 __padding[LOCK_PADSIZE];
+			struct lockdep_map dep_map;
+		};
+#endif
+	};
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+	{ { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+	(spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#endif
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
new file mode 100644
index 000000000000..edffc4d53fc9
--- /dev/null
+++ b/include/linux/spinlock_types_raw.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep.h>
+
+typedef struct raw_spinlock {
+	arch_spinlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+	unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned int magic, owner_cpu;
+	void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC		0xdead4ead
+
+#define SPINLOCK_OWNER_INIT	((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname)		\
+	.magic = SPINLOCK_MAGIC,		\
+	.owner_cpu = -1,			\
+	.owner = SPINLOCK_OWNER_INIT,
+#else
+# define SPIN_DEBUG_INIT(lockname)
+#endif
+
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname)	\
+	{					\
+	.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
+	SPIN_DEBUG_INIT(lockname)		\
+	SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname)	\
+	(raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x)	raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif
diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
new file mode 100644
index 000000000000..1fe8fc069d9b
--- /dev/null
+++ b/include/linux/spinlock_types_rt.h
@@ -0,0 +1,49 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RT_H
+#define __LINUX_SPINLOCK_TYPES_RT_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#error "Do not include directly. Include spinlock_types.h instead"
+#endif
+
+/*
+ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
+ */
+typedef struct spinlock {
+	struct rt_mutex		lock;
+	unsigned int		break_lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
+} spinlock_t;
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# define __RT_SPIN_INITIALIZER(name) \
+	{ \
+	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
+	.save_state = 1, \
+	.file = __FILE__, \
+	.line = __LINE__ , \
+	}
+#else
+# define __RT_SPIN_INITIALIZER(name) \
+	{ \
+	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
+	.save_state = 1, \
+	}
+#endif
+
+/*
+.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
+*/
+
+#define __SPIN_LOCK_UNLOCKED(name)			\
+	{ .lock = __RT_SPIN_INITIALIZER(name.lock),	\
+	  SPIN_DEP_MAP_INIT(name) }
+
+#define __DEFINE_SPINLOCK(name)				\
+	spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+#define DEFINE_SPINLOCK(name)				\
+	spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
+
+#endif
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index c34b4c82b0dc..4fbc9f78be20 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -933,6 +933,7 @@ enum
 #include <linux/rcupdate.h>
 #include <linux/wait.h>
 #include <linux/rbtree.h>
+#include <linux/atomic.h>
 
 /* For the /proc/sys support */
 struct ctl_table;
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 6abd9138beda..b703477bd2d6 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -276,7 +276,7 @@ extern void add_timer(struct timer_list *timer);
 
 extern int try_to_del_timer_sync(struct timer_list *timer);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
   extern int del_timer_sync(struct timer_list *timer);
 #else
 # define del_timer_sync(t)		del_timer(t)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5ca0951e1855..44b37510c14f 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,38 +6,37 @@
 
 /*
 * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
- *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * it will not take any MM locks and go straight to the fixup table.
 */
-static inline void pagefault_disable(void)
+static inline void raw_pagefault_disable(void)
 {
 	inc_preempt_count();
-	/*
-	 * make sure to have issued the store before a pagefault
-	 * can hit.
-	 */
 	barrier();
 }
 
-static inline void pagefault_enable(void)
+static inline void raw_pagefault_enable(void)
 {
-	/*
-	 * make sure to issue those last loads/stores before enabling
-	 * the pagefault handler again.
-	 */
 	barrier();
 	dec_preempt_count();
-	/*
-	 * make sure we do..
-	 */
 	barrier();
 	preempt_check_resched();
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+static inline void pagefault_disable(void)
+{
+	raw_pagefault_disable();
+}
+
+static inline void pagefault_enable(void)
+{
+	raw_pagefault_enable();
+}
+#else
+extern void pagefault_disable(void);
+extern void pagefault_enable(void);
+#endif
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -77,9 +76,9 @@ static inline unsigned long __copy_from_user_nocache(void *to,
 		mm_segment_t old_fs = get_fs(); \
 		\
 		set_fs(KERNEL_DS); \
-		pagefault_disable(); \
+		raw_pagefault_disable(); \
 		ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
-		pagefault_enable(); \
+		raw_pagefault_enable(); \
 		set_fs(old_fs); \
 		ret; \
 	})
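A minimal, hypothetical sketch of the split introduced above: ordinary users
that only need to suppress the fault handler (and may run preemptibly on RT)
keep calling pagefault_disable(), while code that really must not be
preempted uses the raw_ variants, as the patched probe-kernel macro does.
The function below is invented for illustration.

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	static int peek_user_word(const u32 __user *uaddr, u32 *val)
	{
		int ret;

		pagefault_disable();	/* faults now go straight to the fixup table */
		ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
		pagefault_enable();

		return ret ? -EFAULT : 0;	/* nonzero means bytes were not copied */
	}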
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 65efb92da996..1b3f2efd3ad0 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -29,7 +29,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
 
 static inline void __count_vm_event(enum vm_event_item item)
 {
+	preempt_disable_rt();
 	__this_cpu_inc(vm_event_states.event[item]);
+	preempt_enable_rt();
 }
 
 static inline void count_vm_event(enum vm_event_item item)
@@ -39,7 +41,9 @@ static inline void count_vm_event(enum vm_event_item item)
 
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
+	preempt_disable_rt();
 	__this_cpu_add(vm_event_states.event[item], delta);
+	preempt_enable_rt();
 }
 
 static inline void count_vm_events(enum vm_event_item item, long delta)
diff --git a/include/linux/wait-simple.h b/include/linux/wait-simple.h
new file mode 100644
index 000000000000..44fee60c05ab
--- /dev/null
+++ b/include/linux/wait-simple.h
@@ -0,0 +1,172 @@
+#ifndef _LINUX_WAIT_SIMPLE_H
+#define _LINUX_WAIT_SIMPLE_H
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+#include <asm/current.h>
+
+struct swaiter {
+	struct task_struct	*task;
+	struct list_head	node;
+};
+
+#define DEFINE_SWAITER(name)					\
+	struct swaiter name = {					\
+		.task	= current,				\
+		.node	= LIST_HEAD_INIT((name).node),		\
+	}
+
+struct swait_head {
+	raw_spinlock_t		lock;
+	struct list_head	list;
+};
+
+#define SWAIT_HEAD_INITIALIZER(name) {				\
+		.lock	= __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
+		.list	= LIST_HEAD_INIT((name).list),		\
+	}
+
+#define DEFINE_SWAIT_HEAD(name)					\
+	struct swait_head name = SWAIT_HEAD_INITIALIZER(name)
+
+extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
+
+#define init_swait_head(swh)					\
+	do {							\
+		static struct lock_class_key __key;		\
+								\
+		__init_swait_head((swh), &__key);		\
+	} while (0)
+
+/*
+ * Waiter functions
+ */
+extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w);
+extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
+extern void swait_finish_locked(struct swait_head *head, struct swaiter *w);
+extern void swait_finish(struct swait_head *head, struct swaiter *w);
+
+/*
+ * Wakeup functions
+ */
+extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num);
+extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num);
+
+#define swait_wake(head)			__swait_wake(head, TASK_NORMAL, 1)
+#define swait_wake_interruptible(head)		__swait_wake(head, TASK_INTERRUPTIBLE, 1)
+#define swait_wake_all(head)			__swait_wake(head, TASK_NORMAL, 0)
+#define swait_wake_all_interruptible(head)	__swait_wake(head, TASK_INTERRUPTIBLE, 0)
+
+/*
+ * Event API
+ */
+#define __swait_event(wq, condition)					\
+do {									\
+	DEFINE_SWAITER(__wait);						\
+									\
+	for (;;) {							\
+		swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		schedule();						\
+	}								\
+	swait_finish(&wq, &__wait);					\
+} while (0)
+
+/**
+ * swait_event - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define swait_event(wq, condition)					\
+do {									\
+	if (condition)							\
+		break;							\
+	__swait_event(wq, condition);					\
+} while (0)
+
+#define __swait_event_interruptible(wq, condition, ret)			\
+do {									\
+	DEFINE_SWAITER(__wait);						\
+									\
+	for (;;) {							\
+		swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		if (signal_pending(current)) {				\
+			ret = -ERESTARTSYS;				\
+			break;						\
+		}							\
+		schedule();						\
+	}								\
+	swait_finish(&wq, &__wait);					\
+} while (0)
+
+/**
+ * swait_event_interruptible - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define swait_event_interruptible(wq, condition)			\
+({									\
+	int __ret = 0;							\
+	if (!(condition))						\
+		__swait_event_interruptible(wq, condition, __ret);	\
+	__ret;								\
+})
+
+#define __swait_event_timeout(wq, condition, ret)			\
+do {									\
+	DEFINE_SWAITER(__wait);						\
+									\
+	for (;;) {							\
+		swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		ret = schedule_timeout(ret);				\
+		if (!ret)						\
+			break;						\
+	}								\
+	swait_finish(&wq, &__wait);					\
+} while (0)
+
+/**
+ * swait_event_timeout - sleep until a condition gets true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function returns 0 if the @timeout elapsed, and the remaining
+ * jiffies if the condition evaluated to true before the timeout elapsed.
+ */
+#define swait_event_timeout(wq, condition, timeout)			\
+({									\
+	long __ret = timeout;						\
+	if (!(condition))						\
+		__swait_event_timeout(wq, condition, __ret);		\
+	__ret;								\
+})
+
+#endif
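For illustration, a minimal (hypothetical) producer/consumer pair on a simple
wait head; data_wait and data_ready are invented names. wake_up semantics
mirror ordinary waitqueues: set the condition, then wake.

	#include <linux/wait-simple.h>
	#include <linux/sched.h>

	static DEFINE_SWAIT_HEAD(data_wait);
	static int data_ready;

	static void producer(void)
	{
		data_ready = 1;			/* change the condition first */
		swait_wake(&data_wait);		/* wake one TASK_NORMAL waiter */
	}

	static int consumer(void)
	{
		/* returns 0, or -ERESTARTSYS if interrupted by a signal */
		return swait_event_interruptible(data_wait, data_ready);
	}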
diff --git a/include/linux/work-simple.h b/include/linux/work-simple.h
new file mode 100644
index 000000000000..f175fa9a6016
--- /dev/null
+++ b/include/linux/work-simple.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
+
+#include <linux/list.h>
+
+struct swork_event {
+	struct list_head item;
+	unsigned long flags;
+	void (*func)(struct swork_event *);
+};
+
+static inline void INIT_SWORK(struct swork_event *event,
+			      void (*func)(struct swork_event *))
+{
+	event->flags = 0;
+	event->func = func;
+}
+
+bool swork_queue(struct swork_event *sev);
+
+int swork_get(void);
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
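A hedged usage sketch, with invented names: swork_get() is assumed to bring
up the worker thread before the first swork_queue(), and swork_put() to drop
the reference again; error handling is elided.

	#include <linux/work-simple.h>

	static void my_handler(struct swork_event *ev)
	{
		/* runs in the simple-work kernel thread and may sleep */
	}

	static struct swork_event my_event;

	static int my_init(void)
	{
		int err = swork_get();		/* ensure the worker exists */

		if (err)
			return err;
		INIT_SWORK(&my_event, my_handler);
		swork_queue(&my_event);		/* hand the event to the worker */
		return 0;
	}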
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 34c996f46181..8e626fa6ccde 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -394,7 +394,7 @@ struct neighbour_cb {
 
 #define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)
 
-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
+static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
 				     const struct net_device *dev)
 {
 	unsigned int seq;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index bbd023a1c9b9..643911ea0934 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -47,6 +47,7 @@ struct netns_ipv4 {
 
 	int sysctl_icmp_echo_ignore_all;
 	int sysctl_icmp_echo_ignore_broadcasts;
+	int sysctl_icmp_echo_sysrq;
 	int sysctl_icmp_ignore_bogus_error_responses;
 	int sysctl_icmp_ratelimit;
 	int sysctl_icmp_ratemask;
diff --git a/include/trace/events/hist.h b/include/trace/events/hist.h
new file mode 100644
index 000000000000..28646db2c775
--- /dev/null
+++ b/include/trace/events/hist.h
@@ -0,0 +1,69 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hist
+
+#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HIST_H
+
+#include "latency_hist.h"
+#include <linux/tracepoint.h>
+
+#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
+#define trace_preemptirqsoff_hist(a,b)
+#else
+TRACE_EVENT(preemptirqsoff_hist,
+
+	TP_PROTO(int reason, int starthist),
+
+	TP_ARGS(reason, starthist),
+
+	TP_STRUCT__entry(
+		__field(int,	reason	)
+		__field(int,	starthist	)
+	),
+
+	TP_fast_assign(
+		__entry->reason		= reason;
+		__entry->starthist	= starthist;
+	),
+
+	TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
+		  __entry->starthist ? "start" : "stop")
+);
+#endif
+
+#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
+#define trace_hrtimer_interrupt(a,b,c,d)
+#else
+TRACE_EVENT(hrtimer_interrupt,
+
+	TP_PROTO(int cpu, long long offset, struct task_struct *curr, struct task_struct *task),
+
+	TP_ARGS(cpu, offset, curr, task),
+
+	TP_STRUCT__entry(
+		__field(int,		cpu	)
+		__field(long long,	offset	)
+		__array(char,		ccomm,	TASK_COMM_LEN)
+		__field(int,		cprio	)
+		__array(char,		tcomm,	TASK_COMM_LEN)
+		__field(int,		tprio	)
+	),
+
+	TP_fast_assign(
+		__entry->cpu	= cpu;
+		__entry->offset	= offset;
+		memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
+		__entry->cprio  = curr->prio;
+		memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>", task != NULL ? TASK_COMM_LEN : 7);
+		__entry->tprio  = task != NULL ? task->prio : -1;
+	),
+
+	TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
+		__entry->cpu, __entry->offset, __entry->ccomm, __entry->cprio, __entry->tcomm, __entry->tprio)
+);
+#endif
+
+#endif /* _TRACE_HIST_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/latency_hist.h b/include/trace/events/latency_hist.h
new file mode 100644
index 000000000000..d6b5d77c57aa
--- /dev/null
+++ b/include/trace/events/latency_hist.h
@@ -0,0 +1,30 @@
+#ifndef _LATENCY_HIST_H
+#define _LATENCY_HIST_H
+
+enum hist_action {
+	IRQS_ON,
+	PREEMPT_ON,
+	TRACE_STOP,
+	IRQS_OFF,
+	PREEMPT_OFF,
+	TRACE_START,
+};
+
+static char *actions[] = {
+	"IRQS_ON",
+	"PREEMPT_ON",
+	"TRACE_STOP",
+	"IRQS_OFF",
+	"PREEMPT_OFF",
+	"TRACE_START",
+};
+
+static inline char *getaction(int action)
+{
+	if (action >= 0 && action < sizeof(actions)/sizeof(actions[0]))
+		return(actions[action]);
+	return("unknown");
+}
+
+#endif /* _LATENCY_HIST_H */
+
diff --git a/init/Kconfig b/init/Kconfig
index f1b0e7828ee2..aa42bfe73726 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -474,7 +474,7 @@ config RCU_FANOUT_EXACT
 
 config RCU_FAST_NO_HZ
 	bool "Accelerate last non-dyntick-idle CPU's grace periods"
-	depends on NO_HZ && SMP
+	depends on NO_HZ && SMP && !PREEMPT_RT_FULL
 	default n
 	help
 	  This option causes RCU to attempt to accelerate grace periods
@@ -499,7 +499,7 @@ config TREE_RCU_TRACE
 config RCU_BOOST
 	bool "Enable RCU priority boosting"
 	depends on RT_MUTEXES && PREEMPT_RCU
-	default n
+	default y if PREEMPT_RT_FULL
 	help
 	  This option boosts the priority of preempted RCU readers that
 	  block the current preemptible RCU grace period for too long.
@@ -747,6 +747,7 @@ config RT_GROUP_SCHED
 	bool "Group scheduling for SCHED_RR/FIFO"
 	depends on EXPERIMENTAL
 	depends on CGROUP_SCHED
+	depends on !PREEMPT_RT_FULL
 	default n
 	help
 	  This feature lets you explicitly allocate real CPU bandwidth
@@ -1266,6 +1267,7 @@ config SLAB
 
 config SLUB
 	bool "SLUB (Unqueued Allocator)"
+	depends on !PREEMPT_RT_FULL
 	help
 	   SLUB is a slab allocator that minimizes cache line usage
 	   instead of managing queues of cached objects (SLAB approach).
@@ -1277,6 +1279,7 @@ config SLUB
 config SLOB
 	depends on EXPERT
 	bool "SLOB (Simple Allocator)"
+	depends on !PREEMPT_RT_FULL
 	help
 	   SLOB replaces the stock allocator with a drastically simpler
 	   allocator. SLOB is generally more space efficient but
diff --git a/init/Makefile b/init/Makefile
index 0bf677aa0872..6b473cd16509 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -29,4 +29,4 @@ silent_chk_compile.h = :
 include/generated/compile.h: FORCE
 	@$($(quiet)chk_compile.h)
 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
-	"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
+	"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
diff --git a/init/main.c b/init/main.c
index f30baf52f80b..8991ac907229 100644
--- a/init/main.c
+++ b/init/main.c
@@ -68,6 +68,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/perf_event.h>
+#include <linux/posix-timers.h>
 #include <linux/random.h>
 
 #include <asm/io.h>
@@ -500,6 +501,7 @@ asmlinkage void __init start_kernel(void)
 	setup_command_line(command_line);
 	setup_nr_cpu_ids();
 	setup_per_cpu_areas();
+	softirq_early_init();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
 
 	build_all_zonelists(NULL);
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 28bd64ddeda3..e6302726d732 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -813,12 +813,17 @@ static inline void pipelined_send(struct mqueue_inode_info *info,
 				  struct msg_msg *message,
 				  struct ext_wait_queue *receiver)
 {
+	/*
+	 * Keep them in one critical section for PREEMPT_RT:
+	 */
+	preempt_disable_rt();
 	receiver->msg = message;
 	list_del(&receiver->list);
 	receiver->state = STATE_PENDING;
 	wake_up_process(receiver->task);
 	smp_wmb();
 	receiver->state = STATE_READY;
+	preempt_enable_rt();
 }
 
 /* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
@@ -832,15 +837,19 @@ static inline void pipelined_receive(struct mqueue_inode_info *info)
 		wake_up_interruptible(&info->wait_q);
 		return;
 	}
+	/*
+	 * Keep them in one critical section for PREEMPT_RT:
+	 */
+	preempt_disable_rt();
 	msg_insert(sender->msg, info);
 	list_del(&sender->list);
 	sender->state = STATE_PENDING;
 	wake_up_process(sender->task);
 	smp_wmb();
 	sender->state = STATE_READY;
+	preempt_enable_rt();
 }
-
-SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
+ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
 		size_t, msg_len, unsigned int, msg_prio,
 		const struct timespec __user *, u_abs_timeout)
 {
diff --git a/ipc/msg.c b/ipc/msg.c
index 25f1a6139584..876b8444625f 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -259,12 +259,20 @@ static void expunge_all(struct msg_queue *msq, int res)
 	while (tmp != &msq->q_receivers) {
 		struct msg_receiver *msr;
 
+		/*
+		 * Make sure that the wakeup doesn't preempt
+		 * this CPU prematurely. (on PREEMPT_RT)
+		 */
+		preempt_disable_rt();
+
 		msr = list_entry(tmp, struct msg_receiver, r_list);
 		tmp = tmp->next;
 		msr->r_msg = NULL;
 		wake_up_process(msr->r_tsk);
 		smp_mb();
 		msr->r_msg = ERR_PTR(res);
+
+		preempt_enable_rt();
 	}
 }
 
@@ -613,6 +621,12 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
 		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
 					       msr->r_msgtype, msr->r_mode)) {
 
+			/*
+			 * Make sure that the wakeup doesn't preempt
+			 * this CPU prematurely. (on PREEMPT_RT)
+			 */
+			preempt_disable_rt();
+
 			list_del(&msr->r_list);
 			if (msr->r_maxsize < msg->m_ts) {
 				msr->r_msg = NULL;
@@ -626,9 +640,11 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
 				wake_up_process(msr->r_tsk);
 				smp_mb();
 				msr->r_msg = msg;
+				preempt_enable_rt();
 
 				return 1;
 			}
+			preempt_enable_rt();
 		}
 	}
 	return 0;
diff --git a/ipc/sem.c b/ipc/sem.c
index 5215a81420df..5eaf6846d8cd 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -461,6 +461,13 @@ undo:
 static void wake_up_sem_queue_prepare(struct list_head *pt,
 				struct sem_queue *q, int error)
 {
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct task_struct *p = q->sleeper;
+	get_task_struct(p);
+	q->status = error;
+	wake_up_process(p);
+	put_task_struct(p);
+#else
 	if (list_empty(pt)) {
 		/*
 		 * Hold preempt off so that we don't get preempted and have the
@@ -472,6 +479,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
 	q->pid = error;
 
 	list_add_tail(&q->simple_list, pt);
+#endif
 }
 
 /**
@@ -485,6 +493,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
 */
 static void wake_up_sem_queue_do(struct list_head *pt)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	struct sem_queue *q, *t;
 	int did_something;
 
@@ -497,6 +506,7 @@ static void wake_up_sem_queue_do(struct list_head *pt)
 	}
 	if (did_something)
 		preempt_enable();
+#endif
 }
 
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 2251882daf53..033ebc06c262 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -199,4 +199,4 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
 	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 
 config MUTEX_SPIN_ON_OWNER
-	def_bool SMP && !DEBUG_MUTEXES
+	def_bool SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 3f9c97419f02..c9f006b2095c 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -1,3 +1,10 @@
+config PREEMPT
+	bool
+	select PREEMPT_COUNT
+
+config PREEMPT_RT_BASE
+	bool
+	select PREEMPT
 
 choice
 	prompt "Preemption Model"
@@ -33,9 +40,9 @@ config PREEMPT_VOLUNTARY
 
 	  Select this if you are building a kernel for a desktop system.
 
-config PREEMPT
+config PREEMPT__LL
 	bool "Preemptible Kernel (Low-Latency Desktop)"
-	select PREEMPT_COUNT
+	select PREEMPT
 	select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
 	help
 	  This option reduces the latency of the kernel by making
@@ -52,6 +59,21 @@ config PREEMPT
 	  embedded system with latency requirements in the milliseconds
 	  range.
 
+config PREEMPT_RTB
+	bool "Preemptible Kernel (Basic RT)"
+	select PREEMPT_RT_BASE
+	help
+	  This option is basically the same as (Low-Latency Desktop) but
+	  enables changes which are preliminary for the full preemptible
+	  RT kernel.
+
+config PREEMPT_RT_FULL
+	bool "Fully Preemptible Kernel (RT)"
+	depends on IRQ_FORCED_THREADING
+	select PREEMPT_RT_BASE
+	help
+	  All and everything
+
 endchoice
 
 config PREEMPT_COUNT
diff --git a/kernel/Makefile b/kernel/Makefile
index cb41b9547c9f..e7bbd158744b 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,10 +7,10 @@ obj-y     = fork.o exec_domain.o panic.o printk.o \
 	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
-	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
-	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
+	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
+	    hrtimer.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o cred.o \
-	    async.o range.o groups.o
+	    async.o range.o groups.o wait-simple.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
@@ -29,7 +29,11 @@ obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += time/
+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+obj-y += mutex.o
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+obj-y += rwsem.o
+endif
 obj-$(CONFIG_LOCKDEP) += lockdep.o
 ifeq ($(CONFIG_PROC_FS),y)
 obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
@@ -41,6 +45,7 @@ endif
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += smp.o
 ifneq ($(CONFIG_SMP),y)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 04671bf91c12..903e11b4f142 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -58,6 +58,284 @@ static struct {
 	.refcount = 0,
 };
 
+/**
+ * hotplug_pcp - per cpu hotplug descriptor
+ * @unplug:	set when pin_current_cpu() needs to sync tasks
+ * @sync_tsk:	the task that waits for tasks to finish pinned sections
+ * @refcount:	counter of tasks in pinned sections
+ * @grab_lock:	set when the tasks entering pinned sections should wait
+ * @synced:	notifier for @sync_tsk to tell cpu_down it's finished
+ * @mutex:	the mutex to make tasks wait (used when @grab_lock is true)
+ * @mutex_init:	zero if the mutex hasn't been initialized yet.
+ *
+ * Although @unplug and @sync_tsk may point to the same task, the @unplug
+ * is used as a flag and still exists after @sync_tsk has exited and
+ * @sync_tsk set to NULL.
+ */
+struct hotplug_pcp {
+	struct task_struct *unplug;
+	struct task_struct *sync_tsk;
+	int refcount;
+	int grab_lock;
+	struct completion synced;
+	struct completion unplug_wait;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	spinlock_t lock;
+#else
+	struct mutex mutex;
+#endif
+	int mutex_init;
+};
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
+# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
+#else
+# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
+# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
+#endif
+
+static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
+
+/**
+ * pin_current_cpu - Prevent the current cpu from being unplugged
+ *
+ * Lightweight version of get_online_cpus() to prevent cpu from being
+ * unplugged when code runs in a migration disabled region.
+ *
+ * Must be called with preemption disabled (preempt_count = 1)!
+ */
+void pin_current_cpu(void)
+{
+	struct hotplug_pcp *hp;
+	int force = 0;
+
+retry:
+	hp = &__get_cpu_var(hotplug_pcp);
+
+	if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
+	    hp->unplug == current || (current->flags & PF_STOMPER)) {
+		hp->refcount++;
+		return;
+	}
+
+	if (hp->grab_lock) {
+		preempt_enable();
+		hotplug_lock(hp);
+		hotplug_unlock(hp);
+	} else {
+		preempt_enable();
+		/*
+		 * Try to push this task off of this CPU.
+		 */
+		if (!migrate_me()) {
+			preempt_disable();
+			hp = &__get_cpu_var(hotplug_pcp);
+			if (!hp->grab_lock) {
+				/*
+				 * Just let it continue, it's already pinned
+				 * or about to sleep.
+				 */
+				force = 1;
+				goto retry;
+			}
+			preempt_enable();
+		}
+	}
+	preempt_disable();
+	goto retry;
+}
+
+/**
+ * unpin_current_cpu - Allow unplug of current cpu
+ *
+ * Must be called with preemption or interrupts disabled!
+ */
+void unpin_current_cpu(void)
+{
+	struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
+
+	WARN_ON(hp->refcount <= 0);
+
+	/* This is safe. sync_unplug_thread is pinned to this cpu */
+	if (!--hp->refcount && hp->unplug && hp->unplug != current &&
+	    !(current->flags & PF_STOMPER))
+		wake_up_process(hp->unplug);
+}
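
A hedged sketch of the pin/unpin protocol documented above. In the real tree
this pairing is assumed to live inside migrate_disable()/migrate_enable();
it is spelled out explicitly here only to illustrate the calling rules, and
the function name is invented.

	static void do_percpu_section(void)
	{
		preempt_disable();	/* pin requires preempt_count == 1 */
		pin_current_cpu();	/* may block or migrate if this CPU goes down */
		preempt_enable();

		/* preemptible here, but this CPU cannot be unplugged under us */

		preempt_disable();	/* unpin also requires preemption off */
		unpin_current_cpu();
		preempt_enable();
	}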
+
+static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
+{
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	while (hp->refcount) {
+		schedule_preempt_disabled();
+		set_current_state(TASK_UNINTERRUPTIBLE);
+	}
+}
+
+static int sync_unplug_thread(void *data)
+{
+	struct hotplug_pcp *hp = data;
+
+	wait_for_completion(&hp->unplug_wait);
+	preempt_disable();
+	hp->unplug = current;
+	wait_for_pinned_cpus(hp);
+
+	/*
+	 * This thread will synchronize the cpu_down() with threads
+	 * that have pinned the CPU. When the pinned CPU count reaches
+	 * zero, we inform the cpu_down code to continue to the next step.
+	 */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	preempt_enable();
+	complete(&hp->synced);
+
+	/*
+	 * If all succeeds, the next step will need tasks to wait till
+	 * the CPU is offline before continuing. To do this, the grab_lock
+	 * is set and tasks going into pin_current_cpu() will block on the
+	 * mutex. But we still need to wait for those that are already in
+	 * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
+	 * will kick this thread out.
+	 */
+	while (!hp->grab_lock && !kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_UNINTERRUPTIBLE);
+	}
+
+	/* Make sure grab_lock is seen before we see a stale completion */
+	smp_mb();
+
+	/*
+	 * Now just before cpu_down() enters stop machine, we need to make
+	 * sure all tasks that are in pinned CPU sections are out, and new
+	 * tasks will now grab the lock, keeping them from entering pinned
+	 * CPU sections.
+	 */
+	if (!kthread_should_stop()) {
+		preempt_disable();
+		wait_for_pinned_cpus(hp);
+		preempt_enable();
+		complete(&hp->synced);
+	}
+
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_UNINTERRUPTIBLE);
+	}
+	set_current_state(TASK_RUNNING);
+
+	/*
+	 * Force this thread off this CPU as it's going down and
+	 * we don't want any more work on this CPU.
+	 */
+	current->flags &= ~PF_THREAD_BOUND;
+	do_set_cpus_allowed(current, cpu_present_mask);
+	migrate_me();
+	return 0;
+}
+
+static void __cpu_unplug_sync(struct hotplug_pcp *hp)
+{
+	wake_up_process(hp->sync_tsk);
+	wait_for_completion(&hp->synced);
+}
+
+static void __cpu_unplug_wait(unsigned int cpu)
+{
+	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+	complete(&hp->unplug_wait);
+	wait_for_completion(&hp->synced);
+}
+
+/*
+ * Start the sync_unplug_thread on the target cpu and wait for it to
+ * complete.
+ */
+static int cpu_unplug_begin(unsigned int cpu)
+{
+	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+	int err;
+
+	/* Protected by cpu_hotplug.lock */
+	if (!hp->mutex_init) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+		spin_lock_init(&hp->lock);
+#else
+		mutex_init(&hp->mutex);
+#endif
+		hp->mutex_init = 1;
+	}
+
+	/* Inform the scheduler to migrate tasks off this CPU */
+	tell_sched_cpu_down_begin(cpu);
+
+	init_completion(&hp->synced);
+	init_completion(&hp->unplug_wait);
+
+	hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+	if (IS_ERR(hp->sync_tsk)) {
+		err = PTR_ERR(hp->sync_tsk);
+		hp->sync_tsk = NULL;
+		return err;
+	}
+	kthread_bind(hp->sync_tsk, cpu);
+
+	/*
+	 * Wait for tasks to get out of the pinned sections,
+	 * it's still OK if new tasks enter. Some CPU notifiers will
+	 * wait for tasks that are going to enter these sections and
+	 * we must not have them block.
+	 */
+	wake_up_process(hp->sync_tsk);
+	return 0;
+}
+
+static void cpu_unplug_sync(unsigned int cpu)
+{
+	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+	init_completion(&hp->synced);
+	/* The completion needs to be initialized before setting grab_lock */
+	smp_wmb();
+
+	/* Grab the mutex before setting grab_lock */
+	hotplug_lock(hp);
+	hp->grab_lock = 1;
+
+	/*
+	 * The CPU notifiers have been completed.
+	 * Wait for tasks to get out of pinned CPU sections and have new
+	 * tasks block until the CPU is completely down.
+	 */
+	__cpu_unplug_sync(hp);
+
+	/* All done with the sync thread */
+	kthread_stop(hp->sync_tsk);
+	hp->sync_tsk = NULL;
+}
+
+static void cpu_unplug_done(unsigned int cpu)
+{
+	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+	hp->unplug = NULL;
+	/* Let all tasks know cpu unplug is finished before cleaning up */
+	smp_wmb();
+
+	if (hp->sync_tsk)
+		kthread_stop(hp->sync_tsk);
+
+	if (hp->grab_lock) {
+		hotplug_unlock(hp);
+		/* protected by cpu_hotplug.lock */
+		hp->grab_lock = 0;
+	}
+	tell_sched_cpu_down_done(cpu);
+}
+
 void get_online_cpus(void)
 {
 	might_sleep();
@@ -231,13 +509,15 @@ static int __ref take_cpu_down(void *_param)
 /* Requires cpu_add_remove_lock to be held */
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
-	int err, nr_calls = 0;
+	int mycpu, err, nr_calls = 0;
 	void *hcpu = (void *)(long)cpu;
 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
 	struct take_cpu_down_param tcd_param = {
 		.mod = mod,
 		.hcpu = hcpu,
 	};
+	cpumask_var_t cpumask;
+	cpumask_var_t cpumask_org;
 
 	if (num_online_cpus() == 1)
 		return -EBUSY;
@@ -245,7 +525,34 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
+	/* Move the downtaker off the unplug cpu */
+	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+		return -ENOMEM;
+	if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
+		free_cpumask_var(cpumask);
+		return -ENOMEM;
+	}
+
+	cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
+	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
+	set_cpus_allowed_ptr(current, cpumask);
+	free_cpumask_var(cpumask);
+	migrate_disable();
+	mycpu = smp_processor_id();
+	if (mycpu == cpu) {
+		printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
+		migrate_enable();
+		err = -EBUSY;
+		goto restore_cpus;
+	}
+	migrate_enable();
+
 	cpu_hotplug_begin();
+	err = cpu_unplug_begin(cpu);
+	if (err) {
+		printk("cpu_unplug_begin(%d) failed\n", cpu);
+		goto out_cancel;
+	}
 
 	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
 	if (err) {
@@ -256,6 +563,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 		goto out_release;
 	}
 
+	__cpu_unplug_wait(cpu);
+
+	/* Notifiers are done. Don't let any more tasks pin this CPU. */
+	cpu_unplug_sync(cpu);
+
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone. Can't complain. */
@@ -284,9 +596,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	check_for_tasks(cpu);
 
 out_release:
+	cpu_unplug_done(cpu);
+out_cancel:
 	cpu_hotplug_done();
 	if (!err)
 		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
+restore_cpus:
+	set_cpus_allowed_ptr(current, cpumask_org);
+	free_cpumask_var(cpumask_org);
 	return err;
 }
diff --git a/kernel/cred.c b/kernel/cred.c
index e70683d9ec32..fed7c3fd9477 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -36,7 +36,7 @@ static struct kmem_cache *cred_jar;
 static struct thread_group_cred init_tgcred = {
 	.usage	= ATOMIC_INIT(2),
 	.tgid	= 0,
-	.lock	= __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock),
+	.lock	= __SPIN_LOCK_UNLOCKED(init_tgcred.lock),
 };
 #endif
 
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 572e604c5621..67b22fdfd13c 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -554,7 +554,6 @@ int vkdb_printf(const char *fmt, va_list ap)
 	int linecount;
 	int colcount;
 	int logging, saved_loglevel = 0;
-	int saved_trap_printk;
 	int got_printf_lock = 0;
 	int retlen = 0;
 	int fnd, len;
@@ -565,8 +564,6 @@ int vkdb_printf(const char *fmt, va_list ap)
 	unsigned long uninitialized_var(flags);
 
 	preempt_disable();
-	saved_trap_printk = kdb_trap_printk;
-	kdb_trap_printk = 0;
 
 	/* Serialize kdb_printf if multiple cpus try to write at once.
 	 * But if any cpu goes recursive in kdb, just print the output,
@@ -844,7 +841,6 @@ kdb_print_out:
 	} else {
 		__release(kdb_printf_lock);
 	}
-	kdb_trap_printk = saved_trap_printk;
 	preempt_enable();
 	return retlen;
 }
@@ -854,9 +850,11 @@ int kdb_printf(const char *fmt, ...)
 	va_list ap;
 	int r;
 
+	kdb_trap_printk++;
 	va_start(ap, fmt);
 	r = vkdb_printf(fmt, ap);
 	va_end(ap);
+	kdb_trap_printk--;
 
 	return r;
 }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 461b6e0007b1..4cb6f0c37583 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5581,6 +5581,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
 
 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hwc->hrtimer.function = perf_swevent_hrtimer;
+	hwc->hrtimer.irqsafe = 1;
 
 	/*
 	 * Since hrtimers have a fixed rate, we can do a static freq->period
diff --git a/kernel/exit.c b/kernel/exit.c
index 38980ea0b2d6..12d299f5744c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -144,7 +144,7 @@ static void __exit_signal(struct task_struct *tsk)
 	 * Do this under ->siglock, we can race with another thread
 	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
 	 */
-	flush_sigqueue(&tsk->pending);
+	flush_task_sigqueue(tsk);
 	tsk->sighand = NULL;
 	spin_unlock(&sighand->siglock);
 
diff --git a/kernel/fork.c b/kernel/fork.c
index be2495f0eed7..b62e4681a10f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -91,7 +91,7 @@ int max_threads;		/* tunable limit on nr_threads */
 
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
-__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
+DEFINE_RWLOCK(tasklist_lock);  /* outer */
 
 #ifdef CONFIG_PROVE_RCU
 int lockdep_tasklist_lock_is_held(void)
@@ -202,7 +202,18 @@ void __put_task_struct(struct task_struct *tsk)
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
 }
+#ifndef CONFIG_PREEMPT_RT_BASE
 EXPORT_SYMBOL_GPL(__put_task_struct);
+#else
+void __put_task_struct_cb(struct rcu_head *rhp)
+{
+	struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
+
+	__put_task_struct(tsk);
+
+}
+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
+#endif
 
 /*
  * macro override instead of weak attribute alias, to workaround
@@ -563,6 +574,19 @@ void __mmdrop(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+/*
+ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
+ * want another facility to make this work.
+ */
+void __mmdrop_delayed(struct rcu_head *rhp)
+{
+	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
+	__mmdrop(mm);
+}
+#endif
+
 /*
  * Decrement the use count and release all resources for an mm.
 */
@@ -1103,6 +1127,9 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 */
 static void posix_cpu_timers_init(struct task_struct *tsk)
 {
+#ifdef CONFIG_PREEMPT_RT_BASE
+	tsk->posix_timer_list = NULL;
+#endif
 	tsk->cputime_expires.prof_exp = 0;
 	tsk->cputime_expires.virt_exp = 0;
 	tsk->cputime_expires.sched_exp = 0;
@@ -1210,6 +1237,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	spin_lock_init(&p->alloc_lock);
 
 	init_sigpending(&p->pending);
+	p->sigqueue_cache = NULL;
 
 	p->utime = p->stime = p->gtime = 0;
 	p->utimescaled = p->stimescaled = 0;
@@ -1268,6 +1296,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->hardirq_context = 0;
 	p->softirq_context = 0;
 #endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+	p->pagefault_disabled = 0;
+#endif
#ifdef CONFIG_LOCKDEP
 	p->lockdep_depth = 0; /* no locks held yet */
 	p->curr_chain_key = 0;
diff --git a/kernel/futex.c b/kernel/futex.c
index 6b320c2ad6fa..97ee57369a8e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -645,95 +645,89 @@ void exit_pi_state_list(struct task_struct *curr)
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
 */
-static int
-lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
-		union futex_key *key, struct futex_pi_state **ps)
+
+/*
+ * Validate that the existing waiter has a pi_state and sanity check
+ * the pi_state against the user space value. If correct, attach to
+ * it.
+ */
+static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
+			      struct futex_pi_state **ps)
 {
-	struct futex_pi_state *pi_state = NULL;
-	struct futex_q *this, *next;
-	struct plist_head *head;
-	struct task_struct *p;
 	pid_t pid = uval & FUTEX_TID_MASK;
 
-	head = &hb->chain;
+	/*
+	 * Userspace might have messed up non-PI and PI futexes [3]
+	 */
+	if (unlikely(!pi_state))
+		return -EINVAL;
 
-	plist_for_each_entry_safe(this, next, head, list) {
-		if (match_futex(&this->key, key)) {
-			/*
-			 * Sanity check the waiter before increasing
-			 * the refcount and attaching to it.
-			 */
-			pi_state = this->pi_state;
+	WARN_ON(!atomic_read(&pi_state->refcount));
+
+	/*
+	 * Handle the owner died case:
+	 */
+	if (uval & FUTEX_OWNER_DIED) {
+		/*
+		 * exit_pi_state_list sets owner to NULL and wakes the
+		 * topmost waiter. The task which acquires the
+		 * pi_state->rt_mutex will fixup owner.
+		 */
+		if (!pi_state->owner) {
 			/*
-			 * Userspace might have messed up non-PI and
-			 * PI futexes [3]
+			 * No pi state owner, but the user space TID
+			 * is not 0. Inconsistent state. [5]
 			 */
-			if (unlikely(!pi_state))
+			if (pid)
 				return -EINVAL;
-
-			WARN_ON(!atomic_read(&pi_state->refcount));
-
 			/*
-			 * Handle the owner died case:
+			 * Take a ref on the state and return success. [4]
 			 */
-			if (uval & FUTEX_OWNER_DIED) {
-				/*
-				 * exit_pi_state_list sets owner to NULL and
-				 * wakes the topmost waiter. The task which
-				 * acquires the pi_state->rt_mutex will fixup
-				 * owner.
-				 */
-				if (!pi_state->owner) {
-					/*
-					 * No pi state owner, but the user
-					 * space TID is not 0. Inconsistent
-					 * state. [5]
-					 */
-					if (pid)
-						return -EINVAL;
-					/*
-					 * Take a ref on the state and
-					 * return. [4]
-					 */
-					goto out_state;
-				}
+			goto out_state;
+		}
 
-				/*
-				 * If TID is 0, then either the dying owner
-				 * has not yet executed exit_pi_state_list()
-				 * or some waiter acquired the rtmutex in the
-				 * pi state, but did not yet fixup the TID in
-				 * user space.
-				 *
-				 * Take a ref on the state and return. [6]
-				 */
-				if (!pid)
-					goto out_state;
-			} else {
-				/*
-				 * If the owner died bit is not set,
-				 * then the pi_state must have an
-				 * owner. [7]
-				 */
-				if (!pi_state->owner)
-					return -EINVAL;
-			}
+		/*
+		 * If TID is 0, then either the dying owner has not
+		 * yet executed exit_pi_state_list() or some waiter
+		 * acquired the rtmutex in the pi state, but did not
+		 * yet fixup the TID in user space.
+		 *
+		 * Take a ref on the state and return success. [6]
+		 */
+		if (!pid)
+			goto out_state;
+	} else {
+		/*
+		 * If the owner died bit is not set, then the pi_state
+		 * must have an owner. [7]
+		 */
+		if (!pi_state->owner)
+			return -EINVAL;
+	}
 
-			/*
-			 * Bail out if user space manipulated the
-			 * futex value. If pi state exists then the
-			 * owner TID must be the same as the user
-			 * space TID. [9/10]
-			 */
-			if (pid != task_pid_vnr(pi_state->owner))
-				return -EINVAL;
+	/*
+	 * Bail out if user space manipulated the futex value. If pi
+	 * state exists then the owner TID must be the same as the
+	 * user space TID. [9/10]
+	 */
+	if (pid != task_pid_vnr(pi_state->owner))
+		return -EINVAL;
+out_state:
+	atomic_inc(&pi_state->refcount);
+	*ps = pi_state;
|
|
+ return 0;
|
|
+}
|
|
|
|
- out_state:
|
|
- atomic_inc(&pi_state->refcount);
|
|
- *ps = pi_state;
|
|
- return 0;
|
|
- }
|
|
- }
|
|
+/*
|
|
+ * Lookup the task for the TID provided from user space and attach to
|
|
+ * it after doing proper sanity checks.
|
|
+ */
|
|
+static int attach_to_pi_owner(u32 uval, union futex_key *key,
|
|
+ struct futex_pi_state **ps)
|
|
+{
|
|
+ pid_t pid = uval & FUTEX_TID_MASK;
|
|
+ struct futex_pi_state *pi_state;
|
|
+ struct task_struct *p;
|
|
|
|
/*
|
|
* We are the first waiter - try to look up the real owner and attach
|
|
@@ -776,7 +770,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
|
|
pi_state = alloc_pi_state();
|
|
|
|
/*
|
|
- * Initialize the pi_mutex in locked state and make 'p'
|
|
+ * Initialize the pi_mutex in locked state and make @p
|
|
* the owner of it:
|
|
*/
|
|
rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
|
|
@@ -796,6 +790,36 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
|
|
return 0;
|
|
}
|
|
|
|
+static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
|
|
+ union futex_key *key, struct futex_pi_state **ps)
|
|
+{
|
|
+ struct futex_q *match = futex_top_waiter(hb, key);
|
|
+
|
|
+ /*
|
|
+ * If there is a waiter on that futex, validate it and
|
|
+ * attach to the pi_state when the validation succeeds.
|
|
+ */
|
|
+ if (match)
|
|
+ return attach_to_pi_state(uval, match->pi_state, ps);
|
|
+
|
|
+ /*
|
|
+ * We are the first waiter - try to look up the owner based on
|
|
+ * @uval and attach to it.
|
|
+ */
|
|
+ return attach_to_pi_owner(uval, key, ps);
|
|
+}
|
|
+
|
|
+static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
|
|
+{
|
|
+ u32 uninitialized_var(curval);
|
|
+
|
|
+ if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ /* If user space value changed, let the caller retry */
|
|
+ return curval != uval ? -EAGAIN : 0;
|
|
+}
|
|
+
|
|
/**
|
|
* futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
|
|
* @uaddr: the pi futex user address
|
|
@@ -819,113 +843,69 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
|
|
struct futex_pi_state **ps,
|
|
struct task_struct *task, int set_waiters)
|
|
{
|
|
- int lock_taken, ret, force_take = 0;
|
|
- u32 uval, newval, curval, vpid = task_pid_vnr(task);
|
|
-
|
|
-retry:
|
|
- ret = lock_taken = 0;
|
|
+ u32 uval, newval, vpid = task_pid_vnr(task);
|
|
+ struct futex_q *match;
|
|
+ int ret;
|
|
|
|
/*
|
|
- * To avoid races, we attempt to take the lock here again
|
|
- * (by doing a 0 -> TID atomic cmpxchg), while holding all
|
|
- * the locks. It will most likely not succeed.
|
|
+ * Read the user space value first so we can validate a few
|
|
+ * things before proceeding further.
|
|
*/
|
|
- newval = vpid;
|
|
- if (set_waiters)
|
|
- newval |= FUTEX_WAITERS;
|
|
-
|
|
- if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
|
|
+ if (get_futex_value_locked(&uval, uaddr))
|
|
return -EFAULT;
|
|
|
|
/*
|
|
* Detect deadlocks.
|
|
*/
|
|
- if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
|
|
+ if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
|
|
return -EDEADLK;
|
|
|
|
/*
|
|
- * Surprise - we got the lock, but we do not trust user space at all.
|
|
+ * Lookup existing state first. If it exists, try to attach to
|
|
+ * its pi_state.
|
|
*/
|
|
- if (unlikely(!curval)) {
|
|
- /*
|
|
- * We verify whether there is kernel state for this
|
|
- * futex. If not, we can safely assume, that the 0 ->
|
|
- * TID transition is correct. If state exists, we do
|
|
- * not bother to fixup the user space state as it was
|
|
- * corrupted already.
|
|
- */
|
|
- return futex_top_waiter(hb, key) ? -EINVAL : 1;
|
|
- }
|
|
-
|
|
- uval = curval;
|
|
+ match = futex_top_waiter(hb, key);
|
|
+ if (match)
|
|
+ return attach_to_pi_state(uval, match->pi_state, ps);
|
|
|
|
/*
|
|
- * Set the FUTEX_WAITERS flag, so the owner will know it has someone
|
|
- * to wake at the next unlock.
|
|
+ * No waiter and user TID is 0. We are here because the
|
|
+ * waiters or the owner died bit is set or called from
|
|
+ * requeue_cmp_pi or for whatever reason something took the
|
|
+ * syscall.
|
|
*/
|
|
- newval = curval | FUTEX_WAITERS;
|
|
-
|
|
- /*
|
|
- * Should we force take the futex? See below.
|
|
- */
|
|
- if (unlikely(force_take)) {
|
|
+ if (!(uval & FUTEX_TID_MASK)) {
|
|
/*
|
|
- * Keep the OWNER_DIED and the WAITERS bit and set the
|
|
- * new TID value.
|
|
+ * We take over the futex. No other waiters and the user space
|
|
+ * TID is 0. We preserve the owner died bit.
|
|
*/
|
|
- newval = (curval & ~FUTEX_TID_MASK) | vpid;
|
|
- force_take = 0;
|
|
- lock_taken = 1;
|
|
- }
|
|
+ newval = uval & FUTEX_OWNER_DIED;
|
|
+ newval |= vpid;
|
|
|
|
- if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
|
|
- return -EFAULT;
|
|
- if (unlikely(curval != uval))
|
|
- goto retry;
|
|
+ /* The futex requeue_pi code can enforce the waiters bit */
|
|
+ if (set_waiters)
|
|
+ newval |= FUTEX_WAITERS;
|
|
+
|
|
+ ret = lock_pi_update_atomic(uaddr, uval, newval);
|
|
+ /* If the take over worked, return 1 */
|
|
+ return ret < 0 ? ret : 1;
|
|
+ }
|
|
|
|
/*
|
|
- * We took the lock due to forced take over.
|
|
+ * First waiter. Set the waiters bit before attaching ourselves to
|
|
+ * the owner. If owner tries to unlock, it will be forced into
|
|
+ * the kernel and blocked on hb->lock.
|
|
*/
|
|
- if (unlikely(lock_taken))
|
|
- return 1;
|
|
-
|
|
+ newval = uval | FUTEX_WAITERS;
|
|
+ ret = lock_pi_update_atomic(uaddr, uval, newval);
|
|
+ if (ret)
|
|
+ return ret;
|
|
/*
|
|
- * We dont have the lock. Look up the PI state (or create it if
|
|
- * we are the first waiter):
|
|
+ * If the update of the user space value succeeded, we try to
|
|
+ * attach to the owner. If that fails, no harm done, we only
|
|
+ * set the FUTEX_WAITERS bit in the user space variable.
|
|
*/
|
|
- ret = lookup_pi_state(uval, hb, key, ps);
|
|
-
|
|
- if (unlikely(ret)) {
|
|
- switch (ret) {
|
|
- case -ESRCH:
|
|
- /*
|
|
- * We failed to find an owner for this
|
|
- * futex. So we have no pi_state to block
|
|
- * on. This can happen in two cases:
|
|
- *
|
|
- * 1) The owner died
|
|
- * 2) A stale FUTEX_WAITERS bit
|
|
- *
|
|
- * Re-read the futex value.
|
|
- */
|
|
- if (get_futex_value_locked(&curval, uaddr))
|
|
- return -EFAULT;
|
|
-
|
|
- /*
|
|
- * If the owner died or we have a stale
|
|
- * WAITERS bit the owner TID in the user space
|
|
- * futex is 0.
|
|
- */
|
|
- if (!(curval & FUTEX_TID_MASK)) {
|
|
- force_take = 1;
|
|
- goto retry;
|
|
- }
|
|
- default:
|
|
- break;
|
|
- }
|
|
- }
|
|
-
|
|
- return ret;
|
|
+ return attach_to_pi_owner(uval, key, ps);
|
|
}
|
|
|
|
/**
|
|
@@ -1041,22 +1021,6 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
|
|
return 0;
|
|
}
|
|
|
|
-static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
|
|
-{
|
|
- u32 uninitialized_var(oldval);
|
|
-
|
|
- /*
|
|
- * There is no waiter, so we unlock the futex. The owner died
|
|
- * bit has not to be preserved here. We are the owner:
|
|
- */
|
|
- if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
|
|
- return -EFAULT;
|
|
- if (oldval != uval)
|
|
- return -EAGAIN;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
/*
|
|
* Express the locking dependencies for lockdep:
|
|
*/
|
|
@@ -1504,7 +1468,12 @@ retry_private:
|
|
goto retry;
|
|
goto out;
|
|
case -EAGAIN:
|
|
- /* The owner was exiting, try again. */
|
|
+ /*
|
|
+ * Two reasons for this:
|
|
+ * - Owner is exiting and we just wait for the
|
|
+ * exit to complete.
|
|
+ * - The user space value changed.
|
|
+ */
|
|
free_pi_state(pi_state);
|
|
pi_state = NULL;
|
|
double_unlock_hb(hb1, hb2);
|
|
@@ -1565,12 +1534,22 @@ retry_private:
|
|
this->pi_state = pi_state;
|
|
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
|
|
this->rt_waiter,
|
|
- this->task, 1);
|
|
+ this->task);
|
|
if (ret == 1) {
|
|
/* We got the lock. */
|
|
requeue_pi_wake_futex(this, &key2, hb2);
|
|
drop_count++;
|
|
continue;
|
|
+ } else if (ret == -EAGAIN) {
|
|
+ /*
|
|
+ * Waiter was woken by timeout or
|
|
+ * signal and has set pi_blocked_on to
|
|
+ * PI_WAKEUP_INPROGRESS before we
|
|
+ * tried to enqueue it on the rtmutex.
|
|
+ */
|
|
+ this->pi_state = NULL;
|
|
+ free_pi_state(pi_state);
|
|
+ continue;
|
|
} else if (ret) {
|
|
/* -EDEADLK */
|
|
this->pi_state = NULL;
|
|
@@ -2149,8 +2128,10 @@ retry_private:
|
|
goto uaddr_faulted;
|
|
case -EAGAIN:
|
|
/*
|
|
- * Task is exiting and we just wait for the
|
|
- * exit to complete.
|
|
+ * Two reasons for this:
|
|
+ * - Task is exiting and we just wait for the
|
|
+ * exit to complete.
|
|
+ * - The user space value changed.
|
|
*/
|
|
queue_unlock(&q, hb);
|
|
put_futex_key(&q.key);
|
|
@@ -2170,9 +2151,9 @@ retry_private:
|
|
/*
|
|
* Block on the PI mutex:
|
|
*/
|
|
- if (!trylock)
|
|
- ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
|
|
- else {
|
|
+ if (!trylock) {
|
|
+ ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
|
|
+ } else {
|
|
ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
|
|
/* Fixup the trylock return value: */
|
|
ret = ret ? 0 : -EWOULDBLOCK;
|
|
@@ -2234,11 +2215,10 @@ uaddr_faulted:
|
|
*/
|
|
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
|
|
{
|
|
- struct futex_hash_bucket *hb;
|
|
- struct futex_q *this, *next;
|
|
- struct plist_head *head;
|
|
+ u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
|
|
union futex_key key = FUTEX_KEY_INIT;
|
|
- u32 uval, vpid = task_pid_vnr(current);
|
|
+ struct futex_hash_bucket *hb;
|
|
+ struct futex_q *match;
|
|
int ret;
|
|
|
|
retry:
|
|
@@ -2251,61 +2231,50 @@ retry:
|
|
return -EPERM;
|
|
|
|
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
|
|
- if (unlikely(ret != 0))
|
|
- goto out;
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
hb = hash_futex(&key);
|
|
spin_lock(&hb->lock);
|
|
|
|
/*
|
|
- * To avoid races, try to do the TID -> 0 atomic transition
|
|
- * again. If it succeeds then we can return without waking
|
|
- * anyone else up. We only try this if neither the waiters nor
|
|
- * the owner died bit are set.
|
|
- */
|
|
- if (!(uval & ~FUTEX_TID_MASK) &&
|
|
- cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
|
|
- goto pi_faulted;
|
|
- /*
|
|
- * Rare case: we managed to release the lock atomically,
|
|
- * no need to wake anyone else up:
|
|
+ * Check waiters first. We do not trust user space values at
|
|
+ * all and we at least want to know if user space fiddled
|
|
+ * with the futex value instead of blindly unlocking.
|
|
*/
|
|
- if (unlikely(uval == vpid))
|
|
- goto out_unlock;
|
|
-
|
|
- /*
|
|
- * Ok, other tasks may need to be woken up - check waiters
|
|
- * and do the wakeup if necessary:
|
|
- */
|
|
- head = &hb->chain;
|
|
-
|
|
- plist_for_each_entry_safe(this, next, head, list) {
|
|
- if (!match_futex (&this->key, &key))
|
|
- continue;
|
|
- ret = wake_futex_pi(uaddr, uval, this);
|
|
+ match = futex_top_waiter(hb, &key);
|
|
+ if (match) {
|
|
+ ret = wake_futex_pi(uaddr, uval, match);
|
|
/*
|
|
- * The atomic access to the futex value
|
|
- * generated a pagefault, so retry the
|
|
- * user-access and the wakeup:
|
|
+ * The atomic access to the futex value generated a
|
|
+ * pagefault, so retry the user-access and the wakeup:
|
|
*/
|
|
if (ret == -EFAULT)
|
|
goto pi_faulted;
|
|
goto out_unlock;
|
|
}
|
|
+
|
|
/*
|
|
- * No waiters - kernel unlocks the futex:
|
|
+ * We have no kernel internal state, i.e. no waiters in the
|
|
+ * kernel. Waiters which are about to queue themselves are stuck
|
|
+ * on hb->lock. So we can safely ignore them. We neither
+ * preserve the WAITERS bit nor the OWNER_DIED one. We are the
|
|
+ * owner.
|
|
*/
|
|
- ret = unlock_futex_pi(uaddr, uval);
|
|
- if (ret == -EFAULT)
|
|
+ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
|
|
goto pi_faulted;
|
|
|
|
+ /*
|
|
+ * If uval has changed, let user space handle it.
|
|
+ */
|
|
+ ret = (curval == uval) ? 0 : -EAGAIN;
|
|
+
|
|
out_unlock:
|
|
spin_unlock(&hb->lock);
|
|
put_futex_key(&key);
|
|
-
|
|
-out:
|
|
return ret;
|
|
|
|
+
|
|
pi_faulted:
|
|
spin_unlock(&hb->lock);
|
|
put_futex_key(&key);
|
|
@@ -2413,7 +2382,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
|
|
struct hrtimer_sleeper timeout, *to = NULL;
|
|
struct rt_mutex_waiter rt_waiter;
|
|
struct rt_mutex *pi_mutex = NULL;
|
|
- struct futex_hash_bucket *hb;
|
|
+ struct futex_hash_bucket *hb, *hb2;
|
|
union futex_key key2 = FUTEX_KEY_INIT;
|
|
struct futex_q q = futex_q_init;
|
|
int res, ret;
|
|
@@ -2438,8 +2407,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
|
|
* The waiter is allocated on our stack, manipulated by the requeue
|
|
* code while we sleep on uaddr.
|
|
*/
|
|
- debug_rt_mutex_init_waiter(&rt_waiter);
|
|
- rt_waiter.task = NULL;
|
|
+ rt_mutex_init_waiter(&rt_waiter, false);
|
|
|
|
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
|
|
if (unlikely(ret != 0))
|
|
@@ -2470,20 +2438,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
|
|
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
|
|
futex_wait_queue_me(hb, &q, to);
|
|
|
|
- spin_lock(&hb->lock);
|
|
- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
|
|
- spin_unlock(&hb->lock);
|
|
- if (ret)
|
|
- goto out_put_keys;
|
|
+ /*
|
|
+ * On RT we must avoid races with requeue and trying to block
|
|
+ * on two mutexes (hb->lock and uaddr2's rtmutex) by
|
|
+ * serializing access to pi_blocked_on with pi_lock.
|
|
+ */
|
|
+ raw_spin_lock_irq(¤t->pi_lock);
|
|
+ if (current->pi_blocked_on) {
|
|
+ /*
|
|
+ * We have been requeued or are in the process of
|
|
+ * being requeued.
|
|
+ */
|
|
+ raw_spin_unlock_irq(¤t->pi_lock);
|
|
+ } else {
|
|
+ /*
|
|
+ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
|
|
+ * prevents a concurrent requeue from moving us to the
|
|
+ * uaddr2 rtmutex. After that we can safely acquire
|
|
+ * (and possibly block on) hb->lock.
|
|
+ */
|
|
+ current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
|
|
+ raw_spin_unlock_irq(¤t->pi_lock);
|
|
+
|
|
+ spin_lock(&hb->lock);
|
|
+
|
|
+ /*
|
|
+ * Clean up pi_blocked_on. We might leak it otherwise
|
|
+ * when we succeeded with the hb->lock in the fast
|
|
+ * path.
|
|
+ */
|
|
+ raw_spin_lock_irq(¤t->pi_lock);
|
|
+ current->pi_blocked_on = NULL;
|
|
+ raw_spin_unlock_irq(¤t->pi_lock);
|
|
+
|
|
+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
|
|
+ spin_unlock(&hb->lock);
|
|
+ if (ret)
|
|
+ goto out_put_keys;
|
|
+ }
|
|
|
|
/*
|
|
- * In order for us to be here, we know our q.key == key2, and since
|
|
- * we took the hb->lock above, we also know that futex_requeue() has
|
|
- * completed and we no longer have to concern ourselves with a wakeup
|
|
- * race with the atomic proxy lock acquisition by the requeue code. The
|
|
- * futex_requeue dropped our key1 reference and incremented our key2
|
|
- * reference count.
|
|
+ * In order to be here, we have either been requeued, are in
|
|
+ * the process of being requeued, or requeue successfully
|
|
+ * acquired uaddr2 on our behalf. If pi_blocked_on was
|
|
+ * non-null above, we may be racing with a requeue. Do not
|
|
+ * rely on q->lock_ptr to be hb2->lock until after blocking on
|
|
+ * hb->lock or hb2->lock. The futex_requeue dropped our key1
|
|
+ * reference and incremented our key2 reference count.
|
|
*/
|
|
+ hb2 = hash_futex(&key2);
|
|
|
|
/* Check if the requeue code acquired the second futex for us. */
|
|
if (!q.rt_waiter) {
|
|
@@ -2492,9 +2495,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
|
|
* did a lock-steal - fix up the PI-state in that case.
|
|
*/
|
|
if (q.pi_state && (q.pi_state->owner != current)) {
|
|
- spin_lock(q.lock_ptr);
|
|
+ spin_lock(&hb2->lock);
|
|
+ BUG_ON(&hb2->lock != q.lock_ptr);
|
|
ret = fixup_pi_state_owner(uaddr2, &q, current);
|
|
- spin_unlock(q.lock_ptr);
|
|
+ spin_unlock(&hb2->lock);
|
|
}
|
|
} else {
|
|
/*
|
|
@@ -2504,10 +2508,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
|
|
*/
|
|
WARN_ON(!q.pi_state);
|
|
pi_mutex = &q.pi_state->pi_mutex;
|
|
- ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
|
|
+ ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
|
|
debug_rt_mutex_free_waiter(&rt_waiter);
|
|
|
|
- spin_lock(q.lock_ptr);
|
|
+ spin_lock(&hb2->lock);
|
|
+ BUG_ON(&hb2->lock != q.lock_ptr);
|
|
/*
|
|
* Fixup the pi_state owner and possibly acquire the lock if we
|
|
* haven't already.
|
|
|
index 434f2b673d5b..afb62c7cc494 100644
|
|
--- a/kernel/hrtimer.c
|
|
+++ b/kernel/hrtimer.c
|
|
@@ -49,6 +49,7 @@
|
|
#include <asm/uaccess.h>
|
|
|
|
#include <trace/events/timer.h>
|
|
+#include <trace/events/hist.h>
|
|
|
|
/*
|
|
* The timer bases:
|
|
@@ -615,8 +616,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
|
|
* When the callback is running, we do not reprogram the clock event
|
|
* device. The timer callback is either running on a different CPU or
|
|
* the callback is executed in the hrtimer_interrupt context. The
|
|
- * reprogramming is handled either by the softirq, which called the
|
|
- * callback or at the end of the hrtimer_interrupt.
|
|
+ * reprogramming is handled at the end of the hrtimer_interrupt.
|
|
*/
|
|
if (hrtimer_callback_running(timer))
|
|
return 0;
|
|
@@ -651,6 +651,9 @@ static int hrtimer_reprogram(struct hrtimer *timer,
|
|
return res;
|
|
}
|
|
|
|
+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now);
|
|
+static int hrtimer_rt_defer(struct hrtimer *timer);
|
|
+
|
|
/*
|
|
* Initialize the high resolution related parts of cpu_base
|
|
*/
|
|
@@ -759,6 +762,11 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
|
|
}
|
|
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
|
|
static inline void retrigger_next_event(void *arg) { }
|
|
+static inline int hrtimer_reprogram(struct hrtimer *timer,
|
|
+ struct hrtimer_clock_base *base)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
|
|
#endif /* CONFIG_HIGH_RES_TIMERS */
|
|
|
|
@@ -875,6 +883,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
|
|
}
|
|
EXPORT_SYMBOL_GPL(hrtimer_forward);
|
|
|
|
+#ifdef CONFIG_PREEMPT_RT_BASE
|
|
+# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
|
|
+
|
|
+/**
|
|
+ * hrtimer_wait_for_timer - Wait for a running timer
|
|
+ *
|
|
+ * @timer: timer to wait for
|
|
+ *
|
|
+ * The function waits on the waitqueue of the timer base in case
+ * the timer's callback function is currently executing. The
|
|
+ * waitqueue is woken up after the timer callback function has
|
|
+ * finished execution.
|
|
+ */
|
|
+void hrtimer_wait_for_timer(const struct hrtimer *timer)
|
|
+{
|
|
+ struct hrtimer_clock_base *base = timer->base;
|
|
+
|
|
+ if (base && base->cpu_base && !timer->irqsafe)
|
|
+ wait_event(base->cpu_base->wait,
|
|
+ !(timer->state & HRTIMER_STATE_CALLBACK));
|
|
+}
|
|
+
|
|
+#else
|
|
+# define wake_up_timer_waiters(b) do { } while (0)
|
|
+#endif
|
|
+
|
|
/*
|
|
* enqueue_hrtimer - internal function to (re)start a timer
|
|
*
|
|
@@ -918,6 +952,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
|
|
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
|
|
goto out;
|
|
|
|
+ if (unlikely(!list_empty(&timer->cb_entry))) {
|
|
+ list_del_init(&timer->cb_entry);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
next_timer = timerqueue_getnext(&base->active);
|
|
timerqueue_del(&base->active, &timer->node);
|
|
if (&timer->node == next_timer) {
|
|
@@ -1004,6 +1043,17 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
|
|
/* Switch the timer base, if necessary: */
|
|
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
|
|
|
|
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
|
|
+ {
|
|
+ ktime_t now = new_base->get_time();
|
|
+
|
|
+ if (ktime_to_ns(tim) < ktime_to_ns(now))
|
|
+ timer->praecox = now;
|
|
+ else
|
|
+ timer->praecox = ktime_set(0, 0);
|
|
+ }
|
|
+#endif
|
|
+
|
|
timer_stats_hrtimer_set_start_info(timer);
|
|
|
|
leftmost = enqueue_hrtimer(timer, new_base);
|
|
@@ -1016,7 +1066,17 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
|
|
*/
|
|
if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
|
|
&& hrtimer_enqueue_reprogram(timer, new_base)) {
|
|
- if (wakeup) {
|
|
+
|
|
+ if (wakeup
|
|
+#ifdef CONFIG_PREEMPT_RT_BASE
|
|
+ /*
|
|
+ * Move softirq based timers away from the rbtree in
+ * case they expired already. Otherwise we would have a
|
|
+ * stale base->first entry until the softirq runs.
|
|
+ */
|
|
+ && hrtimer_rt_defer(timer)
|
|
+#endif
|
|
+ ) {
|
|
/*
|
|
* We need to drop cpu_base->lock to avoid a
|
|
* lock ordering issue vs. rq->lock.
|
|
@@ -1025,9 +1085,18 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
|
|
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
|
local_irq_restore(flags);
|
|
return ret;
|
|
- } else {
|
|
- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
|
}
|
|
+
|
|
+ /*
|
|
+ * In case we failed to reprogram the timer (mostly
|
|
+ * because our current timer has already elapsed),
|
|
+ * remove it again and report a failure. This avoids
|
|
+ * stale base->first entries.
|
|
+ */
|
|
+ debug_deactivate(timer);
|
|
+ __remove_hrtimer(timer, new_base,
|
|
+ timer->state & HRTIMER_STATE_CALLBACK, 0);
|
|
+ ret = -ETIME;
|
|
}
|
|
|
|
unlock_hrtimer_base(timer, &flags);
|
|
@@ -1114,7 +1183,7 @@ int hrtimer_cancel(struct hrtimer *timer)
|
|
|
|
if (ret >= 0)
|
|
return ret;
|
|
- cpu_relax();
|
|
+ hrtimer_wait_for_timer(timer);
|
|
}
|
|
}
|
|
EXPORT_SYMBOL_GPL(hrtimer_cancel);
|
|
@@ -1193,6 +1262,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
|
|
|
|
base = hrtimer_clockid_to_base(clock_id);
|
|
timer->base = &cpu_base->clock_base[base];
|
|
+ INIT_LIST_HEAD(&timer->cb_entry);
|
|
timerqueue_init(&timer->node);
|
|
|
|
#ifdef CONFIG_TIMER_STATS
|
|
@@ -1276,6 +1346,126 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
|
|
timer->state &= ~HRTIMER_STATE_CALLBACK;
|
|
}
|
|
|
|
+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
|
|
+
|
|
+#ifdef CONFIG_PREEMPT_RT_BASE
|
|
+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
|
|
+ struct hrtimer_clock_base *base)
|
|
+{
|
|
+ /*
|
|
+ * Note, we clear the callback flag before we requeue the
|
|
+ * timer otherwise we trigger the callback_running() check
|
|
+ * in hrtimer_reprogram().
|
|
+ */
|
|
+ timer->state &= ~HRTIMER_STATE_CALLBACK;
|
|
+
|
|
+ if (restart != HRTIMER_NORESTART) {
|
|
+ BUG_ON(hrtimer_active(timer));
|
|
+ /*
|
|
+ * Enqueue the timer, if it's the leftmost timer then
|
|
+ * we need to reprogram it.
|
|
+ */
|
|
+ if (!enqueue_hrtimer(timer, base))
|
|
+ return;
|
|
+
|
|
+#ifndef CONFIG_HIGH_RES_TIMERS
|
|
+ }
|
|
+#else
|
|
+ if (base->cpu_base->hres_active &&
|
|
+ hrtimer_reprogram(timer, base))
|
|
+ goto requeue;
|
|
+
|
|
+ } else if (hrtimer_active(timer)) {
|
|
+ /*
|
|
+ * If the timer was rearmed on another CPU, reprogram
|
|
+ * the event device.
|
|
+ */
|
|
+ if (&timer->node == base->active.next &&
|
|
+ base->cpu_base->hres_active &&
|
|
+ hrtimer_reprogram(timer, base))
|
|
+ goto requeue;
|
|
+ }
|
|
+ return;
|
|
+
|
|
+requeue:
|
|
+ /*
|
|
+ * Timer is expired. Thus move it from tree to pending list
|
|
+ * again.
|
|
+ */
|
|
+ __remove_hrtimer(timer, base, timer->state, 0);
|
|
+ list_add_tail(&timer->cb_entry, &base->expired);
|
|
+#endif
|
|
+}
|
|
+
|
|
+/*
|
|
+ * The changes in mainline which removed the callback modes from
|
|
+ * hrtimer are not yet working with -rt. The non wakeup_process()
|
|
+ * based callbacks which involve sleeping locks need to be treated
|
|
+ * separately.
|
|
+ */
|
|
+static void hrtimer_rt_run_pending(void)
|
|
+{
|
|
+ enum hrtimer_restart (*fn)(struct hrtimer *);
|
|
+ struct hrtimer_cpu_base *cpu_base;
|
|
+ struct hrtimer_clock_base *base;
|
|
+ struct hrtimer *timer;
|
|
+ int index, restart;
|
|
+
|
|
+ local_irq_disable();
|
|
+ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
|
|
+
|
|
+ raw_spin_lock(&cpu_base->lock);
|
|
+
|
|
+ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
|
|
+ base = &cpu_base->clock_base[index];
|
|
+
|
|
+ while (!list_empty(&base->expired)) {
|
|
+ timer = list_first_entry(&base->expired,
|
|
+ struct hrtimer, cb_entry);
|
|
+
|
|
+ /*
|
|
+ * Same as the __run_hrtimer() function above, except
+ * that we run with interrupts enabled.
|
|
+ */
|
|
+ debug_hrtimer_deactivate(timer);
|
|
+ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
|
|
+ timer_stats_account_hrtimer(timer);
|
|
+ fn = timer->function;
|
|
+
|
|
+ raw_spin_unlock_irq(&cpu_base->lock);
|
|
+ restart = fn(timer);
|
|
+ raw_spin_lock_irq(&cpu_base->lock);
|
|
+
|
|
+ hrtimer_rt_reprogram(restart, timer, base);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ raw_spin_unlock_irq(&cpu_base->lock);
|
|
+
|
|
+ wake_up_timer_waiters(cpu_base);
|
|
+}
|
|
+
|
|
+static int hrtimer_rt_defer(struct hrtimer *timer)
|
|
+{
|
|
+ if (timer->irqsafe)
|
|
+ return 0;
|
|
+
|
|
+ __remove_hrtimer(timer, timer->base, timer->state, 0);
|
|
+ list_add_tail(&timer->cb_entry, &timer->base->expired);
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+#else
|
|
+
|
|
+static inline void hrtimer_rt_run_pending(void)
|
|
+{
|
|
+ hrtimer_peek_ahead_timers();
|
|
+}
|
|
+
|
|
+static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
|
|
+
|
|
+#endif
|
|
+
|
|
#ifdef CONFIG_HIGH_RES_TIMERS
|
|
|
|
/*
|
|
@@ -1286,7 +1476,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
|
|
{
|
|
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
|
|
ktime_t expires_next, now, entry_time, delta;
|
|
- int i, retries = 0;
|
|
+ int i, retries = 0, raise = 0;
|
|
|
|
BUG_ON(!cpu_base->hres_active);
|
|
cpu_base->nr_events++;
|
|
@@ -1321,6 +1511,15 @@ retry:
|
|
|
|
timer = container_of(node, struct hrtimer, node);
|
|
|
|
+ trace_hrtimer_interrupt(raw_smp_processor_id(),
|
|
+ ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
|
|
+ timer->praecox : hrtimer_get_expires(timer),
|
|
+ basenow)),
|
|
+ current,
|
|
+ timer->function == hrtimer_wakeup ?
|
|
+ container_of(timer, struct hrtimer_sleeper,
|
|
+ timer)->task : NULL);
|
|
+
|
|
/*
|
|
* The immediate goal for using the softexpires is
|
|
* minimizing wakeups, not running timers at the
|
|
@@ -1346,7 +1545,10 @@ retry:
|
|
break;
|
|
}
|
|
|
|
- __run_hrtimer(timer, &basenow);
|
|
+ if (!hrtimer_rt_defer(timer))
|
|
+ __run_hrtimer(timer, &basenow);
|
|
+ else
|
|
+ raise = 1;
|
|
}
|
|
}
|
|
|
|
@@ -1361,7 +1563,7 @@ retry:
|
|
if (expires_next.tv64 == KTIME_MAX ||
|
|
!tick_program_event(expires_next, 0)) {
|
|
cpu_base->hang_detected = 0;
|
|
- return;
|
|
+ goto out;
|
|
}
|
|
|
|
/*
|
|
@@ -1405,6 +1607,9 @@ retry:
|
|
tick_program_event(expires_next, 1);
|
|
printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
|
|
ktime_to_ns(delta));
|
|
+out:
|
|
+ if (raise)
|
|
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
|
}
|
|
|
|
/*
|
|
@@ -1441,17 +1646,17 @@ void hrtimer_peek_ahead_timers(void)
|
|
local_irq_restore(flags);
|
|
}
|
|
|
|
-static void run_hrtimer_softirq(struct softirq_action *h)
|
|
-{
|
|
- hrtimer_peek_ahead_timers();
|
|
-}
|
|
-
|
|
#else /* CONFIG_HIGH_RES_TIMERS */
|
|
|
|
static inline void __hrtimer_peek_ahead_timers(void) { }
|
|
|
|
#endif /* !CONFIG_HIGH_RES_TIMERS */
|
|
|
|
+static void run_hrtimer_softirq(struct softirq_action *h)
|
|
+{
|
|
+ hrtimer_rt_run_pending();
|
|
+}
|
|
+
|
|
/*
|
|
* Called from timer softirq every jiffy, expire hrtimers:
|
|
*
|
|
@@ -1484,7 +1689,7 @@ void hrtimer_run_queues(void)
|
|
struct timerqueue_node *node;
|
|
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
|
|
struct hrtimer_clock_base *base;
|
|
- int index, gettime = 1;
|
|
+ int index, gettime = 1, raise = 0;
|
|
|
|
if (hrtimer_hres_active())
|
|
return;
|
|
@@ -1509,10 +1714,16 @@ void hrtimer_run_queues(void)
|
|
hrtimer_get_expires_tv64(timer))
|
|
break;
|
|
|
|
- __run_hrtimer(timer, &base->softirq_time);
|
|
+ if (!hrtimer_rt_defer(timer))
|
|
+ __run_hrtimer(timer, &base->softirq_time);
|
|
+ else
|
|
+ raise = 1;
|
|
}
|
|
raw_spin_unlock(&cpu_base->lock);
|
|
}
|
|
+
|
|
+ if (raise)
|
|
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
|
}
|
|
|
|
/*
|
|
@@ -1534,16 +1745,18 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
|
|
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
|
|
{
|
|
sl->timer.function = hrtimer_wakeup;
|
|
+ sl->timer.irqsafe = 1;
|
|
sl->task = task;
|
|
}
|
|
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
|
|
|
|
-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
|
|
+static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
|
|
+ unsigned long state)
|
|
{
|
|
hrtimer_init_sleeper(t, current);
|
|
|
|
do {
|
|
- set_current_state(TASK_INTERRUPTIBLE);
|
|
+ set_current_state(state);
|
|
hrtimer_start_expires(&t->timer, mode);
|
|
if (!hrtimer_active(&t->timer))
|
|
t->task = NULL;
|
|
@@ -1587,7 +1800,8 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
|
|
HRTIMER_MODE_ABS);
|
|
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
|
|
|
|
- if (do_nanosleep(&t, HRTIMER_MODE_ABS))
|
|
+ /* cpu_chill() does not care about restart state. */
|
|
+ if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE))
|
|
goto out;
|
|
|
|
rmtp = restart->nanosleep.rmtp;
|
|
@@ -1604,8 +1818,10 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
|
|
- const enum hrtimer_mode mode, const clockid_t clockid)
|
|
+static long
|
|
+__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
|
|
+ const enum hrtimer_mode mode, const clockid_t clockid,
|
|
+ unsigned long state)
|
|
{
|
|
struct restart_block *restart;
|
|
struct hrtimer_sleeper t;
|
|
@@ -1618,7 +1834,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
|
|
|
|
hrtimer_init_on_stack(&t.timer, clockid, mode);
|
|
hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
|
|
- if (do_nanosleep(&t, mode))
|
|
+ if (do_nanosleep(&t, mode, state))
|
|
goto out;
|
|
|
|
/* Absolute timers do not update the rmtp value and restart: */
|
|
@@ -1645,6 +1861,12 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
|
|
+ const enum hrtimer_mode mode, const clockid_t clockid)
|
|
+{
|
|
+ return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
|
|
+}
|
|
+
|
|
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
|
|
struct timespec __user *, rmtp)
|
|
{
|
|
@@ -1659,6 +1881,26 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
|
|
return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
|
|
}
|
|
|
|
+#ifdef CONFIG_PREEMPT_RT_FULL
|
|
+/*
|
|
+ * Sleep for 1 ms in the hope that whoever holds what we want will let it go.
|
|
+ */
|
|
+void cpu_chill(void)
|
|
+{
|
|
+ struct timespec tu = {
|
|
+ .tv_nsec = NSEC_PER_MSEC,
|
|
+ };
|
|
+ unsigned int freeze_flag = current->flags & PF_NOFREEZE;
|
|
+
|
|
+ current->flags |= PF_NOFREEZE;
|
|
+ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
|
|
+ TASK_UNINTERRUPTIBLE);
|
|
+ if (!freeze_flag)
|
|
+ current->flags &= ~PF_NOFREEZE;
|
|
+}
|
|
+EXPORT_SYMBOL(cpu_chill);
|
|
+#endif
|
|
+
|
|
/*
|
|
* Functions related to boot-time initialization:
|
|
*/
|
|
@@ -1670,9 +1912,13 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
|
|
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
|
|
cpu_base->clock_base[i].cpu_base = cpu_base;
|
|
timerqueue_init_head(&cpu_base->clock_base[i].active);
|
|
+ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
|
|
}
|
|
|
|
hrtimer_init_hres(cpu_base);
|
|
+#ifdef CONFIG_PREEMPT_RT_BASE
|
|
+ init_waitqueue_head(&cpu_base->wait);
|
|
+#endif
|
|
}
|
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
@@ -1785,9 +2031,7 @@ void __init hrtimers_init(void)
|
|
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
|
|
(void *)(long)smp_processor_id());
|
|
register_cpu_notifier(&hrtimers_nb);
|
|
-#ifdef CONFIG_HIGH_RES_TIMERS
|
|
open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
|
|
-#endif
|
|
}
|
|
|
|
/**
|
|
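cpu_chill(), added above, exists to replace cpu_relax() style busy waiting in retry loops: on RT the holder of the contended resource can be a preempted lower priority task, so spinning may never let it run. A sketch of the intended conversion (try_to_grab_resource() is a made-up placeholder):

    /* Before: can live-lock on RT while the holder is preempted. */
    while (!try_to_grab_resource())
        cpu_relax();

    /* After: sleep 1 ms per attempt so the holder can proceed. */
    while (!try_to_grab_resource())
        cpu_chill();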
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
|
|
index 131ca176b497..7f50c558e46b 100644
|
|
--- a/kernel/irq/handle.c
|
|
+++ b/kernel/irq/handle.c
|
|
@@ -132,6 +132,8 @@ static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
|
|
irqreturn_t
|
|
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
|
|
{
|
|
+ struct pt_regs *regs = get_irq_regs();
|
|
+ u64 ip = regs ? instruction_pointer(regs) : 0;
|
|
irqreturn_t retval = IRQ_NONE;
|
|
unsigned int flags = 0, irq = desc->irq_data.irq;
|
|
|
|
@@ -172,7 +174,11 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
|
|
action = action->next;
|
|
} while (action);
|
|
|
|
- add_interrupt_randomness(irq, flags);
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
+ add_interrupt_randomness(irq, flags, ip);
|
|
+#else
|
|
+ desc->random_ip = ip;
|
|
+#endif
|
|
|
|
if (!noirqdebug)
|
|
note_interrupt(irq, desc, retval);
|
|
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
|
|
index 07d45516b540..d91dcd08113d 100644
|
|
--- a/kernel/irq/irqdesc.c
|
|
+++ b/kernel/irq/irqdesc.c
|
|
@@ -23,10 +23,27 @@
|
|
static struct lock_class_key irq_desc_lock_class;
|
|
|
|
#if defined(CONFIG_SMP)
|
|
+static int __init irq_affinity_setup(char *str)
|
|
+{
|
|
+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
|
|
+ cpulist_parse(str, irq_default_affinity);
|
|
+ /*
|
|
+ * Set at least the boot cpu. We don't want to end up with
|
|
+ * bug reports caused by random command line masks
|
|
+ */
|
|
+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
|
|
+ return 1;
|
|
+}
|
|
+__setup("irqaffinity=", irq_affinity_setup);
|
|
+
|
|
static void __init init_irq_default_affinity(void)
|
|
{
|
|
- alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
|
|
- cpumask_setall(irq_default_affinity);
|
|
+#ifdef CONFIG_CPUMASK_OFFSTACK
|
|
+ if (!irq_default_affinity)
|
|
+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
|
|
+#endif
|
|
+ if (cpumask_empty(irq_default_affinity))
|
|
+ cpumask_setall(irq_default_affinity);
|
|
}
|
|
#else
|
|
static void __init init_irq_default_affinity(void)
|
|
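Usage note for the hunk above: booting with, for example, irqaffinity=0-1 limits the default affinity mask of newly set up interrupts to CPUs 0 and 1, keeping the remaining CPUs free for latency sensitive work. The boot CPU is always added back into the mask, so a bad command line value cannot leave interrupts without a target CPU.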
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
|
|
index 9baa6315acf5..3df9d3b78c4a 100644
|
|
--- a/kernel/irq/manage.c
|
|
+++ b/kernel/irq/manage.c
|
|
@@ -18,6 +18,7 @@
|
|
#include "internals.h"
|
|
|
|
#ifdef CONFIG_IRQ_FORCED_THREADING
|
|
+# ifndef CONFIG_PREEMPT_RT_BASE
|
|
__read_mostly bool force_irqthreads;
|
|
|
|
static int __init setup_forced_irqthreads(char *arg)
|
|
@@ -26,6 +27,7 @@ static int __init setup_forced_irqthreads(char *arg)
|
|
return 0;
|
|
}
|
|
early_param("threadirqs", setup_forced_irqthreads);
|
|
+# endif
|
|
#endif
|
|
|
|
/**
|
|
@@ -139,6 +141,62 @@ static inline void
|
|
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PREEMPT_RT_FULL
|
|
+static void _irq_affinity_notify(struct irq_affinity_notify *notify);
|
|
+static struct task_struct *set_affinity_helper;
|
|
+static LIST_HEAD(affinity_list);
|
|
+static DEFINE_RAW_SPINLOCK(affinity_list_lock);
|
|
+
|
|
+static int set_affinity_thread(void *unused)
|
|
+{
|
|
+ while (1) {
|
|
+ struct irq_affinity_notify *notify;
|
|
+ int empty;
|
|
+
|
|
+ set_current_state(TASK_INTERRUPTIBLE);
|
|
+
|
|
+ raw_spin_lock_irq(&affinity_list_lock);
|
|
+ empty = list_empty(&affinity_list);
|
|
+ raw_spin_unlock_irq(&affinity_list_lock);
|
|
+
|
|
+ if (empty)
|
|
+ schedule();
|
|
+ if (kthread_should_stop())
|
|
+ break;
|
|
+ set_current_state(TASK_RUNNING);
|
|
+try_next:
|
|
+ notify = NULL;
|
|
+
|
|
+ raw_spin_lock_irq(&affinity_list_lock);
|
|
+ if (!list_empty(&affinity_list)) {
|
|
+ notify = list_first_entry(&affinity_list,
|
|
+ struct irq_affinity_notify, list);
|
|
+ list_del_init(¬ify->list);
|
|
+ }
|
|
+ raw_spin_unlock_irq(&affinity_list_lock);
|
|
+
|
|
+ if (!notify)
|
|
+ continue;
|
|
+ _irq_affinity_notify(notify);
|
|
+ goto try_next;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void init_helper_thread(void)
|
|
+{
|
|
+ if (set_affinity_helper)
|
|
+ return;
|
|
+ set_affinity_helper = kthread_run(set_affinity_thread, NULL,
|
|
+ "affinity-cb");
|
|
+ WARN_ON(IS_ERR(set_affinity_helper));
|
|
+}
|
|
+#else
|
|
+
|
|
+static inline void init_helper_thread(void) { }
|
|
+
|
|
+#endif
|
|
+
|
|
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
|
|
{
|
|
struct irq_chip *chip = irq_data_get_irq_chip(data);
|
|
@@ -164,7 +222,17 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
|
|
|
|
if (desc->affinity_notify) {
|
|
kref_get(&desc->affinity_notify->kref);
|
|
+
|
|
+#ifdef CONFIG_PREEMPT_RT_FULL
|
|
+ raw_spin_lock(&affinity_list_lock);
|
|
+ if (list_empty(&desc->affinity_notify->list))
|
|
+ list_add_tail(&affinity_list,
|
|
+ &desc->affinity_notify->list);
|
|
+ raw_spin_unlock(&affinity_list_lock);
|
|
+ wake_up_process(set_affinity_helper);
|
|
+#else
|
|
schedule_work(&desc->affinity_notify->work);
|
|
+#endif
|
|
}
|
|
irqd_set(data, IRQD_AFFINITY_SET);
|
|
|
|
@@ -205,10 +273,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
|
|
}
|
|
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
|
|
|
|
-static void irq_affinity_notify(struct work_struct *work)
|
|
+static void _irq_affinity_notify(struct irq_affinity_notify *notify)
|
|
{
|
|
- struct irq_affinity_notify *notify =
|
|
- container_of(work, struct irq_affinity_notify, work);
|
|
struct irq_desc *desc = irq_to_desc(notify->irq);
|
|
cpumask_var_t cpumask;
|
|
unsigned long flags;
|
|
@@ -230,6 +296,13 @@ out:
|
|
kref_put(¬ify->kref, notify->release);
|
|
}
|
|
|
|
+static void irq_affinity_notify(struct work_struct *work)
|
|
+{
|
|
+ struct irq_affinity_notify *notify =
|
|
+ container_of(work, struct irq_affinity_notify, work);
|
|
+ _irq_affinity_notify(notify);
|
|
+}
|
|
+
|
|
/**
|
|
* irq_set_affinity_notifier - control notification of IRQ affinity changes
|
|
* @irq: Interrupt for which to enable/disable notification
|
|
@@ -259,6 +332,8 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
|
|
notify->irq = irq;
|
|
kref_init(¬ify->kref);
|
|
INIT_WORK(¬ify->work, irq_affinity_notify);
|
|
+ INIT_LIST_HEAD(¬ify->list);
|
|
+ init_helper_thread();
|
|
}
|
|
|
|
raw_spin_lock_irqsave(&desc->lock, flags);
|
|
@@ -756,7 +831,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
|
|
local_bh_disable();
|
|
ret = action->thread_fn(action->irq, action->dev_id);
|
|
irq_finalize_oneshot(desc, action);
|
|
- local_bh_enable();
|
|
+ /*
|
|
+ * Interrupts which have real time requirements can be set up
|
|
+ * to avoid softirq processing in the thread handler. This is
|
|
+ * safe as these interrupts do not raise soft interrupts.
|
|
+ */
|
|
+ if (irq_settings_no_softirq_call(desc))
|
|
+ _local_bh_enable();
|
|
+ else
|
|
+ local_bh_enable();
|
|
return ret;
|
|
}
|
|
|
|
@@ -786,9 +869,6 @@ static void wake_threads_waitq(struct irq_desc *desc)
|
|
*/
|
|
static int irq_thread(void *data)
|
|
{
|
|
- static const struct sched_param param = {
|
|
- .sched_priority = MAX_USER_RT_PRIO/2,
|
|
- };
|
|
struct irqaction *action = data;
|
|
struct irq_desc *desc = irq_to_desc(action->irq);
|
|
irqreturn_t (*handler_fn)(struct irq_desc *desc,
|
|
@@ -800,7 +880,6 @@ static int irq_thread(void *data)
|
|
else
|
|
handler_fn = irq_thread_fn;
|
|
|
|
- sched_setscheduler(current, SCHED_FIFO, &param);
|
|
current->irq_thread = 1;
|
|
|
|
while (!irq_wait_for_interrupt(action)) {
|
|
@@ -812,6 +891,12 @@ static int irq_thread(void *data)
|
|
if (action_ret == IRQ_HANDLED)
|
|
atomic_inc(&desc->threads_handled);
|
|
|
|
+#ifdef CONFIG_PREEMPT_RT_FULL
|
|
+ migrate_disable();
|
|
+ add_interrupt_randomness(action->irq, 0,
|
|
+ desc->random_ip ^ (u64) action);
|
|
+ migrate_enable();
|
|
+#endif
|
|
wake_threads_waitq(desc);
|
|
}
|
|
|
|
@@ -927,6 +1012,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
|
|
*/
|
|
if (new->thread_fn && !nested) {
|
|
struct task_struct *t;
|
|
+ static const struct sched_param param = {
|
|
+ .sched_priority = MAX_USER_RT_PRIO/2,
|
|
+ };
|
|
|
|
t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
|
|
new->name);
|
|
@@ -934,6 +1022,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
|
|
ret = PTR_ERR(t);
|
|
goto out_mput;
|
|
}
|
|
+
|
|
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
|
|
+
|
|
/*
|
|
* We keep the reference to the task struct even if
|
|
* the thread dies to avoid that the interrupt code
|
|
@@ -1071,6 +1162,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
|
|
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
|
|
}
|
|
|
|
+ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
|
|
+ irq_settings_set_no_softirq_call(desc);
|
|
+
|
|
/* Set default affinity mask once everything is setup */
|
|
setup_affinity(irq, desc, mask);
|
|
|
|
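A hedged example of how the new flag is expected to be used by a driver (IRQF_NO_SOFTIRQ_CALL itself comes from the interrupt header part of this series, and my_rt_thread_fn is an illustrative name, not part of the patch):

    /*
     * Keep softirq processing out of this threaded handler so the
     * hard RT interrupt thread is never charged with softirq work.
     * Only valid when the handler does not raise soft interrupts.
     */
    ret = request_threaded_irq(irq, NULL, my_rt_thread_fn,
                               IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
                               "my-rt-device", dev);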
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
|
|
index 1162f1030f18..0d2c381c2bdb 100644
|
|
--- a/kernel/irq/settings.h
|
|
+++ b/kernel/irq/settings.h
|
|
@@ -14,6 +14,7 @@ enum {
|
|
_IRQ_NO_BALANCING = IRQ_NO_BALANCING,
|
|
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
|
|
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
|
|
+ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
|
|
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
|
|
};
|
|
|
|
@@ -26,6 +27,7 @@ enum {
|
|
#define IRQ_NOAUTOEN GOT_YOU_MORON
|
|
#define IRQ_NESTED_THREAD GOT_YOU_MORON
|
|
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
|
|
+#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
|
|
#undef IRQF_MODIFY_MASK
|
|
#define IRQF_MODIFY_MASK GOT_YOU_MORON
|
|
|
|
@@ -36,6 +38,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
|
|
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
|
|
}
|
|
|
|
+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
|
|
+{
|
|
+ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
|
|
+}
|
|
+
|
|
+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
|
|
+{
|
|
+ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
|
|
+}
|
|
+
|
|
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
|
|
{
|
|
return desc->status_use_accessors & _IRQ_PER_CPU;
|
|
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
|
|
index febcee3c2aa9..59745301395d 100644
|
|
--- a/kernel/irq/spurious.c
|
|
+++ b/kernel/irq/spurious.c
|
|
@@ -438,6 +438,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
|
|
|
|
static int __init irqfixup_setup(char *str)
|
|
{
|
|
+#ifdef CONFIG_PREEMPT_RT_BASE
|
|
+ printk(KERN_WARNING "irqfixup boot option not supported "
|
|
+ "w/ CONFIG_PREEMPT_RT\n");
|
|
+ return 1;
|
|
+#endif
|
|
irqfixup = 1;
|
|
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
|
|
printk(KERN_WARNING "This may impact system performance.\n");
|
|
@@ -450,6 +455,11 @@ module_param(irqfixup, int, 0644);
|
|
|
|
static int __init irqpoll_setup(char *str)
|
|
{
|
|
+#ifdef CONFIG_PREEMPT_RT_BASE
|
|
+ printk(KERN_WARNING "irqpoll boot option not supported "
|
|
+ "w/ CONFIG_PREEMPT_RT\n");
|
|
+ return 1;
|
|
+#endif
|
|
irqfixup = 2;
|
|
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
|
|
"enabled\n");
|
|
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
|
|
index 1588e3b2871b..170c2eabc00f 100644
|
|
--- a/kernel/irq_work.c
|
|
+++ b/kernel/irq_work.c
|
|
@@ -107,8 +107,10 @@ void irq_work_run(void)
|
|
if (llist_empty(this_list))
|
|
return;
|
|
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
BUG_ON(!in_irq());
|
|
BUG_ON(!irqs_disabled());
|
|
+#endif
|
|
|
|
llnode = llist_del_all(this_list);
|
|
while (llnode != NULL) {
|
|
diff --git a/kernel/itimer.c b/kernel/itimer.c
|
|
index 8d262b467573..d0513909d663 100644
|
|
--- a/kernel/itimer.c
|
|
+++ b/kernel/itimer.c
|
|
@@ -213,6 +213,7 @@ again:
|
|
/* We are sharing ->siglock with it_real_fn() */
|
|
if (hrtimer_try_to_cancel(timer) < 0) {
|
|
spin_unlock_irq(&tsk->sighand->siglock);
|
|
+ hrtimer_wait_for_timer(&tsk->signal->real_timer);
|
|
goto again;
|
|
}
|
|
expires = timeval_to_ktime(value->it_value);
|
|
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
|
|
index 4e316e1acf58..a546d3303a2c 100644
|
|
--- a/kernel/ksysfs.c
|
|
+++ b/kernel/ksysfs.c
|
|
@@ -133,6 +133,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
|
|
|
|
#endif /* CONFIG_KEXEC */
|
|
|
|
+#if defined(CONFIG_PREEMPT_RT_FULL)
|
|
+static ssize_t realtime_show(struct kobject *kobj,
|
|
+ struct kobj_attribute *attr, char *buf)
|
|
+{
|
|
+ return sprintf(buf, "%d\n", 1);
|
|
+}
|
|
+KERNEL_ATTR_RO(realtime);
|
|
+#endif
|
|
+
|
|
/* whether file capabilities are enabled */
|
|
static ssize_t fscaps_show(struct kobject *kobj,
|
|
struct kobj_attribute *attr, char *buf)
|
|
@@ -182,6 +191,9 @@ static struct attribute * kernel_attrs[] = {
|
|
&kexec_crash_size_attr.attr,
|
|
&vmcoreinfo_attr.attr,
|
|
#endif
|
|
+#ifdef CONFIG_PREEMPT_RT_FULL
|
|
+ &realtime_attr.attr,
|
|
+#endif
|
|
NULL
|
|
};
|
|
|
|
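Usage note for the hunk above: this gives user space a stable probe for an RT kernel. Reading /sys/kernel/realtime returns "1" on a PREEMPT_RT_FULL kernel, and the attribute simply does not exist on any other configuration.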
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
|
|
index ea9ee4518c35..6537c1ccbe28 100644
|
|
--- a/kernel/lockdep.c
|
|
+++ b/kernel/lockdep.c
|
|
@@ -3495,6 +3495,7 @@ static void check_flags(unsigned long flags)
|
|
}
|
|
}
|
|
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
/*
|
|
* We dont accurately track softirq state in e.g.
|
|
* hardirq contexts (such as on 4KSTACKS), so only
|
|
@@ -3509,6 +3510,7 @@ static void check_flags(unsigned long flags)
|
|
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
|
|
}
|
|
}
|
|
+#endif
|
|
|
|
if (!debug_locks)
|
|
print_irqtrace_events(current);
|
|
diff --git a/kernel/panic.c b/kernel/panic.c
|
|
index 10b7685997c3..c93d13504228 100644
|
|
--- a/kernel/panic.c
|
|
+++ b/kernel/panic.c
|
|
@@ -371,9 +371,11 @@ static u64 oops_id;
|
|
|
|
static int init_oops_id(void)
|
|
{
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
if (!oops_id)
|
|
get_random_bytes(&oops_id, sizeof(oops_id));
|
|
else
|
|
+#endif
|
|
oops_id++;
|
|
|
|
return 0;
|
|
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
|
|
index acbb79c9092c..54f30e425f42 100644
|
|
--- a/kernel/posix-cpu-timers.c
|
|
+++ b/kernel/posix-cpu-timers.c
|
|
@@ -682,7 +682,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
|
|
/*
|
|
* Disarm any old timer after extracting its expiry time.
|
|
*/
|
|
- BUG_ON(!irqs_disabled());
|
|
+ BUG_ON_NONRT(!irqs_disabled());
|
|
|
|
ret = 0;
|
|
old_incr = timer->it.cpu.incr;
|
|
@@ -1198,7 +1198,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
|
|
/*
|
|
* Now re-arm for the new expiry time.
|
|
*/
|
|
- BUG_ON(!irqs_disabled());
|
|
+ BUG_ON_NONRT(!irqs_disabled());
|
|
arm_timer(timer);
|
|
spin_unlock(&p->sighand->siglock);
|
|
|
|
@@ -1262,10 +1262,11 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
|
|
sig = tsk->signal;
|
|
if (sig->cputimer.running) {
|
|
struct task_cputime group_sample;
|
|
+ unsigned long flags;
|
|
|
|
- raw_spin_lock(&sig->cputimer.lock);
|
|
+ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
|
|
group_sample = sig->cputimer.cputime;
|
|
- raw_spin_unlock(&sig->cputimer.lock);
|
|
+ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
|
|
|
|
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
|
|
return 1;
|
|
@@ -1279,13 +1280,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
|
|
* already updated our counts. We need to check if any timers fire now.
|
|
* Interrupts are disabled.
|
|
*/
|
|
-void run_posix_cpu_timers(struct task_struct *tsk)
|
|
+static void __run_posix_cpu_timers(struct task_struct *tsk)
|
|
{
|
|
LIST_HEAD(firing);
|
|
struct k_itimer *timer, *next;
|
|
unsigned long flags;
|
|
|
|
- BUG_ON(!irqs_disabled());
|
|
+ BUG_ON_NONRT(!irqs_disabled());
|
|
|
|
/*
|
|
* The fast path checks that there are no expired thread or thread
|
|
@@ -1343,6 +1344,190 @@ void run_posix_cpu_timers(struct task_struct *tsk)
|
|
}
|
|
}
|
|
|
|
+#ifdef CONFIG_PREEMPT_RT_BASE
|
|
+#include <linux/kthread.h>
|
|
+#include <linux/cpu.h>
|
|
+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
|
|
+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
|
|
+
|
|
+static int posix_cpu_timers_thread(void *data)
|
|
+{
|
|
+ int cpu = (long)data;
|
|
+
|
|
+ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
|
|
+
|
|
+ while (!kthread_should_stop()) {
|
|
+ struct task_struct *tsk = NULL;
|
|
+ struct task_struct *next = NULL;
|
|
+
|
|
+ if (cpu_is_offline(cpu))
|
|
+ goto wait_to_die;
|
|
+
|
|
+ /* grab task list */
|
|
+ raw_local_irq_disable();
|
|
+ tsk = per_cpu(posix_timer_tasklist, cpu);
|
|
+ per_cpu(posix_timer_tasklist, cpu) = NULL;
|
|
+ raw_local_irq_enable();
|
|
+
|
|
+ /* it's possible the list is empty, just return */
|
|
+ if (!tsk) {
|
|
+ set_current_state(TASK_INTERRUPTIBLE);
|
|
+ schedule();
|
|
+ __set_current_state(TASK_RUNNING);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Process task list */
|
|
+ while (1) {
|
|
+ /* save next */
|
|
+ next = tsk->posix_timer_list;
|
|
+
|
|
+ /* run the task timers, clear the task's list pointer
+ * and drop the reference we took when queueing it
+ */
|
|
+ __run_posix_cpu_timers(tsk);
|
|
+ tsk->posix_timer_list = NULL;
|
|
+ put_task_struct(tsk);
|
|
+
|
|
+ /* check if this is the last on the list */
|
|
+ if (next == tsk)
|
|
+ break;
|
|
+ tsk = next;
|
|
+ }
|
|
+ }
|
|
+ return 0;
|
|
+
|
|
+wait_to_die:
|
|
+ /* Wait for kthread_stop */
|
|
+ set_current_state(TASK_INTERRUPTIBLE);
|
|
+ while (!kthread_should_stop()) {
|
|
+ schedule();
|
|
+ set_current_state(TASK_INTERRUPTIBLE);
|
|
+ }
|
|
+ __set_current_state(TASK_RUNNING);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline int __fastpath_timer_check(struct task_struct *tsk)
|
|
+{
|
|
+ /* tsk == current, ensure it is safe to use ->signal/sighand */
|
|
+ if (unlikely(tsk->exit_state))
|
|
+ return 0;
|
|
+
|
|
+ if (!task_cputime_zero(&tsk->cputime_expires))
|
|
+ return 1;
|
|
+
|
|
+ if (!task_cputime_zero(&tsk->signal->cputime_expires))
|
|
+ return 1;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void run_posix_cpu_timers(struct task_struct *tsk)
|
|
+{
|
|
+ unsigned long cpu = smp_processor_id();
|
|
+ struct task_struct *tasklist;
|
|
+
|
|
+ BUG_ON(!irqs_disabled());
|
|
+ if (!per_cpu(posix_timer_task, cpu))
|
|
+ return;
|
|
+ /* get per-cpu references */
|
|
+ tasklist = per_cpu(posix_timer_tasklist, cpu);
|
|
+
|
|
+ /* check to see if we're already queued */
|
|
+ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
|
|
+ get_task_struct(tsk);
|
|
+ if (tasklist) {
|
|
+ tsk->posix_timer_list = tasklist;
|
|
+ } else {
|
|
+ /*
|
|
+ * The list is terminated by a self-pointing
|
|
+ * task_struct
|
|
+ */
|
|
+ tsk->posix_timer_list = tsk;
|
|
+ }
|
|
+ per_cpu(posix_timer_tasklist, cpu) = tsk;
|
|
+
|
|
+ wake_up_process(per_cpu(posix_timer_task, cpu));
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
|
|
+ * Here we can start up the necessary posix timer thread for the new CPU.
|
|
+ */
|
|
+static int posix_cpu_thread_call(struct notifier_block *nfb,
|
|
+ unsigned long action, void *hcpu)
|
|
+{
|
|
+ int cpu = (long)hcpu;
|
|
+ struct task_struct *p;
|
|
+ struct sched_param param;
|
|
+
|
|
+ switch (action) {
|
|
+ case CPU_UP_PREPARE:
|
|
+ p = kthread_create(posix_cpu_timers_thread, hcpu,
|
|
+ "posixcputmr/%d",cpu);
|
|
+ if (IS_ERR(p))
|
|
+ return NOTIFY_BAD;
|
|
+ p->flags |= PF_NOFREEZE;
|
|
+ kthread_bind(p, cpu);
|
|
+ /* Must be high prio to avoid getting starved */
|
|
+ param.sched_priority = MAX_RT_PRIO-1;
|
|
+ sched_setscheduler(p, SCHED_FIFO, &param);
|
|
+ per_cpu(posix_timer_task, cpu) = p;
|
|
+ break;
|
|
+ case CPU_ONLINE:
|
|
+ /* Strictly unnecessary, as the first user will wake it. */
|
|
+ wake_up_process(per_cpu(posix_timer_task, cpu));
|
|
+ break;
|
|
+#ifdef CONFIG_HOTPLUG_CPU
|
|
+ case CPU_UP_CANCELED:
|
|
+ /* Unbind it from offline cpu so it can run. Fall thru. */
|
|
+ kthread_bind(per_cpu(posix_timer_task, cpu),
|
|
+ cpumask_any(cpu_online_mask));
|
|
+ kthread_stop(per_cpu(posix_timer_task, cpu));
+ per_cpu(posix_timer_task, cpu) = NULL;
|
|
+ break;
|
|
+ case CPU_DEAD:
|
|
+ kthread_stop(per_cpu(posix_timer_task, cpu));
+ per_cpu(posix_timer_task, cpu) = NULL;
|
|
+ break;
|
|
+#endif
|
|
+ }
|
|
+ return NOTIFY_OK;
|
|
+}
|
|
+
|
|
+/* Register at highest priority so that task migration (migrate_all_tasks)
|
|
+ * happens before everything else.
|
|
+ */
|
|
+static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
|
|
+ .notifier_call = posix_cpu_thread_call,
|
|
+ .priority = 10
|
|
+};
|
|
+
|
|
+static int __init posix_cpu_thread_init(void)
|
|
+{
|
|
+ void *hcpu = (void *)(long)smp_processor_id();
|
|
+ /* Start one for boot CPU. */
|
|
+ unsigned long cpu;
|
|
+
|
|
+ /* init the per-cpu posix_timer_tasklets */
|
|
+ for_each_possible_cpu(cpu)
|
|
+ per_cpu(posix_timer_tasklist, cpu) = NULL;
|
|
+
|
|
+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
|
|
+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
|
|
+ register_cpu_notifier(&posix_cpu_thread_notifier);
|
|
+ return 0;
|
|
+}
|
|
+early_initcall(posix_cpu_thread_init);
|
|
+#else /* CONFIG_PREEMPT_RT_BASE */
|
|
+void run_posix_cpu_timers(struct task_struct *tsk)
|
|
+{
|
|
+ __run_posix_cpu_timers(tsk);
|
|
+}
|
|
+#endif /* CONFIG_PREEMPT_RT_BASE */
|
|
+
|
|
/*
|
|
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
|
|
* The tsk->sighand->siglock must be held by the caller.
|
|
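
The per-CPU list handed to posixcputmr/N above is a singly linked list
whose tail points at itself rather than at NULL, so "next == tsk" marks
the end. A minimal userspace sketch of that idiom follows; all names
are illustrative only, not kernel API:

	#include <stdio.h>

	struct node {
		int id;
		struct node *next;	/* tail points at itself */
	};

	static struct node *head;

	static void push(struct node *n)
	{
		/* An empty list starts with a self-pointing tail. */
		n->next = head ? head : n;
		head = n;
	}

	static void drain(void)
	{
		struct node *n = head;

		head = NULL;
		while (n) {
			struct node *next = n->next;

			printf("node %d\n", n->id);
			if (next == n)	/* self-pointer: last entry */
				break;
			n = next;
		}
	}

	int main(void)
	{
		struct node a = { .id = 1 }, b = { .id = 2 };

		push(&a);
		push(&b);
		drain();
		return 0;
	}
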
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 02824a5c2693..f1836907165a 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -439,6 +439,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 static struct pid *good_sigevent(sigevent_t * event)
 {
 	struct task_struct *rtn = current->group_leader;
+	int sig = event->sigev_signo;
 
 	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
 		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
@@ -447,7 +448,8 @@ static struct pid *good_sigevent(sigevent_t * event)
 		return NULL;
 
 	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
+	    (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
+	     sig_kernel_coredump(sig)))
 		return NULL;
 
 	return task_pid(rtn);
@@ -772,6 +774,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
 	return overrun;
 }
 
+/*
+ * Protected by RCU!
+ */
+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (kc->timer_set == common_timer_set)
+		hrtimer_wait_for_timer(&timr->it.real.timer);
+	else
+		/* FIXME: Whacky hack for posix-cpu-timers */
+		schedule_timeout(1);
+#endif
+}
+
 /* Set a POSIX.1b interval timer. */
 /* timr->it_lock is taken. */
 static int
@@ -849,6 +865,7 @@ retry:
 	if (!timr)
 		return -EINVAL;
 
+	rcu_read_lock();
 	kc = clockid_to_kclock(timr->it_clock);
 	if (WARN_ON_ONCE(!kc || !kc->timer_set))
 		error = -EINVAL;
@@ -857,9 +874,12 @@ retry:
 
 	unlock_timer(timr, flag);
 	if (error == TIMER_RETRY) {
+		timer_wait_for_callback(kc, timr);
 		rtn = NULL;	// We already got the old time...
+		rcu_read_unlock();
 		goto retry;
 	}
+	rcu_read_unlock();
 
 	if (old_setting && !error &&
 	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
@@ -897,10 +917,15 @@ retry_delete:
 	if (!timer)
 		return -EINVAL;
 
+	rcu_read_lock();
 	if (timer_delete_hook(timer) == TIMER_RETRY) {
 		unlock_timer(timer, flags);
+		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+					timer);
+		rcu_read_unlock();
 		goto retry_delete;
 	}
+	rcu_read_unlock();
 
 	spin_lock(&current->sighand->siglock);
 	list_del(&timer->list);
@@ -926,8 +951,18 @@ static void itimer_delete(struct k_itimer *timer)
 retry_delete:
 	spin_lock_irqsave(&timer->it_lock, flags);
 
+	/* On RT we can race with a deletion */
+	if (!timer->it_signal) {
+		unlock_timer(timer, flags);
+		return;
+	}
+
 	if (timer_delete_hook(timer) == TIMER_RETRY) {
+		rcu_read_lock();
 		unlock_timer(timer, flags);
+		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+					timer);
+		rcu_read_unlock();
 		goto retry_delete;
 	}
 	list_del(&timer->list);
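
With the good_sigevent() change above, signals that the kernel handles
internally can no longer be attached to a POSIX timer. A hedged
userspace sketch of the expected behaviour (the exact errno is an
assumption); build with -lrt:

	#include <signal.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct sigevent sev = {
			.sigev_notify = SIGEV_SIGNAL,
			.sigev_signo  = SIGKILL,   /* sig_kernel_only() */
		};
		timer_t tid;

		if (timer_create(CLOCK_MONOTONIC, &sev, &tid) < 0)
			perror("timer_create(SIGKILL)"); /* likely EINVAL */

		sev.sigev_signo = SIGRTMIN;	/* ordinary RT signal */
		if (timer_create(CLOCK_MONOTONIC, &sev, &tid) == 0)
			printf("timer_create(SIGRTMIN) ok\n");
		return 0;
	}
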
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index dd875cbe0d1a..0d67b2b2c77f 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -270,6 +270,8 @@ static int create_image(int platform_mode)
 
 	local_irq_disable();
 
+	system_state = SYSTEM_SUSPEND;
+
 	error = syscore_suspend();
 	if (error) {
 		printk(KERN_ERR "PM: Some system devices failed to power down, "
@@ -297,6 +299,7 @@ static int create_image(int platform_mode)
 	syscore_resume();
 
 Enable_irqs:
+	system_state = SYSTEM_RUNNING;
 	local_irq_enable();
 
 Enable_cpus:
@@ -422,6 +425,7 @@ static int resume_target_kernel(bool platform_mode)
 		goto Enable_cpus;
 
 	local_irq_disable();
+	system_state = SYSTEM_SUSPEND;
 
 	error = syscore_suspend();
 	if (error)
@@ -455,6 +459,7 @@ static int resume_target_kernel(bool platform_mode)
 	syscore_resume();
 
 Enable_irqs:
+	system_state = SYSTEM_RUNNING;
 	local_irq_enable();
 
 Enable_cpus:
@@ -543,6 +548,7 @@ int hibernation_platform_enter(void)
 		goto Platform_finish;
 
 	local_irq_disable();
+	system_state = SYSTEM_SUSPEND;
 	syscore_suspend();
 	if (pm_wakeup_pending()) {
 		error = -EAGAIN;
@@ -555,6 +561,7 @@ int hibernation_platform_enter(void)
 
 Power_up:
 	syscore_resume();
+	system_state = SYSTEM_RUNNING;
 	local_irq_enable();
 	enable_nonboot_cpus();
 
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c8b7446b27df..ff2dadea9bd8 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -165,6 +165,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 	arch_suspend_disable_irqs();
 	BUG_ON(!irqs_disabled());
 
+	system_state = SYSTEM_SUSPEND;
+
 	error = syscore_suspend();
 	if (!error) {
 		*wakeup = pm_wakeup_pending();
@@ -175,6 +177,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 		syscore_resume();
 	}
 
+	system_state = SYSTEM_RUNNING;
+
 	arch_suspend_enable_irqs();
 	BUG_ON(irqs_disabled());
 
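
A sketch of what the new SYSTEM_SUSPEND window enables: code running
between syscore_suspend() and syscore_resume() can key off the global
system_state instead of a private flag. The consumer shown here is an
assumption for illustration, not part of the patch:

	#include <linux/kernel.h>

	static bool demo_in_suspend_window(void)
	{
		/* True only while irqs are off for the machine
		 * transition staked out above. */
		return system_state == SYSTEM_SUSPEND;
	}
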
diff --git a/kernel/printk.c b/kernel/printk.c
index cb72d9c406cd..116fbb5c64f4 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -47,13 +47,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/printk.h>
 
-/*
- * Architectures can override it:
- */
-void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
-{
-}
-
 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
 
 /* printk's without a loglevel use this.. */
@@ -514,6 +507,7 @@ static void __call_console_drivers(unsigned start, unsigned end)
 {
 	struct console *con;
 
+	migrate_disable();
 	for_each_console(con) {
 		if (exclusive_console && con != exclusive_console)
 			continue;
@@ -522,8 +516,65 @@ static void __call_console_drivers(unsigned start, unsigned end)
 				(con->flags & CON_ANYTIME)))
 			con->write(con, &LOG_BUF(start), end - start);
 	}
+	migrate_enable();
+}
+
+#ifdef CONFIG_EARLY_PRINTK
+struct console *early_console;
+
+static void early_vprintk(const char *fmt, va_list ap)
+{
+	if (early_console) {
+		char buf[512];
+		int n = vscnprintf(buf, sizeof(buf), fmt, ap);
+
+		early_console->write(early_console, buf, n);
+	}
+}
+
+asmlinkage void early_printk(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	early_vprintk(fmt, ap);
+	va_end(ap);
+}
+
+/*
+ * This is independent of any log levels - a global
+ * kill switch that turns off all of printk.
+ *
+ * Used by the NMI watchdog if early-printk is enabled.
+ */
+static bool __read_mostly printk_killswitch;
+
+static int __init force_early_printk_setup(char *str)
+{
+	printk_killswitch = true;
+	return 0;
+}
+early_param("force_early_printk", force_early_printk_setup);
+
+void printk_kill(void)
+{
+	printk_killswitch = true;
 }
 
+static int forced_early_printk(const char *fmt, va_list ap)
+{
+	if (!printk_killswitch)
+		return 0;
+	early_vprintk(fmt, ap);
+	return 1;
+}
+#else
+static inline int forced_early_printk(const char *fmt, va_list ap)
+{
+	return 0;
+}
+#endif
+
 static bool __read_mostly ignore_loglevel;
 
 static int __init ignore_loglevel_setup(char *str)
@@ -801,12 +852,18 @@ static inline int can_use_console(unsigned int cpu)
  * interrupts disabled. It should return with 'lockbuf_lock'
  * released but interrupts still disabled.
  */
-static int console_trylock_for_printk(unsigned int cpu)
+static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
 	__releases(&logbuf_lock)
 {
 	int retval = 0, wake = 0;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
+		(preempt_count() <= 1);
+#else
+	int lock = 1;
+#endif
 
-	if (console_trylock()) {
+	if (lock && console_trylock()) {
 		retval = 1;
 
 		/*
@@ -857,6 +914,13 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	size_t plen;
 	char special;
 
+	/*
+	 * Fall back to early_printk if a debugging subsystem has
+	 * killed printk output
+	 */
+	if (unlikely(forced_early_printk(fmt, args)))
+		return 1;
+
 	boot_delay_msec();
 	printk_delay();
 
@@ -976,8 +1040,15 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	 * will release 'logbuf_lock' regardless of whether it
 	 * actually gets the semaphore or not.
 	 */
-	if (console_trylock_for_printk(this_cpu))
+	if (console_trylock_for_printk(this_cpu, flags)) {
+#ifndef CONFIG_PREEMPT_RT_FULL
+		console_unlock();
+#else
+		raw_local_irq_restore(flags);
 		console_unlock();
+		raw_local_irq_save(flags);
+#endif
+	}
 
 	lockdep_on();
 out_restore_irqs:
@@ -1252,8 +1323,8 @@ void printk_tick(void)
 
 int printk_needs_cpu(int cpu)
 {
-	if (cpu_is_offline(cpu))
-		printk_tick();
+	if (unlikely(cpu_is_offline(cpu)))
+		__this_cpu_write(printk_pending, 0);
 	return __this_cpu_read(printk_pending);
 }
 
@@ -1299,11 +1370,16 @@ again:
 		_con_start = con_start;
 		_log_end = log_end;
 		con_start = log_end;	/* Flush */
+#ifndef CONFIG_PREEMPT_RT_FULL
 		raw_spin_unlock(&logbuf_lock);
 		stop_critical_timings();	/* don't trace print latency */
 		call_console_drivers(_con_start, _log_end);
 		start_critical_timings();
 		local_irq_restore(flags);
+#else
+		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+		call_console_drivers(_con_start, _log_end);
+#endif
 	}
 	console_locked = 0;
 
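
The early_vprintk() path above only does something once someone assigns
early_console. A hedged sketch of a minimal boot console that could be
plugged in; the UART helper is an assumed platform routine, not part of
this patch:

	#include <linux/console.h>
	#include <linux/init.h>

	extern void demo_uart_putc(char c);	/* assumed platform helper */

	static void demo_early_write(struct console *con, const char *s,
				     unsigned int n)
	{
		while (n--)
			demo_uart_putc(*s++);
	}

	static struct console demo_early_con = {
		.name	= "demoearly",
		.write	= demo_early_write,
		.flags	= CON_PRINTBUFFER | CON_BOOT,
		.index	= -1,
	};

	static int __init demo_setup_early_console(void)
	{
		early_console = &demo_early_con;
		return 0;
	}
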
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a86f1741cc27..63fc4b3e7af3 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -77,6 +77,7 @@ int debug_lockdep_rcu_enabled(void)
 }
 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
  *
@@ -103,6 +104,7 @@ int rcu_read_lock_bh_held(void)
 	return in_softirq() || irqs_disabled();
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+#endif
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 37a5444204d2..fc581fb75751 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -368,6 +368,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Post an RCU bottom-half callback to be invoked after any subsequent
  * quiescent state.
@@ -377,3 +378,4 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 	__call_rcu(head, func, &rcu_bh_ctrlblk);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 22ecea0dfb62..2824dbd9f661 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/wait-simple.h>
 
 /* Global control variables for rcupdate callback mechanism. */
 struct rcu_ctrlblk {
@@ -261,7 +262,7 @@ static void show_tiny_preempt_stats(struct seq_file *m)
 
 /* Controls for rcu_kthread() kthread. */
 static struct task_struct *rcu_kthread_task;
-static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
+static DEFINE_SWAIT_HEAD(rcu_kthread_wq);
 static unsigned long have_rcu_kthread_work;
 
 /*
@@ -572,7 +573,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 		rcu_preempt_cpu_qs();
 
 	/* Hardware IRQ handlers cannot block. */
-	if (in_irq() || in_serving_softirq()) {
+	if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
 		local_irq_restore(flags);
 		return;
 	}
@@ -754,7 +755,7 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
-static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
+static DEFINE_SWAIT_HEAD(sync_rcu_preempt_exp_wq);
 static unsigned long sync_rcu_preempt_exp_count;
 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
 
@@ -776,7 +777,7 @@ static int rcu_preempted_readers_exp(void)
  */
 static void rcu_report_exp_done(void)
 {
-	wake_up(&sync_rcu_preempt_exp_wq);
+	swait_wake(&sync_rcu_preempt_exp_wq);
 }
 
 /*
@@ -828,8 +829,8 @@ void synchronize_rcu_expedited(void)
 	else {
 		rcu_initiate_boost();
 		local_irq_restore(flags);
-		wait_event(sync_rcu_preempt_exp_wq,
-			   !rcu_preempted_readers_exp());
+		swait_event(sync_rcu_preempt_exp_wq,
+			    !rcu_preempted_readers_exp());
 	}
 
 	/* Clean up and exit. */
@@ -917,7 +918,7 @@ static void invoke_rcu_callbacks(void)
 {
 	have_rcu_kthread_work = 1;
 	if (rcu_kthread_task != NULL)
-		wake_up(&rcu_kthread_wq);
+		swait_wake(&rcu_kthread_wq);
 }
 
 #ifdef CONFIG_RCU_TRACE
@@ -947,8 +948,8 @@ static int rcu_kthread(void *arg)
 	unsigned long flags;
 
 	for (;;) {
-		wait_event_interruptible(rcu_kthread_wq,
-					 have_rcu_kthread_work != 0);
+		swait_event_interruptible(rcu_kthread_wq,
+					  have_rcu_kthread_work != 0);
 		morework = rcu_boost();
 		local_irq_save(flags);
 		work = have_rcu_kthread_work;
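
The conversion above swaps classic waitqueues for the RT patch's
"simple wait" API from linux/wait-simple.h; the calls map one to one.
A hedged sketch of the pattern, with illustrative names:

	#include <linux/wait-simple.h>

	static DEFINE_SWAIT_HEAD(demo_wq);
	static unsigned long demo_work;

	static int demo_consumer(void *unused)
	{
		for (;;) {
			/* was: wait_event_interruptible(...) */
			swait_event_interruptible(demo_wq, demo_work != 0);
			demo_work = 0;
		}
		return 0;
	}

	static void demo_producer(void)
	{
		demo_work = 1;
		/* was: wake_up(&demo_wq) */
		swait_wake(&demo_wq);
	}
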
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 62c5e9cf6fcb..055268b44f8b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -172,6 +172,19 @@ void rcu_sched_qs(int cpu)
 	rdp->passed_quiesce = 1;
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void rcu_preempt_qs(int cpu);
+
+void rcu_bh_qs(int cpu)
+{
+	unsigned long flags;
+
+	/* Callers to this function, rcu_preempt_qs(), must disable irqs. */
+	local_irq_save(flags);
+	rcu_preempt_qs(cpu);
+	local_irq_restore(flags);
+}
+#else
 void rcu_bh_qs(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
@@ -182,6 +195,7 @@ void rcu_bh_qs(int cpu)
 	trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
 	rdp->passed_quiesce = 1;
 }
+#endif
 
 /*
  * Note a context switch. This is a quiescent state for RCU-sched,
@@ -228,6 +242,7 @@ long rcu_batches_completed_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Return the number of RCU BH batches processed thus far for debug & stats.
  */
@@ -245,6 +260,7 @@ void rcu_bh_force_quiescent_state(void)
 	force_quiescent_state(&rcu_bh_state, 0);
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
+#endif
 
 /*
  * Record the number of times rcutorture tests have been initiated and
@@ -1886,6 +1902,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Queue an RCU callback for invocation after a quicker grace period.
 */
@@ -1894,6 +1911,7 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 	__call_rcu(head, func, &rcu_bh_state, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
 
 /**
  * synchronize_sched - wait until an rcu-sched grace period has elapsed.
@@ -1930,6 +1948,7 @@ void synchronize_sched(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
  *
@@ -1950,6 +1969,7 @@ void synchronize_rcu_bh(void)
 	wait_rcu_gp(call_rcu_bh);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+#endif
 
 static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
 static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
@@ -2225,6 +2245,7 @@ static void _rcu_barrier(struct rcu_state *rsp,
 	mutex_unlock(&rcu_barrier_mutex);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
  */
@@ -2233,6 +2254,7 @@ void rcu_barrier_bh(void)
 	_rcu_barrier(&rcu_bh_state, call_rcu_bh);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+#endif
 
 /**
  * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c023464816be..2844d7d85e4e 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -339,7 +339,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	}
 
 	/* Hardware IRQ handlers cannot block. */
-	if (in_irq() || in_serving_softirq()) {
+	if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
 		local_irq_restore(flags);
 		return;
 	}
@@ -1899,7 +1899,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
-#if !defined(CONFIG_RCU_FAST_NO_HZ)
+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
 
 /*
  * Check to see if any future RCU-related work will need to be done
@@ -1914,6 +1914,9 @@ int rcu_needs_cpu(int cpu)
 {
 	return rcu_cpu_has_callbacks(cpu);
 }
+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
+
+#if !defined(CONFIG_RCU_FAST_NO_HZ)
 
 /*
  * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
@@ -1984,6 +1987,7 @@ static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
 static ktime_t rcu_idle_gp_wait;	/* If some non-lazy callbacks. */
 static ktime_t rcu_idle_lazy_gp_wait;	/* If only lazy callbacks. */
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
  * callbacks on this CPU, (2) this CPU has not yet attempted to enter
@@ -2001,6 +2005,7 @@ int rcu_needs_cpu(int cpu)
 	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
 	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
 }
+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
 
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
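
The preempt_count() test introduced in the two plugin files above
replaces in_irq() || in_serving_softirq(): on RT, softirqs run in
thread context, so only the "actively serving" SOFTIRQ_OFFSET bit,
together with HARDIRQ_MASK, identifies real interrupt execution. As a
sketch:

	#include <linux/hardirq.h>

	static inline bool demo_cannot_block(void)
	{
		/* Nonzero only in hardirq context or while a softirq
		 * handler is actually running. */
		return preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET);
	}
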
diff --git a/kernel/relay.c b/kernel/relay.c
index e8cd2027abbd..56ba44f32c89 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -340,6 +340,10 @@ static void wakeup_readers(unsigned long data)
 {
 	struct rchan_buf *buf = (struct rchan_buf *)data;
 	wake_up_interruptible(&buf->read_wait);
+	/*
+	 * Stupid polling for now:
+	 */
+	mod_timer(&buf->timer, jiffies + 1);
 }
 
 /**
@@ -357,6 +361,7 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
 		init_waitqueue_head(&buf->read_wait);
 		kref_init(&buf->kref);
 		setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
+		mod_timer(&buf->timer, jiffies + 1);
 	} else
 		del_timer_sync(&buf->timer);
 
@@ -739,15 +744,6 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
 		else
 			buf->early_bytes += buf->chan->subbuf_size -
 					    buf->padding[old_subbuf];
-		smp_mb();
-		if (waitqueue_active(&buf->read_wait))
-			/*
-			 * Calling wake_up_interruptible() from here
-			 * will deadlock if we happen to be logging
-			 * from the scheduler (trying to re-grab
-			 * rq->lock), so defer it.
-			 */
-			mod_timer(&buf->timer, jiffies + 1);
 	}
 
 	old = buf->data;
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index d508363858b3..402f91ac98bc 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -43,7 +43,7 @@ int res_counter_charge(struct res_counter *counter, unsigned long val,
 	struct res_counter *c, *u;
 
 	*limit_fail_at = NULL;
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	for (c = counter; c != NULL; c = c->parent) {
 		spin_lock(&c->lock);
 		ret = res_counter_charge_locked(c, val);
@@ -62,7 +62,7 @@ undo:
 		spin_unlock(&u->lock);
 	}
 done:
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 	return ret;
 }
 
@@ -104,13 +104,13 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
 	unsigned long flags;
 	struct res_counter *c;
 
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	for (c = counter; c != NULL; c = c->parent) {
 		spin_lock(&c->lock);
 		res_counter_uncharge_locked(c, val);
 		spin_unlock(&c->lock);
 	}
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 }
 
 
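
local_irq_save_nort()/local_irq_restore_nort() above follow the RT
patch convention that "_nort" variants stop disabling interrupts once
the spinlocks taken under them have become sleeping locks. One
plausible shape of these helpers, hedged because the exact definitions
live elsewhere in the patch set:

	#ifdef CONFIG_PREEMPT_RT_FULL
	# define local_irq_save_nort(flags)	local_save_flags(flags)
	# define local_irq_restore_nort(flags)	(void)(flags)
	#else
	# define local_irq_save_nort(flags)	local_irq_save(flags)
	# define local_irq_restore_nort(flags)	local_irq_restore(flags)
	#endif
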
diff --git a/kernel/rt.c b/kernel/rt.c
new file mode 100644
index 000000000000..389f9bdec9be
--- /dev/null
+++ b/kernel/rt.c
@@ -0,0 +1,446 @@
+/*
+ * kernel/rt.c
+ *
+ * Real-Time Preemption Support
+ *
+ * started by Ingo Molnar:
+ *
+ *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * historic credit for proving that Linux spinlocks can be implemented via
+ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
+ * and others) who prototyped it on 2.4 and did lots of comparative
+ * research and analysis; TimeSys, for proving that you can implement a
+ * fully preemptible kernel via the use of IRQ threading and mutexes;
+ * Bill Huey for persuasively arguing on lkml that the mutex model is the
+ * right one; and to MontaVista, who ported pmutexes to 2.6.
+ *
+ * This code is a from-scratch implementation and is not based on pmutexes,
+ * but the idea of converting spinlocks to mutexes is used here too.
+ *
+ * lock debugging, locking tree, deadlock detection:
+ *
+ *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
+ *  Released under the General Public License (GPL).
+ *
+ * Includes portions of the generic R/W semaphore implementation from:
+ *
+ *  Copyright (c) 2001 David Howells (dhowells@redhat.com).
+ *  - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
+ *  - Derived also from comments by Linus
+ *
+ * Pending ownership of locks and ownership stealing:
+ *
+ *  Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
+ *
+ *   (also by Steven Rostedt)
+ *    - Converted single pi_lock to individual task locks.
+ *
+ * By Esben Nielsen:
+ *    Doing priority inheritance with help of the scheduler.
+ *
+ *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *  - major rework based on Esben Nielsens initial patch
+ *  - replaced thread_info references by task_struct refs
+ *  - removed task->pending_owner dependency
+ *  - BKL drop/reacquire for semaphore style locks to avoid deadlocks
+ *    in the scheduler return path as discussed with Steven Rostedt
+ *
+ *  Copyright (C) 2006, Kihon Technologies Inc.
+ *    Steven Rostedt <rostedt@goodmis.org>
+ *  - debugged and patched Thomas Gleixner's rework.
+ *  - added back the cmpxchg to the rework.
+ *  - turned atomic require back on for SMP.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+#include <linux/plist.h>
+#include <linux/fs.h>
+#include <linux/futex.h>
+#include <linux/hrtimer.h>
+
+#include "rtmutex_common.h"
+
+/*
+ * struct mutex functions
+ */
+void __mutex_do_init(struct mutex *mutex, const char *name,
+		     struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
+	lockdep_init_map(&mutex->dep_map, name, key, 0);
+#endif
+	mutex->lock.save_state = 0;
+}
+EXPORT_SYMBOL(__mutex_do_init);
+
+void __lockfunc _mutex_lock(struct mutex *lock)
+{
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock);
+
+int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
+{
+	int ret;
+
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_lock_interruptible(&lock->lock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_interruptible);
+
+int __lockfunc _mutex_lock_killable(struct mutex *lock)
+{
+	int ret;
+
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_lock_killable(&lock->lock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_killable);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
+{
+	mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
+	rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock_nested);
+
+void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
+{
+	mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
+	rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock_nest_lock);
+
+int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
+{
+	int ret;
+
+	mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
+	ret = rt_mutex_lock_interruptible(&lock->lock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
+
+int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
+{
+	int ret;
+
+	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	ret = rt_mutex_lock_killable(&lock->lock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_killable_nested);
+#endif
+
+int __lockfunc _mutex_trylock(struct mutex *lock)
+{
+	int ret = rt_mutex_trylock(&lock->lock);
+
+	if (ret)
+		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_trylock);
+
+void __lockfunc _mutex_unlock(struct mutex *lock)
+{
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
+	rt_mutex_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_unlock);
+
+/*
+ * rwlock_t functions
+ */
+int __lockfunc rt_write_trylock(rwlock_t *rwlock)
+{
+	int ret = rt_mutex_trylock(&rwlock->lock);
+
+	migrate_disable();
+	if (ret)
+		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+	else
+		migrate_enable();
+
+	return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock);
+
+int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
+{
+	int ret;
+
+	*flags = 0;
+	migrate_disable();
+	ret = rt_write_trylock(rwlock);
+	if (!ret)
+		migrate_enable();
+	return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock_irqsave);
+
+int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+{
+	struct rt_mutex *lock = &rwlock->lock;
+	int ret = 1;
+
+	/*
+	 * recursive read locks succeed when current owns the lock,
+	 * but not when read_depth == 0 which means that the lock is
+	 * write locked.
+	 */
+	migrate_disable();
+	if (rt_mutex_owner(lock) != current) {
+		ret = rt_mutex_trylock(lock);
+		if (ret)
+			rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+	} else if (!rwlock->read_depth) {
+		ret = 0;
+	}
+
+	if (ret)
+		rwlock->read_depth++;
+	else
+		migrate_enable();
+
+	return ret;
+}
+EXPORT_SYMBOL(rt_read_trylock);
+
+void __lockfunc rt_write_lock(rwlock_t *rwlock)
+{
+	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+	__rt_spin_lock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_write_lock);
+
+void __lockfunc rt_read_lock(rwlock_t *rwlock)
+{
+	struct rt_mutex *lock = &rwlock->lock;
+
+	/*
+	 * recursive read locks succeed when current owns the lock
+	 */
+	if (rt_mutex_owner(lock) != current) {
+		rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+		__rt_spin_lock(lock);
+	}
+	rwlock->read_depth++;
+}
+
+EXPORT_SYMBOL(rt_read_lock);
+
+void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+{
+	/* NOTE: we always pass in '1' for nested, for simplicity */
+	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+	__rt_spin_unlock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_write_unlock);
+
+void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+{
+	/* Release the lock only when read_depth is down to 0 */
+	if (--rwlock->read_depth == 0) {
+		rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+		__rt_spin_unlock(&rwlock->lock);
+	}
+}
+EXPORT_SYMBOL(rt_read_unlock);
+
+unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
+{
+	rt_write_lock(rwlock);
+
+	return 0;
+}
+EXPORT_SYMBOL(rt_write_lock_irqsave);
+
+unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
+{
+	rt_read_lock(rwlock);
+
+	return 0;
+}
+EXPORT_SYMBOL(rt_read_lock_irqsave);
+
+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
+	lockdep_init_map(&rwlock->dep_map, name, key, 0);
+#endif
+	rwlock->lock.save_state = 1;
+	rwlock->read_depth = 0;
+}
+EXPORT_SYMBOL(__rt_rwlock_init);
+
+/*
+ * rw_semaphores
+ */
+
+void rt_up_write(struct rw_semaphore *rwsem)
+{
+	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+	rt_mutex_unlock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_up_write);
+
+void rt_up_read(struct rw_semaphore *rwsem)
+{
+	if (--rwsem->read_depth == 0) {
+		rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+		rt_mutex_unlock(&rwsem->lock);
+	}
+}
+EXPORT_SYMBOL(rt_up_read);
+
+/*
+ * downgrade a write lock into a read lock
+ * - just wake up any readers at the front of the queue
+ */
+void rt_downgrade_write(struct rw_semaphore *rwsem)
+{
+	BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
+	rwsem->read_depth = 1;
+}
+EXPORT_SYMBOL(rt_downgrade_write);
+
+int rt_down_write_trylock(struct rw_semaphore *rwsem)
+{
+	int ret = rt_mutex_trylock(&rwsem->lock);
+
+	if (ret)
+		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(rt_down_write_trylock);
+
+void rt_down_write(struct rw_semaphore *rwsem)
+{
+	rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
+	rt_mutex_lock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_down_write);
+
+void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
+{
+	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+	rt_mutex_lock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_down_write_nested);
+
+int rt_down_read_trylock(struct rw_semaphore *rwsem)
+{
+	struct rt_mutex *lock = &rwsem->lock;
+	int ret = 1;
+
+	/*
+	 * recursive read locks succeed when current owns the rwsem,
+	 * but not when read_depth == 0 which means that the rwsem is
+	 * write locked.
+	 */
+	if (rt_mutex_owner(lock) != current) {
+		ret = rt_mutex_trylock(&rwsem->lock);
+		if (ret)
+			rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+	} else if (!rwsem->read_depth) {
+		ret = 0;
+	}
+
+	if (ret)
+		rwsem->read_depth++;
+	return ret;
+}
+EXPORT_SYMBOL(rt_down_read_trylock);
+
+static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
+{
+	struct rt_mutex *lock = &rwsem->lock;
+
+	if (rt_mutex_owner(lock) != current) {
+		rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+		rt_mutex_lock(&rwsem->lock);
+	}
+	rwsem->read_depth++;
+}
+
+void rt_down_read(struct rw_semaphore *rwsem)
+{
+	__rt_down_read(rwsem, 0);
+}
+EXPORT_SYMBOL(rt_down_read);
+
+void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
+{
+	__rt_down_read(rwsem, subclass);
+}
+EXPORT_SYMBOL(rt_down_read_nested);
+
+void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
+		     struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
+	lockdep_init_map(&rwsem->dep_map, name, key, 0);
+#endif
+	rwsem->read_depth = 0;
+	rwsem->lock.save_state = 0;
+}
+EXPORT_SYMBOL(__rt_rwsem_init);
+
+/**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+ * @cnt: the atomic which we are to dec
+ * @lock: the mutex to return holding if we dec to 0
+ *
+ * return true and hold lock if we dec to 0, return false otherwise
+ */
+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
+{
+	/* dec if we can't possibly hit 0 */
+	if (atomic_add_unless(cnt, -1, 1))
+		return 0;
+	/* we might hit 0, so take the lock */
+	mutex_lock(lock);
+	if (!atomic_dec_and_test(cnt)) {
+		/* when we actually did the dec, we didn't hit 0 */
+		mutex_unlock(lock);
+		return 0;
+	}
+	/* we hit 0, and we hold the lock */
+	return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
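
atomic_dec_and_mutex_lock() above returns nonzero, with the mutex held,
only for the decrement that reaches zero. Typical use is a final-put
teardown; the object and names below are illustrative:

	#include <linux/atomic.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct demo_obj {
		atomic_t refs;
		struct mutex teardown_lock;
	};

	static void demo_put(struct demo_obj *obj)
	{
		/* Fast path: refcount stays above zero. */
		if (!atomic_dec_and_mutex_lock(&obj->refs,
					       &obj->teardown_lock))
			return;
		/* Final reference dropped; mutex is held here. */
		mutex_unlock(&obj->teardown_lock);
		kfree(obj);
	}
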
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 16502d3a71c8..655b4cd5c070 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -65,12 +65,13 @@ void rt_mutex_debug_task_free(struct task_struct *task)
  * the deadlock. We print when we return. act_waiter can be NULL in
  * case of a remove waiter operation.
  */
-void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
+void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
+			     struct rt_mutex_waiter *act_waiter,
 			     struct rt_mutex *lock)
 {
 	struct task_struct *task;
 
-	if (!debug_locks || detect || !act_waiter)
+	if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter)
 		return;
 
 	task = rt_mutex_owner(act_waiter->lock);
diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h
index ab29b6a22669..d0519c3432b6 100644
--- a/kernel/rtmutex-debug.h
+++ b/kernel/rtmutex-debug.h
@@ -20,14 +20,15 @@ extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
 extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
 				      struct task_struct *powner);
 extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
-extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
+extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
+				    struct rt_mutex_waiter *waiter,
 				    struct rt_mutex *lock);
 extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
 # define debug_rt_mutex_reset_waiter(w) \
 	do { (w)->deadlock_lock = NULL; } while (0)
 
-static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
-						 int detect)
+static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
+						  enum rtmutex_chainwalk walk)
 {
 	return (waiter != NULL);
 }
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 98ec49475460..c1f26d9669ea 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -14,7 +14,7 @@
 #include <linux/timer.h>
 #include <linux/freezer.h>
 
-#include "rtmutex.h"
+#include "rtmutex_common.h"
 
 #define MAX_RT_TEST_THREADS	8
 #define MAX_RT_TEST_MUTEXES	8
@@ -105,7 +105,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
 
 		td->mutexes[id] = 1;
 		td->event = atomic_add_return(1, &rttest_event);
-		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
+		ret = rt_mutex_lock_interruptible(&mutexes[id]);
 		td->event = atomic_add_return(1, &rttest_event);
 		td->mutexes[id] = ret ? 0 : 4;
 		return ret ? -EINTR : 0;
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 375ae48479fa..4cc273b85beb 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -8,6 +8,12 @@
  * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
  * Copyright (C) 2006 Esben Nielsen
  *
+ * Adaptive Spinlocks:
+ *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
+ *				     and Peter Morreale,
+ * Adaptive Spinlocks simplification:
+ *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
+ *
  * See Documentation/rt-mutex-design.txt for details.
  */
 #include <linux/spinlock.h>
@@ -67,6 +73,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 	clear_rt_mutex_waiters(lock);
 }
 
+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
+{
+	return waiter && waiter != PI_WAKEUP_INPROGRESS &&
+		waiter != PI_REQUEUE_INPROGRESS;
+}
+
 /*
  * We can speed up the acquire/release, if the architecture
  * supports cmpxchg and if there's no debugging state to be set up
@@ -142,6 +154,37 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
 }
 #endif
 
+static inline void
+rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+{
+	plist_add(&waiter->list_entry, &lock->wait_list);
+}
+
+static inline void
+rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+{
+	plist_del(&waiter->list_entry, &lock->wait_list);
+}
+
+static inline void
+rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
+{
+	waiter->pi_list_entry.prio = waiter->list_entry.prio;
+	plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+}
+
+static inline void
+rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
+{
+	plist_del(&waiter->pi_list_entry, &task->pi_waiters);
+}
+
+static inline void init_lists(struct rt_mutex *lock)
+{
+	if (unlikely(!lock->wait_list.node_list.prev))
+		plist_head_init(&lock->wait_list);
+}
+
 /*
  * Calculate task priority from the waiter list priority
  *
@@ -158,6 +201,18 @@ int rt_mutex_getprio(struct task_struct *task)
 }
 
 /*
+ * Called by sched_setscheduler() to check whether the priority change
+ * is overruled by a possible priority boosting.
+ */
+int rt_mutex_check_prio(struct task_struct *task, int newprio)
+{
+	if (!task_has_pi_waiters(task))
+		return 0;
+
+	return task_top_pi_waiter(task)->pi_list_entry.prio <= newprio;
+}
+
+/*
  * Adjust the priority of a task, after its pi_waiters got modified.
  *
  * This can be both boosting and unboosting. task->pi_lock must be held.
@@ -188,6 +243,40 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 }
 
+static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
+{
+	if (waiter->savestate)
+		wake_up_lock_sleeper(waiter->task);
+	else
+		wake_up_process(waiter->task);
+}
+
+/*
+ * Deadlock detection is conditional:
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
+ * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
+ * conducted independent of the detect argument.
+ *
+ * If the waiter argument is NULL this indicates the deboost path and
+ * deadlock detection is disabled independent of the detect argument
+ * and the config settings.
+ */
+static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
+					  enum rtmutex_chainwalk chwalk)
+{
+	/*
+	 * This is just a wrapper function for the following call,
+	 * because debug_rt_mutex_detect_deadlock() smells like a magic
+	 * debug feature and I wanted to keep the cond function in the
+	 * main source file along with the comments instead of having
+	 * two of the same in the headers.
+	 */
+	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
+}
+
 /*
  * Max number of times we'll walk the boosting chain:
  */
@@ -202,21 +291,65 @@ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
  * Adjust the priority chain. Also used for deadlock detection.
  * Decreases task's usage by one - may thus free the task.
  * Returns 0 or -EDEADLK.
+ *
+ * Chain walk basics and protection scope
+ *
+ * [R] refcount on task
+ * [P] task->pi_lock held
+ * [L] rtmutex->wait_lock held
+ *
+ * Step	Description				Protected by
+ *	function arguments:
+ *	@task					[R]
+ *	@orig_lock if != NULL			@top_task is blocked on it
+ *	@next_lock				Unprotected. Cannot be
+ *						dereferenced. Only used for
+ *						comparison.
+ *	@orig_waiter if != NULL			@top_task is blocked on it
+ *	@top_task				current, or in case of proxy
+ *						locking protected by calling
+ *						code
+ * again:
+ *	loop_sanity_check();
+ * retry:
+ * [1]	  lock(task->pi_lock);			[R] acquire [P]
+ * [2]	  waiter = task->pi_blocked_on;		[P]
+ * [3]	  check_exit_conditions_1();		[P]
+ * [4]	  lock = waiter->lock;			[P]
+ * [5]	  if (!try_lock(lock->wait_lock)) {	[P] try to acquire [L]
+ *	    unlock(task->pi_lock);		release [P]
+ *	    goto retry;
+ *	  }
+ * [6]	  check_exit_conditions_2();		[P] + [L]
+ * [7]	  requeue_lock_waiter(lock, waiter);	[P] + [L]
+ * [8]	  unlock(task->pi_lock);		release [P]
+ *	  put_task_struct(task);		release [R]
+ * [9]	  check_exit_conditions_3();		[L]
+ * [10]	  task = owner(lock);			[L]
+ *	  get_task_struct(task);		[L] acquire [R]
+ *	  lock(task->pi_lock);			[L] acquire [P]
+ * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
+ * [12]	  check_exit_conditions_4();		[P] + [L]
+ * [13]	  unlock(task->pi_lock);		release [P]
+ *	  unlock(lock->wait_lock);		release [L]
+ *	  goto again;
  */
 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
-				      int deadlock_detect,
+				      enum rtmutex_chainwalk chwalk,
 				      struct rt_mutex *orig_lock,
 				      struct rt_mutex *next_lock,
 				      struct rt_mutex_waiter *orig_waiter,
 				      struct task_struct *top_task)
 {
-	struct rt_mutex *lock;
 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
-	int detect_deadlock, ret = 0, depth = 0;
+	struct rt_mutex_waiter *prerequeue_top_waiter;
+	int ret = 0, depth = 0;
+	struct rt_mutex *lock;
+	bool detect_deadlock;
 	unsigned long flags;
+	bool requeue = true;
 
-	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
-							 deadlock_detect);
+	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
 
 	/*
	 * The (de)boosting is a step by step approach with a lot of
@@ -225,6 +358,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * carefully whether things change under us.
 	 */
 again:
+	/*
+	 * We limit the lock chain length for each invocation.
+	 */
 	if (++depth > max_lock_depth) {
 		static int prev_max;
 
@@ -242,19 +378,34 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 
 		return -EDEADLK;
 	}
+
+	/*
+	 * We are fully preemptible here and only hold the refcount on
+	 * @task. So everything can have changed under us since the
+	 * caller or our own code below (goto retry/again) dropped all
+	 * locks.
+	 */
 retry:
 	/*
-	 * Task can not go away as we did a get_task() before !
+	 * [1] Task cannot go away as we did a get_task() before !
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
+	/*
+	 * [2] Get the waiter on which @task is blocked on.
+	 */
 	waiter = task->pi_blocked_on;
+
+	/*
+	 * [3] check_exit_conditions_1() protected by task->pi_lock.
+	 */
+
 	/*
 	 * Check whether the end of the boosting chain has been
 	 * reached or the state of the chain has changed while we
 	 * dropped the locks.
 	 */
-	if (!waiter)
+	if (!rt_mutex_real_waiter(waiter))
 		goto out_unlock_pi;
 
 	/*
@@ -286,20 +437,41 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 			goto out_unlock_pi;
 		/*
 		 * If deadlock detection is off, we stop here if we
-		 * are not the top pi waiter of the task.
+		 * are not the top pi waiter of the task. If deadlock
+		 * detection is enabled we continue, but stop the
+		 * requeueing in the chain walk.
 		 */
-		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
-			goto out_unlock_pi;
+		if (top_waiter != task_top_pi_waiter(task)) {
+			if (!detect_deadlock)
+				goto out_unlock_pi;
+			else
+				requeue = false;
+		}
 	}
 
 	/*
-	 * When deadlock detection is off then we check, if further
-	 * priority adjustment is necessary.
+	 * If the waiter priority is the same as the task priority
+	 * then there is no further priority adjustment necessary. If
+	 * deadlock detection is off, we stop the chain walk. If its
+	 * enabled we continue, but stop the requeueing in the chain
+	 * walk.
 	 */
-	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
-		goto out_unlock_pi;
+	if (waiter->list_entry.prio == task->prio) {
+		if (!detect_deadlock)
+			goto out_unlock_pi;
+		else
+			requeue = false;
+	}
 
+	/*
+	 * [4] Get the next lock
+	 */
 	lock = waiter->lock;
+	/*
+	 * [5] We need to trylock here as we are holding task->pi_lock,
+	 * which is the reverse lock order versus the other rtmutex
+	 * operations.
+	 */
 	if (!raw_spin_trylock(&lock->wait_lock)) {
 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		cpu_relax();
@@ -307,81 +479,183 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	}
 
 	/*
+	 * [6] check_exit_conditions_2() protected by task->pi_lock and
+	 * lock->wait_lock.
+	 *
 	 * Deadlock detection. If the lock is the same as the original
 	 * lock which caused us to walk the lock chain or if the
 	 * current lock is owned by the task which initiated the chain
 	 * walk, we detected a deadlock.
 	 */
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
-		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
 		ret = -EDEADLK;
 		goto out_unlock_pi;
 	}
 
-	top_waiter = rt_mutex_top_waiter(lock);
+	/*
+	 * If we just follow the lock chain for deadlock detection, no
+	 * need to do all the requeue operations. To avoid a truckload
+	 * of conditionals around the various places below, just do the
+	 * minimum chain walk checks.
+	 */
+	if (!requeue) {
+		/*
+		 * No requeue[7] here. Just release @task [8]
+		 */
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		put_task_struct(task);
 
-	/* Requeue the waiter */
-	plist_del(&waiter->list_entry, &lock->wait_list);
+		/*
+		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
+		 * If there is no owner of the lock, end of chain.
+		 */
+		if (!rt_mutex_owner(lock)) {
+			raw_spin_unlock(&lock->wait_lock);
+			return 0;
+		}
+
+		/* [10] Grab the next task, i.e. owner of @lock */
+		task = rt_mutex_owner(lock);
+		get_task_struct(task);
+		raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+		/*
+		 * No requeue [11] here. We just do deadlock detection.
+		 *
+		 * [12] Store whether owner is blocked
+		 * itself. Decision is made after dropping the locks
+		 */
+		next_lock = task_blocked_on_lock(task);
+		/*
+		 * Get the top waiter for the next iteration
+		 */
+		top_waiter = rt_mutex_top_waiter(lock);
+
+		/* [13] Drop locks */
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock(&lock->wait_lock);
+
+		/* If owner is not blocked, end of chain. */
+		if (!next_lock)
+			goto out_put_task;
+		goto again;
+	}
+
+	/*
+	 * Store the current top waiter before doing the requeue
+	 * operation on @lock. We need it for the boost/deboost
+	 * decision below.
+	 */
+	prerequeue_top_waiter = rt_mutex_top_waiter(lock);
+
+	/* [7] Requeue the waiter in the lock waiter list. */
+	rt_mutex_dequeue(lock, waiter);
 	waiter->list_entry.prio = task->prio;
-	plist_add(&waiter->list_entry, &lock->wait_list);
+	rt_mutex_enqueue(lock, waiter);
 
-	/* Release the task */
+	/* [8] Release the task */
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	put_task_struct(task);
+
+	/*
+	 * [9] check_exit_conditions_3 protected by lock->wait_lock.
+	 *
+	 * We must abort the chain walk if there is no lock owner even
+	 * in the dead lock detection case, as we have nothing to
+	 * follow here. This is the end of the chain we are walking.
+	 */
 	if (!rt_mutex_owner(lock)) {
+		struct rt_mutex_waiter *lock_top_waiter;
+
 		/*
-		 * If the requeue above changed the top waiter, then we need
-		 * to wake the new top waiter up to try to get the lock.
+		 * If the requeue [7] above changed the top waiter,
+		 * then we need to wake the new top waiter up to try
+		 * to get the lock.
 		 */
-
-		if (top_waiter != rt_mutex_top_waiter(lock))
-			wake_up_process(rt_mutex_top_waiter(lock)->task);
+		lock_top_waiter = rt_mutex_top_waiter(lock);
+		if (prerequeue_top_waiter != lock_top_waiter)
+			rt_mutex_wake_waiter(lock_top_waiter);
 		raw_spin_unlock(&lock->wait_lock);
-		goto out_put_task;
+		return 0;
 	}
-	put_task_struct(task);
 
-	/* Grab the next task */
+	/* [10] Grab the next task, i.e. the owner of @lock */
 	task = rt_mutex_owner(lock);
 	get_task_struct(task);
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
+	/* [11] requeue the pi waiters if necessary */
 	if (waiter == rt_mutex_top_waiter(lock)) {
-		/* Boost the owner */
-		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
-		waiter->pi_list_entry.prio = waiter->list_entry.prio;
-		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+		/*
+		 * The waiter became the new top (highest priority)
+		 * waiter on the lock. Replace the previous top waiter
+		 * in the owner tasks pi waiters list with this waiter
+		 * and adjust the priority of the owner.
+		 */
+		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
+		rt_mutex_enqueue_pi(task, waiter);
 		__rt_mutex_adjust_prio(task);
 
-	} else if (top_waiter == waiter) {
-		/* Deboost the owner */
-		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
+	} else if (prerequeue_top_waiter == waiter) {
+		/*
+		 * The waiter was the top waiter on the lock, but is
+		 * no longer the top priority waiter. Replace waiter in
+		 * the owner tasks pi waiters list with the new top
+		 * (highest priority) waiter and adjust the priority
+		 * of the owner.
+		 * The new top waiter is stored in @waiter so that
+		 * @waiter == @top_waiter evaluates to true below and
+		 * we continue to deboost the rest of the chain.
+		 */
+		rt_mutex_dequeue_pi(task, waiter);
 		waiter = rt_mutex_top_waiter(lock);
-		waiter->pi_list_entry.prio = waiter->list_entry.prio;
-		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+		rt_mutex_enqueue_pi(task, waiter);
 		__rt_mutex_adjust_prio(task);
+	} else {
+		/*
+		 * Nothing changed. No need to do any priority
+		 * adjustment.
+		 */
 	}
 
 	/*
+	 * [12] check_exit_conditions_4() protected by task->pi_lock
+	 * and lock->wait_lock. The actual decisions are made after we
+	 * dropped the locks.
+	 *
 	 * Check whether the task which owns the current lock is pi
 	 * blocked itself. If yes we store a pointer to the lock for
 	 * the lock chain change detection above. After we dropped
 	 * task->pi_lock next_lock cannot be dereferenced anymore.
 	 */
 	next_lock = task_blocked_on_lock(task);
+	/*
+	 * Store the top waiter of @lock for the end of chain walk
+	 * decision below.
+	 */
+	top_waiter = rt_mutex_top_waiter(lock);
 
+	/* [13] Drop the locks */
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
-	top_waiter = rt_mutex_top_waiter(lock);
 	raw_spin_unlock(&lock->wait_lock);
 
 	/*
+	 * Make the actual exit decisions [12], based on the stored
+	 * values.
+	 *
 	 * We reached the end of the lock chain. Stop right here. No
 	 * point to go back just to figure that out.
 	 */
 	if (!next_lock)
 		goto out_put_task;
 
+	/*
+	 * If the current waiter is not the top waiter on the lock,
+	 * then we can stop the chain walk here if we are not in full
+	 * deadlock detection mode.
+	 */
 	if (!detect_deadlock && waiter != top_waiter)
 		goto out_put_task;
 
@@ -395,82 +669,146 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	return ret;
 }
 
+
+#define STEAL_NORMAL  0
+#define STEAL_LATERAL 1
+
+/*
+ * Note that RT tasks are excluded from lateral-steals to prevent the
+ * introduction of an unbounded latency
+ */
+static inline int lock_is_stealable(struct task_struct *task,
+				    struct task_struct *pendowner, int mode)
+{
+	if (mode == STEAL_NORMAL || rt_task(task)) {
+		if (task->prio >= pendowner->prio)
+			return 0;
+	} else if (task->prio > pendowner->prio)
+		return 0;
+	return 1;
+}
+
 /*
  * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
- * @lock:   the lock to be acquired.
- * @task:   the task which wants to acquire the lock
- * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
+ * @lock:   The lock to be acquired.
+ * @task:   The task which wants to acquire the lock
+ * @waiter: The waiter that is queued to the lock's wait list if the
+ *	    callsite called task_blocked_on_lock(), otherwise NULL
 */
-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
-		struct rt_mutex_waiter *waiter)
+static int
+__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+		       struct rt_mutex_waiter *waiter, int mode)
 {
+	unsigned long flags;
+
 	/*
-	 * We have to be careful here if the atomic speedups are
-	 * enabled, such that, when
-	 *  - no other waiter is on the lock
-	 *  - the lock has been released since we did the cmpxchg
-	 * the lock can be released or taken while we are doing the
-	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
+	 * Before testing whether we can acquire @lock, we set the
+	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
+	 * other tasks which try to modify @lock into the slow path
+	 * and they serialize on @lock->wait_lock.
+	 *
+	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
+	 * as explained at the top of this file if and only if:
 	 *
-	 * The atomic acquire/release aware variant of
-	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
-	 * the WAITERS bit, the atomic release / acquire can not
-	 * happen anymore and lock->wait_lock protects us from the
-	 * non-atomic case.
+	 * - There is a lock owner. The caller must fixup the
+	 *   transient state if it does a trylock or leaves the lock
+	 *   function due to a signal or timeout.
 	 *
-	 * Note, that this might set lock->owner =
-	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
-	 * any more. This is fixed up when we take the ownership.
-	 * This is the transitional state explained at the top of this file.
+	 * - @task acquires the lock and there are no other
+	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
+	 *   the end of this function.
 	 */
 	mark_rt_mutex_waiters(lock);
 
+	/*
+	 * If @lock has an owner, give up.
+	 */
 	if (rt_mutex_owner(lock))
 		return 0;
 
 	/*
-	 * It will get the lock because of one of these conditions:
-	 * 1) there is no waiter
-	 * 2) higher priority than waiters
-	 * 3) it is top waiter
+	 * If @waiter != NULL, @task has already enqueued the waiter
+	 * into @lock waiter list. If @waiter == NULL then this is a
+	 * trylock attempt.
 	 */
-	if (rt_mutex_has_waiters(lock)) {
-		if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
-			if (!waiter || waiter != rt_mutex_top_waiter(lock))
-				return 0;
-		}
-	}
-
-	if (waiter || rt_mutex_has_waiters(lock)) {
-		unsigned long flags;
-		struct rt_mutex_waiter *top;
-
-		raw_spin_lock_irqsave(&task->pi_lock, flags);
+	if (waiter) {
+		/*
+		 * If waiter is not the highest priority waiter of
+		 * @lock, give up.
+		 */
+		if (waiter != rt_mutex_top_waiter(lock))
+			return 0;
 
-		/* remove the queued waiter. */
-		if (waiter) {
-			plist_del(&waiter->list_entry, &lock->wait_list);
-			task->pi_blocked_on = NULL;
-		}
+		/*
+		 * We can acquire the lock. Remove the waiter from the
+		 * lock waiters list.
+		 */
+		rt_mutex_dequeue(lock, waiter);
 
+	} else {
 		/*
-		 * We have to enqueue the top waiter(if it exists) into
-		 * task->pi_waiters list.
+		 * If the lock has waiters already we check whether @task is
+		 * eligible to take over the lock.
+		 *
+		 * If there are no other waiters, @task can acquire
+		 * the lock. @task->pi_blocked_on is NULL, so it does
+		 * not need to be dequeued.
 		 */
 		if (rt_mutex_has_waiters(lock)) {
-			top = rt_mutex_top_waiter(lock);
-			top->pi_list_entry.prio = top->list_entry.prio;
-			plist_add(&top->pi_list_entry, &task->pi_waiters);
+			struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
+
+			/*
+			 * If @task->prio is greater than or equal to
+			 * the top waiter priority (kernel view),
+			 * @task lost.
+			 */
+			if (task != pown && !lock_is_stealable(task, pown, mode))
|
|
+ return 0;
|
|
+
|
|
+ /*
|
|
+ * The current top waiter stays enqueued. We
|
|
+ * don't have to change anything in the lock
|
|
+ * waiters order.
|
|
+ */
|
|
+ } else {
|
|
+ /*
|
|
+ * No waiters. Take the lock without the
|
|
+ * pi_lock dance.@task->pi_blocked_on is NULL
|
|
+ * and we have no waiters to enqueue in @task
|
|
+ * pi waiters list.
|
|
+ */
|
|
+ goto takeit;
|
|
}
|
|
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
|
|
}
|
|
|
|
+ /*
|
|
+ * Clear @task->pi_blocked_on. Requires protection by
|
|
+ * @task->pi_lock. Redundant operation for the @waiter == NULL
|
|
+ * case, but conditionals are more expensive than a redundant
|
|
+ * store.
|
|
+ */
|
|
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
|
|
+ task->pi_blocked_on = NULL;
|
|
+ /*
|
|
+ * Finish the lock acquisition. @task is the new owner. If
|
|
+ * other waiters exist we have to insert the highest priority
|
|
+ * waiter into @task->pi_waiters list.
|
|
+ */
|
|
+ if (rt_mutex_has_waiters(lock))
|
|
+ rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
|
|
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
|
|
+
|
|
+takeit:
|
|
/* We got the lock. */
|
|
debug_rt_mutex_lock(lock);
|
|
|
|
+ /*
|
|
+ * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
|
|
+ * are still waiters or clears it.
|
|
+ */
|
|
rt_mutex_set_owner(lock, task);
|
|
|
|
rt_mutex_deadlock_account_lock(lock, task);
|
|
@@ -478,6 +816,13 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
return 1;
}

+static inline int
+try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ struct rt_mutex_waiter *waiter)
+{
+ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
+}
+
/*
* Task blocks on lock.
*
@@ -488,7 +833,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task,
- int detect_deadlock)
+ enum rtmutex_chainwalk chwalk)
{
struct task_struct *owner = rt_mutex_owner(lock);
struct rt_mutex_waiter *top_waiter = waiter;
@@ -509,6 +854,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
return -EDEADLK;

raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+ /*
+ * In the case of futex requeue PI, this will be a proxy
+ * lock. The task will wake unaware that it is enqueued on
+ * this lock. Avoid blocking on two locks and corrupting
+ * pi_blocked_on via the PI_WAKEUP_INPROGRESS
+ * flag. futex_wait_requeue_pi() sets this when it wakes up
+ * before requeue (due to a signal or timeout). Do not enqueue
+ * the task if PI_WAKEUP_INPROGRESS is set.
+ */
+ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ return -EAGAIN;
+ }
+
+ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
+
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
@@ -518,7 +880,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
/* Get the top priority waiter on the lock */
if (rt_mutex_has_waiters(lock))
top_waiter = rt_mutex_top_waiter(lock);
- plist_add(&waiter->list_entry, &lock->wait_list);
+ rt_mutex_enqueue(lock, waiter);

task->pi_blocked_on = waiter;

@@ -529,13 +891,13 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,

raw_spin_lock_irqsave(&owner->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
- plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
- plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+ rt_mutex_dequeue_pi(owner, top_waiter);
+ rt_mutex_enqueue_pi(owner, waiter);

__rt_mutex_adjust_prio(owner);
- if (owner->pi_blocked_on)
+ if (rt_mutex_real_waiter(owner->pi_blocked_on))
chain_walk = 1;
- } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
+ } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
}

@@ -560,7 +922,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,

raw_spin_unlock(&lock->wait_lock);

- res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
+ res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
next_lock, waiter, task);

raw_spin_lock(&lock->wait_lock);
@@ -591,7 +953,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
* boosted mode and go back to normal after releasing
* lock->wait_lock.
*/
- plist_del(&waiter->pi_list_entry, &current->pi_waiters);
+ rt_mutex_dequeue_pi(current, waiter);

/*
* As we are waking up the top waiter, and the waiter stays
@@ -610,7 +972,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
* long as we hold lock->wait_lock. The waiter task needs to
* acquire it in order to dequeue the waiter.
*/
- wake_up_process(waiter->task);
+ rt_mutex_wake_waiter(waiter);
}

/*
@@ -622,41 +984,44 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
static void remove_waiter(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter)
{
- int first = (waiter == rt_mutex_top_waiter(lock));
+ bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
struct rt_mutex *next_lock = NULL;
unsigned long flags;

raw_spin_lock_irqsave(&current->pi_lock, flags);
- plist_del(&waiter->list_entry, &lock->wait_list);
+ rt_mutex_dequeue(lock, waiter);
current->pi_blocked_on = NULL;
raw_spin_unlock_irqrestore(&current->pi_lock, flags);

- if (!owner)
+ /*
+ * Only update priority if the waiter was the highest priority
+ * waiter of the lock and there is an owner to update.
+ */
+ if (!owner || !is_top_waiter)
return;

- if (first) {
-
- raw_spin_lock_irqsave(&owner->pi_lock, flags);
+ raw_spin_lock_irqsave(&owner->pi_lock, flags);

- plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
+ rt_mutex_dequeue_pi(owner, waiter);

- if (rt_mutex_has_waiters(lock)) {
- struct rt_mutex_waiter *next;
+ if (rt_mutex_has_waiters(lock))
+ rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));

- next = rt_mutex_top_waiter(lock);
- plist_add(&next->pi_list_entry, &owner->pi_waiters);
- }
- __rt_mutex_adjust_prio(owner);
+ __rt_mutex_adjust_prio(owner);

- /* Store the lock on which owner is blocked or NULL */
+ /* Store the lock on which owner is blocked or NULL */
+ if (rt_mutex_real_waiter(owner->pi_blocked_on))
next_lock = task_blocked_on_lock(owner);

- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
- }
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);

WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

+ /*
+ * Don't walk the chain, if the owner task is not blocked
+ * itself.
+ */
if (!next_lock)
return;

@@ -665,7 +1030,8 @@ static void remove_waiter(struct rt_mutex *lock,

raw_spin_unlock(&lock->wait_lock);

- rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
+ rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
+ next_lock, NULL, current);

raw_spin_lock(&lock->wait_lock);
}
@@ -684,24 +1050,340 @@ void rt_mutex_adjust_pi(struct task_struct *task)
raw_spin_lock_irqsave(&task->pi_lock, flags);

waiter = task->pi_blocked_on;
- if (!waiter || waiter->list_entry.prio == task->prio) {
+ if (!rt_mutex_real_waiter(waiter) ||
+ waiter->list_entry.prio == task->prio) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
- next_lock = waiter->lock;
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);

+ next_lock = waiter->lock;
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
+ next_lock, NULL, task);
+}
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * preemptible spin_lock functions:
+ */
+static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
+ void (*slowfn)(struct rt_mutex *lock))
+{
+ might_sleep();

- rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
+ if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
+ rt_mutex_deadlock_account_lock(lock, current);
+ else
+ slowfn(lock);
}

+static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
+ void (*slowfn)(struct rt_mutex *lock))
+{
+ if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
+ rt_mutex_deadlock_account_unlock(current);
+ else
+ slowfn(lock);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Note that owner is a speculative pointer and dereferencing relies
+ * on rcu_read_lock() and the check against the lock owner.
+ */
+static int adaptive_wait(struct rt_mutex *lock,
+ struct task_struct *owner)
+{
+ int res = 0;
+
+ rcu_read_lock();
+ for (;;) {
+ if (owner != rt_mutex_owner(lock))
+ break;
+ /*
+ * Ensure that owner->on_cpu is dereferenced _after_
+ * checking the above to be valid.
+ */
+ barrier();
+ if (!owner->on_cpu) {
+ res = 1;
+ break;
+ }
+ cpu_relax();
+ }
+ rcu_read_unlock();
+ return res;
+}
+#else
+static int adaptive_wait(struct rt_mutex *lock,
+ struct task_struct *orig_owner)
+{
+ return 1;
+}
+#endif
+
+# define pi_lock(lock) raw_spin_lock_irq(lock)
+# define pi_unlock(lock) raw_spin_unlock_irq(lock)
+
+/*
+ * Slow path lock function spin_lock style: this variant is very
+ * careful not to miss any non-lock wakeups.
+ *
+ * We store the current state under p->pi_lock in p->saved_state and
+ * the try_to_wake_up() code handles this accordingly.
+ */
+static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
+{
+ struct task_struct *lock_owner, *self = current;
+ struct rt_mutex_waiter waiter, *top_waiter;
+ int ret;
+
+ rt_mutex_init_waiter(&waiter, true);
+
+ raw_spin_lock(&lock->wait_lock);
+ init_lists(lock);
+
+ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
+ raw_spin_unlock(&lock->wait_lock);
+ return;
+ }
+
+ BUG_ON(rt_mutex_owner(lock) == self);
+
+ /*
+ * We save whatever state the task is in and we'll restore it
+ * after acquiring the lock taking real wakeups into account
+ * as well. We are serialized via pi_lock against wakeups. See
+ * try_to_wake_up().
+ */
+ pi_lock(&self->pi_lock);
+ self->saved_state = self->state;
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ pi_unlock(&self->pi_lock);
+
+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
+ BUG_ON(ret);
+
+ for (;;) {
+ /* Try to acquire the lock again. */
+ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
+ break;
+
+ top_waiter = rt_mutex_top_waiter(lock);
+ lock_owner = rt_mutex_owner(lock);
+
+ raw_spin_unlock(&lock->wait_lock);
+
+ debug_rt_mutex_print_deadlock(&waiter);
+
+ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
+ schedule_rt_mutex(lock);
+
+ raw_spin_lock(&lock->wait_lock);
+
+ pi_lock(&self->pi_lock);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ pi_unlock(&self->pi_lock);
+ }
+
+ /*
+ * Restore the task state to current->saved_state. We set it
+ * to the original state above and the try_to_wake_up() code
+ * has possibly updated it when a real (non-rtmutex) wakeup
+ * happened while we were blocked. Clear saved_state so
+ * try_to_wake_up() does not get confused.
+ */
+ pi_lock(&self->pi_lock);
+ __set_current_state(self->saved_state);
+ self->saved_state = TASK_RUNNING;
+ pi_unlock(&self->pi_lock);
+
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit
+ * unconditionally. We might have to fix that up:
+ */
+ fixup_rt_mutex_waiters(lock);
+
+ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
+ BUG_ON(!plist_node_empty(&waiter.list_entry));
+
+ raw_spin_unlock(&lock->wait_lock);
+
+ debug_rt_mutex_free_waiter(&waiter);
+}
+
+/*
+ * Slow path to release a rt_mutex spin_lock style
+ */
+static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+ debug_rt_mutex_unlock(lock);
+
+ rt_mutex_deadlock_account_unlock(current);
+
+ if (!rt_mutex_has_waiters(lock)) {
+ lock->owner = NULL;
+ raw_spin_unlock(&lock->wait_lock);
+ return;
+ }
+
+ wakeup_next_waiter(lock);
+
+ raw_spin_unlock(&lock->wait_lock);
+
+ /* Undo pi boosting when necessary */
+ rt_mutex_adjust_prio(current);
+}
+
+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+ raw_spin_lock(&lock->wait_lock);
+ __rt_spin_lock_slowunlock(lock);
+}
+
+static void noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock)
+{
+ int ret;
+
+ do {
+ ret = raw_spin_trylock(&lock->wait_lock);
+ } while (!ret);
+
+ __rt_spin_lock_slowunlock(lock);
+}
+
+void __lockfunc rt_spin_lock(spinlock_t *lock)
+{
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(rt_spin_lock);
+
+void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
+{
+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+}
+EXPORT_SYMBOL(__rt_spin_lock);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+{
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(rt_spin_lock_nested);
+#endif
+
+void __lockfunc rt_spin_unlock(spinlock_t *lock)
+{
+ /* NOTE: we always pass in '1' for nested, for simplicity */
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+}
+EXPORT_SYMBOL(rt_spin_unlock);
+
+void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock)
+{
+ /* NOTE: we always pass in '1' for nested, for simplicity */
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq);
+}
+
+void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
+{
+ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
+}
+EXPORT_SYMBOL(__rt_spin_unlock);
+
+/*
+ * Wait for the lock to get unlocked: instead of polling for an unlock
+ * (like raw spinlocks do), we lock and unlock, to force the kernel to
+ * schedule if there's contention:
+ */
+void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
+{
+ spin_lock(lock);
+ spin_unlock(lock);
+}
+EXPORT_SYMBOL(rt_spin_unlock_wait);
+
+int __lockfunc rt_spin_trylock(spinlock_t *lock)
+{
+ int ret = rt_mutex_trylock(&lock->lock);
+
+ if (ret)
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock);
+
+int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = rt_mutex_trylock(&lock->lock);
+ if (ret) {
+ migrate_disable();
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ } else
+ local_bh_enable();
+ return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_bh);
+
+int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+{
+ int ret;
+
+ *flags = 0;
+ migrate_disable();
+ ret = rt_mutex_trylock(&lock->lock);
+ if (ret)
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ else
+ migrate_enable();
+ return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_irqsave);
+
+int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+ migrate_disable();
+ rt_spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ rt_spin_unlock(lock);
+ migrate_enable();
+ return 0;
+}
+EXPORT_SYMBOL(atomic_dec_and_spin_lock);
+
+void
+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+ lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+}
+EXPORT_SYMBOL(__rt_spin_lock_init);
+
+#endif /* PREEMPT_RT_FULL */
+
/**
* __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
* @lock: the rt_mutex to take
* @state: the state the task should block in (TASK_INTERRUPTIBLE
- * or TASK_UNINTERRUPTIBLE)
+ * or TASK_UNINTERRUPTIBLE)
* @timeout: the pre-initialized and started timer, or NULL for none
* @waiter: the pre-initialized rt_mutex_waiter
*
@@ -772,14 +1454,15 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
- int detect_deadlock)
+ enum rtmutex_chainwalk chwalk)
{
struct rt_mutex_waiter waiter;
int ret = 0;

- debug_rt_mutex_init_waiter(&waiter);
+ rt_mutex_init_waiter(&waiter, false);

raw_spin_lock(&lock->wait_lock);
+ init_lists(lock);

/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock, current, NULL)) {
@@ -796,7 +1479,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
timeout->task = NULL;
}

- ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
+ ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);

if (likely(!ret))
ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
@@ -804,8 +1487,9 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
set_current_state(TASK_RUNNING);

if (unlikely(ret)) {
- remove_waiter(lock, &waiter);
- rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+ if (rt_mutex_has_waiters(lock))
+ remove_waiter(lock, &waiter);
+ rt_mutex_handle_deadlock(ret, chwalk, &waiter);
}

/*
@@ -828,22 +1512,33 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
/*
* Slow path try-lock function:
*/
-static inline int
-rt_mutex_slowtrylock(struct rt_mutex *lock)
+static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
- int ret = 0;
+ int ret;

- raw_spin_lock(&lock->wait_lock);
+ /*
+ * If the lock already has an owner we fail to get the lock.
+ * This can be done without taking the @lock->wait_lock as
+ * it is only being read, and this is a trylock anyway.
+ */
+ if (rt_mutex_owner(lock))
+ return 0;
+
+ /*
+ * The mutex has currently no owner. Lock the wait lock and
+ * try to acquire the lock.
+ */
+ if (!raw_spin_trylock(&lock->wait_lock))
+ return 0;
+ init_lists(lock);

- if (likely(rt_mutex_owner(lock) != current)) {
+ ret = try_to_take_rt_mutex(lock, current, NULL);

- ret = try_to_take_rt_mutex(lock, current, NULL);
- /*
- * try_to_take_rt_mutex() sets the lock waiters
- * bit unconditionally. Clean this up.
- */
- fixup_rt_mutex_waiters(lock);
- }
+ /*
+ * try_to_take_rt_mutex() sets the lock waiters bit
+ * unconditionally. Clean this up.
+ */
+ fixup_rt_mutex_waiters(lock);

raw_spin_unlock(&lock->wait_lock);

@@ -921,30 +1616,31 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
- int detect_deadlock,
int (*slowfn)(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
- int detect_deadlock))
+ enum rtmutex_chainwalk chwalk))
{
- if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+ if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
rt_mutex_deadlock_account_lock(lock, current);
return 0;
} else
- return slowfn(lock, state, NULL, detect_deadlock);
+ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout, int detect_deadlock,
+ struct hrtimer_sleeper *timeout,
+ enum rtmutex_chainwalk chwalk,
int (*slowfn)(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
int detect_deadlock))
{
- if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+ if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+ likely(rt_mutex_cmpxchg(lock, NULL, current))) {
rt_mutex_deadlock_account_lock(lock, current);
return 0;
} else
- return slowfn(lock, state, timeout, detect_deadlock);
+ return slowfn(lock, state, timeout, chwalk);
}

static inline int
@@ -977,54 +1673,77 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
{
might_sleep();

- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
+ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
* rt_mutex_lock_interruptible - lock a rt_mutex interruptible
*
- * @lock: the rt_mutex to be locked
- * @detect_deadlock: deadlock detection on/off
+ * @lock: the rt_mutex to be locked
*
* Returns:
- * 0 on success
- * -EINTR when interrupted by a signal
- * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+ * 0 on success
+ * -EINTR when interrupted by a signal
*/
-int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
- int detect_deadlock)
+int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
might_sleep();

- return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
- detect_deadlock, rt_mutex_slowlock);
+ return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

+/*
+ * Futex variant with full deadlock detection.
+ */
+int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *timeout)
+{
+ might_sleep();
+
+ return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+ RT_MUTEX_FULL_CHAINWALK, rt_mutex_slowlock);
+}
+
+/**
+ * rt_mutex_lock_killable - lock a rt_mutex killable
+ *
+ * @lock: the rt_mutex to be locked
+ *
+ * Returns:
+ * 0 on success
+ * -EINTR when interrupted by a signal
+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+ */
+int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
+{
+ might_sleep();
+
+ return rt_mutex_fastlock(lock, TASK_KILLABLE, rt_mutex_slowlock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
+
/**
* rt_mutex_timed_lock - lock a rt_mutex interruptible
* the timeout structure is provided
* by the caller
*
- * @lock: the rt_mutex to be locked
+ * @lock: the rt_mutex to be locked
* @timeout: timeout structure or NULL (no timeout)
- * @detect_deadlock: deadlock detection on/off
*
* Returns:
- * 0 on success
- * -EINTR when interrupted by a signal
+ * 0 on success
+ * -EINTR when interrupted by a signal
* -ETIMEDOUT when the timeout expired
- * -EDEADLK when the lock would deadlock (when deadlock detection is on)
*/
int
-rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
- int detect_deadlock)
+rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
{
might_sleep();

return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
- detect_deadlock, rt_mutex_slowlock);
+ RT_MUTEX_MIN_CHAINWALK, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

@@ -1082,12 +1801,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
- raw_spin_lock_init(&lock->wait_lock);
plist_head_init(&lock->wait_list);

debug_rt_mutex_init(lock, name);
}
-EXPORT_SYMBOL_GPL(__rt_mutex_init);
+EXPORT_SYMBOL(__rt_mutex_init);

/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
@@ -1102,7 +1820,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
- __rt_mutex_init(lock, NULL);
+ rt_mutex_init(lock);
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
rt_mutex_deadlock_account_lock(lock, proxy_owner);
@@ -1129,7 +1847,6 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
* @lock: the rt_mutex to take
* @waiter: the pre-initialized rt_mutex_waiter
* @task: the task to prepare
- * @detect_deadlock: perform deadlock detection (1) or not (0)
*
* Returns:
* 0 - task blocked on lock
@@ -1140,7 +1857,7 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
*/
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
- struct task_struct *task, int detect_deadlock)
+ struct task_struct *task)
{
int ret;

@@ -1151,8 +1868,38 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
return 1;
}

+#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * In PREEMPT_RT there's an added race.
+ * If the task, that we are about to requeue, times out,
+ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
+ * to skip this task. But right after the task sets
+ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
+ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
+ * This will replace the PI_WAKEUP_INPROGRESS with the actual
+ * lock that it blocks on. We *must not* place this task
+ * on this proxy lock in that case.
+ *
+ * To prevent this race, we first take the task's pi_lock
+ * and check if it has updated its pi_blocked_on. If it has,
+ * we assume that it woke up and we return -EAGAIN.
+ * Otherwise, we set the task's pi_blocked_on to
+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
+ * it will know that we are in the process of requeuing it.
+ */
+ raw_spin_lock_irq(&task->pi_lock);
+ if (task->pi_blocked_on) {
+ raw_spin_unlock_irq(&task->pi_lock);
+ raw_spin_unlock(&lock->wait_lock);
+ return -EAGAIN;
+ }
+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
+ raw_spin_unlock_irq(&task->pi_lock);
+#endif
+
/* We enforce deadlock detection for futexes */
- ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
+ ret = task_blocks_on_rt_mutex(lock, waiter, task,
+ RT_MUTEX_FULL_CHAINWALK);

if (ret && !rt_mutex_owner(lock)) {
/*
@@ -1198,22 +1945,20 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
* rt_mutex_finish_proxy_lock() - Complete lock acquisition
* @lock: the rt_mutex we were woken on
* @to: the timeout, null if none. hrtimer should already have
- * been started.
+ * been started.
* @waiter: the pre-initialized rt_mutex_waiter
- * @detect_deadlock: perform deadlock detection (1) or not (0)
*
* Complete the lock acquisition started on our behalf by another thread.
*
* Returns:
* 0 - success
- * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
+ * <0 - error, one of -EINTR, -ETIMEDOUT
*
* Special API call for PI-futex requeue support
*/
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *to,
- struct rt_mutex_waiter *waiter,
- int detect_deadlock)
+ struct rt_mutex_waiter *waiter)
{
int ret;

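The lateral-steal rule implemented by lock_is_stealable() above is small enough to restate on its own. A sketch of the same decision in plain C (priorities are kernel view, lower value = higher priority; is_rt stands in for rt_task()):

    #define STEAL_NORMAL  0
    #define STEAL_LATERAL 1

    /* Sketch: may a task with priority t take the lock ahead of the
     * top waiter with priority w? RT tasks never steal laterally,
     * which keeps the top waiter's latency bounded. */
    static int model_lock_is_stealable(int t, int w, int is_rt, int mode)
    {
            if (mode == STEAL_NORMAL || is_rt)
                    return t < w;   /* strictly higher priority required */
            return t <= w;          /* lateral: equal priority may steal */
    }

The spin_lock slow paths pass STEAL_LATERAL and everything else passes STEAL_NORMAL, which is exactly the difference between the __try_to_take_rt_mutex() call sites above.
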
diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h
index f6a1f3c133b1..c4060584c407 100644
--- a/kernel/rtmutex.h
+++ b/kernel/rtmutex.h
@@ -22,10 +22,15 @@
#define debug_rt_mutex_init(m, n) do { } while (0)
#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)
#define debug_rt_mutex_print_deadlock(w) do { } while (0)
-#define debug_rt_mutex_detect_deadlock(w,d) (d)
#define debug_rt_mutex_reset_waiter(w) do { } while (0)

static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
{
WARN(1, "rtmutex deadlock detected\n");
}
+
+static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *w,
+ enum rtmutex_chainwalk walk)
+{
+ return walk == RT_MUTEX_FULL_CHAINWALK;
+}
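The non-debug variant above maps the chainwalk enum straight to a boolean, so converting an old integer detect_deadlock flag at a call site is a one-liner. A hedged sketch (to_chainwalk() is an illustrative helper, not part of the patch):

    enum rtmutex_chainwalk {
            RT_MUTEX_MIN_CHAINWALK,
            RT_MUTEX_FULL_CHAINWALK,
    };

    /* Sketch: old-style flag -> chainwalk enum. The futex paths in
     * this patch always request the full walk; other callers use
     * the minimum walk. */
    static enum rtmutex_chainwalk to_chainwalk(int detect_deadlock)
    {
            return detect_deadlock ? RT_MUTEX_FULL_CHAINWALK
                                   : RT_MUTEX_MIN_CHAINWALK;
    }
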
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
index 53a66c85261b..213c7bf3c0d9 100644
--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h
@@ -49,6 +49,7 @@ struct rt_mutex_waiter {
struct plist_node pi_list_entry;
struct task_struct *task;
struct rt_mutex *lock;
+ bool savestate;
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
@@ -101,8 +102,26 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
}

/*
+ * Constants for rt mutex functions which have a selectable deadlock
+ * detection.
+ *
+ * RT_MUTEX_MIN_CHAINWALK: Stops the lock chain walk when there are
+ * no further PI adjustments to be made.
+ *
+ * RT_MUTEX_FULL_CHAINWALK: Invoke deadlock detection with a full
+ * walk of the lock chain.
+ */
+enum rtmutex_chainwalk {
+ RT_MUTEX_MIN_CHAINWALK,
+ RT_MUTEX_FULL_CHAINWALK,
+};
+
+/*
* PI-futex support (proxy locking functions, etc.):
*/
+#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
+#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
+
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
@@ -110,12 +129,11 @@ extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
struct task_struct *proxy_owner);
extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
- struct task_struct *task,
- int detect_deadlock);
+ struct task_struct *task);
extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *to,
- struct rt_mutex_waiter *waiter,
- int detect_deadlock);
+ struct rt_mutex_waiter *waiter);
+extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
@@ -123,4 +141,12 @@ extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
# include "rtmutex.h"
#endif

+static inline void
+rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
+{
+ debug_rt_mutex_init_waiter(waiter);
+ waiter->task = NULL;
+ waiter->savestate = savestate;
+}
+
#endif
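PI_WAKEUP_INPROGRESS and PI_REQUEUE_INPROGRESS are small non-NULL constants, so any "is this a real waiter" test has to exclude them as well as NULL. The patch uses such a predicate, rt_mutex_real_waiter(), whose definition is not visible in these hunks; the intended check is presumably along these lines (an assumption, not quoted from the patch):

    #define PI_WAKEUP_INPROGRESS  ((struct rt_mutex_waiter *) 1)
    #define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)

    /* Sketch (assumed): a waiter pointer is "real" only if it is
     * neither NULL nor one of the two in-progress markers used by
     * the futex requeue code. */
    static inline bool rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
    {
            return waiter && waiter != PI_WAKEUP_INPROGRESS &&
                   waiter != PI_REQUEUE_INPROGRESS;
    }
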
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 9a7dd35102a3..20b5b4a9a27a 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -12,6 +12,7 @@ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
endif

obj-y += core.o clock.o idle_task.o fair.o rt.o stop_task.o
+obj-y += work-simple.o
obj-$(CONFIG_SMP) += cpupri.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2f8363e0a1ec..47fff2de5d5b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -263,7 +263,11 @@ late_initcall(sched_init_debug);
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
const_debug unsigned int sysctl_sched_nr_migrate = 32;
+#else
+const_debug unsigned int sysctl_sched_nr_migrate = 8;
+#endif

/*
* period over which we average the RT time consumption, measured
@@ -480,6 +484,7 @@ static void init_rq_hrtick(struct rq *rq)

hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
+ rq->hrtick_timer.irqsafe = 1;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
@@ -1167,7 +1172,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
- if (match_state && unlikely(p->state != match_state))
+ if (match_state && unlikely(p->state != match_state)
+ && unlikely(p->saved_state != match_state))
return 0;
cpu_relax();
}
@@ -1182,7 +1188,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
running = task_running(rq, p);
on_rq = p->on_rq;
ncsw = 0;
- if (!match_state || p->state == match_state)
+ if (!match_state || p->state == match_state
+ || p->saved_state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &flags);

@@ -1397,10 +1404,6 @@ static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
activate_task(rq, p, en_flags);
p->on_rq = 1;
-
- /* if a worker is waking up, notify workqueue */
- if (p->flags & PF_WQ_WORKER)
- wq_worker_waking_up(p, cpu_of(rq));
}

/*
@@ -1583,8 +1586,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)

smp_wmb();
raw_spin_lock_irqsave(&p->pi_lock, flags);
- if (!(p->state & state))
+ if (!(p->state & state)) {
+ /*
+ * The task might be running due to a spinlock sleeper
+ * wakeup. Check the saved state and set it to running
+ * if the wakeup condition is true.
+ */
+ if (!(wake_flags & WF_LOCK_SLEEPER)) {
+ if (p->saved_state & state) {
+ p->saved_state = TASK_RUNNING;
+ success = 1;
+ }
+ }
goto out;
+ }
+
+ /*
+ * If this is a regular wakeup, then we can unconditionally
+ * clear the saved state of a "lock sleeper".
+ */
+ if (!(wake_flags & WF_LOCK_SLEEPER))
+ p->saved_state = TASK_RUNNING;

success = 1; /* we're going to change ->state */
cpu = task_cpu(p);
@@ -1640,42 +1662,6 @@ out:
}

/**
- * try_to_wake_up_local - try to wake up a local task with rq lock held
- * @p: the thread to be awakened
- *
- * Put @p on the run-queue if it's not already there. The caller must
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
-static void try_to_wake_up_local(struct task_struct *p)
-{
- struct rq *rq = task_rq(p);
-
- if (WARN_ON_ONCE(rq != this_rq()) ||
- WARN_ON_ONCE(p == current))
- return;
-
- lockdep_assert_held(&rq->lock);
-
- if (!raw_spin_trylock(&p->pi_lock)) {
- raw_spin_unlock(&rq->lock);
- raw_spin_lock(&p->pi_lock);
- raw_spin_lock(&rq->lock);
- }
-
- if (!(p->state & TASK_NORMAL))
- goto out;
-
- if (!p->on_rq)
- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-
- ttwu_do_wakeup(rq, p, 0);
- ttwu_stat(p, smp_processor_id(), 0);
-out:
- raw_spin_unlock(&p->pi_lock);
-}
-
-/**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
@@ -1693,6 +1679,18 @@ int wake_up_process(struct task_struct *p)
}
EXPORT_SYMBOL(wake_up_process);

+/**
+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
+ * @p: The process to be woken up.
+ *
+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
+ * the nature of the wakeup.
+ */
+int wake_up_lock_sleeper(struct task_struct *p)
+{
+ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
+}
+
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
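Under PREEMPT_RT a task effectively carries two state words: ->state, owned by the sleeping-lock machinery, and ->saved_state, holding whatever the task was doing before it hit the lock. The rules try_to_wake_up() applies above can be modelled in isolation (a sketch, not kernel code; lock_sleeper corresponds to WF_LOCK_SLEEPER):

    enum { MODEL_RUNNING = 0 };

    struct task_model {
            unsigned int state;       /* set by the sleeping lock */
            unsigned int saved_state; /* state before blocking on the lock */
    };

    /* Sketch: returns 1 when the wakeup took effect. */
    static int model_try_to_wake_up(struct task_model *p,
                                    unsigned int wake_state, int lock_sleeper)
    {
            if (!(p->state & wake_state)) {
                    /* A real wakeup may still hit the saved state of a
                     * task blocked on a sleeping spinlock. */
                    if (!lock_sleeper && (p->saved_state & wake_state)) {
                            p->saved_state = MODEL_RUNNING;
                            return 1;
                    }
                    return 0;
            }
            /* A regular wakeup unconditionally clears the saved state
             * of a "lock sleeper". */
            if (!lock_sleeper)
                    p->saved_state = MODEL_RUNNING;
            p->state = MODEL_RUNNING;
            return 1;
    }
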
@@ -1968,8 +1966,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
finish_arch_post_lock_switch();

fire_sched_in_preempt_notifiers(current);
+ /*
+ * We use mmdrop_delayed() here so we don't have to do the
+ * full __mmdrop() when we are the last user.
+ */
if (mm)
- mmdrop(mm);
+ mmdrop_delayed(mm);
if (unlikely(prev_state == TASK_DEAD)) {
/*
* Remove function-return probe instances associated with this
@@ -3266,8 +3268,13 @@ void __kprobes add_preempt_count(int val)
DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
PREEMPT_MASK - 10);
#endif
- if (preempt_count() == val)
- trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+ if (preempt_count() == val) {
+ unsigned long ip = get_parent_ip(CALLER_ADDR1);
+#ifdef CONFIG_DEBUG_PREEMPT
+ current->preempt_disable_ip = ip;
+#endif
+ trace_preempt_off(CALLER_ADDR0, ip);
+ }
}
EXPORT_SYMBOL(add_preempt_count);

@@ -3310,6 +3317,13 @@ static noinline void __schedule_bug(struct task_struct *prev)
print_modules();
if (irqs_disabled())
print_irqtrace_events(prev);
+#ifdef DEBUG_PREEMPT
+ if (in_atomic_preempt_off()) {
+ pr_err("Preemption disabled at:");
+ print_ip_sym(current->preempt_disable_ip);
+ pr_cont("\n");
+ }
+#endif
dump_stack();
}

@@ -3332,6 +3346,126 @@ static inline void schedule_debug(struct task_struct *prev)
schedstat_inc(this_rq(), sched_count);
}

+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
+#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */
+#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
+#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
+
+static inline void update_migrate_disable(struct task_struct *p)
+{
+ const struct cpumask *mask;
+
+ if (likely(!p->migrate_disable))
+ return;
+
+ /* Did we already update affinity? */
+ if (unlikely(migrate_disabled_updated(p)))
+ return;
+
+ /*
+ * Since this is always current we can get away with only locking
+ * rq->lock, the ->cpus_allowed value can normally only be changed
+ * while holding both p->pi_lock and rq->lock, but seeing that this
+ * is current, we cannot actually be waking up, so all code that
+ * relies on serialization against p->pi_lock is out of scope.
+ *
+ * Having rq->lock serializes us against things like
+ * set_cpus_allowed_ptr() that can still happen concurrently.
+ */
+ mask = tsk_cpus_allowed(p);
+
+ if (p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
+
+ /* Let migrate_enable know to fix things back up */
+ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
+}
+
+void migrate_disable(void)
+{
+ struct task_struct *p = current;
+
+ if (in_atomic()) {
+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+#endif
+ return;
+ }
+
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+ preempt_disable();
+ if (p->migrate_disable) {
+ p->migrate_disable++;
+ preempt_enable();
+ return;
+ }
+
+ pin_current_cpu();
+ p->migrate_disable = 1;
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+ struct task_struct *p = current;
+ const struct cpumask *mask;
+ unsigned long flags;
+ struct rq *rq;
+
+ if (in_atomic()) {
+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+#endif
+ return;
+ }
+
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+
+ preempt_disable();
+ if (migrate_disable_count(p) > 1) {
+ p->migrate_disable--;
+ preempt_enable();
+ return;
+ }
+
+ if (unlikely(migrate_disabled_updated(p))) {
+ /*
+ * Undo whatever update_migrate_disable() did, also see there
+ * about locking.
+ */
+ rq = this_rq();
+ raw_spin_lock_irqsave(&rq->lock, flags);
+
+ /*
+ * Clearing migrate_disable causes tsk_cpus_allowed to
+ * show the task's original cpu affinity.
+ */
+ p->migrate_disable = 0;
+ mask = tsk_cpus_allowed(p);
+ if (p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ } else
+ p->migrate_disable = 0;
+
+ unpin_current_cpu();
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+#else
+static inline void update_migrate_disable(struct task_struct *p) { }
+#define migrate_disabled_updated(p) 0
+#endif
+
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
if (prev->on_rq || rq->skip_clock_update < 0)
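migrate_disable()/migrate_enable() nest like preempt_disable(), and only the outermost pair pins or unpins the CPU; the expensive affinity rewrite is deferred until the scheduler runs update_migrate_disable(). The nesting rule alone looks like this (a user-space sketch; pin_current_cpu()/unpin_current_cpu() are only named in comments):

    static __thread int migrate_disable_depth;

    static void model_migrate_disable(void)
    {
            if (migrate_disable_depth++ == 0) {
                    /* outermost call: pin_current_cpu() in the real code */
            }
    }

    static void model_migrate_enable(void)
    {
            if (--migrate_disable_depth == 0) {
                    /* outermost call: unpin_current_cpu() in the real code */
            }
    }
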
@@ -3391,6 +3525,8 @@ need_resched:
|
|
|
|
raw_spin_lock_irq(&rq->lock);
|
|
|
|
+ update_migrate_disable(prev);
|
|
+
|
|
switch_count = &prev->nivcsw;
|
|
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
|
|
if (unlikely(signal_pending_state(prev->state, prev))) {
|
|
@@ -3398,19 +3534,6 @@ need_resched:
|
|
} else {
|
|
deactivate_task(rq, prev, DEQUEUE_SLEEP);
|
|
prev->on_rq = 0;
|
|
-
|
|
- /*
|
|
- * If a worker went to sleep, notify and ask workqueue
|
|
- * whether it wants to wake up a task to maintain
|
|
- * concurrency.
|
|
- */
|
|
- if (prev->flags & PF_WQ_WORKER) {
|
|
- struct task_struct *to_wakeup;
|
|
-
|
|
- to_wakeup = wq_worker_sleeping(prev, cpu);
|
|
- if (to_wakeup)
|
|
- try_to_wake_up_local(to_wakeup);
|
|
- }
|
|
}
|
|
switch_count = &prev->nvcsw;
|
|
}
|
|
@@ -3451,9 +3574,22 @@ need_resched:
|
|
|
|
static inline void sched_submit_work(struct task_struct *tsk)
|
|
{
|
|
- if (!tsk->state || tsk_is_pi_blocked(tsk))
|
|
+ if (!tsk->state)
|
|
return;
|
|
/*
|
|
+ * If a worker went to sleep, notify and ask workqueue whether
|
|
+ * it wants to wake up a task to maintain concurrency.
|
|
+ * Only call wake up if prev isn't blocked on a sleeping
|
|
+ * spin lock.
|
|
+ */
|
|
+ if (tsk->flags & PF_WQ_WORKER && !tsk->saved_state)
|
|
+ wq_worker_sleeping(tsk);
|
|
+
|
|
+
|
|
+ if (tsk_is_pi_blocked(tsk))
|
|
+ return;
|
|
+
|
|
+ /*
|
|
* If we are going to sleep and we have plugged IO queued,
|
|
* make sure to submit it to avoid deadlocks.
|
|
*/
|
|
@@ -3461,12 +3597,19 @@ static inline void sched_submit_work(struct task_struct *tsk)
|
|
blk_schedule_flush_plug(tsk);
|
|
}
|
|
|
|
+static inline void sched_update_worker(struct task_struct *tsk)
|
|
+{
|
|
+ if (tsk->flags & PF_WQ_WORKER)
|
|
+ wq_worker_running(tsk);
|
|
+}
|
|
+
|
|
asmlinkage void __sched schedule(void)
|
|
{
|
|
struct task_struct *tsk = current;
|
|
|
|
sched_submit_work(tsk);
|
|
__schedule();
|
|
+ sched_update_worker(tsk);
|
|
}
|
|
EXPORT_SYMBOL(schedule);
|
|
|
|
@@ -3546,7 +3689,16 @@ asmlinkage void __sched notrace preempt_schedule(void)
|
|
|
|
do {
|
|
add_preempt_count_notrace(PREEMPT_ACTIVE);
|
|
+ /*
|
|
+ * The add/subtract must not be traced by the function
|
|
+ * tracer. But we still want to account for the
|
|
+ * preempt off latency tracer. Since the _notrace versions
|
|
+ * of add/subtract skip the accounting for latency tracer
|
|
+ * we must force it manually.
|
|
+ */
|
|
+ start_critical_timings();
|
|
__schedule();
|
|
+ stop_critical_timings();
|
|
sub_preempt_count_notrace(PREEMPT_ACTIVE);
|
|
|
|
/*
|
|
@@ -3991,7 +4143,8 @@ EXPORT_SYMBOL(sleep_on_timeout);
|
|
* This function changes the 'effective' priority of a task. It does
|
|
* not touch ->normal_prio like __setscheduler().
|
|
*
|
|
- * Used by the rt_mutex code to implement priority inheritance logic.
|
|
+ * Used by the rt_mutex code to implement priority inheritance
|
|
+ * logic. Call site only calls if the priority of the task changed.
|
|
*/
|
|
void rt_mutex_setprio(struct task_struct *p, int prio)
|
|
{
|
|
@@ -4217,20 +4370,25 @@ static struct task_struct *find_process_by_pid(pid_t pid)
|
|
return pid ? find_task_by_vpid(pid) : current;
|
|
}
|
|
|
|
-/* Actually do priority change: must hold rq lock. */
|
|
-static void
|
|
-__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
|
|
+static void __setscheduler_params(struct task_struct *p, int policy, int prio)
|
|
{
|
|
p->policy = policy;
|
|
p->rt_priority = prio;
|
|
p->normal_prio = normal_prio(p);
|
|
+ set_load_weight(p);
|
|
+}
|
|
+
|
|
+/* Actually do priority change: must hold rq lock. */
|
|
+static void
|
|
+__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
|
|
+{
|
|
+ __setscheduler_params(p, policy, prio);
|
|
/* we are holding p->pi_lock already */
|
|
p->prio = rt_mutex_getprio(p);
|
|
if (rt_prio(p->prio))
|
|
p->sched_class = &rt_sched_class;
|
|
else
|
|
p->sched_class = &fair_sched_class;
|
|
- set_load_weight(p);
|
|
}
|
|
|
|
/*
|
|
@@ -4255,6 +4413,7 @@ static bool check_same_owner(struct task_struct *p)
|
|
static int __sched_setscheduler(struct task_struct *p, int policy,
|
|
const struct sched_param *param, bool user)
|
|
{
|
|
+ int newprio = MAX_RT_PRIO - 1 - param->sched_priority;
|
|
int retval, oldprio, oldpolicy = -1, on_rq, running;
|
|
unsigned long flags;
|
|
const struct sched_class *prev_class;
|
|
@@ -4350,11 +4509,13 @@ recheck:
|
|
}
|
|
|
|
/*
|
|
- * If not changing anything there's no need to proceed further:
|
|
+ * If not changing anything there's no need to proceed
|
|
+ * further, but store a possible modification of
|
|
+ * reset_on_fork.
|
|
*/
|
|
if (unlikely(policy == p->policy && (!rt_policy(policy) ||
|
|
param->sched_priority == p->rt_priority))) {
|
|
-
|
|
+ p->sched_reset_on_fork = reset_on_fork;
|
|
__task_rq_unlock(rq);
|
|
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
|
|
return 0;
|
|
@@ -4381,6 +4542,25 @@ recheck:
|
|
task_rq_unlock(rq, p, &flags);
|
|
goto recheck;
|
|
}
|
|
+
|
|
+ p->sched_reset_on_fork = reset_on_fork;
|
|
+ oldprio = p->prio;
|
|
+
|
|
+ /*
|
|
+ * Special case for priority boosted tasks.
|
|
+ *
|
|
+ * If the new priority is lower or equal (user space view)
|
|
+ * than the current (boosted) priority, we just store the new
|
|
+ * normal parameters and do not touch the scheduler class and
|
|
+ * the runqueue. This will be done when the task deboost
|
|
+ * itself.
|
|
+ */
|
|
+ if (rt_mutex_check_prio(p, newprio)) {
|
|
+ __setscheduler_params(p, policy, param->sched_priority);
|
|
+ task_rq_unlock(rq, p, &flags);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
on_rq = p->on_rq;
|
|
running = task_current(rq, p);
|
|
if (on_rq)
|
|
@@ -4388,17 +4568,18 @@ recheck:
|
|
if (running)
|
|
p->sched_class->put_prev_task(rq, p);
|
|
|
|
- p->sched_reset_on_fork = reset_on_fork;
|
|
-
|
|
- oldprio = p->prio;
|
|
prev_class = p->sched_class;
|
|
__setscheduler(rq, p, policy, param->sched_priority);
|
|
|
|
if (running)
|
|
p->sched_class->set_curr_task(rq);
|
|
- if (on_rq)
|
|
- enqueue_task(rq, p, 0);
|
|
-
|
|
+ if (on_rq) {
|
|
+ /*
|
|
+ * We enqueue to tail when the priority of a task is
|
|
+ * increased (user space view).
|
|
+ */
|
|
+ enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
|
|
+ }
|
|
check_class_changed(rq, p, prev_class, oldprio);
|
|
task_rq_unlock(rq, p, &flags);
|
|
|
|
@@ -4744,9 +4925,17 @@ static inline int should_resched(void)
|
|
|
|
static void __cond_resched(void)
|
|
{
|
|
- add_preempt_count(PREEMPT_ACTIVE);
|
|
- __schedule();
|
|
- sub_preempt_count(PREEMPT_ACTIVE);
|
|
+ do {
|
|
+ add_preempt_count(PREEMPT_ACTIVE);
|
|
+ __schedule();
|
|
+ sub_preempt_count(PREEMPT_ACTIVE);
|
|
+ /*
|
|
+ * Check again in case we missed a preemption
|
|
+ * opportunity between schedule and now.
|
|
+ */
|
|
+ barrier();
|
|
+
|
|
+ } while (need_resched());
|
|
}
|
|
|
|
int __sched _cond_resched(void)
|
|
@@ -4787,6 +4976,7 @@ int __cond_resched_lock(spinlock_t *lock)
|
|
}
|
|
EXPORT_SYMBOL(__cond_resched_lock);
|
|
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
int __sched __cond_resched_softirq(void)
|
|
{
|
|
BUG_ON(!in_softirq());
|
|
@@ -4800,6 +4990,7 @@ int __sched __cond_resched_softirq(void)
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL(__cond_resched_softirq);
+#endif

/**
 * yield - yield the current processor to other threads.
@@ -5133,6 +5324,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
	rcu_read_unlock();

	rq->curr = rq->idle = idle;
+	idle->on_rq = 1;
#if defined(CONFIG_SMP)
	idle->on_cpu = 1;
#endif
@@ -5154,11 +5346,90 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
-	if (p->sched_class && p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-
+	if (!migrate_disabled_updated(p)) {
+		if (p->sched_class && p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, new_mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+	}
	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+}
+
+static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
+static DEFINE_MUTEX(sched_down_mutex);
+static cpumask_t sched_down_cpumask;
+
+void tell_sched_cpu_down_begin(int cpu)
+{
+	mutex_lock(&sched_down_mutex);
+	cpumask_set_cpu(cpu, &sched_down_cpumask);
+	mutex_unlock(&sched_down_mutex);
+}
+
+void tell_sched_cpu_down_done(int cpu)
+{
+	mutex_lock(&sched_down_mutex);
+	cpumask_clear_cpu(cpu, &sched_down_cpumask);
+	mutex_unlock(&sched_down_mutex);
+}
+
+/**
+ * migrate_me - try to move the current task off this cpu
+ *
+ * Used by the pin_current_cpu() code to try to get tasks
+ * to move off the current CPU as it is going down.
+ * It will only move the task if the task isn't pinned to
+ * the CPU (with migrate_disable, affinity or THREAD_BOUND)
+ * and the task has to be in a RUNNING state. Otherwise the
+ * movement of the task will wake it up (change its state
+ * to running) when the task did not expect it.
+ *
+ * Returns 1 if it succeeded in moving the current task,
+ * 0 otherwise.
+ */
+int migrate_me(void)
+{
+	struct task_struct *p = current;
+	struct migration_arg arg;
+	struct cpumask *cpumask;
+	struct cpumask *mask;
+	unsigned long flags;
+	unsigned int dest_cpu;
+	struct rq *rq;
+
+	/*
+	 * We cannot migrate tasks bound to a CPU or tasks that are
+	 * not running. Moving the task would wake it up.
+	 */
+	if (p->flags & PF_THREAD_BOUND || p->state)
+		return 0;
+
+	mutex_lock(&sched_down_mutex);
+	rq = task_rq_lock(p, &flags);
+
+	cpumask = &__get_cpu_var(sched_cpumasks);
+	mask = &p->cpus_allowed;
+
+	cpumask_andnot(cpumask, mask, &sched_down_cpumask);
+
+	if (!cpumask_weight(cpumask)) {
+		/* It's only on this CPU? */
+		task_rq_unlock(rq, p, &flags);
+		mutex_unlock(&sched_down_mutex);
+		return 0;
+	}
+
+	dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
+
+	arg.task = p;
+	arg.dest_cpu = dest_cpu;
+
+	task_rq_unlock(rq, p, &flags);
+
+	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+	tlb_migrate_finish(p->mm);
+	mutex_unlock(&sched_down_mutex);
+
+	return 1;
}

/*
@@ -5209,7 +5480,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpumask_test_cpu(task_cpu(p), new_mask))
+	if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
		goto out;

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
@@ -5298,6 +5569,8 @@ static int migration_cpu_stop(void *data)

#ifdef CONFIG_HOTPLUG_CPU

+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
+
/*
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
@@ -5310,7 +5583,12 @@ void idle_task_exit(void)

	if (mm != &init_mm)
		switch_mm(mm, &init_mm, current);
-	mmdrop(mm);
+
+	/*
+	 * Defer the cleanup to an alive cpu. On RT we can neither
+	 * call mmdrop() nor mmdrop_delayed() from here.
+	 */
+	per_cpu(idle_last_mm, smp_processor_id()) = mm;
}

/*
@@ -5637,6 +5915,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
		migrate_nr_uninterruptible(rq);
		calc_global_load_remove(rq);
		break;
+	case CPU_DEAD:
+		if (per_cpu(idle_last_mm, cpu)) {
+			mmdrop(per_cpu(idle_last_mm, cpu));
+			per_cpu(idle_last_mm, cpu) = NULL;
+		}
+		break;
#endif
	}

@@ -7306,7 +7590,8 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
-	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
+	int nested = (preempt_count() & ~PREEMPT_ACTIVE) +
+		sched_rcu_preempt_depth();

	return (nested == preempt_offset);
}
@@ -7316,7 +7601,8 @@ void __might_sleep(const char *file, int line, int preempt_offset)
	static unsigned long prev_jiffy;	/* ratelimiting */

	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
-	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
+	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
+	    !is_idle_task(current)) ||
	    oops_in_progress)
		return;
	if (system_state != SYSTEM_RUNNING &&
@@ -7334,6 +7620,13 @@ void __might_sleep(const char *file, int line, int preempt_offset)
	debug_show_held_locks(current);
	if (irqs_disabled())
		print_irqtrace_events(current);
+#ifdef DEBUG_PREEMPT
+	if (!preempt_count_equals(preempt_offset)) {
+		pr_err("Preemption disabled at:");
+		print_ip_sym(current->preempt_disable_ip);
+		pr_cont("\n");
+	}
+#endif
	dump_stack();
}
EXPORT_SYMBOL(__might_sleep);
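
Editorial note: the pin_current_cpu() side referenced in the migrate_me() kerneldoc is not part of this hunk. A rough sketch of the expected caller pattern follows; this_cpu_going_down() is a hypothetical stand-in for the real hotplug check, not an API from the patch.

/*
 * Editorial sketch only, not part of the patch. A task noticing its cpu
 * is going down retries migrate_me() until it either moved away or turns
 * out to be pinned.
 */
static void pin_current_cpu_sketch(void)
{
	while (this_cpu_going_down(raw_smp_processor_id())) {
		if (migrate_me())
			continue;	/* now on another, live cpu: recheck */
		break;			/* pinned or not runnable: wait it out */
	}
	/* ... proceed to take the per-cpu hotplug reference ... */
}
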
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2f9ac8ff1e80..4edcce4ce082 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -245,6 +245,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);
+#ifdef CONFIG_SMP
+	P(rt_nr_migratory);
+#endif

#undef PN
#undef P
@@ -493,6 +496,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
	P(se.load.weight);
	P(policy);
	P(prio);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	P(migrate_disable);
+#endif
+	P(rt.nr_cpus_allowed);
#undef PN
#undef __PN
#undef P
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index de00a486c5c6..27afd1e8662d 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -60,11 +60,15 @@ SCHED_FEAT(OWNER_SPIN, true)
 */
SCHED_FEAT(NONTASK_POWER, true)

+#ifndef CONFIG_PREEMPT_RT_FULL
/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
+#else
+SCHED_FEAT(TTWU_QUEUE, false)
+#endif

SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 71429db2af2f..89a985a89bcb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -41,6 +41,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rt_b->rt_period_timer.irqsafe = 1;
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}
diff --git a/kernel/sched/work-simple.c b/kernel/sched/work-simple.c
new file mode 100644
index 000000000000..c996f755dba6
--- /dev/null
+++ b/kernel/sched/work-simple.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
+ *
+ * Provides a framework for enqueuing callbacks from irq context
+ * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context.
+ */
+
+#include <linux/wait-simple.h>
+#include <linux/work-simple.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define SWORK_EVENT_PENDING     (1 << 0)
+
+static DEFINE_MUTEX(worker_mutex);
+static struct sworker *glob_worker;
+
+struct sworker {
+	struct list_head events;
+	struct swait_head wq;
+
+	raw_spinlock_t lock;
+
+	struct task_struct *task;
+	int refs;
+};
+
+static bool swork_readable(struct sworker *worker)
+{
+	bool r;
+
+	if (kthread_should_stop())
+		return true;
+
+	raw_spin_lock_irq(&worker->lock);
+	r = !list_empty(&worker->events);
+	raw_spin_unlock_irq(&worker->lock);
+
+	return r;
+}
+
+static int swork_kthread(void *arg)
+{
+	struct sworker *worker = arg;
+
+	for (;;) {
+		swait_event_interruptible(worker->wq,
+					swork_readable(worker));
+		if (kthread_should_stop())
+			break;
+
+		raw_spin_lock_irq(&worker->lock);
+		while (!list_empty(&worker->events)) {
+			struct swork_event *sev;
+
+			sev = list_first_entry(&worker->events,
+					struct swork_event, item);
+			list_del(&sev->item);
+			raw_spin_unlock_irq(&worker->lock);
+
+			WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
+							 &sev->flags));
+			sev->func(sev);
+			raw_spin_lock_irq(&worker->lock);
+		}
+		raw_spin_unlock_irq(&worker->lock);
+	}
+	return 0;
+}
+
+static struct sworker *swork_create(void)
+{
+	struct sworker *worker;
+
+	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+	if (!worker)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&worker->events);
+	raw_spin_lock_init(&worker->lock);
+	init_swait_head(&worker->wq);
+
+	worker->task = kthread_run(swork_kthread, worker, "kswork");
+	if (IS_ERR(worker->task)) {
+		kfree(worker);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return worker;
+}
+
+static void swork_destroy(struct sworker *worker)
+{
+	kthread_stop(worker->task);
+
+	WARN_ON(!list_empty(&worker->events));
+	kfree(worker);
+}
+
+/**
+ * swork_queue - queue swork
+ *
+ * Returns %false if @work was already on a queue, %true otherwise.
+ *
+ * The work is queued and processed on a random CPU
+ */
+bool swork_queue(struct swork_event *sev)
+{
+	unsigned long flags;
+
+	if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
+		return false;
+
+	raw_spin_lock_irqsave(&glob_worker->lock, flags);
+	list_add_tail(&sev->item, &glob_worker->events);
+	raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
+
+	swait_wake(&glob_worker->wq);
+	return true;
+}
+EXPORT_SYMBOL_GPL(swork_queue);
+
+/**
+ * swork_get - get an instance of the sworker
+ *
+ * Returns a negative error code if the initialization of the worker
+ * failed, %0 otherwise.
+ *
+ */
+int swork_get(void)
+{
+	struct sworker *worker;
+
+	mutex_lock(&worker_mutex);
+	if (!glob_worker) {
+		worker = swork_create();
+		if (IS_ERR(worker)) {
+			mutex_unlock(&worker_mutex);
+			return -ENOMEM;
+		}
+
+		glob_worker = worker;
+	}
+
+	glob_worker->refs++;
+	mutex_unlock(&worker_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(swork_get);
+
+/**
+ * swork_put - puts an instance of the sworker
+ *
+ * Will destroy the sworker thread. This function must not be called until all
+ * queued events have been completed.
+ */
+void swork_put(void)
+{
+	mutex_lock(&worker_mutex);
+
+	glob_worker->refs--;
+	if (glob_worker->refs > 0)
+		goto out;
+
+	swork_destroy(glob_worker);
+	glob_worker = NULL;
+out:
+	mutex_unlock(&worker_mutex);
+}
+EXPORT_SYMBOL_GPL(swork_put);
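
Editorial note: a minimal usage sketch of the swork API above, not part of the patch. The swork_event field names (item, flags, func) follow the accesses in work-simple.c; linux/work-simple.h may provide an initializer helper instead of the open-coded setup assumed here.

#include <linux/work-simple.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

static struct swork_event flush_event;

static void flush_event_fn(struct swork_event *sev)
{
	/* runs in the kswork kthread, may take sleeping locks on RT */
	pr_info("deferred flush ran\n");
}

static int __init consumer_init(void)
{
	int ret = swork_get();		/* create or ref the global worker */

	if (ret)
		return ret;
	INIT_LIST_HEAD(&flush_event.item);
	flush_event.flags = 0;
	flush_event.func = flush_event_fn;
	return 0;
}

static irqreturn_t consumer_irq(int irq, void *dev_id)
{
	swork_queue(&flush_event);	/* safe from hard irq context */
	return IRQ_HANDLED;
}

static void __exit consumer_exit(void)
{
	swork_put();			/* drop ref, worker destroyed at zero */
}
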
diff --git a/kernel/signal.c b/kernel/signal.c
index 959df4f8d874..13e98dbe0a8c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -345,13 +345,45 @@ static bool task_participate_group_stop(struct task_struct *task)
	return false;
}

+#ifdef __HAVE_ARCH_CMPXCHG
+static inline struct sigqueue *get_task_cache(struct task_struct *t)
+{
+	struct sigqueue *q = t->sigqueue_cache;
+
+	if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
+		return NULL;
+	return q;
+}
+
+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
+{
+	if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
+		return 0;
+	return 1;
+}
+
+#else
+
+static inline struct sigqueue *get_task_cache(struct task_struct *t)
+{
+	return NULL;
+}
+
+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
+{
+	return 1;
+}
+
+#endif
+
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
+		    int override_rlimit, int fromslab)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
@@ -368,7 +400,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
-		q = kmem_cache_alloc(sigqueue_cachep, flags);
+		if (!fromslab)
+			q = get_task_cache(t);
+		if (!q)
+			q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}
@@ -385,6 +420,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
	return q;
}

+static struct sigqueue *
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
+		 int override_rlimit)
+{
+	return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
+}
+
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
@@ -394,6 +436,21 @@ static void __sigqueue_free(struct sigqueue *q)
	kmem_cache_free(sigqueue_cachep, q);
}

+static void sigqueue_free_current(struct sigqueue *q)
+{
+	struct user_struct *up;
+
+	if (q->flags & SIGQUEUE_PREALLOC)
+		return;
+
+	up = q->user;
+	if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
+		atomic_dec(&up->sigpending);
+		free_uid(up);
+	} else
+		__sigqueue_free(q);
+}
+
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;
@@ -407,6 +464,21 @@ void flush_sigqueue(struct sigpending *queue)
}

/*
+ * Called from __exit_signal. Flush tsk->pending and
+ * tsk->sigqueue_cache
+ */
+void flush_task_sigqueue(struct task_struct *tsk)
+{
+	struct sigqueue *q;
+
+	flush_sigqueue(&tsk->pending);
+
+	q = get_task_cache(tsk);
+	if (q)
+		kmem_cache_free(sigqueue_cachep, q);
+}
+
+/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
@@ -558,7 +630,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
-		__sigqueue_free(first);
+		sigqueue_free_current(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
@@ -604,6 +676,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

+	WARN_ON_ONCE(tsk != current);
+
	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
@@ -683,6 +757,10 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
+
+	if (unlikely(t == current))
+		return;
+
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
@@ -1232,8 +1310,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
-int
-force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+static int
+do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
@@ -1258,6 +1336,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
	return ret;
}

+int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+{
+/*
+ * On some archs, PREEMPT_RT has to delay sending a signal from a trap
+ * since it cannot enable preemption, and the signal code's spin_locks
+ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
+ * send the signal on exit of the trap.
+ */
+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
+	if (in_atomic()) {
+		if (WARN_ON_ONCE(t != current))
+			return 0;
+		if (WARN_ON_ONCE(t->forced_info.si_signo))
+			return 0;
+
+		if (is_si_special(info)) {
+			WARN_ON_ONCE(info != SEND_SIG_PRIV);
+			t->forced_info.si_signo = sig;
+			t->forced_info.si_errno = 0;
+			t->forced_info.si_code = SI_KERNEL;
+			t->forced_info.si_pid = 0;
+			t->forced_info.si_uid = 0;
+		} else {
+			t->forced_info = *info;
+		}
+
+		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+		return 0;
+	}
+#endif
+	return do_force_sig_info(sig, info, t);
+}
+
/*
 * Nuke all other threads in the group.
 */
@@ -1288,12 +1399,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
	struct sighand_struct *sighand;

	for (;;) {
-		local_irq_save(*flags);
+		local_irq_save_nort(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
-			local_irq_restore(*flags);
+			local_irq_restore_nort(*flags);
			break;
		}

@@ -1304,7 +1415,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
-		local_irq_restore(*flags);
+		local_irq_restore_nort(*flags);
	}

	return sighand;
@@ -1551,7 +1662,8 @@ EXPORT_SYMBOL(kill_pid);
 */
struct sigqueue *sigqueue_alloc(void)
{
-	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
+	/* Preallocated sigqueue objects always come from the slabcache! */
+	struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;
@@ -1910,15 +2022,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

-		/*
-		 * Don't want to allow preemption here, because
-		 * sys_ptrace() needs this task to be inactive.
-		 *
-		 * XXX: implement read_unlock_no_resched().
-		 */
-		preempt_disable();
		read_unlock(&tasklist_lock);
-		preempt_enable_no_resched();
		schedule();
	} else {
		/*
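
Editorial note: the get_task_cache()/put_task_cache() pair above is a lock-free single-slot cache. The same cmpxchg() pattern, distilled with generic types (a sketch, not part of the patch):

/* One cached object per owner; NULL means the slot is empty. */
static inline void *slot_get(void **slot)
{
	void *obj = *slot;

	/* claim the slot only if it still holds what we read */
	if (cmpxchg(slot, obj, NULL) != obj)
		return NULL;		/* lost a race, caller falls back */
	return obj;			/* may be NULL: slot was empty */
}

static inline int slot_put(void **slot, void *obj)
{
	/* install only into an empty slot; returns 0 on success */
	return cmpxchg(slot, NULL, obj) != NULL;
}
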
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 5cc401e6d3b4..ecf0ab0f7b4d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -21,9 +21,11 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
+#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>
+#include <linux/locallock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -61,6 +63,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

+#ifdef CONFIG_NO_HZ
+# ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * On preempt-rt a softirq might be blocked on a lock. There might be
+ * no other runnable task on this CPU because the lock owner runs on
+ * some other CPU. So we have to go into idle with the pending bit
+ * set. Therefore we need to check this, otherwise we warn about false
+ * positives which confuses users and defeats the whole purpose of
+ * this test.
+ *
+ * This code is called with interrupts disabled.
+ */
+void softirq_check_pending_idle(void)
+{
+	static int rate_limit;
+	u32 warnpending = 0, pending = local_softirq_pending();
+
+	if (rate_limit >= 10)
+		return;
+
+	if (pending) {
+		struct task_struct *tsk;
+
+		tsk = __get_cpu_var(ksoftirqd);
+		/*
+		 * The wakeup code in rtmutex.c wakes up the task
+		 * _before_ it sets pi_blocked_on to NULL under
+		 * tsk->pi_lock. So we need to check for both: state
+		 * and pi_blocked_on.
+		 */
+		raw_spin_lock(&tsk->pi_lock);
+
+		if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
+			warnpending = 1;
+
+		raw_spin_unlock(&tsk->pi_lock);
+	}
+
+	if (warnpending) {
+		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+		       pending);
+		rate_limit++;
+	}
+}
+# else
+/*
+ * On !PREEMPT_RT we just printk rate limited:
+ */
+void softirq_check_pending_idle(void)
+{
+	static int rate_limit;
+
+	if (rate_limit < 10) {
+		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+		       local_softirq_pending());
+		rate_limit++;
+	}
+}
+# endif
+#endif
+
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
@@ -76,6 +139,36 @@ static void wakeup_softirqd(void)
		wake_up_process(tsk);
}

+static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
+{
+	struct softirq_action *h = softirq_vec;
+	unsigned int prev_count = preempt_count();
+
+	local_irq_enable();
+	for ( ; pending; h++, pending >>= 1) {
+		unsigned int vec_nr = h - softirq_vec;
+
+		if (!(pending & 1))
+			continue;
+
+		kstat_incr_softirqs_this_cpu(vec_nr);
+		trace_softirq_entry(vec_nr);
+		h->action(h);
+		trace_softirq_exit(vec_nr);
+		if (unlikely(prev_count != preempt_count())) {
+			printk(KERN_ERR
+			       "huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n",
+			       vec_nr, softirq_to_name[vec_nr], h->action,
+			       prev_count, (unsigned int) preempt_count());
+			preempt_count() = prev_count;
+		}
+		if (need_rcu_bh_qs)
+			rcu_bh_qs(cpu);
+	}
+	local_irq_disable();
+}
+
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
@@ -209,9 +302,46 @@ EXPORT_SYMBOL(local_bh_enable_ip);
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

+#ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * Convoluted means of passing __do_softirq() a message through the various
+ * architecture execute_on_stack() bits.
+ *
+ * When we run softirqs from irq_exit() and thus on the hardirq stack we need
+ * to keep the lockdep irq context tracking as tight as possible in order to
+ * not mis-qualify lock contexts and miss possible deadlocks.
+ */
+static DEFINE_PER_CPU(int, softirq_from_hardirq);
+
+static inline void lockdep_softirq_from_hardirq(void)
+{
+	this_cpu_write(softirq_from_hardirq, 1);
+}
+
+static inline void lockdep_softirq_start(void)
+{
+	if (this_cpu_read(softirq_from_hardirq))
+		trace_hardirq_exit();
+	lockdep_softirq_enter();
+}
+
+static inline void lockdep_softirq_end(void)
+{
+	lockdep_softirq_exit();
+	if (this_cpu_read(softirq_from_hardirq)) {
+		this_cpu_write(softirq_from_hardirq, 0);
+		trace_hardirq_enter();
+	}
+}
+
+#else
+static inline void lockdep_softirq_from_hardirq(void) { }
+static inline void lockdep_softirq_start(void) { }
+static inline void lockdep_softirq_end(void) { }
+#endif
+
asmlinkage void __do_softirq(void)
{
-	struct softirq_action *h;
	__u32 pending;
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	int cpu;
@@ -221,44 +351,15 @@ asmlinkage void __do_softirq(void)
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
-				SOFTIRQ_OFFSET);
-	lockdep_softirq_enter();
+			   SOFTIRQ_OFFSET);
+	lockdep_softirq_start();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

-	local_irq_enable();
-
-	h = softirq_vec;
-
-	do {
-		if (pending & 1) {
-			unsigned int vec_nr = h - softirq_vec;
-			int prev_count = preempt_count();
-
-			kstat_incr_softirqs_this_cpu(vec_nr);
-
-			trace_softirq_entry(vec_nr);
-			h->action(h);
-			trace_softirq_exit(vec_nr);
-			if (unlikely(prev_count != preempt_count())) {
-				printk(KERN_ERR "huh, entered softirq %u %s %p"
-				       "with preempt_count %08x,"
-				       " exited with %08x?\n", vec_nr,
-				       softirq_to_name[vec_nr], h->action,
-				       prev_count, preempt_count());
-				preempt_count() = prev_count;
-			}
-
-			rcu_bh_qs(cpu);
-		}
-		h++;
-		pending >>= 1;
-	} while (pending);
-
-	local_irq_disable();
+	handle_pending_softirqs(pending, cpu, 1);

	pending = local_softirq_pending();
	if (pending) {
@@ -269,12 +370,32 @@ restart:
		wakeup_softirqd();
	}

-	lockdep_softirq_exit();
+	lockdep_softirq_end();

	account_system_vtime(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
}

+/*
+ * Called with preemption disabled from run_ksoftirqd()
+ */
+static int ksoftirqd_do_softirq(int cpu)
+{
+	/*
+	 * Preempt disable stops cpu going offline.
+	 * If already offline, we'll be on wrong CPU:
+	 * don't process.
+	 */
+	if (cpu_is_offline(cpu))
+		return -1;
+
+	local_irq_disable();
+	if (local_softirq_pending())
+		__do_softirq();
+	local_irq_enable();
+	return 0;
+}
+
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
@@ -297,6 +418,191 @@ asmlinkage void do_softirq(void)

#endif

+static inline void local_bh_disable_nort(void) { local_bh_disable(); }
+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
+static inline void ksoftirqd_set_sched_params(void) { }
+static inline void ksoftirqd_clr_sched_params(void) { }
+
+#else /* !PREEMPT_RT_FULL */
+
+/*
+ * On RT we serialize softirq execution with a cpu local lock
+ */
+static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
+static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
+
+static void __do_softirq_common(int need_rcu_bh_qs);
+
+void __do_softirq(void)
+{
+	__do_softirq_common(0);
+}
+
+void __init softirq_early_init(void)
+{
+	local_irq_lock_init(local_softirq_lock);
+}
+
+void local_bh_disable(void)
+{
+	migrate_disable();
+	current->softirq_nestcnt++;
+}
+EXPORT_SYMBOL(local_bh_disable);
+
+void local_bh_enable(void)
+{
+	if (WARN_ON(current->softirq_nestcnt == 0))
+		return;
+
+	if ((current->softirq_nestcnt == 1) &&
+	    local_softirq_pending() &&
+	    local_trylock(local_softirq_lock)) {
+
+		local_irq_disable();
+		if (local_softirq_pending())
+			__do_softirq();
+		local_irq_enable();
+		local_unlock(local_softirq_lock);
+		WARN_ON(current->softirq_nestcnt != 1);
+	}
+	current->softirq_nestcnt--;
+	migrate_enable();
+}
+EXPORT_SYMBOL(local_bh_enable);
+
+void local_bh_enable_ip(unsigned long ip)
+{
+	local_bh_enable();
+}
+EXPORT_SYMBOL(local_bh_enable_ip);
+
+void _local_bh_enable(void)
+{
+	current->softirq_nestcnt--;
+	migrate_enable();
+}
+EXPORT_SYMBOL(_local_bh_enable);
+
+/* For tracing */
+int notrace __in_softirq(void)
+{
+	if (__get_cpu_var(local_softirq_lock).owner == current)
+		return __get_cpu_var(local_softirq_lock).nestcnt;
+	return 0;
+}
+
+int in_serving_softirq(void)
+{
+	int res;
+
+	preempt_disable();
+	res = __get_cpu_var(local_softirq_runner) == current;
+	preempt_enable();
+	return res;
+}
+EXPORT_SYMBOL(in_serving_softirq);
+
+/*
+ * Called with bh and local interrupts disabled. For full RT cpu must
+ * be pinned.
+ */
+static void __do_softirq_common(int need_rcu_bh_qs)
+{
+	u32 pending = local_softirq_pending();
+	int cpu = smp_processor_id();
+
+	current->softirq_nestcnt++;
+
+	/* Reset the pending bitmask before enabling irqs */
+	set_softirq_pending(0);
+
+	__get_cpu_var(local_softirq_runner) = current;
+
+	lockdep_softirq_enter();
+
+	handle_pending_softirqs(pending, cpu, need_rcu_bh_qs);
+
+	pending = local_softirq_pending();
+	if (pending)
+		wakeup_softirqd();
+
+	lockdep_softirq_exit();
+	__get_cpu_var(local_softirq_runner) = NULL;
+
+	current->softirq_nestcnt--;
+}
+
+static int __thread_do_softirq(int cpu)
+{
+	/*
+	 * Prevent the current cpu from going offline.
+	 * pin_current_cpu() can reenable preemption and block on the
+	 * hotplug mutex. When it returns, the current cpu is
+	 * pinned. It might be the wrong one, but the offline check
+	 * below catches that.
+	 */
+	pin_current_cpu();
+	/*
+	 * If called from ksoftirqd (cpu >= 0) we need to check
+	 * whether we are on the wrong cpu due to cpu offlining. If
+	 * called via thread_do_softirq() no action required.
+	 */
+	if (cpu >= 0 && cpu_is_offline(cpu)) {
+		unpin_current_cpu();
+		return -1;
+	}
+	preempt_enable();
+	local_lock(local_softirq_lock);
+	local_irq_disable();
+	/*
+	 * We cannot switch stacks on RT as we want to be able to
+	 * schedule!
+	 */
+	if (local_softirq_pending())
+		__do_softirq_common(cpu >= 0);
+	local_unlock(local_softirq_lock);
+	unpin_current_cpu();
+	preempt_disable();
+	local_irq_enable();
+	return 0;
+}
+
+/*
+ * Called from netif_rx_ni(). Preemption enabled.
+ */
+void thread_do_softirq(void)
+{
+	if (!in_serving_softirq()) {
+		preempt_disable();
+		__thread_do_softirq(-1);
+		preempt_enable();
+	}
+}
+
+static int ksoftirqd_do_softirq(int cpu)
+{
+	return __thread_do_softirq(cpu);
+}
+
+static inline void local_bh_disable_nort(void) { }
+static inline void _local_bh_enable_nort(void) { }
+
+static inline void ksoftirqd_set_sched_params(void)
+{
+	struct sched_param param = { .sched_priority = 1 };
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+}
+
+static inline void ksoftirqd_clr_sched_params(void)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	sched_setscheduler(current, SCHED_NORMAL, &param);
+}
+
+#endif /* PREEMPT_RT_FULL */
/*
 * Enter an interrupt context.
 */
@@ -310,9 +616,9 @@ void irq_enter(void)
	 * Prevent raise_softirq from needlessly waking up ksoftirqd
	 * here, as softirq will be serviced on return from interrupt.
	 */
-	local_bh_disable();
+	local_bh_disable_nort();
	tick_check_idle(cpu);
-	_local_bh_enable();
+	_local_bh_enable_nort();
}

	__irq_enter();
@@ -320,7 +626,9 @@ void irq_enter(void)

static inline void invoke_softirq(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
	if (!force_irqthreads) {
+		lockdep_softirq_from_hardirq();
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
		__do_softirq();
#else
@@ -332,6 +640,9 @@ static inline void invoke_softirq(void)
		wakeup_softirqd();
		__local_bh_enable(SOFTIRQ_OFFSET);
	}
+#else
+	wakeup_softirqd();
+#endif
}

/*
@@ -340,7 +651,6 @@ static inline void invoke_softirq(void)
void irq_exit(void)
{
	account_system_vtime(current);
-	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();
@@ -351,6 +661,7 @@ void irq_exit(void)
	tick_nohz_irq_exit();
#endif
	rcu_irq_exit();
+	trace_hardirq_exit(); /* must be last! */
	sched_preempt_enable_no_resched();
}

@@ -406,15 +717,45 @@ struct tasklet_head
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

+static void inline
+__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
+{
+	if (tasklet_trylock(t)) {
+again:
+		/* We may have been preempted before tasklet_trylock
+		 * and __tasklet_action may have already run.
+		 * So double check the sched bit while the tasklet
+		 * is locked before adding it to the list.
+		 */
+		if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
+			t->next = NULL;
+			*head->tail = t;
+			head->tail = &(t->next);
+			raise_softirq_irqoff(nr);
+			tasklet_unlock(t);
+		} else {
+			/* This is subtle. If we hit the corner case above
+			 * It is possible that we get preempted right here,
+			 * and another task has successfully called
+			 * tasklet_schedule(), then this function, and
+			 * failed on the trylock. Thus we must be sure
+			 * before releasing the tasklet lock, that the
+			 * SCHED_BIT is clear. Otherwise the tasklet
+			 * may get its SCHED_BIT set, but not added to the
+			 * list
+			 */
+			if (!tasklet_tryunlock(t))
+				goto again;
+		}
+	}
+}
+
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
-	t->next = NULL;
-	*__this_cpu_read(tasklet_vec.tail) = t;
-	__this_cpu_write(tasklet_vec.tail, &(t->next));
-	raise_softirq_irqoff(TASKLET_SOFTIRQ);
+	__tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

@@ -425,10 +766,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
	unsigned long flags;

	local_irq_save(flags);
-	t->next = NULL;
-	*__this_cpu_read(tasklet_hi_vec.tail) = t;
-	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
-	raise_softirq_irqoff(HI_SOFTIRQ);
+	__tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ);
	local_irq_restore(flags);
}

@@ -436,50 +774,119 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
-	BUG_ON(!irqs_disabled());
-
-	t->next = __this_cpu_read(tasklet_hi_vec.head);
-	__this_cpu_write(tasklet_hi_vec.head, t);
-	__raise_softirq_irqoff(HI_SOFTIRQ);
+	__tasklet_hi_schedule(t);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

-static void tasklet_action(struct softirq_action *a)
+void tasklet_enable(struct tasklet_struct *t)
{
-	struct tasklet_struct *list;
+	if (!atomic_dec_and_test(&t->count))
+		return;
+	if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
+		tasklet_schedule(t);
+}

-	local_irq_disable();
-	list = __this_cpu_read(tasklet_vec.head);
-	__this_cpu_write(tasklet_vec.head, NULL);
-	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
-	local_irq_enable();
+EXPORT_SYMBOL(tasklet_enable);
+
+void tasklet_hi_enable(struct tasklet_struct *t)
+{
+	if (!atomic_dec_and_test(&t->count))
+		return;
+	if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
+		tasklet_hi_schedule(t);
+}
+
+EXPORT_SYMBOL(tasklet_hi_enable);
+
+static void
+__tasklet_action(struct softirq_action *a, struct tasklet_struct *list)
+{
+	int loops = 1000000;

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

-		if (tasklet_trylock(t)) {
-			if (!atomic_read(&t->count)) {
-				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-					BUG();
-				t->func(t->data);
-				tasklet_unlock(t);
-				continue;
-			}
-			tasklet_unlock(t);
+		/*
+		 * Should always succeed - after a tasklet got on the
+		 * list (after getting the SCHED bit set from 0 to 1),
+		 * nothing but the tasklet softirq it got queued to can
+		 * lock it:
+		 */
+		if (!tasklet_trylock(t)) {
+			WARN_ON(1);
+			continue;
		}

-		local_irq_disable();
		t->next = NULL;
-		*__this_cpu_read(tasklet_vec.tail) = t;
-		__this_cpu_write(tasklet_vec.tail, &(t->next));
-		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
-		local_irq_enable();
+
+		/*
+		 * If we cannot handle the tasklet because it's disabled,
+		 * mark it as pending. tasklet_enable() will later
+		 * re-schedule the tasklet.
+		 */
+		if (unlikely(atomic_read(&t->count))) {
+out_disabled:
+			/* implicit unlock: */
+			wmb();
+			t->state = TASKLET_STATEF_PENDING;
+			continue;
+		}
+
+		/*
+		 * After this point on the tasklet might be rescheduled
+		 * on another CPU, but it can only be added to another
+		 * CPU's tasklet list if we unlock the tasklet (which we
+		 * don't do yet).
+		 */
+		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+			WARN_ON(1);
+
+again:
+		t->func(t->data);
+
+		/*
+		 * Try to unlock the tasklet. We must use cmpxchg, because
+		 * another CPU might have scheduled or disabled the tasklet.
+		 * We only allow the STATE_RUN -> 0 transition here.
+		 */
+		while (!tasklet_tryunlock(t)) {
+			/*
+			 * If it got disabled meanwhile, bail out:
+			 */
+			if (atomic_read(&t->count))
+				goto out_disabled;
+			/*
+			 * If it got scheduled meanwhile, re-execute
+			 * the tasklet function:
+			 */
+			if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+				goto again;
+			if (!--loops) {
+				printk("hm, tasklet state: %08lx\n", t->state);
+				WARN_ON(1);
+				tasklet_unlock(t);
+				break;
+			}
+		}
	}
}

+static void tasklet_action(struct softirq_action *a)
+{
+	struct tasklet_struct *list;
+
+	local_irq_disable();
+	list = __get_cpu_var(tasklet_vec).head;
+	__get_cpu_var(tasklet_vec).head = NULL;
+	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+	local_irq_enable();
+
+	__tasklet_action(a, list);
+}
+
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;
@@ -490,29 +897,7 @@ static void tasklet_hi_action(struct softirq_action *a)
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

-	while (list) {
-		struct tasklet_struct *t = list;
-
-		list = list->next;
-
-		if (tasklet_trylock(t)) {
-			if (!atomic_read(&t->count)) {
-				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-					BUG();
-				t->func(t->data);
-				tasklet_unlock(t);
-				continue;
-			}
-			tasklet_unlock(t);
-		}
-
-		local_irq_disable();
-		t->next = NULL;
-		*__this_cpu_read(tasklet_hi_vec.tail) = t;
-		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
-		__raise_softirq_irqoff(HI_SOFTIRQ);
-		local_irq_enable();
-	}
+	__tasklet_action(a, list);
}


@@ -535,7 +920,7 @@ void tasklet_kill(struct tasklet_struct *t)

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
-			yield();
+			msleep(1);
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
@@ -741,28 +1126,39 @@ void __init softirq_init(void)
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+		/*
+		 * Hack for now to avoid this busy-loop:
+		 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+		msleep(1);
+#else
+		barrier();
+#endif
+	}
+}
+EXPORT_SYMBOL(tasklet_unlock_wait);
+#endif
+
static int run_ksoftirqd(void * __bind_cpu)
{
+	ksoftirqd_set_sched_params();
+
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
-		if (!local_softirq_pending()) {
+		if (!local_softirq_pending())
			schedule_preempt_disabled();
-		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
-			/* Preempt disable stops cpu going offline.
-			   If already offline, we'll be on wrong CPU:
-			   don't process */
-			if (cpu_is_offline((long)__bind_cpu))
+			if (ksoftirqd_do_softirq((long) __bind_cpu))
				goto wait_to_die;
-			local_irq_disable();
-			if (local_softirq_pending())
-				__do_softirq();
-			local_irq_enable();
			sched_preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
@@ -776,6 +1172,7 @@ static int run_ksoftirqd(void * __bind_cpu)

wait_to_die:
	preempt_enable();
+	ksoftirqd_clr_sched_params();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
@@ -852,9 +1249,8 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create_on_node(run_ksoftirqd,
					   hcpu,
					   cpu_to_node(hotcpu),
@@ -867,19 +1263,16 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN: {
+	case CPU_POST_DEAD: {
		static const struct sched_param param = {
			.sched_priority = MAX_RT_PRIO-1
		};
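
Editorial note: on the RT side of the softirq.c changes, only the outermost local_bh_enable() (nestcnt 1 -> 0) runs pending softirqs. A sketch of the resulting nesting behaviour, not part of the patch:

static void bh_nesting_sketch(void)
{
	local_bh_disable();		/* migrate_disable(), nestcnt -> 1 */
	local_bh_disable();		/* nestcnt -> 2, nothing else */

	raise_softirq(TASKLET_SOFTIRQ);	/* stays pending for now */

	local_bh_enable();		/* nestcnt -> 1: pending work not run */
	local_bh_enable();		/* nestcnt -> 0: pending softirqs may
					 * now run inline, in task context */
}
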
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5cdd8065a3ce..da9775b3038f 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -110,8 +110,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
+
+#ifndef CONFIG_PREEMPT_RT_FULL
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
+#endif

#endif

@@ -195,6 +198,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif

+#ifndef CONFIG_PREEMPT_RT_FULL
+
#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
@@ -339,6 +344,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif

+#endif /* !PREEMPT_RT_FULL */
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2f194e965715..e98c70b7f073 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -29,12 +29,12 @@ struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
-	struct completion	completion;	/* fired if nr_todo reaches 0 */
+	struct task_struct	*waiter;	/* woken when nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */
	struct task_struct	*thread;	/* stopper thread */
@@ -47,7 +47,7 @@ static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
-	init_completion(&done->completion);
+	done->waiter = current;
}

/* signal completion unless @done is NULL */
@@ -56,8 +56,10 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
	if (done) {
		if (executed)
			done->executed = true;
-		if (atomic_dec_and_test(&done->nr_todo))
-			complete(&done->completion);
+		if (atomic_dec_and_test(&done->nr_todo)) {
+			wake_up_process(done->waiter);
+			done->waiter = NULL;
+		}
	}
}

@@ -67,7 +69,7 @@ static void cpu_stop_queue_work(struct cpu_stopper *stopper,
{
	unsigned long flags;

-	spin_lock_irqsave(&stopper->lock, flags);
+	raw_spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled) {
		list_add_tail(&work->list, &stopper->works);
@@ -75,7 +77,23 @@ static void cpu_stop_queue_work(struct cpu_stopper *stopper,
	} else
		cpu_stop_signal_done(work->done, false);

-	spin_unlock_irqrestore(&stopper->lock, flags);
+	raw_spin_unlock_irqrestore(&stopper->lock, flags);
+}
+
+static void wait_for_stop_done(struct cpu_stop_done *done)
+{
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	while (atomic_read(&done->nr_todo)) {
+		schedule();
+		set_current_state(TASK_UNINTERRUPTIBLE);
+	}
+	/*
+	 * We need to wait until cpu_stop_signal_done() has cleared
+	 * done->waiter.
+	 */
+	while (done->waiter)
+		cpu_relax();
+	set_current_state(TASK_RUNNING);
}

/**
@@ -109,7 +127,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
-	wait_for_completion(&done.completion);
+	wait_for_stop_done(&done);
	return done.executed ? done.ret : -ENOENT;
}

@@ -135,11 +153,12 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
+static DEFINE_MUTEX(stopper_lock);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);

static void queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
-				 struct cpu_stop_done *done)
+				 struct cpu_stop_done *done, bool inactive)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
@@ -153,15 +172,19 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
	}

	/*
-	 * Disable preemption while queueing to avoid getting
-	 * preempted by a stopper which might wait for other stoppers
-	 * to enter @fn which can lead to deadlock.
+	 * Make sure that all work is queued on all cpus before
+	 * any of the cpus can execute it.
	 */
-	preempt_disable();
+	if (!inactive) {
+		mutex_lock(&stopper_lock);
+	} else {
+		while (!mutex_trylock(&stopper_lock))
+			cpu_relax();
+	}
	for_each_cpu(cpu, cpumask)
		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
				    &per_cpu(stop_cpus_work, cpu));
-	preempt_enable();
+	mutex_unlock(&stopper_lock);
}

static int __stop_cpus(const struct cpumask *cpumask,
@@ -170,8 +193,8 @@ static int __stop_cpus(const struct cpumask *cpumask,
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
-	queue_stop_cpus_work(cpumask, fn, arg, &done);
-	wait_for_completion(&done.completion);
+	queue_stop_cpus_work(cpumask, fn, arg, &done, false);
+	wait_for_stop_done(&done);
	return done.executed ? done.ret : -ENOENT;
}

@@ -259,13 +282,13 @@ repeat:
	}

	work = NULL;
-	spin_lock_irq(&stopper->lock);
+	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
-	spin_unlock_irq(&stopper->lock);
+	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
@@ -275,6 +298,16 @@ repeat:

		__set_current_state(TASK_RUNNING);

+		/*
+		 * Wait until the stopper finished scheduling on all
+		 * cpus
+		 */
+		mutex_lock(&stopper_lock);
+		/*
+		 * Let other cpu threads continue as well
+		 */
+		mutex_unlock(&stopper_lock);
+
		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

@@ -289,7 +322,13 @@ repeat:
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

+		/*
+		 * Make sure that the wakeup and setting done->waiter
+		 * to NULL is atomic.
+		 */
+		local_irq_disable();
		cpu_stop_signal_done(done, true);
+		local_irq_enable();
	} else
		schedule();

@@ -317,6 +356,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
		if (IS_ERR(p))
			return notifier_from_errno(PTR_ERR(p));
		get_task_struct(p);
+		p->flags |= PF_STOMPER;
		kthread_bind(p, cpu);
		sched_set_stop_task(cpu, p);
		stopper->thread = p;
@@ -326,9 +366,9 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
		/* strictly unnecessary, as first user will wake it */
		wake_up_process(stopper->thread);
		/* mark enabled */
-		spin_lock_irq(&stopper->lock);
+		raw_spin_lock_irq(&stopper->lock);
		stopper->enabled = true;
-		spin_unlock_irq(&stopper->lock);
+		raw_spin_unlock_irq(&stopper->lock);
		break;

#ifdef CONFIG_HOTPLUG_CPU
@@ -341,11 +381,11 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
		/* kill the stopper */
		kthread_stop(stopper->thread);
		/* drain remaining works */
-		spin_lock_irq(&stopper->lock);
+		raw_spin_lock_irq(&stopper->lock);
		list_for_each_entry(work, &stopper->works, list)
			cpu_stop_signal_done(work->done, false);
		stopper->enabled = false;
-		spin_unlock_irq(&stopper->lock);
+		raw_spin_unlock_irq(&stopper->lock);
		/* release the stopper */
		put_task_struct(stopper->thread);
		stopper->thread = NULL;
@@ -376,7 +416,7 @@ static int __init cpu_stop_init(void)
	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

-		spin_lock_init(&stopper->lock);
+		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

@@ -566,11 +606,11 @@ int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
	set_state(&smdata, STOPMACHINE_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
-			     &done);
+			     &done, true);
	ret = stop_machine_cpu_stop(&smdata);

	/* Busy wait for completion. */
-	while (!completion_done(&done.completion))
+	while (atomic_read(&done.nr_todo))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
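
Editorial note: the caller's view of the API above, for reference. stop_one_cpu() queues the work and then blocks in wait_for_stop_done() until the stopper thread has signalled it. A sketch, not part of the patch:

static int quiesce_fn(void *arg)
{
	/* runs on the target cpu's stopper thread, preemption disabled */
	return 0;
}

static int quiesce_cpu(unsigned int cpu)
{
	/* returns quiesce_fn()'s result, or -ENOENT if it never executed */
	return stop_one_cpu(cpu, quiesce_fn, NULL);
}
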
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 955560e25e05..c84f1ceeff23 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -80,9 +80,9 @@ u64 get_jiffies_64(void)
	u64 ret;

	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqcount_begin(&xtime_seq);
		ret = jiffies_64;
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqcount_retry(&xtime_seq, seq));
	return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
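
Editorial note: the read side above is the standard seqcount retry loop. Paired with the writer convention used throughout this patch (a raw spinlock serializing writers around the seqcount), the full pattern looks like this sketch (not part of the patch; SEQCNT_ZERO is the initializer of this kernel era):

static seqcount_t demo_seq = SEQCNT_ZERO;	/* protects demo_value */
static DEFINE_RAW_SPINLOCK(demo_lock);		/* serializes writers */
static u64 demo_value;

static u64 demo_read(void)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&demo_seq);
		val = demo_value;
	} while (read_seqcount_retry(&demo_seq, seq));
	return val;
}

static void demo_write(u64 val)
{
	raw_spin_lock(&demo_lock);
	write_seqcount_begin(&demo_seq);
	demo_value = val;
	write_seqcount_end(&demo_seq);
	raw_spin_unlock(&demo_lock);
}
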
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index e32587e7a6ca..956a9594420e 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -10,6 +10,7 @@
#include <linux/workqueue.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
+#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/timex.h>
#include <linux/time.h>
@@ -22,7 +23,7 @@
 * NTP timekeeping variables:
 */

-DEFINE_SPINLOCK(ntp_lock);
+DEFINE_RAW_SPINLOCK(ntp_lock);


/* USER_HZ period (usecs): */
@@ -347,7 +348,7 @@ void ntp_clear(void)
{
	unsigned long flags;

-	spin_lock_irqsave(&ntp_lock, flags);
+	raw_spin_lock_irqsave(&ntp_lock, flags);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
@@ -361,7 +362,7 @@ void ntp_clear(void)

	/* Clear PPS state variables */
	pps_clear();
-	spin_unlock_irqrestore(&ntp_lock, flags);
+	raw_spin_unlock_irqrestore(&ntp_lock, flags);

}

@@ -371,9 +372,9 @@ u64 ntp_tick_length(void)
	unsigned long flags;
	s64 ret;

-	spin_lock_irqsave(&ntp_lock, flags);
+	raw_spin_lock_irqsave(&ntp_lock, flags);
	ret = tick_length;
-	spin_unlock_irqrestore(&ntp_lock, flags);
+	raw_spin_unlock_irqrestore(&ntp_lock, flags);
	return ret;
}

@@ -394,7 +395,7 @@ int second_overflow(unsigned long secs)
	int leap = 0;
	unsigned long flags;

-	spin_lock_irqsave(&ntp_lock, flags);
+	raw_spin_lock_irqsave(&ntp_lock, flags);

	/*
	 * Leap second processing. If in leap-insert state at the end of the
@@ -480,7 +481,7 @@ int second_overflow(unsigned long secs)


out:
-	spin_unlock_irqrestore(&ntp_lock, flags);
+	raw_spin_unlock_irqrestore(&ntp_lock, flags);

	return leap;
}
@@ -531,10 +532,49 @@ static void sync_cmos_clock(struct work_struct *work)
	schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
}

+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * RT cannot call schedule_delayed_work from real interrupt context.
+ * Need to make a thread to do the real work.
+ */
+static struct task_struct *cmos_delay_thread;
+static bool do_cmos_delay;
+
+static int run_cmos_delay(void *ignore)
+{
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (do_cmos_delay) {
+			do_cmos_delay = false;
+			schedule_delayed_work(&sync_cmos_work, 0);
+		}
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+}
+
+static void notify_cmos_timer(void)
+{
+	do_cmos_delay = true;
+	/* Make visible before waking up process */
+	smp_wmb();
+	wake_up_process(cmos_delay_thread);
+}
+
+static __init int create_cmos_delay_thread(void)
+{
+	cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd");
+	BUG_ON(!cmos_delay_thread);
+	return 0;
+}
+early_initcall(create_cmos_delay_thread);
+#else
static void notify_cmos_timer(void)
{
	schedule_delayed_work(&sync_cmos_work, 0);
}
+#endif /* CONFIG_PREEMPT_RT_FULL */

#else
static inline void notify_cmos_timer(void) { }
@@ -673,7 +713,7 @@ int do_adjtimex(struct timex *txc)

	getnstimeofday(&ts);

-	spin_lock_irq(&ntp_lock);
+	raw_spin_lock_irq(&ntp_lock);

	if (txc->modes & ADJ_ADJTIME) {
		long save_adjust = time_adjust;
@@ -715,7 +755,7 @@ int do_adjtimex(struct timex *txc)
	/* fill PPS status fields */
	pps_fill_timex(txc);

-	spin_unlock_irq(&ntp_lock);
+	raw_spin_unlock_irq(&ntp_lock);

	txc->time.tv_sec = ts.tv_sec;
	txc->time.tv_usec = ts.tv_nsec;
@@ -913,7 +953,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)

	pts_norm = pps_normalize_ts(*phase_ts);

-	spin_lock_irqsave(&ntp_lock, flags);
+	raw_spin_lock_irqsave(&ntp_lock, flags);

	/* clear the error bits, they will be set again if needed */
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
@@ -926,7 +966,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
	 * just start the frequency interval */
	if (unlikely(pps_fbase.tv_sec == 0)) {
		pps_fbase = *raw_ts;
-		spin_unlock_irqrestore(&ntp_lock, flags);
+		raw_spin_unlock_irqrestore(&ntp_lock, flags);
		return;
	}

@@ -941,7 +981,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
		time_status |= STA_PPSJITTER;
		/* restart the frequency calibration interval */
		pps_fbase = *raw_ts;
-		spin_unlock_irqrestore(&ntp_lock, flags);
+		raw_spin_unlock_irqrestore(&ntp_lock, flags);
		pr_err("hardpps: PPSJITTER: bad pulse\n");
		return;
	}
@@ -958,7 +998,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)

	hardpps_update_phase(pts_norm.nsec);

-	spin_unlock_irqrestore(&ntp_lock, flags);
+	raw_spin_unlock_irqrestore(&ntp_lock, flags);
}
EXPORT_SYMBOL(hardpps);

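
Editorial note: the notify_cmos_timer() replacement above is an instance of a generic RT pattern: hard-irq context sets a flag, publishes it with a write barrier, and wakes a kthread that does the sleeping work. Distilled into a sketch (not part of the patch):

static struct task_struct *defer_thread;
static bool defer_pending;

static int defer_fn(void *unused)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (defer_pending) {
			defer_pending = false;
			__set_current_state(TASK_RUNNING);
			/* ... work that may sleep goes here ... */
			continue;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void defer_kick(void)	/* callable from hard irq context */
{
	defer_pending = true;
	smp_wmb();		/* flag must be visible before the wakeup */
	wake_up_process(defer_thread);
}
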
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index ead79bce1258..538d45a556ea 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -63,13 +63,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
-		write_seqlock(&xtime_lock);
+		raw_spin_lock(&xtime_lock);
+		write_seqcount_begin(&xtime_seq);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
-		write_sequnlock(&xtime_lock);
+		write_seqcount_end(&xtime_seq);
+		raw_spin_unlock(&xtime_lock);
	}

	update_process_times(user_mode(get_irq_regs()));
@@ -130,9 +132,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
		ktime_t next;

		do {
-			seq = read_seqbegin(&xtime_lock);
+			seq = read_seqcount_begin(&xtime_seq);
			next = tick_next_period;
-		} while (read_seqretry(&xtime_lock, seq));
+		} while (read_seqcount_retry(&xtime_seq, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 4e265b901fed..c91100d99fb9 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -141,4 +141,5 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
#endif

extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
+extern raw_spinlock_t xtime_lock;
+extern seqcount_t xtime_seq;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
|
|
index 67106f88dca1..17c7e02c1316 100644
|
|
--- a/kernel/time/tick-sched.c
|
|
+++ b/kernel/time/tick-sched.c
|
|
@@ -56,7 +56,8 @@ static void tick_do_update_jiffies64(ktime_t now)
|
|
return;
|
|
|
|
/* Reevalute with xtime_lock held */
|
|
- write_seqlock(&xtime_lock);
|
|
+ raw_spin_lock(&xtime_lock);
|
|
+ write_seqcount_begin(&xtime_seq);
|
|
|
|
delta = ktime_sub(now, last_jiffies_update);
|
|
if (delta.tv64 >= tick_period.tv64) {
|
|
@@ -79,7 +80,8 @@ static void tick_do_update_jiffies64(ktime_t now)
|
|
/* Keep the tick_next_period variable up to date */
|
|
tick_next_period = ktime_add(last_jiffies_update, tick_period);
|
|
}
|
|
- write_sequnlock(&xtime_lock);
|
|
+ write_seqcount_end(&xtime_seq);
|
|
+ raw_spin_unlock(&xtime_lock);
|
|
}
|
|
|
|
/*
|
|
@@ -89,12 +91,14 @@ static ktime_t tick_init_jiffy_update(void)
|
|
{
|
|
ktime_t period;
|
|
|
|
- write_seqlock(&xtime_lock);
|
|
+ raw_spin_lock(&xtime_lock);
|
|
+ write_seqcount_begin(&xtime_seq);
|
|
/* Did we start the jiffies update yet ? */
|
|
if (last_jiffies_update.tv64 == 0)
|
|
last_jiffies_update = tick_next_period;
|
|
period = last_jiffies_update;
|
|
- write_sequnlock(&xtime_lock);
|
|
+ write_seqcount_end(&xtime_seq);
|
|
+ raw_spin_unlock(&xtime_lock);
|
|
return period;
|
|
}
|
|
|
|
@@ -305,24 +309,18 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
|
|
return;
|
|
|
|
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
|
|
- static int ratelimit;
|
|
-
|
|
- if (ratelimit < 10) {
|
|
- printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
|
|
- (unsigned int) local_softirq_pending());
|
|
- ratelimit++;
|
|
- }
|
|
+ softirq_check_pending_idle();
|
|
return;
|
|
}
|
|
|
|
ts->idle_calls++;
|
|
/* Read jiffies and the time when jiffies were updated last */
|
|
do {
|
|
- seq = read_seqbegin(&xtime_lock);
|
|
+ seq = read_seqcount_begin(&xtime_seq);
|
|
last_update = last_jiffies_update;
|
|
last_jiffies = jiffies;
|
|
time_delta = timekeeping_max_deferment();
|
|
- } while (read_seqretry(&xtime_lock, seq));
|
|
+ } while (read_seqcount_retry(&xtime_seq, seq));
|
|
|
|
if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
|
|
arch_needs_cpu(cpu)) {
|
|
@@ -824,6 +822,16 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
|
|
return HRTIMER_RESTART;
|
|
}
|
|
|
|
+static int sched_skew_tick;
|
|
+
|
|
+static int __init skew_tick(char *str)
|
|
+{
|
|
+ get_option(&str, &sched_skew_tick);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+early_param("skew_tick", skew_tick);
|
|
+
|
|
/**
|
|
* tick_setup_sched_timer - setup the tick emulation timer
|
|
*/
|
|
@@ -836,11 +844,20 @@ void tick_setup_sched_timer(void)
|
|
* Emulate tick processing via per-CPU hrtimers:
|
|
*/
|
|
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
|
|
+ ts->sched_timer.irqsafe = 1;
|
|
ts->sched_timer.function = tick_sched_timer;
|
|
|
|
/* Get the next period (per cpu) */
|
|
hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
|
|
|
|
+ /* Offset the tick to avert xtime_lock contention. */
|
|
+ if (sched_skew_tick) {
|
|
+ u64 offset = ktime_to_ns(tick_period) >> 1;
|
|
+ do_div(offset, num_possible_cpus());
|
|
+ offset *= smp_processor_id();
|
|
+ hrtimer_add_expires_ns(&ts->sched_timer, offset);
|
|
+ }
|
|
+
|
|
for (;;) {
|
|
hrtimer_forward(&ts->sched_timer, now, tick_period);
|
|
hrtimer_start_expires(&ts->sched_timer,
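The skew offset in the hunk above is plain integer arithmetic: half a tick period, divided evenly across the possible CPUs and scaled by the CPU id. A small stand-alone sketch (user-space C; HZ = 1000 and a four-CPU box are assumed example values, not taken from the patch) shows the numbers this produces:

/* Illustrative sketch, not part of the patch: per-CPU tick skew offsets.
 * Assumes a 1 ms tick (HZ = 1000) and 4 possible CPUs as example values. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tick_period_ns = 1000000;	/* 1 ms tick */
	unsigned int num_possible_cpus = 4;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus; cpu++) {
		/* same computation as tick_setup_sched_timer() above */
		uint64_t offset = (tick_period_ns >> 1) / num_possible_cpus * cpu;
		printf("cpu%u: skew offset %llu ns\n", cpu,
		       (unsigned long long)offset);
	}
	/* Prints 0, 125000, 250000 and 375000 ns: the per-CPU ticks are
	 * spread across half a period instead of all firing at the same
	 * instant and piling up on xtime_lock. */
	return 0;
}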
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 32f0cb8f1fe8..fc7b4f201c53 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -76,8 +76,9 @@ struct timekeeper {
/* Offset clock monotonic -> clock boottime */
ktime_t offs_boot;

- /* Seqlock for all timekeeper values */
- seqlock_t lock;
+ /* Open coded seqlock for all timekeeper values */
+ seqcount_t seq;
+ raw_spinlock_t lock;
};

static struct timekeeper timekeeper;
@@ -86,7 +87,8 @@ static struct timekeeper timekeeper;
* This read-write spinlock protects us from races in SMP while
* playing with xtime.
*/
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(xtime_lock);
+seqcount_t xtime_seq;


/* flag for if timekeeping is suspended */
@@ -243,7 +245,7 @@ void getnstimeofday(struct timespec *ts)
WARN_ON(timekeeping_suspended);

do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);

*ts = timekeeper.xtime;
nsecs = timekeeping_get_ns();
@@ -251,7 +253,7 @@ void getnstimeofday(struct timespec *ts)
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();

- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));

timespec_add_ns(ts, nsecs);
}
@@ -266,7 +268,7 @@ ktime_t ktime_get(void)
WARN_ON(timekeeping_suspended);

do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
secs = timekeeper.xtime.tv_sec +
timekeeper.wall_to_monotonic.tv_sec;
nsecs = timekeeper.xtime.tv_nsec +
@@ -275,7 +277,7 @@ ktime_t ktime_get(void)
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();

- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
/*
* Use ktime_set/ktime_add_ns to create a proper ktime on
* 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -301,14 +303,14 @@ void ktime_get_ts(struct timespec *ts)
WARN_ON(timekeeping_suspended);

do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts = timekeeper.xtime;
tomono = timekeeper.wall_to_monotonic;
nsecs = timekeeping_get_ns();
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();

- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));

set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec + nsecs);
@@ -336,7 +338,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
do {
u32 arch_offset;

- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);

*ts_raw = timekeeper.raw_time;
*ts_real = timekeeper.xtime;
@@ -349,7 +351,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
nsecs_raw += arch_offset;
nsecs_real += arch_offset;

- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));

timespec_add_ns(ts_raw, nsecs_raw);
timespec_add_ns(ts_real, nsecs_real);
@@ -388,7 +390,8 @@ int do_settimeofday(const struct timespec *tv)
if (!timespec_valid_strict(tv))
return -EINVAL;

- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);

timekeeping_forward_now();

@@ -400,7 +403,8 @@ int do_settimeofday(const struct timespec *tv)
timekeeper.xtime = *tv;
timekeeping_update(true);

- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);

/* signal hrtimers about time change */
clock_was_set();
@@ -426,7 +430,8 @@ int timekeeping_inject_offset(struct timespec *ts)
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;

- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);

timekeeping_forward_now();

@@ -443,7 +448,8 @@ int timekeeping_inject_offset(struct timespec *ts)
error: /* even if we error out, we forwarded the time, so call update */
timekeeping_update(true);

- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);

/* signal hrtimers about time change */
clock_was_set();
@@ -464,7 +470,8 @@ static int change_clocksource(void *data)

new = (struct clocksource *) data;

- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);

timekeeping_forward_now();
if (!new->enable || new->enable(new) == 0) {
@@ -475,7 +482,8 @@ static int change_clocksource(void *data)
}
timekeeping_update(true);

- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);

return 0;
}
@@ -522,11 +530,11 @@ void getrawmonotonic(struct timespec *ts)
s64 nsecs;

do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
nsecs = timekeeping_get_ns_raw();
*ts = timekeeper.raw_time;

- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));

timespec_add_ns(ts, nsecs);
}
@@ -542,11 +550,11 @@ int timekeeping_valid_for_hres(void)
int ret;

do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);

ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));

return ret;
}
@@ -559,11 +567,11 @@ u64 timekeeping_max_deferment(void)
unsigned long seq;
u64 ret;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);

ret = timekeeper.clock->max_idle_ns;

- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));

return ret;
}
@@ -623,11 +631,13 @@ void __init timekeeping_init(void)
boot.tv_nsec = 0;
}

- seqlock_init(&timekeeper.lock);
+ raw_spin_lock_init(&timekeeper.lock);
+ seqcount_init(&timekeeper.seq);

ntp_init();

- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
clock = clocksource_default_clock();
if (clock->enable)
clock->enable(clock);
@@ -646,7 +656,8 @@ void __init timekeeping_init(void)
update_rt_offset();
timekeeper.total_sleep_time.tv_sec = 0;
timekeeper.total_sleep_time.tv_nsec = 0;
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
}

/* time in seconds when suspend began */
@@ -700,7 +711,8 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
return;

- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);

timekeeping_forward_now();

@@ -708,7 +720,8 @@ void timekeeping_inject_sleeptime(struct timespec *delta)

timekeeping_update(true);

- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);

/* signal hrtimers about time change */
clock_was_set();
@@ -731,7 +744,8 @@ static void timekeeping_resume(void)

clocksource_resume();

- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);

if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
@@ -742,7 +756,8 @@ static void timekeeping_resume(void)
timekeeper.ntp_error = 0;
timekeeping_suspended = 0;
timekeeping_update(false);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);

touch_softlockup_watchdog();

@@ -760,7 +775,8 @@ static int timekeeping_suspend(void)

read_persistent_clock(&timekeeping_suspend_time);

- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
timekeeping_suspended = 1;

@@ -783,7 +799,8 @@ static int timekeeping_suspend(void)
timekeeping_suspend_time =
timespec_add(timekeeping_suspend_time, delta_delta);
}
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);

clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
clocksource_suspend();
@@ -1046,7 +1063,8 @@ static void update_wall_time(void)
unsigned int clock_set = 0;
unsigned long flags;

- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);

/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
@@ -1139,7 +1157,8 @@ static void update_wall_time(void)
timekeeping_update(false);

out:
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);

if (clock_set)
clock_was_set_delayed();
@@ -1188,13 +1207,13 @@ void get_monotonic_boottime(struct timespec *ts)
WARN_ON(timekeeping_suspended);

do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts = timekeeper.xtime;
tomono = timekeeper.wall_to_monotonic;
sleep = timekeeper.total_sleep_time;
nsecs = timekeeping_get_ns();

- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));

set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
(s64)ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
@@ -1245,10 +1264,10 @@ struct timespec current_kernel_time(void)
unsigned long seq;

do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);

now = timekeeper.xtime;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));

return now;
}
@@ -1260,11 +1279,11 @@ struct timespec get_monotonic_coarse(void)
unsigned long seq;

do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);

now = timekeeper.xtime;
mono = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));

set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
now.tv_nsec + mono.tv_nsec);
@@ -1296,11 +1315,11 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
unsigned long seq;

do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*xtim = timekeeper.xtime;
*wtom = timekeeper.wall_to_monotonic;
*sleep = timekeeper.total_sleep_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
}

#ifdef CONFIG_HIGH_RES_TIMERS
@@ -1319,7 +1338,7 @@ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
u64 secs, nsecs;

do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);

secs = timekeeper.xtime.tv_sec;
nsecs = timekeeper.xtime.tv_nsec;
@@ -1329,7 +1348,7 @@ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)

*offs_real = timekeeper.offs_real;
*offs_boot = timekeeper.offs_boot;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));

now = ktime_add_ns(ktime_set(secs, 0), nsecs);
now = ktime_sub(now, *offs_real);
@@ -1346,9 +1365,9 @@ ktime_t ktime_get_monotonic_offset(void)
struct timespec wtom;

do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
wtom = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));

return timespec_to_ktime(wtom);
}
@@ -1363,7 +1382,9 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
do_timer(ticks);
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
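Throughout this file the conversion follows one pattern: writers take the raw spinlock and bracket their updates with write_seqcount_begin()/write_seqcount_end(), while readers never take the lock and simply retry if the sequence changed under them. Splitting the seqlock this way lets the RT patch keep a raw (non-sleeping) lock on the writer side while the read side stays lock-free. A minimal user-space analogue (C11 atomics and pthreads stand in for the kernel primitives; a sketch of the idea, not the kernel implementation, and the data reads are left plain for brevity):

/* Illustrative sketch, not part of the patch: an open coded seqlock. */
#include <pthread.h>
#include <stdatomic.h>

struct timekeeper_sketch {
	pthread_mutex_t lock;		/* serializes writers only */
	atomic_uint seq;		/* odd while a write is in flight */
	long xtime_sec, xtime_nsec;	/* the protected data */
};

static void tk_write(struct timekeeper_sketch *tk, long sec, long nsec)
{
	pthread_mutex_lock(&tk->lock);
	atomic_fetch_add_explicit(&tk->seq, 1, memory_order_release);
	tk->xtime_sec = sec;		/* readers see seq as odd here */
	tk->xtime_nsec = nsec;
	atomic_fetch_add_explicit(&tk->seq, 1, memory_order_release);
	pthread_mutex_unlock(&tk->lock);
}

static void tk_read(struct timekeeper_sketch *tk, long *sec, long *nsec)
{
	unsigned int start;

	do {	/* lockless read side: retry if a writer interfered */
		start = atomic_load_explicit(&tk->seq, memory_order_acquire);
		*sec = tk->xtime_sec;
		*nsec = tk->xtime_nsec;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&tk->seq, memory_order_acquire));
}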
diff --git a/kernel/timer.c b/kernel/timer.c
index 87ff7b19d27f..c9b100922747 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -76,6 +76,9 @@ struct tvec_root {
struct tvec_base {
spinlock_t lock;
struct timer_list *running_timer;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ wait_queue_head_t wait_for_running_timer;
+#endif
unsigned long timer_jiffies;
unsigned long next_timer;
struct tvec_root tv1;
@@ -703,6 +706,36 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
}
}

+#ifndef CONFIG_PREEMPT_RT_FULL
+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
+ struct tvec_base *old,
+ struct tvec_base *new)
+{
+ /* See the comment in lock_timer_base() */
+ timer_set_base(timer, NULL);
+ spin_unlock(&old->lock);
+ spin_lock(&new->lock);
+ timer_set_base(timer, new);
+ return new;
+}
+#else
+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
+ struct tvec_base *old,
+ struct tvec_base *new)
+{
+ /*
+ * We cannot do the above because we might be preempted and
+ * then the preempter would see NULL and loop forever.
+ */
+ if (spin_trylock(&new->lock)) {
+ timer_set_base(timer, new);
+ spin_unlock(&old->lock);
+ return new;
+ }
+ return old;
+}
+#endif
+
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
bool pending_only, int pinned)
@@ -729,12 +762,15 @@ __mod_timer(struct timer_list *timer, unsigned long expires,

debug_activate(timer, expires);

+ preempt_disable_rt();
cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
cpu = get_nohz_timer_target();
#endif
+ preempt_enable_rt();
+
new_base = per_cpu(tvec_bases, cpu);

if (base != new_base) {
@@ -745,14 +781,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
* handler yet has not finished. This also guarantees that
* the timer is serialized wrt itself.
*/
- if (likely(base->running_timer != timer)) {
- /* See the comment in lock_timer_base() */
- timer_set_base(timer, NULL);
- spin_unlock(&base->lock);
- base = new_base;
- spin_lock(&base->lock);
- timer_set_base(timer, base);
- }
+ if (likely(base->running_timer != timer))
+ base = switch_timer_base(timer, base, new_base);
}

timer->expires = expires;
@@ -935,6 +965,29 @@ void add_timer_on(struct timer_list *timer, int cpu)
}
EXPORT_SYMBOL_GPL(add_timer_on);

+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * Wait for a running timer
+ */
+static void wait_for_running_timer(struct timer_list *timer)
+{
+ struct tvec_base *base = timer->base;
+
+ if (base->running_timer == timer)
+ wait_event(base->wait_for_running_timer,
+ base->running_timer != timer);
+}
+
+# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
+#else
+static inline void wait_for_running_timer(struct timer_list *timer)
+{
+ cpu_relax();
+}
+
+# define wakeup_timer_waiters(b) do { } while (0)
+#endif
+
/**
* del_timer - deactive a timer.
* @timer: the timer to be deactivated
@@ -1007,7 +1060,7 @@ out:
}
EXPORT_SYMBOL(try_to_del_timer_sync);

-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
@@ -1067,7 +1120,7 @@ int del_timer_sync(struct timer_list *timer)
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
- cpu_relax();
+ wait_for_running_timer(timer);
}
}
EXPORT_SYMBOL(del_timer_sync);
@@ -1178,10 +1231,11 @@ static inline void __run_timers(struct tvec_base *base)

spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn, data);
+ base->running_timer = NULL;
spin_lock_irq(&base->lock);
}
}
- base->running_timer = NULL;
+ wakeup_timer_waiters(base);
spin_unlock_irq(&base->lock);
}

@@ -1320,6 +1374,23 @@ unsigned long get_next_timer_interrupt(unsigned long now)
*/
if (cpu_is_offline(smp_processor_id()))
return now + NEXT_TIMER_MAX_DELTA;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * On PREEMPT_RT we cannot sleep here. If the trylock does not
+ * succeed then we return the worst-case 'expires in 1 tick'
+ * value. We use the rt functions here directly to avoid a
+ * migrate_disable() call.
+ */
+ if (spin_do_trylock(&base->lock)) {
+ if (time_before_eq(base->next_timer, base->timer_jiffies))
+ base->next_timer = __next_timer_interrupt(base);
+ expires = base->next_timer;
+ rt_spin_unlock_after_trylock_in_irq(&base->lock);
+ } else {
+ expires = now + 1;
+ }
+#else
spin_lock(&base->lock);
if (time_before_eq(base->next_timer, base->timer_jiffies))
base->next_timer = __next_timer_interrupt(base);
@@ -1328,7 +1399,7 @@ unsigned long get_next_timer_interrupt(unsigned long now)

if (time_before_eq(expires, now))
return now;
-
+#endif
return cmp_next_hrtimer_event(now, expires);
}
#endif
@@ -1344,14 +1415,13 @@ void update_process_times(int user_tick)

/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
+ scheduler_tick();
run_local_timers();
rcu_check_callbacks(cpu, user_tick);
- printk_tick();
-#ifdef CONFIG_IRQ_WORK
+#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
if (in_irq())
irq_work_run();
#endif
- scheduler_tick();
run_posix_cpu_timers(p);
}

@@ -1362,6 +1432,11 @@ static void run_timer_softirq(struct softirq_action *h)
{
struct tvec_base *base = __this_cpu_read(tvec_bases);

+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+ irq_work_run();
+#endif
+
+ printk_tick();
hrtimer_run_pending();

if (time_after_eq(jiffies, base->timer_jiffies))
@@ -1683,12 +1758,14 @@ static int __cpuinit init_timers_cpu(int cpu)
base = &boot_tvec_bases;
}
spin_lock_init(&base->lock);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ init_waitqueue_head(&base->wait_for_running_timer);
+#endif
tvec_base_done[cpu] = 1;
} else {
base = per_cpu(tvec_bases, cpu);
}

-
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
INIT_LIST_HEAD(base->tv4.vec + j);
@@ -1727,7 +1804,7 @@ static void __cpuinit migrate_timers(int cpu)

BUG_ON(cpu_online(cpu));
old_base = per_cpu(tvec_bases, cpu);
- new_base = get_local_var(tvec_bases);
+ new_base = get_local_var(tvec_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
@@ -1748,7 +1825,7 @@ static void __cpuinit migrate_timers(int cpu)

spin_unlock(&old_base->lock);
spin_unlock_irq(&new_base->lock);
- put_cpu_var(tvec_bases);
+ put_local_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
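The del_timer_sync() change above swaps a cpu_relax() busy-wait for sleeping on the new per-base waitqueue: on PREEMPT_RT the handler runs in a preemptible context, so a spinning caller could keep the handler from ever finishing. A condition-variable analogue in user-space C (a sketch of the waiting scheme only; the names are invented for illustration and the pthread objects are assumed to be initialized elsewhere):

/* Illustrative sketch, not part of the patch: wait for a running handler
 * instead of spinning on it. pthread condvars stand in for the kernel
 * waitqueue. */
#include <pthread.h>

struct timer_base_sketch {
	pthread_mutex_t lock;
	pthread_cond_t wait_for_running_timer;
	void *running_timer;		/* handler currently executing */
};

/* del_timer_sync() side: sleep until the handler is done */
static void wait_for_running(struct timer_base_sketch *b, void *timer)
{
	pthread_mutex_lock(&b->lock);
	while (b->running_timer == timer)
		pthread_cond_wait(&b->wait_for_running_timer, &b->lock);
	pthread_mutex_unlock(&b->lock);
}

/* __run_timers() side: clear running_timer, then wake all waiters */
static void handler_done(struct timer_base_sketch *b)
{
	pthread_mutex_lock(&b->lock);
	b->running_timer = NULL;
	pthread_cond_broadcast(&b->wait_for_running_timer);
	pthread_mutex_unlock(&b->lock);
}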

diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index b659cd6b80f2..b2e6fb5b6cb8 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -192,6 +192,24 @@ config IRQSOFF_TRACER
enabled. This option and the preempt-off timing option can be
used together or separately.)

+config INTERRUPT_OFF_HIST
+ bool "Interrupts-off Latency Histogram"
+ depends on IRQSOFF_TRACER
+ help
+ This option generates continuously updated histograms (one per cpu)
+ of the duration of time periods with interrupts disabled. The
+ histograms are disabled by default. To enable them, write a non-zero
+ number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
+
+ If PREEMPT_OFF_HIST is also selected, additional histograms (one
+ per cpu) are generated that accumulate the duration of time periods
+ when both interrupts and preemption are disabled. The histogram data
+ will be located in the debug file system at
+
+ /sys/kernel/debug/tracing/latency_hist/irqsoff
+
config PREEMPT_TRACER
bool "Preemption-off Latency Tracer"
default n
@@ -214,6 +232,24 @@ config PREEMPT_TRACER
enabled. This option and the irqs-off timing option can be
used together or separately.)

+config PREEMPT_OFF_HIST
+ bool "Preemption-off Latency Histogram"
+ depends on PREEMPT_TRACER
+ help
+ This option generates continuously updated histograms (one per cpu)
+ of the duration of time periods with preemption disabled. The
+ histograms are disabled by default. To enable them, write a non-zero
+ number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
+
+ If INTERRUPT_OFF_HIST is also selected, additional histograms (one
+ per cpu) are generated that accumulate the duration of time periods
+ when both interrupts and preemption are disabled. The histogram data
+ will be located in the debug file system at
+
+ /sys/kernel/debug/tracing/latency_hist/preemptoff
+
config SCHED_TRACER
bool "Scheduling Latency Tracer"
select GENERIC_TRACER
@@ -223,6 +259,74 @@ config SCHED_TRACER
This tracer tracks the latency of the highest priority task
to be scheduled in, starting from the point it has woken up.

+config WAKEUP_LATENCY_HIST
+ bool "Scheduling Latency Histogram"
+ depends on SCHED_TRACER
+ help
+ This option generates continuously updated histograms (one per cpu)
+ of the scheduling latency of the highest priority task.
+ The histograms are disabled by default. To enable them, write a
+ non-zero number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/wakeup
+
+ Two different algorithms are used, one to determine the latency of
+ processes that exclusively use the highest priority of the system and
+ another one to determine the latency of processes that share the
+ highest system priority with other processes. The former is used to
+ improve hardware and system software, the latter to optimize the
+ priority design of a given system. The histogram data will be
+ located in the debug file system at
+
+ /sys/kernel/debug/tracing/latency_hist/wakeup
+
+ and
+
+ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
+
+ If both Scheduling Latency Histogram and Missed Timer Offsets
+ Histogram are selected, additional histogram data will be collected
+ that contain, in addition to the wakeup latency, the timer latency, in
+ case the wakeup was triggered by an expired timer. These histograms
+ are available in the
+
+ /sys/kernel/debug/tracing/latency_hist/timerandwakeup
+
+ directory. They reflect the apparent interrupt and scheduling latency
+ and are best suited to determine the worst-case latency of a given
+ system. To enable these histograms, write a non-zero number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
+
+config MISSED_TIMER_OFFSETS_HIST
+ depends on HIGH_RES_TIMERS
+ select GENERIC_TRACER
+ bool "Missed Timer Offsets Histogram"
+ help
+ Generate a histogram of missed timer offsets in microseconds. The
+ histograms are disabled by default. To enable them, write a non-zero
+ number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
+
+ The histogram data will be located in the debug file system at
+
+ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
+
+ If both Scheduling Latency Histogram and Missed Timer Offsets
+ Histogram are selected, additional histogram data will be collected
+ that contain, in addition to the wakeup latency, the timer latency, in
+ case the wakeup was triggered by an expired timer. These histograms
+ are available in the
+
+ /sys/kernel/debug/tracing/latency_hist/timerandwakeup
+
+ directory. They reflect the apparent interrupt and scheduling latency
+ and are best suited to determine the worst-case latency of a given
+ system. To enable these histograms, write a non-zero number to
+
+ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
+
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
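Taken together, the help texts above describe a uniform debugfs interface: write a non-zero number to a file under latency_hist/enable/ to start logging, then read the per-CPU histogram files back. A small user-space C sketch of that round trip (the paths come from the help texts themselves; the per-CPU file name "CPU0" is an assumption for illustration, and debugfs must be mounted):

/* Illustrative sketch, not part of the patch: enable the wakeup latency
 * histogram and dump CPU0's data. */
#include <stdio.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/debug/tracing/latency_hist/enable/wakeup";
	const char *hist =
		"/sys/kernel/debug/tracing/latency_hist/wakeup/CPU0";
	FILE *f;
	char line[256];

	f = fopen(enable, "w");
	if (!f)
		return 1;
	fputs("1\n", f);	/* any non-zero number enables logging */
	fclose(f);

	f = fopen(hist, "r");	/* one "<usecs> <samples>" line per bucket */
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}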
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 5f39a07fe5ea..108a387e1459 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -36,6 +36,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
+obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
+obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
+obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
new file mode 100644
index 000000000000..6a4c8694c55a
--- /dev/null
+++ b/kernel/trace/latency_hist.c
@@ -0,0 +1,1176 @@
+/*
+ * kernel/trace/latency_hist.c
+ *
+ * Add support for histograms of preemption-off latency and
+ * interrupt-off latency and wakeup latency, it depends on
+ * Real-Time Preemption Support.
+ *
+ * Copyright (C) 2005 MontaVista Software, Inc.
+ * Yi Yang <yyang@ch.mvista.com>
+ *
+ * Converted to work with the new latency tracer.
+ * Copyright (C) 2008 Red Hat, Inc.
+ * Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/percpu.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <asm/div64.h>
+
+#include "trace.h"
+#include <trace/events/sched.h>
+
+#define NSECS_PER_USECS 1000L
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/hist.h>
+
+enum {
+ IRQSOFF_LATENCY = 0,
+ PREEMPTOFF_LATENCY,
+ PREEMPTIRQSOFF_LATENCY,
+ WAKEUP_LATENCY,
+ WAKEUP_LATENCY_SHAREDPRIO,
+ MISSED_TIMER_OFFSETS,
+ TIMERANDWAKEUP_LATENCY,
+ MAX_LATENCY_TYPE,
+};
+
+#define MAX_ENTRY_NUM 10240
+
+struct hist_data {
+ atomic_t hist_mode; /* 0 log, 1 don't log */
+ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
+ long min_lat;
+ long max_lat;
+ unsigned long long below_hist_bound_samples;
+ unsigned long long above_hist_bound_samples;
+ long long accumulate_lat;
+ unsigned long long total_samples;
+ unsigned long long hist_array[MAX_ENTRY_NUM];
+};
+
+struct enable_data {
+ int latency_type;
+ int enabled;
+};
+
+static char *latency_hist_dir_root = "latency_hist";
+
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
+static char *irqsoff_hist_dir = "irqsoff";
+static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
+static DEFINE_PER_CPU(int, hist_irqsoff_counting);
+#endif
+
+#ifdef CONFIG_PREEMPT_OFF_HIST
+static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
+static char *preemptoff_hist_dir = "preemptoff";
+static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
+static DEFINE_PER_CPU(int, hist_preemptoff_counting);
+#endif
+
+#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
+static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
+static char *preemptirqsoff_hist_dir = "preemptirqsoff";
+static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
+static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
+#endif
+
+#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
+static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
+static struct enable_data preemptirqsoff_enabled_data = {
+ .latency_type = PREEMPTIRQSOFF_LATENCY,
+ .enabled = 0,
+};
+#endif
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+struct maxlatproc_data {
+ char comm[FIELD_SIZEOF(struct task_struct, comm)];
+ char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
+ int pid;
+ int current_pid;
+ int prio;
+ int current_prio;
+ long latency;
+ long timeroffset;
+ cycle_t timestamp;
+};
+#endif
+
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
+static char *wakeup_latency_hist_dir = "wakeup";
+static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
+static notrace void probe_wakeup_latency_hist_start(void *v,
+ struct task_struct *p, int success);
+static notrace void probe_wakeup_latency_hist_stop(void *v,
+ struct task_struct *prev, struct task_struct *next);
+static notrace void probe_sched_migrate_task(void *,
+ struct task_struct *task, int cpu);
+static struct enable_data wakeup_latency_enabled_data = {
+ .latency_type = WAKEUP_LATENCY,
+ .enabled = 0,
+};
+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
+static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
+static DEFINE_PER_CPU(int, wakeup_sharedprio);
+static unsigned long wakeup_pid;
+#endif
+
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
+static char *missed_timer_offsets_dir = "missed_timer_offsets";
+static notrace void probe_hrtimer_interrupt(void *v, int cpu,
+ long long offset, struct task_struct *curr, struct task_struct *task);
+static struct enable_data missed_timer_offsets_enabled_data = {
+ .latency_type = MISSED_TIMER_OFFSETS,
+ .enabled = 0,
+};
+static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
+static unsigned long missed_timer_offsets_pid;
+#endif
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
+static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
+static struct enable_data timerandwakeup_enabled_data = {
+ .latency_type = TIMERANDWAKEUP_LATENCY,
+ .enabled = 0,
+};
+static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
+#endif
+
+void notrace latency_hist(int latency_type, int cpu, long latency,
+ long timeroffset, cycle_t stop,
+ struct task_struct *p)
+{
+ struct hist_data *my_hist;
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ struct maxlatproc_data *mp = NULL;
+#endif
+
+ if (cpu < 0 || cpu >= NR_CPUS || latency_type < 0 ||
+ latency_type >= MAX_LATENCY_TYPE)
+ return;
+
+ switch (latency_type) {
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ case IRQSOFF_LATENCY:
+ my_hist = &per_cpu(irqsoff_hist, cpu);
+ break;
+#endif
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ case PREEMPTOFF_LATENCY:
+ my_hist = &per_cpu(preemptoff_hist, cpu);
+ break;
+#endif
+#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
+ case PREEMPTIRQSOFF_LATENCY:
+ my_hist = &per_cpu(preemptirqsoff_hist, cpu);
+ break;
+#endif
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ case WAKEUP_LATENCY:
+ my_hist = &per_cpu(wakeup_latency_hist, cpu);
+ mp = &per_cpu(wakeup_maxlatproc, cpu);
+ break;
+ case WAKEUP_LATENCY_SHAREDPRIO:
+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
+ break;
+#endif
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ case MISSED_TIMER_OFFSETS:
+ my_hist = &per_cpu(missed_timer_offsets, cpu);
+ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
+ break;
+#endif
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ case TIMERANDWAKEUP_LATENCY:
+ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
+ mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
+ break;
+#endif
+
+ default:
+ return;
+ }
+
+ latency += my_hist->offset;
+
+ if (atomic_read(&my_hist->hist_mode) == 0)
+ return;
+
+ if (latency < 0 || latency >= MAX_ENTRY_NUM) {
+ if (latency < 0)
+ my_hist->below_hist_bound_samples++;
+ else
+ my_hist->above_hist_bound_samples++;
+ } else
+ my_hist->hist_array[latency]++;
+
+ if (unlikely(latency > my_hist->max_lat ||
+ my_hist->min_lat == LONG_MAX)) {
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ if (latency_type == WAKEUP_LATENCY ||
+ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
+ latency_type == MISSED_TIMER_OFFSETS ||
+ latency_type == TIMERANDWAKEUP_LATENCY) {
+ strncpy(mp->comm, p->comm, sizeof(mp->comm));
+ strncpy(mp->current_comm, current->comm,
+ sizeof(mp->current_comm));
+ mp->pid = task_pid_nr(p);
+ mp->current_pid = task_pid_nr(current);
+ mp->prio = p->prio;
+ mp->current_prio = current->prio;
+ mp->latency = latency;
+ mp->timeroffset = timeroffset;
+ mp->timestamp = stop;
+ }
+#endif
+ my_hist->max_lat = latency;
+ }
+ if (unlikely(latency < my_hist->min_lat))
+ my_hist->min_lat = latency;
+ my_hist->total_samples++;
+ my_hist->accumulate_lat += latency;
+}
+
+static void *l_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t *index_ptr = NULL;
+ loff_t index = *pos;
+ struct hist_data *my_hist = m->private;
+
+ if (index == 0) {
+ char minstr[32], avgstr[32], maxstr[32];
+
+ atomic_dec(&my_hist->hist_mode);
+
+ if (likely(my_hist->total_samples)) {
+ long avg = (long) div64_s64(my_hist->accumulate_lat,
+ my_hist->total_samples);
+ snprintf(minstr, sizeof(minstr), "%ld",
+ my_hist->min_lat - my_hist->offset);
+ snprintf(avgstr, sizeof(avgstr), "%ld",
+ avg - my_hist->offset);
+ snprintf(maxstr, sizeof(maxstr), "%ld",
+ my_hist->max_lat - my_hist->offset);
+ } else {
+ strcpy(minstr, "<undef>");
+ strcpy(avgstr, minstr);
+ strcpy(maxstr, minstr);
+ }
+
+ seq_printf(m, "#Minimum latency: %s microseconds\n"
+ "#Average latency: %s microseconds\n"
+ "#Maximum latency: %s microseconds\n"
+ "#Total samples: %llu\n"
+ "#There are %llu samples lower than %ld"
+ " microseconds.\n"
+ "#There are %llu samples greater or equal"
+ " than %ld microseconds.\n"
+ "#usecs\t%16s\n",
+ minstr, avgstr, maxstr,
+ my_hist->total_samples,
+ my_hist->below_hist_bound_samples,
+ -my_hist->offset,
+ my_hist->above_hist_bound_samples,
+ MAX_ENTRY_NUM - my_hist->offset,
+ "samples");
+ }
+ if (index < MAX_ENTRY_NUM) {
+ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
+ if (index_ptr)
+ *index_ptr = index;
+ }
+
+ return index_ptr;
+}
+
+static void *l_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ loff_t *index_ptr = p;
+ struct hist_data *my_hist = m->private;
+
+ if (++*pos >= MAX_ENTRY_NUM) {
+ atomic_inc(&my_hist->hist_mode);
+ return NULL;
+ }
+ *index_ptr = *pos;
+ return index_ptr;
+}
+
+static void l_stop(struct seq_file *m, void *p)
+{
+ kfree(p);
+}
+
+static int l_show(struct seq_file *m, void *p)
+{
+ int index = *(loff_t *) p;
+ struct hist_data *my_hist = m->private;
+
+ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
+ my_hist->hist_array[index]);
+ return 0;
+}
+
+static struct seq_operations latency_hist_seq_op = {
+ .start = l_start,
+ .next = l_next,
+ .stop = l_stop,
+ .show = l_show
+};
+
+static int latency_hist_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ ret = seq_open(file, &latency_hist_seq_op);
+ if (!ret) {
+ struct seq_file *seq = file->private_data;
+ seq->private = inode->i_private;
+ }
+ return ret;
+}
+
+static struct file_operations latency_hist_fops = {
+ .open = latency_hist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+static void clear_maxlatprocdata(struct maxlatproc_data *mp)
+{
+ mp->comm[0] = mp->current_comm[0] = '\0';
+ mp->prio = mp->current_prio = mp->pid = mp->current_pid =
+ mp->latency = mp->timeroffset = -1;
+ mp->timestamp = 0;
+}
+#endif
+
+static void hist_reset(struct hist_data *hist)
+{
+ atomic_dec(&hist->hist_mode);
+
+ memset(hist->hist_array, 0, sizeof(hist->hist_array));
+ hist->below_hist_bound_samples = 0ULL;
+ hist->above_hist_bound_samples = 0ULL;
+ hist->min_lat = LONG_MAX;
+ hist->max_lat = LONG_MIN;
+ hist->total_samples = 0ULL;
+ hist->accumulate_lat = 0LL;
+
+ atomic_inc(&hist->hist_mode);
+}
+
+static ssize_t
+latency_hist_reset(struct file *file, const char __user *a,
+ size_t size, loff_t *off)
+{
+ int cpu;
+ struct hist_data *hist = NULL;
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ struct maxlatproc_data *mp = NULL;
+#endif
+ off_t latency_type = (off_t) file->private_data;
+
+ for_each_online_cpu(cpu) {
+
+ switch (latency_type) {
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ case PREEMPTOFF_LATENCY:
+ hist = &per_cpu(preemptoff_hist, cpu);
+ break;
+#endif
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ case IRQSOFF_LATENCY:
+ hist = &per_cpu(irqsoff_hist, cpu);
+ break;
+#endif
+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
+ case PREEMPTIRQSOFF_LATENCY:
+ hist = &per_cpu(preemptirqsoff_hist, cpu);
+ break;
+#endif
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ case WAKEUP_LATENCY:
+ hist = &per_cpu(wakeup_latency_hist, cpu);
+ mp = &per_cpu(wakeup_maxlatproc, cpu);
+ break;
+ case WAKEUP_LATENCY_SHAREDPRIO:
+ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
+ break;
+#endif
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ case MISSED_TIMER_OFFSETS:
+ hist = &per_cpu(missed_timer_offsets, cpu);
+ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
+ break;
+#endif
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ case TIMERANDWAKEUP_LATENCY:
+ hist = &per_cpu(timerandwakeup_latency_hist, cpu);
+ mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
+ break;
+#endif
+ }
+
+ hist_reset(hist);
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ if (latency_type == WAKEUP_LATENCY ||
+ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
+ latency_type == MISSED_TIMER_OFFSETS ||
+ latency_type == TIMERANDWAKEUP_LATENCY)
+ clear_maxlatprocdata(mp);
+#endif
+ }
+
+ return size;
+}
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+static ssize_t
+show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int r;
+ unsigned long *this_pid = file->private_data;
+
+ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t do_pid(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ unsigned long pid;
+ unsigned long *this_pid = file->private_data;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = '\0';
+
+ if (strict_strtoul(buf, 10, &pid))
+ return(-EINVAL);
+
+ *this_pid = pid;
+
+ return cnt;
+}
+#endif
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+static ssize_t
+show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ int r;
+ struct maxlatproc_data *mp = file->private_data;
+ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
+ unsigned long long t;
+ unsigned long usecs, secs;
+ char *buf;
+
+ if (mp->pid == -1 || mp->current_pid == -1) {
+ buf = "(none)\n";
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf,
+ strlen(buf));
+ }
+
+ buf = kmalloc(strmaxlen, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ t = ns2usecs(mp->timestamp);
+ usecs = do_div(t, USEC_PER_SEC);
+ secs = (unsigned long) t;
+ r = snprintf(buf, strmaxlen,
+ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
+ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
+ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
+ secs, usecs);
+ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+ kfree(buf);
+ return r;
+}
+#endif
+
+static ssize_t
+show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ struct enable_data *ed = file->private_data;
+ int r;
+
+ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ long enable;
+ struct enable_data *ed = file->private_data;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ if (strict_strtol(buf, 10, &enable))
+ return(-EINVAL);
+
+ if ((enable && ed->enabled) || (!enable && !ed->enabled))
+ return cnt;
+
+ if (enable) {
+ int ret;
+
+ switch (ed->latency_type) {
+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
+ case PREEMPTIRQSOFF_LATENCY:
+ ret = register_trace_preemptirqsoff_hist(
+ probe_preemptirqsoff_hist, NULL);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_preemptirqsoff_hist "
+ "to trace_preemptirqsoff_hist\n");
+ return ret;
+ }
+ break;
+#endif
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ case WAKEUP_LATENCY:
+ ret = register_trace_sched_wakeup(
+ probe_wakeup_latency_hist_start, NULL);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_wakeup_latency_hist_start "
+ "to trace_sched_wakeup\n");
+ return ret;
+ }
+ ret = register_trace_sched_wakeup_new(
+ probe_wakeup_latency_hist_start, NULL);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_wakeup_latency_hist_start "
+ "to trace_sched_wakeup_new\n");
+ unregister_trace_sched_wakeup(
+ probe_wakeup_latency_hist_start, NULL);
+ return ret;
+ }
+ ret = register_trace_sched_switch(
+ probe_wakeup_latency_hist_stop, NULL);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_wakeup_latency_hist_stop "
+ "to trace_sched_switch\n");
+ unregister_trace_sched_wakeup(
+ probe_wakeup_latency_hist_start, NULL);
+ unregister_trace_sched_wakeup_new(
+ probe_wakeup_latency_hist_start, NULL);
+ return ret;
+ }
+ ret = register_trace_sched_migrate_task(
+ probe_sched_migrate_task, NULL);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_sched_migrate_task "
+ "to trace_sched_migrate_task\n");
+ unregister_trace_sched_wakeup(
+ probe_wakeup_latency_hist_start, NULL);
+ unregister_trace_sched_wakeup_new(
+ probe_wakeup_latency_hist_start, NULL);
+ unregister_trace_sched_switch(
+ probe_wakeup_latency_hist_stop, NULL);
+ return ret;
+ }
+ break;
+#endif
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ case MISSED_TIMER_OFFSETS:
+ ret = register_trace_hrtimer_interrupt(
+ probe_hrtimer_interrupt, NULL);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't assign "
+ "probe_hrtimer_interrupt "
+ "to trace_hrtimer_interrupt\n");
+ return ret;
+ }
+ break;
+#endif
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+ case TIMERANDWAKEUP_LATENCY:
+ if (!wakeup_latency_enabled_data.enabled ||
+ !missed_timer_offsets_enabled_data.enabled)
+ return -EINVAL;
+ break;
+#endif
+ default:
+ break;
+ }
+ } else {
+ switch (ed->latency_type) {
+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
+ case PREEMPTIRQSOFF_LATENCY:
+ {
+ int cpu;
+
+ unregister_trace_preemptirqsoff_hist(
+ probe_preemptirqsoff_hist, NULL);
+ for_each_online_cpu(cpu) {
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ per_cpu(hist_irqsoff_counting,
+ cpu) = 0;
+#endif
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ per_cpu(hist_preemptoff_counting,
+ cpu) = 0;
+#endif
+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
+ per_cpu(hist_preemptirqsoff_counting,
+ cpu) = 0;
+#endif
+ }
+ }
+ break;
+#endif
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ case WAKEUP_LATENCY:
+ {
+ int cpu;
+
+ unregister_trace_sched_wakeup(
+ probe_wakeup_latency_hist_start, NULL);
+ unregister_trace_sched_wakeup_new(
+ probe_wakeup_latency_hist_start, NULL);
+ unregister_trace_sched_switch(
+ probe_wakeup_latency_hist_stop, NULL);
+ unregister_trace_sched_migrate_task(
+ probe_sched_migrate_task, NULL);
+
+ for_each_online_cpu(cpu) {
+ per_cpu(wakeup_task, cpu) = NULL;
+ per_cpu(wakeup_sharedprio, cpu) = 0;
+ }
+ }
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ timerandwakeup_enabled_data.enabled = 0;
+#endif
+ break;
+#endif
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ case MISSED_TIMER_OFFSETS:
+ unregister_trace_hrtimer_interrupt(
+ probe_hrtimer_interrupt, NULL);
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ timerandwakeup_enabled_data.enabled = 0;
+#endif
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ ed->enabled = enable;
+ return cnt;
+}
+
+static const struct file_operations latency_hist_reset_fops = {
+ .open = tracing_open_generic,
+ .write = latency_hist_reset,
+};
+
+static const struct file_operations enable_fops = {
+ .open = tracing_open_generic,
+ .read = show_enable,
+ .write = do_enable,
+};
+
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
+static const struct file_operations pid_fops = {
+ .open = tracing_open_generic,
+ .read = show_pid,
+ .write = do_pid,
+};
+
+static const struct file_operations maxlatproc_fops = {
+ .open = tracing_open_generic,
+ .read = show_maxlatproc,
+};
+#endif
+
+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
+static notrace void probe_preemptirqsoff_hist(void *v, int reason,
+ int starthist)
+{
+ int cpu = raw_smp_processor_id();
+ int time_set = 0;
+
+ if (starthist) {
+ cycle_t uninitialized_var(start);
+
+ if (!preempt_count() && !irqs_disabled())
+ return;
+
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ if ((reason == IRQS_OFF || reason == TRACE_START) &&
+ !per_cpu(hist_irqsoff_counting, cpu)) {
+ per_cpu(hist_irqsoff_counting, cpu) = 1;
+ start = ftrace_now(cpu);
+ time_set++;
+ per_cpu(hist_irqsoff_start, cpu) = start;
+ }
+#endif
+
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
+ !per_cpu(hist_preemptoff_counting, cpu)) {
+ per_cpu(hist_preemptoff_counting, cpu) = 1;
+ if (!(time_set++))
+ start = ftrace_now(cpu);
+ per_cpu(hist_preemptoff_start, cpu) = start;
+ }
+#endif
+
+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
+ if (per_cpu(hist_irqsoff_counting, cpu) &&
+ per_cpu(hist_preemptoff_counting, cpu) &&
+ !per_cpu(hist_preemptirqsoff_counting, cpu)) {
+ per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
+ if (!time_set)
+ start = ftrace_now(cpu);
+ per_cpu(hist_preemptirqsoff_start, cpu) = start;
+ }
+#endif
+ } else {
+ cycle_t uninitialized_var(stop);
+
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ if ((reason == IRQS_ON || reason == TRACE_STOP) &&
+ per_cpu(hist_irqsoff_counting, cpu)) {
+ cycle_t start = per_cpu(hist_irqsoff_start, cpu);
+
+ stop = ftrace_now(cpu);
+ time_set++;
+ if (start) {
+ long latency = ((long) (stop - start)) /
+ NSECS_PER_USECS;
+
+ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
+ stop, NULL);
+ }
+ per_cpu(hist_irqsoff_counting, cpu) = 0;
+ }
+#endif
+
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
+ per_cpu(hist_preemptoff_counting, cpu)) {
+ cycle_t start = per_cpu(hist_preemptoff_start, cpu);
+
+ if (!(time_set++))
+ stop = ftrace_now(cpu);
+ if (start) {
+ long latency = ((long) (stop - start)) /
+ NSECS_PER_USECS;
+
+ latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
+ 0, stop, NULL);
+ }
+ per_cpu(hist_preemptoff_counting, cpu) = 0;
+ }
+#endif
+
+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
+ if ((!per_cpu(hist_irqsoff_counting, cpu) ||
+ !per_cpu(hist_preemptoff_counting, cpu)) &&
+ per_cpu(hist_preemptirqsoff_counting, cpu)) {
+ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
+
+ if (!time_set)
+ stop = ftrace_now(cpu);
+ if (start) {
+ long latency = ((long) (stop - start)) /
+ NSECS_PER_USECS;
+
+ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
+ latency, 0, stop, NULL);
+ }
+ per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
+ }
+#endif
+ }
+}
+#endif
+
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+static DEFINE_RAW_SPINLOCK(wakeup_lock);
+static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
+ int cpu)
+{
+ int old_cpu = task_cpu(task);
+
+ if (cpu != old_cpu) {
+ unsigned long flags;
+ struct task_struct *cpu_wakeup_task;
+
+ raw_spin_lock_irqsave(&wakeup_lock, flags);
+
+ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
+ if (task == cpu_wakeup_task) {
+ put_task_struct(cpu_wakeup_task);
+ per_cpu(wakeup_task, old_cpu) = NULL;
+ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
+ get_task_struct(cpu_wakeup_task);
+ }
+
+ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
+ }
+}
+
+static notrace void probe_wakeup_latency_hist_start(void *v,
+ struct task_struct *p, int success)
+{
+ unsigned long flags;
+ struct task_struct *curr = current;
+ int cpu = task_cpu(p);
+ struct task_struct *cpu_wakeup_task;
+
+ raw_spin_lock_irqsave(&wakeup_lock, flags);
+
+ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
+
+ if (wakeup_pid) {
+ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
+ p->prio == curr->prio)
+ per_cpu(wakeup_sharedprio, cpu) = 1;
+ if (likely(wakeup_pid != task_pid_nr(p)))
+ goto out;
+ } else {
+ if (likely(!rt_task(p)) ||
+ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
+ p->prio > curr->prio)
+ goto out;
+ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
+ p->prio == curr->prio)
+ per_cpu(wakeup_sharedprio, cpu) = 1;
+ }
+
+ if (cpu_wakeup_task)
+ put_task_struct(cpu_wakeup_task);
+ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
+ get_task_struct(cpu_wakeup_task);
+ cpu_wakeup_task->preempt_timestamp_hist =
+ ftrace_now(raw_smp_processor_id());
+out:
+ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
+}
+
+static notrace void probe_wakeup_latency_hist_stop(void *v,
+ struct task_struct *prev, struct task_struct *next)
+{
+ unsigned long flags;
+ int cpu = task_cpu(next);
+ long latency;
+ cycle_t stop;
+ struct task_struct *cpu_wakeup_task;
+
+ raw_spin_lock_irqsave(&wakeup_lock, flags);
+
+ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
+
+ if (cpu_wakeup_task == NULL)
+ goto out;
+
+ /* Already running? */
+ if (unlikely(current == cpu_wakeup_task))
+ goto out_reset;
+
+ if (next != cpu_wakeup_task) {
+ if (next->prio < cpu_wakeup_task->prio)
+ goto out_reset;
+
+ if (next->prio == cpu_wakeup_task->prio)
+ per_cpu(wakeup_sharedprio, cpu) = 1;
+
+ goto out;
+ }
+
+ if (current->prio == cpu_wakeup_task->prio)
+ per_cpu(wakeup_sharedprio, cpu) = 1;
+
+ /*
+ * The task we are waiting for is about to be switched to.
+ * Calculate latency and store it in histogram.
+ */
+ stop = ftrace_now(raw_smp_processor_id());
+
+ latency = ((long) (stop - next->preempt_timestamp_hist)) /
+ NSECS_PER_USECS;
+
+ if (per_cpu(wakeup_sharedprio, cpu)) {
+ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
+ next);
+ per_cpu(wakeup_sharedprio, cpu) = 0;
+ } else {
+ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ if (timerandwakeup_enabled_data.enabled) {
+ latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
+ next->timer_offset + latency, next->timer_offset,
+ stop, next);
+ }
+#endif
+ }
+
+out_reset:
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
|
|
+ next->timer_offset = 0;
|
|
+#endif
|
|
+ put_task_struct(cpu_wakeup_task);
|
|
+ per_cpu(wakeup_task, cpu) = NULL;
|
|
+out:
|
|
+ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
|
|
+static notrace void probe_hrtimer_interrupt(void *v, int cpu,
|
|
+ long long latency_ns, struct task_struct *curr, struct task_struct *task)
|
|
+{
|
|
+ if (latency_ns <= 0 && task != NULL && rt_task(task) &&
|
|
+ (task->prio < curr->prio ||
|
|
+ (task->prio == curr->prio &&
|
|
+ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
|
|
+ long latency;
|
|
+ cycle_t now;
|
|
+
|
|
+ if (missed_timer_offsets_pid) {
|
|
+ if (likely(missed_timer_offsets_pid !=
|
|
+ task_pid_nr(task)))
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ now = ftrace_now(cpu);
|
|
+ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
|
|
+ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
|
|
+ task);
|
|
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
|
|
+ task->timer_offset = latency;
|
|
+#endif
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+
|
|
+static __init int latency_hist_init(void)
|
|
+{
|
|
+ struct dentry *latency_hist_root = NULL;
|
|
+ struct dentry *dentry;
|
|
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
|
|
+ struct dentry *dentry_sharedprio;
|
|
+#endif
|
|
+ struct dentry *entry;
|
|
+ struct dentry *enable_root;
|
|
+ int i = 0;
|
|
+ struct hist_data *my_hist;
|
|
+ char name[64];
|
|
+ char *cpufmt = "CPU%d";
|
|
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
|
|
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
|
|
+ char *cpufmt_maxlatproc = "max_latency-CPU%d";
|
|
+ struct maxlatproc_data *mp = NULL;
|
|
+#endif
|
|
+
|
|
+ dentry = tracing_init_dentry();
|
|
+ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
|
|
+ enable_root = debugfs_create_dir("enable", latency_hist_root);
|
|
+
|
|
+#ifdef CONFIG_INTERRUPT_OFF_HIST
|
|
+ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
|
|
+ for_each_possible_cpu(i) {
|
|
+ sprintf(name, cpufmt, i);
|
|
+ entry = debugfs_create_file(name, 0444, dentry,
|
|
+ &per_cpu(irqsoff_hist, i), &latency_hist_fops);
|
|
+ my_hist = &per_cpu(irqsoff_hist, i);
|
|
+ atomic_set(&my_hist->hist_mode, 1);
|
|
+ my_hist->min_lat = LONG_MAX;
|
|
+ }
|
|
+ entry = debugfs_create_file("reset", 0644, dentry,
|
|
+ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PREEMPT_OFF_HIST
|
|
+ dentry = debugfs_create_dir(preemptoff_hist_dir,
|
|
+ latency_hist_root);
|
|
+ for_each_possible_cpu(i) {
|
|
+ sprintf(name, cpufmt, i);
|
|
+ entry = debugfs_create_file(name, 0444, dentry,
|
|
+ &per_cpu(preemptoff_hist, i), &latency_hist_fops);
|
|
+ my_hist = &per_cpu(preemptoff_hist, i);
|
|
+ atomic_set(&my_hist->hist_mode, 1);
|
|
+ my_hist->min_lat = LONG_MAX;
|
|
+ }
|
|
+ entry = debugfs_create_file("reset", 0644, dentry,
|
|
+ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
|
|
+ dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
|
|
+ latency_hist_root);
|
|
+ for_each_possible_cpu(i) {
|
|
+ sprintf(name, cpufmt, i);
|
|
+ entry = debugfs_create_file(name, 0444, dentry,
|
|
+ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
|
|
+ my_hist = &per_cpu(preemptirqsoff_hist, i);
|
|
+ atomic_set(&my_hist->hist_mode, 1);
|
|
+ my_hist->min_lat = LONG_MAX;
|
|
+ }
|
|
+ entry = debugfs_create_file("reset", 0644, dentry,
|
|
+ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
|
|
+ entry = debugfs_create_file("preemptirqsoff", 0644,
|
|
+ enable_root, (void *)&preemptirqsoff_enabled_data,
|
|
+ &enable_fops);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
|
|
+ dentry = debugfs_create_dir(wakeup_latency_hist_dir,
|
|
+ latency_hist_root);
|
|
+ dentry_sharedprio = debugfs_create_dir(
|
|
+ wakeup_latency_hist_dir_sharedprio, dentry);
|
|
+ for_each_possible_cpu(i) {
|
|
+ sprintf(name, cpufmt, i);
|
|
+
|
|
+ entry = debugfs_create_file(name, 0444, dentry,
|
|
+ &per_cpu(wakeup_latency_hist, i),
|
|
+ &latency_hist_fops);
|
|
+ my_hist = &per_cpu(wakeup_latency_hist, i);
|
|
+ atomic_set(&my_hist->hist_mode, 1);
|
|
+ my_hist->min_lat = LONG_MAX;
|
|
+
|
|
+ entry = debugfs_create_file(name, 0444, dentry_sharedprio,
|
|
+ &per_cpu(wakeup_latency_hist_sharedprio, i),
|
|
+ &latency_hist_fops);
|
|
+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
|
|
+ atomic_set(&my_hist->hist_mode, 1);
|
|
+ my_hist->min_lat = LONG_MAX;
|
|
+
|
|
+ sprintf(name, cpufmt_maxlatproc, i);
|
|
+
|
|
+ mp = &per_cpu(wakeup_maxlatproc, i);
|
|
+ entry = debugfs_create_file(name, 0444, dentry, mp,
|
|
+ &maxlatproc_fops);
|
|
+ clear_maxlatprocdata(mp);
|
|
+
|
|
+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
|
|
+ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
|
|
+ &maxlatproc_fops);
|
|
+ clear_maxlatprocdata(mp);
|
|
+ }
|
|
+ entry = debugfs_create_file("pid", 0644, dentry,
|
|
+ (void *)&wakeup_pid, &pid_fops);
|
|
+ entry = debugfs_create_file("reset", 0644, dentry,
|
|
+ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
|
|
+ entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
|
|
+ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
|
|
+ entry = debugfs_create_file("wakeup", 0644,
|
|
+ enable_root, (void *)&wakeup_latency_enabled_data,
|
|
+ &enable_fops);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
|
|
+ dentry = debugfs_create_dir(missed_timer_offsets_dir,
|
|
+ latency_hist_root);
|
|
+ for_each_possible_cpu(i) {
|
|
+ sprintf(name, cpufmt, i);
|
|
+ entry = debugfs_create_file(name, 0444, dentry,
|
|
+ &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
|
|
+ my_hist = &per_cpu(missed_timer_offsets, i);
|
|
+ atomic_set(&my_hist->hist_mode, 1);
|
|
+ my_hist->min_lat = LONG_MAX;
|
|
+
|
|
+ sprintf(name, cpufmt_maxlatproc, i);
|
|
+ mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
|
|
+ entry = debugfs_create_file(name, 0444, dentry, mp,
|
|
+ &maxlatproc_fops);
|
|
+ clear_maxlatprocdata(mp);
|
|
+ }
|
|
+ entry = debugfs_create_file("pid", 0644, dentry,
|
|
+ (void *)&missed_timer_offsets_pid, &pid_fops);
|
|
+ entry = debugfs_create_file("reset", 0644, dentry,
|
|
+ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
|
|
+ entry = debugfs_create_file("missed_timer_offsets", 0644,
|
|
+ enable_root, (void *)&missed_timer_offsets_enabled_data,
|
|
+ &enable_fops);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
|
|
+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
|
|
+ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
|
|
+ latency_hist_root);
|
|
+ for_each_possible_cpu(i) {
|
|
+ sprintf(name, cpufmt, i);
|
|
+ entry = debugfs_create_file(name, 0444, dentry,
|
|
+ &per_cpu(timerandwakeup_latency_hist, i),
|
|
+ &latency_hist_fops);
|
|
+ my_hist = &per_cpu(timerandwakeup_latency_hist, i);
|
|
+ atomic_set(&my_hist->hist_mode, 1);
|
|
+ my_hist->min_lat = LONG_MAX;
|
|
+
|
|
+ sprintf(name, cpufmt_maxlatproc, i);
|
|
+ mp = &per_cpu(timerandwakeup_maxlatproc, i);
|
|
+ entry = debugfs_create_file(name, 0444, dentry, mp,
|
|
+ &maxlatproc_fops);
|
|
+ clear_maxlatprocdata(mp);
|
|
+ }
|
|
+ entry = debugfs_create_file("reset", 0644, dentry,
|
|
+ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
|
|
+ entry = debugfs_create_file("timerandwakeup", 0644,
|
|
+ enable_root, (void *)&timerandwakeup_enabled_data,
|
|
+ &enable_fops);
|
|
+#endif
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+__initcall(latency_hist_init);
|
|
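Note on the accounting above: each probe reduces an event to a whole-microsecond value and hands it to latency_hist(), which bumps one bucket per sample. A minimal user-space sketch of that idea follows; MAX_ENTRY_NUM and the counter names are illustrative assumptions, not the series' actual identifiers.

/*
 * Illustrative user-space sketch, not part of the patch: convert a
 * cycle delta to microseconds and account it one bucket per usec.
 */
#include <stdio.h>

#define MAX_ENTRY_NUM 10240		/* assumed histogram size */

static unsigned long hist_array[MAX_ENTRY_NUM];
static unsigned long long beyond_hist_bound_samples;

static void account_latency(long latency_us)
{
	if (latency_us < 0)
		return;				/* ignore backwards clocks */
	if (latency_us >= MAX_ENTRY_NUM)
		beyond_hist_bound_samples++;	/* off-scale sample */
	else
		hist_array[latency_us]++;	/* one bucket per usec */
}

int main(void)
{
	long long start_ns = 1000000, stop_ns = 1234567;

	/* mirrors: latency = ((long)(stop - start)) / NSECS_PER_USECS */
	account_latency((long)((stop_ns - start_ns) / 1000));
	printf("bucket[234] = %lu\n", hist_array[234]);
	return 0;
}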
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bd0f1c499e0a..04aaae290fec 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -446,7 +446,7 @@ struct ring_buffer_per_cpu {
int cpu;
atomic_t record_disabled;
struct ring_buffer *buffer;
- raw_spinlock_t reader_lock; /* serialize readers */
+ spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock;
struct lock_class_key lock_key;
struct list_head *pages;
@@ -1017,6 +1017,44 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
return -ENOMEM;
}

+static inline int ok_to_lock(void)
+{
+ if (in_nmi())
+ return 0;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (in_atomic() || irqs_disabled())
+ return 0;
+#endif
+ return 1;
+}
+
+static int
+read_buffer_lock(struct ring_buffer_per_cpu *cpu_buffer,
+ unsigned long *flags)
+{
+ /*
+ * If an NMI die dumps out the content of the ring buffer
+ * do not grab locks. We also permanently disable the ring
+ * buffer too. A one time deal is all you get from reading
+ * the ring buffer from an NMI.
+ */
+ if (!ok_to_lock()) {
+ if (spin_trylock_irqsave(&cpu_buffer->reader_lock, *flags))
+ return 1;
+ tracing_off_permanent();
+ return 0;
+ }
+ spin_lock_irqsave(&cpu_buffer->reader_lock, *flags);
+ return 1;
+}
+
+static void
+read_buffer_unlock(struct ring_buffer_per_cpu *cpu_buffer,
+ unsigned long flags, int locked)
+{
+ if (locked)
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
@@ -1032,7 +1070,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)

cpu_buffer->cpu = cpu;
cpu_buffer->buffer = buffer;
- raw_spin_lock_init(&cpu_buffer->reader_lock);
+ spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

@@ -1227,9 +1265,11 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
struct buffer_page *bpage;
struct list_head *p;
+ unsigned long flags;
unsigned i;
+ int locked;

- raw_spin_lock_irq(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
rb_head_page_deactivate(cpu_buffer);

for (i = 0; i < nr_pages; i++) {
@@ -1247,7 +1287,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
rb_check_pages(cpu_buffer);

out:
- raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+ read_buffer_unlock(cpu_buffer, flags, locked);
}

static void
@@ -1256,9 +1296,11 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
{
struct buffer_page *bpage;
struct list_head *p;
+ unsigned long flags;
unsigned i;
+ int locked;

- raw_spin_lock_irq(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
rb_head_page_deactivate(cpu_buffer);

for (i = 0; i < nr_pages; i++) {
@@ -1273,7 +1315,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
rb_check_pages(cpu_buffer);

out:
- raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+ read_buffer_unlock(cpu_buffer, flags, locked);
}

/**
@@ -2721,7 +2763,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
return 0;

cpu_buffer = buffer->buffers[cpu];
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
/*
* if the tail is on reader_page, oldest time stamp is on the reader
* page
@@ -2732,7 +2774,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
bpage = rb_set_head_page(cpu_buffer);
if (bpage)
ret = bpage->page->time_stamp;
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

return ret;
}
@@ -2896,15 +2938,16 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
+ int locked;

if (!iter)
return;

cpu_buffer = iter->cpu_buffer;

- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
rb_iter_reset(iter);
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

@@ -3324,21 +3367,6 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

-static inline int rb_ok_to_lock(void)
-{
- /*
- * If an NMI die dumps out the content of the ring buffer
- * do not grab locks. We also permanently disable the ring
- * buffer too. A one time deal is all you get from reading
- * the ring buffer from an NMI.
- */
- if (likely(!in_nmi()))
- return 1;
-
- tracing_off_permanent();
- return 0;
-}
-
/**
* ring_buffer_peek - peek at the next event to be read
* @buffer: The ring buffer to read
@@ -3356,22 +3384,17 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
unsigned long flags;
- int dolock;
+ int locked;

if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;

- dolock = rb_ok_to_lock();
again:
- local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
rb_advance_reader(cpu_buffer);
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);

if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
@@ -3393,11 +3416,12 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
struct ring_buffer_event *event;
unsigned long flags;
+ int locked;

again:
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
event = rb_iter_peek(iter, ts);
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);

if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
@@ -3423,9 +3447,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event = NULL;
unsigned long flags;
- int dolock;
-
- dolock = rb_ok_to_lock();
+ int locked;

again:
/* might be called in atomic */
@@ -3435,9 +3457,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
goto out;

cpu_buffer = buffer->buffers[cpu];
- local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);

event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event) {
@@ -3445,9 +3465,8 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
rb_advance_reader(cpu_buffer);
}

- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
+

out:
preempt_enable();
@@ -3532,17 +3551,18 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
+ int locked;

if (!iter)
return;

cpu_buffer = iter->cpu_buffer;

- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
arch_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
arch_spin_unlock(&cpu_buffer->lock);
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

@@ -3576,8 +3596,9 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_event *event;
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
+ int locked;

- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
again:
event = rb_iter_peek(iter, ts);
if (!event)
@@ -3588,7 +3609,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)

rb_advance_iter(iter);
out:
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);

return event;
}
@@ -3653,13 +3674,14 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
unsigned long flags;
+ int locked;

if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;

atomic_inc(&cpu_buffer->record_disabled);

- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);

if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
goto out;
@@ -3671,7 +3693,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
arch_spin_unlock(&cpu_buffer->lock);

out:
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);

atomic_dec(&cpu_buffer->record_disabled);
}
@@ -3698,22 +3720,16 @@ int ring_buffer_empty(struct ring_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
- int dolock;
+ int locked;
int cpu;
int ret;

- dolock = rb_ok_to_lock();
-
/* yes this is racy, but if you don't like the race, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
- local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
ret = rb_per_cpu_empty(cpu_buffer);
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);

if (!ret)
return 0;
@@ -3732,22 +3748,16 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
- int dolock;
+ int locked;
int ret;

if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 1;

- dolock = rb_ok_to_lock();
-
cpu_buffer = buffer->buffers[cpu];
- local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
ret = rb_per_cpu_empty(cpu_buffer);
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);

return ret;
}
@@ -3922,6 +3932,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
unsigned int commit;
unsigned int read;
u64 save_timestamp;
+ int locked;
int ret = -1;

if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -3943,7 +3954,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
if (!bpage)
goto out;

- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);

reader = rb_get_reader_page(cpu_buffer);
if (!reader)
@@ -4067,7 +4078,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);

out_unlock:
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);

out:
return ret;
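The conversion above replaces the open-coded dolock logic with read_buffer_lock()/read_buffer_unlock(): the lock side reports whether the lock was really taken, and the unlock side only undoes what was taken. A standalone pthread-based sketch of that pattern, with hypothetical names:

/*
 * Illustrative sketch, not from the patch: a conditional lock that
 * returns a "locked" flag which the caller hands back to unlock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;

static int cond_lock(int may_block)
{
	if (!may_block)			/* e.g. an NMI-like context */
		return pthread_mutex_trylock(&reader_lock) == 0;
	pthread_mutex_lock(&reader_lock);
	return 1;
}

static void cond_unlock(int locked)
{
	if (locked)			/* only undo what was taken */
		pthread_mutex_unlock(&reader_lock);
}

int main(void)
{
	int locked = cond_lock(1);
	/* ... read-side critical section ... */
	cond_unlock(locked);
	printf("locked=%d\n", locked);
	return 0;
}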
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8897a6f5c408..86bb19942b71 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -414,11 +414,13 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
*/
void trace_wake_up(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
const unsigned long delay = msecs_to_jiffies(2);

if (trace_flags & TRACE_ITER_BLOCK)
return;
schedule_delayed_work(&wakeup_work, delay);
+#endif
}

static int __init set_buf_size(char *str)
@@ -787,6 +789,12 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
}
#endif /* CONFIG_TRACER_MAX_TRACE */

+#ifndef CONFIG_PREEMPT_RT_FULL
+static void default_wait_pipe(struct trace_iterator *iter);
+#else
+#define default_wait_pipe poll_wait_pipe
+#endif
+
/**
* register_tracer - register a tracer with the ftrace system.
* @type - the plugin for the tracer
@@ -1189,6 +1197,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
+
+ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

@@ -1947,9 +1957,10 @@ static void print_lat_help_header(struct seq_file *m)
seq_puts(m, "# | / _----=> need-resched \n");
seq_puts(m, "# || / _---=> hardirq/softirq \n");
seq_puts(m, "# ||| / _--=> preempt-depth \n");
- seq_puts(m, "# |||| / delay \n");
- seq_puts(m, "# cmd pid ||||| time | caller \n");
- seq_puts(m, "# \\ / ||||| \\ | / \n");
+ seq_puts(m, "# |||| / _--=> migrate-disable\n");
+ seq_puts(m, "# ||||| / delay \n");
+ seq_puts(m, "# cmd pid |||||| time | caller \n");
+ seq_puts(m, "# \\ / ||||| \\ | / \n");
}

static void print_event_info(struct trace_array *tr, struct seq_file *m)
@@ -3338,6 +3349,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
return 0;
}

+#ifndef CONFIG_PREEMPT_RT_FULL
static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
@@ -3359,8 +3371,7 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
}
}

-
-void default_wait_pipe(struct trace_iterator *iter)
+static void default_wait_pipe(struct trace_iterator *iter)
{
DEFINE_WAIT(wait);

@@ -3371,6 +3382,20 @@ void default_wait_pipe(struct trace_iterator *iter)

finish_wait(&trace_wait, &wait);
}
+#else
+static unsigned int
+tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+{
+ struct trace_iterator *iter = filp->private_data;
+
+ if ((trace_flags & TRACE_ITER_BLOCK) || !trace_empty(iter))
+ return POLLIN | POLLRDNORM;
+ poll_wait_pipe(iter);
+ if (!trace_empty(iter))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+#endif

/*
* This is a make-shift waitqueue.
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 7360674ea049..f05c8fb48f39 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -358,7 +358,6 @@ void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

-void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);

void ftrace(struct trace_array *tr,
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 394783531cbb..1bbb1b200cec 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -44,6 +44,7 @@ u64 notrace trace_clock_local(void)

return clock;
}
+EXPORT_SYMBOL_GPL(trace_clock_local);

/*
* trace_clock(): 'between' trace clock. Not completely serialized,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 2f737f536c27..b56bafeea694 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -116,7 +116,8 @@ static int trace_define_common_fields(void)
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
- __common_field(int, padding);
+ __common_field(unsigned short, migrate_disable);
+ __common_field(unsigned short, padding);

return ret;
}
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 8dd139a3f6c7..bafa41220311 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -17,6 +17,7 @@
#include <linux/fs.h>

#include "trace.h"
+#include <trace/events/hist.h>

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;
@@ -437,11 +438,13 @@ void start_critical_timings(void)
{
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ trace_preemptirqsoff_hist(TRACE_START, 1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
+ trace_preemptirqsoff_hist(TRACE_STOP, 0);
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
@@ -451,6 +454,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
+ trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
@@ -459,6 +463,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
}

#else /* !CONFIG_PROVE_LOCKING */
@@ -484,6 +489,7 @@ inline void print_irqtrace_events(struct task_struct *curr)
*/
void trace_hardirqs_on(void)
{
+ trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
@@ -493,11 +499,13 @@ void trace_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
+ trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
@@ -507,6 +515,7 @@ void trace_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

@@ -516,12 +525,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
+ trace_preemptirqsoff_hist(PREEMPT_ON, 0);
if (preempt_trace() && !irq_trace())
stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
+ trace_preemptirqsoff_hist(PREEMPT_ON, 1);
if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index df611a0e76c5..1b795358e51b 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -593,6 +593,11 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
else
ret = trace_seq_putc(s, '.');

+ if (entry->migrate_disable)
+ ret = trace_seq_printf(s, "%x", entry->migrate_disable);
+ else
+ ret = trace_seq_putc(s, '.');
+
return ret;
}

diff --git a/kernel/user.c b/kernel/user.c
index 71dd2363ab0f..b831e51c3489 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -129,11 +129,11 @@ void free_uid(struct user_struct *up)
if (!up)
return;

- local_irq_save(flags);
+ local_irq_save_nort(flags);
if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
free_user(up, flags);
else
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
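free_uid() above moves to the _nort ("not on RT") interrupt primitives. As a sketch of the assumed semantics: on PREEMPT_RT_FULL the flags are only saved and interrupts stay enabled, while on mainline the ordinary primitives are used. Treat the exact definitions below as an assumption rather than a quote from the series' headers.

/*
 * Assumed _nort semantics, for illustration only.
 */
#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_save_nort(flags)	local_save_flags(flags)
# define local_irq_restore_nort(flags)	(void)(flags)
#else
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif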
diff --git a/kernel/wait-simple.c b/kernel/wait-simple.c
new file mode 100644
index 000000000000..c35ec7808ae0
--- /dev/null
+++ b/kernel/wait-simple.c
@@ -0,0 +1,119 @@
+/*
+ * Simple waitqueues without fancy flags and callbacks
+ *
+ * (C) 2011 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Based on kernel/wait.c
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/wait-simple.h>
+
+/* Adds w to head->list. Must be called with head->lock locked. */
+static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
+{
+ list_add(&w->node, &head->list);
+}
+
+/* Removes w from head->list. Must be called with head->lock locked. */
+static inline void __swait_dequeue(struct swaiter *w)
+{
+ list_del_init(&w->node);
+}
+
+/* Check whether a head has waiters enqueued */
+static inline bool swait_head_has_waiters(struct swait_head *h)
+{
+ return !list_empty(&h->list);
+}
+
+void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
+{
+ raw_spin_lock_init(&head->lock);
+ lockdep_set_class(&head->lock, key);
+ INIT_LIST_HEAD(&head->list);
+}
+EXPORT_SYMBOL_GPL(__init_swait_head);
+
+void swait_prepare_locked(struct swait_head *head, struct swaiter *w)
+{
+ w->task = current;
+ if (list_empty(&w->node))
+ __swait_enqueue(head, w);
+}
+
+void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&head->lock, flags);
+ swait_prepare_locked(head, w);
+ __set_current_state(state);
+ raw_spin_unlock_irqrestore(&head->lock, flags);
+}
+EXPORT_SYMBOL_GPL(swait_prepare);
+
+void swait_finish_locked(struct swait_head *head, struct swaiter *w)
+{
+ __set_current_state(TASK_RUNNING);
+ if (w->task)
+ __swait_dequeue(w);
+}
+
+void swait_finish(struct swait_head *head, struct swaiter *w)
+{
+ unsigned long flags;
+
+ __set_current_state(TASK_RUNNING);
+ if (w->task) {
+ raw_spin_lock_irqsave(&head->lock, flags);
+ __swait_dequeue(w);
+ raw_spin_unlock_irqrestore(&head->lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(swait_finish);
+
+unsigned int
+__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num)
+{
+ struct swaiter *curr, *next;
+ int woken = 0;
+
+ list_for_each_entry_safe(curr, next, &head->list, node) {
+ if (wake_up_state(curr->task, state)) {
+ __swait_dequeue(curr);
+ /*
+ * The waiting task can free the waiter as
+ * soon as curr->task = NULL is written,
+ * without taking any locks. A memory barrier
+ * is required here to prevent the following
+ * store to curr->task from getting ahead of
+ * the dequeue operation.
+ */
+ smp_wmb();
+ curr->task = NULL;
+ if (++woken == num)
+ break;
+ }
+ }
+ return woken;
+}
+
+unsigned int
+__swait_wake(struct swait_head *head, unsigned int state, unsigned int num)
+{
+ unsigned long flags;
+ int woken;
+
+ if (!swait_head_has_waiters(head))
+ return 0;
+
+ raw_spin_lock_irqsave(&head->lock, flags);
+ woken = __swait_wake_locked(head, state, num);
+ raw_spin_unlock_irqrestore(&head->lock, flags);
+ return woken;
+}
+EXPORT_SYMBOL_GPL(__swait_wake);
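A possible waiter/waker pairing for the simple waitqueues added above. This is a sketch only: DEFINE_SWAIT_HEAD() and the swaiter initialization are assumed to match the declarations in linux/wait-simple.h, and the names are illustrative.

/*
 * Illustrative usage sketch, not part of the patch.
 */
static DEFINE_SWAIT_HEAD(demo_head);	/* assumed initializer macro */
static int demo_condition;

static void demo_wait(void)
{
	struct swaiter w = { .task = NULL };

	INIT_LIST_HEAD(&w.node);
	for (;;) {
		swait_prepare(&demo_head, &w, TASK_UNINTERRUPTIBLE);
		if (demo_condition)
			break;
		schedule();			/* sleep until woken */
	}
	swait_finish(&demo_head, &w);
}

static void demo_wake(void)
{
	demo_condition = 1;
	__swait_wake(&demo_head, TASK_NORMAL, 1);	/* wake one waiter */
}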
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 991aa9381a44..3b2084fa7c22 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -201,6 +201,8 @@ static int is_softlockup(unsigned long touch_ts)

#ifdef CONFIG_HARDLOCKUP_DETECTOR

+static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
+
static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
@@ -235,10 +237,19 @@ static void watchdog_overflow_callback(struct perf_event *event,
if (__this_cpu_read(hard_watchdog_warn) == true)
return;

- if (hardlockup_panic)
+ /*
+ * If early-printk is enabled then make sure we do not
+ * lock up in printk() and kill console logging:
+ */
+ printk_kill();
+
+ if (hardlockup_panic) {
panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
- else
+ } else {
+ raw_spin_lock(&watchdog_output_lock);
WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+ raw_spin_unlock(&watchdog_output_lock);
+ }

__this_cpu_write(hard_watchdog_warn, true);
return;
@@ -430,6 +441,7 @@ static void watchdog_prepare_cpu(int cpu)
WARN_ON(per_cpu(softlockup_watchdog, cpu));
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
+ hrtimer->irqsafe = 1;

#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
/*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9f2c923009cc..653d7fccb762 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -138,6 +138,7 @@ struct worker {
unsigned int flags; /* X: flags */
int id; /* I: worker id */
struct work_struct rebind_work; /* L: rebind worker to cpu */
+ int sleeping; /* None */
};

/*
@@ -318,6 +319,31 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
(cpu) < WORK_CPU_NONE; \
(cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))

+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void rt_lock_idle_list(struct global_cwq *gcwq)
+{
+ preempt_disable();
+}
+static inline void rt_unlock_idle_list(struct global_cwq *gcwq)
+{
+ preempt_enable();
+}
+static inline void sched_lock_idle_list(struct global_cwq *gcwq) { }
+static inline void sched_unlock_idle_list(struct global_cwq *gcwq) { }
+#else
+static inline void rt_lock_idle_list(struct global_cwq *gcwq) { }
+static inline void rt_unlock_idle_list(struct global_cwq *gcwq) { }
+static inline void sched_lock_idle_list(struct global_cwq *gcwq)
+{
+ spin_lock_irq(&gcwq->lock);
+}
+static inline void sched_unlock_idle_list(struct global_cwq *gcwq)
+{
+ spin_unlock_irq(&gcwq->lock);
+}
+#endif
+
+
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;
@@ -649,73 +675,69 @@ static struct worker *first_worker(struct global_cwq *gcwq)
*/
static void wake_up_worker(struct global_cwq *gcwq)
{
- struct worker *worker = first_worker(gcwq);
+ struct worker *worker;
+
+ rt_lock_idle_list(gcwq);
+
+ worker = first_worker(gcwq);

if (likely(worker))
wake_up_process(worker->task);
+
+ rt_unlock_idle_list(gcwq);
}

/**
- * wq_worker_waking_up - a worker is waking up
- * @task: task waking up
- * @cpu: CPU @task is waking up to
+ * wq_worker_running - a worker is running again
+ * @task: task returning from sleep
*
- * This function is called during try_to_wake_up() when a worker is
- * being awoken.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
+ * This function is called when a worker returns from schedule()
*/
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
+void wq_worker_running(struct task_struct *task)
{
struct worker *worker = kthread_data(task);

+ if (!worker->sleeping)
+ return;
if (!(worker->flags & WORKER_NOT_RUNNING))
- atomic_inc(get_gcwq_nr_running(cpu));
+ atomic_inc(get_gcwq_nr_running(smp_processor_id()));
+ worker->sleeping = 0;
}

/**
* wq_worker_sleeping - a worker is going to sleep
* @task: task going to sleep
- * @cpu: CPU in question, must be the current CPU number
- *
- * This function is called during schedule() when a busy worker is
- * going to sleep. Worker on the same cpu can be woken up by
- * returning pointer to its task.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
*
- * RETURNS:
- * Worker task on @cpu to wake up, %NULL if none.
+ * This function is called from schedule() when a busy worker is
+ * going to sleep.
*/
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
- unsigned int cpu)
+void wq_worker_sleeping(struct task_struct *task)
{
- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
- struct global_cwq *gcwq = get_gcwq(cpu);
- atomic_t *nr_running = get_gcwq_nr_running(cpu);
+ struct worker *worker = kthread_data(task);
+ struct global_cwq *gcwq;
+ int cpu;

if (worker->flags & WORKER_NOT_RUNNING)
- return NULL;
+ return;

- /* this can only happen on the local cpu */
- BUG_ON(cpu != raw_smp_processor_id());
+ if (WARN_ON_ONCE(worker->sleeping))
+ return;

+ worker->sleeping = 1;
+
+ cpu = smp_processor_id();
+ gcwq = get_gcwq(cpu);
/*
* The counterpart of the following dec_and_test, implied mb,
* worklist not empty test sequence is in insert_work().
* Please read comment there.
- *
- * NOT_RUNNING is clear. This means that trustee is not in
- * charge and we're running on the local cpu w/ rq lock held
- * and preemption disabled, which in turn means that none else
- * could be manipulating idle_list, so dereferencing idle_list
- * without gcwq lock is safe.
*/
- if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
- to_wakeup = first_worker(gcwq);
- return to_wakeup ? to_wakeup->task : NULL;
+ if (atomic_dec_and_test(get_gcwq_nr_running(cpu)) &&
+ !list_empty(&gcwq->worklist)) {
+ sched_lock_idle_list(gcwq);
+ wake_up_worker(gcwq);
+ sched_unlock_idle_list(gcwq);
+ }
}

/**
@@ -1085,8 +1107,8 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
int ret;

- ret = queue_work_on(get_cpu(), wq, work);
- put_cpu();
+ ret = queue_work_on(get_cpu_light(), wq, work);
+ put_cpu_light();

return ret;
}
@@ -1230,13 +1252,8 @@ static void worker_enter_idle(struct worker *worker)
} else
wake_up_all(&gcwq->trustee_wait);

- /*
- * Sanity check nr_running. Because trustee releases gcwq->lock
- * between setting %WORKER_ROGUE and zapping nr_running, the
- * warning may trigger spuriously. Check iff trustee is idle.
- */
- WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
- gcwq->nr_workers == gcwq->nr_idle &&
+ /* sanity check nr_running */
+ WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
atomic_read(get_gcwq_nr_running(gcwq->cpu)));
}

@@ -3579,6 +3596,25 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
kthread_stop(new_trustee);
return NOTIFY_BAD;
}
+ break;
+ case CPU_POST_DEAD:
+ case CPU_UP_CANCELED:
+ case CPU_DOWN_FAILED:
+ case CPU_ONLINE:
+ break;
+ case CPU_DYING:
+ /*
+ * We access this lockless. We are on the dying CPU
+ * and called from stomp machine.
+ *
+ * Before this, the trustee and all workers except for
+ * the ones which are still executing works from
+ * before the last CPU down must be on the cpu. After
+ * this, they'll all be diasporas.
+ */
+ gcwq->flags |= GCWQ_DISASSOCIATED;
+ default:
+ goto out;
}

/* some are called w/ irq disabled, don't disturb irq status */
@@ -3598,16 +3634,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
gcwq->first_idle = new_worker;
break;

- case CPU_DYING:
- /*
- * Before this, the trustee and all workers except for
- * the ones which are still executing works from
- * before the last CPU down must be on the cpu. After
- * this, they'll all be diasporas.
- */
- gcwq->flags |= GCWQ_DISASSOCIATED;
- break;
-
case CPU_POST_DEAD:
gcwq->trustee_state = TRUSTEE_BUTCHER;
/* fall through */
@@ -3641,6 +3667,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,

spin_unlock_irqrestore(&gcwq->lock, flags);

+out:
return notifier_from_errno(0);
}

diff --git a/kernel/workqueue_sched.h b/kernel/workqueue_sched.h
index 2d10fc98dc79..3bf73e24e1ea 100644
--- a/kernel/workqueue_sched.h
+++ b/kernel/workqueue_sched.h
@@ -4,6 +4,5 @@
* Scheduler hooks for concurrency managed workqueue. Only to be
* included from sched.c and workqueue.c.
*/
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
- unsigned int cpu);
+void wq_worker_running(struct task_struct *task);
+void wq_worker_sleeping(struct task_struct *task);
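The reworked wq_worker_sleeping()/wq_worker_running() pair above is meant to be driven by the scheduler around a real sleep rather than from under the runqueue lock. A rough sketch of the intended call sites; the wrapper names here are hypothetical:

/*
 * Sketch only: how schedule() might bracket a worker's sleep with
 * the new hooks. Wrapper names are illustrative assumptions.
 */
static inline void sched_submit_work(struct task_struct *tsk)
{
	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_sleeping(tsk);	/* may kick another worker */
}

static inline void sched_update_worker(struct task_struct *tsk)
{
	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_running(tsk);		/* restore nr_running count */
}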
diff --git a/lib/Kconfig b/lib/Kconfig
|
|
index 4a8aba2e5cc0..4c03fe3b7b1b 100644
|
|
--- a/lib/Kconfig
|
|
+++ b/lib/Kconfig
|
|
@@ -303,6 +303,7 @@ config CHECK_SIGNATURE
|
|
|
|
config CPUMASK_OFFSTACK
|
|
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
|
|
+ depends on !PREEMPT_RT_FULL
|
|
help
|
|
Use dynamic allocation for cpumask_var_t, instead of putting
|
|
them on the stack. This is a bit more expensive, but avoids
|
|
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
|
|
index 6777153f18f3..1d80795bb473 100644
|
|
--- a/lib/Kconfig.debug
|
|
+++ b/lib/Kconfig.debug
|
|
@@ -151,7 +151,7 @@ config DEBUG_KERNEL
|
|
|
|
config DEBUG_SHIRQ
|
|
bool "Debug shared IRQ handlers"
|
|
- depends on DEBUG_KERNEL && GENERIC_HARDIRQS
|
|
+ depends on DEBUG_KERNEL && GENERIC_HARDIRQS && !PREEMPT_RT_BASE
|
|
help
|
|
Enable this to generate a spurious interrupt as soon as a shared
|
|
interrupt handler is registered, and just before one is deregistered.
|
|
diff --git a/lib/Makefile b/lib/Makefile
|
|
index 801c567b9e4b..4efab9c50a49 100644
|
|
--- a/lib/Makefile
|
|
+++ b/lib/Makefile
|
|
@@ -38,8 +38,11 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
|
|
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
|
|
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
|
|
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
|
|
+
|
|
+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
|
|
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
|
|
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
|
|
+endif
|
|
|
|
GCOV_PROFILE_hweight.o := n
|
|
CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
|
|
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
|
|
index 0ab9ae8057f0..84c2f50d23ac 100644
|
|
--- a/lib/debugobjects.c
|
|
+++ b/lib/debugobjects.c
|
|
@@ -310,7 +310,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
|
|
struct debug_obj *obj;
|
|
unsigned long flags;
|
|
|
|
- fill_pool();
|
|
+#ifdef CONFIG_PREEMPT_RT_FULL
|
|
+ if (preempt_count() == 0 && !irqs_disabled())
|
|
+#endif
|
|
+ fill_pool();
|
|
|
|
db = get_bucket((unsigned long) addr);
|
|
|
|
@@ -1053,9 +1056,9 @@ static int __init debug_objects_replace_static_objects(void)
|
|
}
|
|
}
|
|
|
|
+ local_irq_enable();
|
|
printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
|
|
obj_pool_used);
|
|
- local_irq_enable();
|
|
return 0;
|
|
free:
|
|
hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
|
|
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
|
|
index 7aae0f2a5e0a..3508e2c80465 100644
|
|
--- a/lib/locking-selftest.c
|
|
+++ b/lib/locking-selftest.c
|
|
@@ -47,10 +47,10 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
|
|
* Normal standalone locks, for the circular and irq-context
|
|
* dependency tests:
|
|
*/
|
|
-static DEFINE_SPINLOCK(lock_A);
|
|
-static DEFINE_SPINLOCK(lock_B);
|
|
-static DEFINE_SPINLOCK(lock_C);
|
|
-static DEFINE_SPINLOCK(lock_D);
|
|
+static DEFINE_RAW_SPINLOCK(lock_A);
|
|
+static DEFINE_RAW_SPINLOCK(lock_B);
|
|
+static DEFINE_RAW_SPINLOCK(lock_C);
|
|
+static DEFINE_RAW_SPINLOCK(lock_D);
|
|
|
|
static DEFINE_RWLOCK(rwlock_A);
|
|
static DEFINE_RWLOCK(rwlock_B);
|
|
@@ -73,12 +73,12 @@ static DECLARE_RWSEM(rwsem_D);
|
|
* but X* and Y* are different classes. We do this so that
|
|
* we do not trigger a real lockup:
|
|
*/
|
|
-static DEFINE_SPINLOCK(lock_X1);
|
|
-static DEFINE_SPINLOCK(lock_X2);
|
|
-static DEFINE_SPINLOCK(lock_Y1);
|
|
-static DEFINE_SPINLOCK(lock_Y2);
|
|
-static DEFINE_SPINLOCK(lock_Z1);
|
|
-static DEFINE_SPINLOCK(lock_Z2);
|
|
+static DEFINE_RAW_SPINLOCK(lock_X1);
|
|
+static DEFINE_RAW_SPINLOCK(lock_X2);
|
|
+static DEFINE_RAW_SPINLOCK(lock_Y1);
|
|
+static DEFINE_RAW_SPINLOCK(lock_Y2);
|
|
+static DEFINE_RAW_SPINLOCK(lock_Z1);
|
|
+static DEFINE_RAW_SPINLOCK(lock_Z2);
|
|
|
|
static DEFINE_RWLOCK(rwlock_X1);
|
|
static DEFINE_RWLOCK(rwlock_X2);
|
|
@@ -107,10 +107,10 @@ static DECLARE_RWSEM(rwsem_Z2);
|
|
*/
|
|
#define INIT_CLASS_FUNC(class) \
|
|
static noinline void \
|
|
-init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \
|
|
- struct rw_semaphore *rwsem) \
|
|
+init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock, \
|
|
+ struct mutex *mutex, struct rw_semaphore *rwsem)\
|
|
{ \
|
|
- spin_lock_init(lock); \
|
|
+ raw_spin_lock_init(lock); \
|
|
rwlock_init(rwlock); \
|
|
mutex_init(mutex); \
|
|
init_rwsem(rwsem); \
|
|
@@ -168,10 +168,10 @@ static void init_shared_classes(void)
|
|
* Shortcuts for lock/unlock API variants, to keep
|
|
* the testcases compact:
|
|
*/
|
|
-#define L(x) spin_lock(&lock_##x)
|
|
-#define U(x) spin_unlock(&lock_##x)
|
|
+#define L(x) raw_spin_lock(&lock_##x)
|
|
+#define U(x) raw_spin_unlock(&lock_##x)
|
|
#define LU(x) L(x); U(x)
|
|
-#define SI(x) spin_lock_init(&lock_##x)
|
|
+#define SI(x) raw_spin_lock_init(&lock_##x)
|
|
|
|
#define WL(x) write_lock(&rwlock_##x)
|
|
#define WU(x) write_unlock(&rwlock_##x)
|
|
@@ -569,6 +569,8 @@ GENERATE_TESTCASE(init_held_rsem)
|
|
#include "locking-selftest-spin-hardirq.h"
|
|
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
|
|
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
+
|
|
#include "locking-selftest-rlock-hardirq.h"
|
|
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
|
|
|
|
@@ -584,9 +586,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
|
|
#include "locking-selftest-wlock-softirq.h"
|
|
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
|
|
|
|
+#endif
|
|
+
|
|
#undef E1
|
|
#undef E2
|
|
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
/*
|
|
* Enabling hardirqs with a softirq-safe lock held:
|
|
*/
|
|
@@ -619,6 +624,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
|
|
#undef E1
|
|
#undef E2
|
|
|
|
+#endif
|
|
+
|
|
/*
|
|
* Enabling irqs with an irq-safe lock held:
|
|
*/
|
|
@@ -642,6 +649,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
|
|
#include "locking-selftest-spin-hardirq.h"
|
|
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
|
|
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
+
|
|
#include "locking-selftest-rlock-hardirq.h"
|
|
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
|
|
|
|
@@ -657,6 +666,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
|
|
#include "locking-selftest-wlock-softirq.h"
|
|
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
|
|
|
|
+#endif
|
|
+
|
|
#undef E1
|
|
#undef E2
|
|
|
|
@@ -688,6 +699,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
|
|
#include "locking-selftest-spin-hardirq.h"
|
|
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
|
|
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
+
|
|
#include "locking-selftest-rlock-hardirq.h"
|
|
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
|
|
|
|
@@ -703,6 +716,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
|
|
#include "locking-selftest-wlock-softirq.h"
|
|
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
|
|
|
|
+#endif
|
|
+
|
|
#undef E1
|
|
#undef E2
|
|
#undef E3
|
|
@@ -736,6 +751,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
|
|
#include "locking-selftest-spin-hardirq.h"
|
|
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
|
|
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
+
|
|
#include "locking-selftest-rlock-hardirq.h"
|
|
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
|
|
|
|
@@ -751,10 +768,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
|
|
#include "locking-selftest-wlock-softirq.h"
|
|
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
|
|
|
|
+#endif
|
|
+
|
|
#undef E1
|
|
#undef E2
|
|
#undef E3
|
|
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
+
|
|
/*
|
|
* read-lock / write-lock irq inversion.
|
|
*
|
|
@@ -817,6 +838,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
|
|
#undef E2
|
|
#undef E3
|
|
|
|
+#endif
|
|
+
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
+
|
|
/*
|
|
* read-lock / write-lock recursion that is actually safe.
|
|
*/
|
|
@@ -855,6 +880,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
|
|
#undef E2
|
|
#undef E3
|
|
|
|
+#endif
|
|
+
|
|
/*
|
|
* read-lock / write-lock recursion that is unsafe.
|
|
*/
|
|
@@ -911,7 +938,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
|
|
|
|
#define I2(x) \
|
|
do { \
|
|
- spin_lock_init(&lock_##x); \
|
|
+ raw_spin_lock_init(&lock_##x); \
|
|
rwlock_init(&rwlock_##x); \
|
|
mutex_init(&mutex_##x); \
|
|
init_rwsem(&rwsem_##x); \
|
|
@@ -1175,6 +1202,7 @@ void locking_selftest(void)
|
|
|
|
printk(" --------------------------------------------------------------------------\n");
|
|
|
|
+#ifndef CONFIG_PREEMPT_RT_FULL
|
|
/*
|
|
* irq-context testcases:
|
|
*/
|
|
@@ -1187,6 +1215,28 @@ void locking_selftest(void)
|
|
|
|
DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
|
|
// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
|
|
+#else
|
|
+ /* On -rt, we only do hardirq context test for raw spinlock */
|
|
+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
|
|
+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
|
|
+
|
|
+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
|
|
+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
|
|
+
|
|
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
|
|
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
|
|
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
|
|
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
|
|
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
|
|
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
|
|
+
|
|
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
|
|
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
|
|
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
|
|
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
|
|
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
|
|
+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
|
|
+#endif
|
|
|
|
if (unexpected_testcase_failures) {
|
|
printk("-----------------------------------------------------------------\n");
|
|
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 3ac50dc55638..52ebdb19f8af 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -202,12 +202,13 @@ radix_tree_node_alloc(struct radix_tree_root *root)
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
-		rtp = &__get_cpu_var(radix_tree_preloads);
+		rtp = &get_cpu_var(radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes[rtp->nr - 1];
			rtp->nodes[rtp->nr - 1] = NULL;
			rtp->nr--;
		}
+		put_cpu_var(radix_tree_preloads);
	}
	if (ret == NULL)
		ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
@@ -242,6 +243,7 @@ radix_tree_node_free(struct radix_tree_node *node)
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Load up this CPU's radix_tree_node buffer with sufficient objects to
  * ensure that the addition of a single element in the tree cannot fail.  On
@@ -276,6 +278,7 @@ out:
	return ret;
 }
 EXPORT_SYMBOL(radix_tree_preload);
+#endif
 
 /*
  * Return the maximum key which can be stored into a
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8c2f278e5eb7..4f9d72a00bab 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -424,7 +424,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
			flush_kernel_dcache_page(miter->page);
 
		if (miter->__flags & SG_MITER_ATOMIC) {
-			WARN_ON(!irqs_disabled());
+			WARN_ON_NONRT(!irqs_disabled());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);
@@ -464,7 +464,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 
	sg_miter_start(&miter, sgl, nents, sg_flags);
 
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 
	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;
@@ -481,7 +481,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 
	sg_miter_stop(&miter);
 
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
	return offset;
 }
 
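The _nort ("not on RT") helpers and WARN_ON_NONRT() used in the two hunks above come from earlier patches in this series. A hedged sketch of their intent (the names match the series, the bodies are paraphrased): on PREEMPT_RT_FULL the interrupt disabling and the warning compile away, because this code runs in preemptible task context there; on !RT they fall back to the stock primitives.

/* Sketch only -- the authoritative definitions live in the series' headers. */
#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_save_nort(flags)	do { local_save_flags(flags); } while (0)
# define local_irq_restore_nort(flags)	do { (void)(flags); } while (0)
# define WARN_ON_NONRT(condition)	do { } while (0)
#else
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
# define WARN_ON_NONRT(condition)	WARN_ON(condition)
#endif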
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 4c0d0e51d49e..dbb1570de195 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
	if (!printk_ratelimit())
		goto out_enable;
 
-	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
-			"code: %s/%d\n",
-			preempt_count() - 1, current->comm, current->pid);
+	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
+	       "code: %s/%d\n", preempt_count() - 1,
+	       __migrate_disabled(current), current->comm, current->pid);
	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();
 
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 525d160d44f0..5f5eae59a1ac 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 
 EXPORT_SYMBOL(__raw_spin_lock_init);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
 {
@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 }
 
 EXPORT_SYMBOL(__rwlock_init);
+#endif
 
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
@@ -155,6 +157,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
	arch_spin_unlock(&lock->raw_lock);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 static void rwlock_bug(rwlock_t *lock, const char *msg)
 {
	if (!debug_locks_off())
@@ -296,3 +299,5 @@ void do_raw_write_unlock(rwlock_t *lock)
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
 }
+
+#endif
diff --git a/localversion-rt b/localversion-rt
new file mode 100644
index 000000000000..f824f53c19ea
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
+-rt136
diff --git a/mm/Kconfig b/mm/Kconfig
index e338407f1225..096f43635264 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -313,7 +313,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
 
 config TRANSPARENT_HUGEPAGE
	bool "Transparent Hugepage Support"
-	depends on X86 && MMU
+	depends on X86 && MMU && !PREEMPT_RT_FULL
	select COMPACTION
	help
	  Transparent Hugepages allows the kernel to use huge pages and
diff --git a/mm/filemap.c b/mm/filemap.c
index 79c4b2b0b14e..51c34c0ff973 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2036,7 +2036,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
	char *kaddr;
	size_t copied;
 
-	BUG_ON(!in_atomic());
+	BUG_ON(!pagefault_disabled());
	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9c364428cf71..8c3dd645ac10 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2061,14 +2061,17 @@ static void drain_local_stock(struct work_struct *dummy)
 */
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
-	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+	struct memcg_stock_pcp *stock;
+	int cpu = get_cpu_light();
+
+	stock = &per_cpu(memcg_stock, cpu);
 
	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;
-	put_cpu_var(memcg_stock);
+	put_cpu_light();
 }
 
 /*
@@ -2082,7 +2085,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
 
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
-	curcpu = get_cpu();
+	curcpu = get_cpu_light();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;
@@ -2099,7 +2102,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
			schedule_work_on(cpu, &stock->work);
		}
	}
-	put_cpu();
+	put_cpu_light();
 
	if (!sync)
		goto out;
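get_cpu_light()/put_cpu_light() recur throughout the rest of this patch, so their shape is worth spelling out once. A sketch, assuming they mirror the smp.h helpers introduced earlier in the series: on RT they pin the task to its CPU with migrate_disable() instead of disabling preemption, so smp_processor_id() stays stable while the section remains fully preemptible.

/* Sketch of the helpers assumed by the conversions above. */
#ifdef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()	migrate_enable()
#else
# define get_cpu_light()	get_cpu()	/* preempt_disable() based */
# define put_cpu_light()	put_cpu()
#endif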
diff --git a/mm/memory.c b/mm/memory.c
index 02aef93416f2..eec97195b9b7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3533,6 +3533,32 @@ unlock:
	return 0;
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+void pagefault_disable(void)
+{
+	migrate_disable();
+	current->pagefault_disabled++;
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+EXPORT_SYMBOL_GPL(pagefault_disable);
+
+void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	current->pagefault_disabled--;
+	migrate_enable();
+}
+EXPORT_SYMBOL_GPL(pagefault_enable);
+#endif
+
 /*
  * By the time we get here, we already hold the mm semaphore
 */
@@ -4087,3 +4113,35 @@ void copy_user_huge_page(struct page *dst, struct page *src,
	}
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
+#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0)
+/*
+ * Heinous hack, relies on the caller doing something like:
+ *
+ *   pte = alloc_pages(PGALLOC_GFP, 0);
+ *   if (pte)
+ *     pgtable_page_ctor(pte);
+ *   return pte;
+ *
+ * This ensures we release the page and return NULL when the
+ * lock allocation fails.
+ */
+struct page *pte_lock_init(struct page *page)
+{
+	page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+	if (page->ptl) {
+		spin_lock_init(__pte_lockptr(page));
+	} else {
+		__free_page(page);
+		page = NULL;
+	}
+	return page;
+}
+
+void pte_lock_deinit(struct page *page)
+{
+	kfree(page->ptl);
+	page->mapping = NULL;
+}
+
+#endif
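The pagefault_disabled counter added above is only useful if the fault path honours it. A hedged sketch of the consumer side (the per-architecture patches in this series add a test along these lines; fault_disallowed() is a hypothetical name used purely for illustration):

/* Sketch: how an arch do_page_fault() would consult the new counter. */
static inline bool fault_disallowed(struct mm_struct *mm)
{
	/* Must not sleep on mmap_sem: take the exception fixup path. */
	return in_atomic() || !mm || current->pagefault_disabled;
}

The migrate_disable() in pagefault_disable() keeps the task on one CPU so per-CPU state observed under it stays coherent, and the barrier() orders the counter update against the accesses that may fault.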
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index 3dcfaf4ed355..1385e482fd80 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -26,6 +26,7 @@ void use_mm(struct mm_struct *mm)
	struct task_struct *tsk = current;
 
	task_lock(tsk);
+	preempt_disable_rt();
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		atomic_inc(&mm->mm_count);
@@ -33,6 +34,7 @@ void use_mm(struct mm_struct *mm)
	}
	tsk->mm = mm;
	switch_mm(active_mm, mm, tsk);
+	preempt_enable_rt();
	task_unlock(tsk);
 
	if (active_mm != mm)
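preempt_disable_rt()/preempt_enable_rt() mark sections that only need to be non-preemptible on RT. On mainline the task_lock() spinlock already disables preemption here, so switch_mm() cannot be preempted; on RT spinlocks are sleeping locks and do not, hence the explicit guard. A sketch, assuming the series' usual convention (these same guards reappear in the mm/vmstat.c hunks below):

/* Sketch of the RT-only preemption guards. */
#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_rt()	preempt_disable()
# define preempt_enable_rt()	preempt_enable()
#else
# define preempt_disable_rt()	barrier()
# define preempt_enable_rt()	barrier()
#endif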
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2891a9059f8a..735514ac81c0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/locallock.h>
 #include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
 
 #include <asm/tlbflush.h>
@@ -216,6 +217,18 @@ EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define cpu_lock_irqsave(cpu, flags)		\
+	local_lock_irqsave_on(pa_lock, flags, cpu)
+# define cpu_unlock_irqrestore(cpu, flags)	\
+	local_unlock_irqrestore_on(pa_lock, flags, cpu)
+#else
+# define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
+#endif
+
 int page_group_by_mobility_disabled __read_mostly;
 
 static void set_pageblock_migratetype(struct page *page, int migratetype)
@@ -619,7 +632,7 @@ static inline int free_pages_check(struct page *page)
 }
 
 /*
- * Frees a number of pages from the PCP lists
+ * Frees a number of pages which have been collected from the pcp lists.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
@@ -630,16 +643,42 @@ static inline int free_pages_check(struct page *page)
 * pinned" detection logic.
 */
 static void free_pcppages_bulk(struct zone *zone, int count,
-					struct per_cpu_pages *pcp)
+			       struct list_head *list)
 {
-	int migratetype = 0;
-	int batch_free = 0;
	int to_free = count;
+	unsigned long flags;
 
-	spin_lock(&zone->lock);
+	spin_lock_irqsave(&zone->lock, flags);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
 
+	while (!list_empty(list)) {
+		struct page *page = list_first_entry(list, struct page, lru);
+
+		/* must delete as __free_one_page list manipulates */
+		list_del(&page->lru);
+		/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+		__free_one_page(page, zone, 0, page_private(page));
+		trace_mm_page_pcpu_drain(page, 0, page_private(page));
+		to_free--;
+	}
+	WARN_ON(to_free != 0);
+	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
+	spin_unlock_irqrestore(&zone->lock, flags);
+}
+
+/*
+ * Moves a number of pages from the PCP lists to free list which
+ * is freed outside of the locked region.
+ *
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ */
+static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
+			      struct list_head *dst)
+{
+	int migratetype = 0, batch_free = 0;
+
	while (to_free) {
		struct page *page;
		struct list_head *list;
@@ -655,7 +694,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
-			list = &pcp->lists[migratetype];
+			list = &src->lists[migratetype];
		} while (list_empty(list));
 
		/* This is the only non-empty list. Free them all. */
@@ -663,28 +702,25 @@ static void free_pcppages_bulk(struct zone *zone, int count,
			batch_free = to_free;
 
		do {
-			page = list_entry(list->prev, struct page, lru);
-			/* must delete as __free_one_page list manipulates */
+			page = list_last_entry(list, struct page, lru);
			list_del(&page->lru);
-			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
-			__free_one_page(page, zone, 0, page_private(page));
-			trace_mm_page_pcpu_drain(page, 0, page_private(page));
+			list_add(&page->lru, dst);
		} while (--to_free && --batch_free && !list_empty(list));
	}
-	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
-	spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
 {
-	spin_lock(&zone->lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&zone->lock, flags);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
 
	__free_one_page(page, zone, order, migratetype);
	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
-	spin_unlock(&zone->lock);
+	spin_unlock_irqrestore(&zone->lock, flags);
 }
 
 static bool free_pages_prepare(struct page *page, unsigned int order)
@@ -721,13 +757,13 @@ static void __free_pages_ok(struct page *page, unsigned int order)
	if (!free_pages_prepare(page, order))
		return;
 
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order,
					get_pageblock_migratetype(page));
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
@@ -1111,16 +1147,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 {
	unsigned long flags;
+	LIST_HEAD(dst);
	int to_drain;
 
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
-	free_pcppages_bulk(zone, to_drain, pcp);
+	isolate_pcp_pages(to_drain, pcp, &dst);
	pcp->count -= to_drain;
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
+	free_pcppages_bulk(zone, to_drain, &dst);
 }
 #endif
 
@@ -1139,16 +1177,21 @@ static void drain_pages(unsigned int cpu)
	for_each_populated_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;
+		LIST_HEAD(dst);
+		int count;
 
-		local_irq_save(flags);
+		cpu_lock_irqsave(cpu, flags);
		pset = per_cpu_ptr(zone->pageset, cpu);
 
		pcp = &pset->pcp;
-		if (pcp->count) {
-			free_pcppages_bulk(zone, pcp->count, pcp);
+		count = pcp->count;
+		if (count) {
+			isolate_pcp_pages(count, pcp, &dst);
			pcp->count = 0;
		}
-		local_irq_restore(flags);
+		cpu_unlock_irqrestore(cpu, flags);
+		if (count)
+			free_pcppages_bulk(zone, count, &dst);
	}
 }
 
@@ -1201,7 +1244,12 @@ void drain_all_pages(void)
		else
			cpumask_clear_cpu(cpu, &cpus_with_pcps);
	}
+#ifndef CONFIG_PREEMPT_RT_BASE
	on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
+#else
+	for_each_cpu(cpu, &cpus_with_pcps)
+		drain_pages(cpu);
+#endif
 }
 
 #ifdef CONFIG_HIBERNATION
@@ -1257,7 +1305,7 @@ void free_hot_cold_page(struct page *page, int cold)
 
	migratetype = get_pageblock_migratetype(page);
	set_page_private(page, migratetype);
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_event(PGFREE);
@@ -1284,12 +1332,19 @@ void free_hot_cold_page(struct page *page, int cold)
	list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
-		free_pcppages_bulk(zone, pcp->batch, pcp);
+		LIST_HEAD(dst);
+		int count;
+
+		isolate_pcp_pages(pcp->batch, pcp, &dst);
		pcp->count -= pcp->batch;
+		count = pcp->batch;
+		local_unlock_irqrestore(pa_lock, flags);
+		free_pcppages_bulk(zone, count, &dst);
+		return;
	}
 
 out:
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 /*
@@ -1397,7 +1452,7 @@ again:
		struct per_cpu_pages *pcp;
		struct list_head *list;
 
-		local_irq_save(flags);
+		local_lock_irqsave(pa_lock, flags);
		pcp = &this_cpu_ptr(zone->pageset)->pcp;
		list = &pcp->lists[migratetype];
		if (list_empty(list)) {
@@ -1429,17 +1484,19 @@ again:
		 */
		WARN_ON_ONCE(order > 1);
	}
-	spin_lock_irqsave(&zone->lock, flags);
+	local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
	page = __rmqueue(zone, order, migratetype);
-	spin_unlock(&zone->lock);
-	if (!page)
+	if (!page) {
+		spin_unlock(&zone->lock);
		goto failed;
+	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
+	spin_unlock(&zone->lock);
	}
 
	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone, gfp_flags);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 
	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
@@ -1447,7 +1504,7 @@ again:
	return page;
 
 failed:
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
	return NULL;
 }
 
@@ -2053,8 +2110,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
	if (*did_some_progress != COMPACT_SKIPPED) {
 
		/* Page migration frees to the PCP lists but we want merging */
-		drain_pages(get_cpu());
-		put_cpu();
+		drain_pages(get_cpu_light());
+		put_cpu_light();
 
		page = get_page_from_freelist(gfp_mask, nodemask,
				order, zonelist, high_zoneidx,
@@ -3869,14 +3926,16 @@ static int __zone_pcp_update(void *data)
	for_each_possible_cpu(cpu) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;
+		LIST_HEAD(dst);
 
		pset = per_cpu_ptr(zone->pageset, cpu);
		pcp = &pset->pcp;
 
-		local_irq_save(flags);
-		free_pcppages_bulk(zone, pcp->count, pcp);
+		cpu_lock_irqsave(cpu, flags);
+		isolate_pcp_pages(pcp->count, pcp, &dst);
+		free_pcppages_bulk(zone, pcp->count, &dst);
		setup_pageset(pset, batch);
-		local_irq_restore(flags);
+		cpu_unlock_irqrestore(cpu, flags);
	}
	return 0;
 }
@@ -4907,6 +4966,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 void __init page_alloc_init(void)
 {
	hotcpu_notifier(page_alloc_cpu_notify, 0);
+	local_irq_lock_init(pa_lock);
 }
 
 /*
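Every local_irq_save() that guarded the per-CPU pagesets above has become the named local lock pa_lock. A conceptual sketch of what the locallock.h primitives (introduced earlier in this series) provide; this describes the semantics, not the literal implementation:

/*
 * DEFINE_LOCAL_IRQ_LOCK(pa_lock);
 *
 * local_lock_irqsave(pa_lock, flags);
 *   !RT: local_irq_save(flags)            -- irqs off, not preemptible
 *    RT: migrate_disable() + spin_lock()  -- irqs on, preemptible,
 *                                            task pinned to its CPU
 * local_unlock_irqrestore(pa_lock, flags);
 *   !RT: local_irq_restore(flags)
 *    RT: spin_unlock() + migrate_enable()
 *
 * cpu_lock_irqsave(cpu, flags) exploits the RT case: since each
 * per-CPU instance is a sleeping lock, another CPU may legitimately
 * take CPU "cpu"'s instance, which is how drain_all_pages() avoids
 * IPIs on RT.
 */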
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index b7693fdc6bb5..901203877789 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -13,6 +13,14 @@
 
 static unsigned long total_usage;
 
+static void page_cgroup_lock_init(struct page_cgroup *pc, int nr_pages)
+{
+#ifdef CONFIG_PREEMPT_RT_BASE
+	for (; nr_pages; nr_pages--, pc++)
+		spin_lock_init(&pc->pcg_lock);
+#endif
+}
+
 #if !defined(CONFIG_SPARSEMEM)
 
 
@@ -60,6 +68,7 @@ static int __init alloc_node_page_cgroup(int nid)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
+	page_cgroup_lock_init(base, nr_pages);
	return 0;
 }
 
@@ -150,6 +159,8 @@ static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
		return -ENOMEM;
	}
 
+	page_cgroup_lock_init(base, PAGES_PER_SECTION);
+
	/*
	 * The passed "pfn" may not be aligned to SECTION.  For the calculation
	 * we need to apply a mask.
diff --git a/mm/slab.c b/mm/slab.c
index 3714dd9c5877..00cf24f5b477 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -116,6 +116,7 @@
 #include	<linux/kmemcheck.h>
 #include	<linux/memory.h>
 #include	<linux/prefetch.h>
+#include	<linux/locallock.h>
 
 #include	<asm/cacheflush.h>
 #include	<asm/tlbflush.h>
@@ -611,6 +612,12 @@ int slab_is_available(void)
	return g_cpucache_up >= EARLY;
 }
 
+/*
+ * Guard access to the cache-chain.
+ */
+static DEFINE_MUTEX(cache_chain_mutex);
+static struct list_head cache_chain;
+
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -672,38 +679,41 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
	slab_set_debugobj_lock_classes_node(cachep, node);
 }
 
-static void init_node_lock_keys(int q)
+static void init_lock_keys(struct kmem_cache *cachep, int node)
 {
-	struct cache_sizes *s = malloc_sizes;
+	struct kmem_list3 *l3;
 
	if (g_cpucache_up < LATE)
		return;
 
-	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
-		struct kmem_list3 *l3;
+	l3 = cachep->nodelists[node];
+	if (!l3 || OFF_SLAB(cachep))
+		return;
 
-		l3 = s->cs_cachep->nodelists[q];
-		if (!l3 || OFF_SLAB(s->cs_cachep))
-			continue;
+	slab_set_lock_classes(cachep, &on_slab_l3_key, &on_slab_alc_key, node);
+}
 
-		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
-				&on_slab_alc_key, q);
-	}
+static void init_node_lock_keys(int node)
+{
+	struct kmem_cache *cachep;
+
+	list_for_each_entry(cachep, &cache_chain, next)
+		init_lock_keys(cachep, node);
 }
 
-static inline void init_lock_keys(void)
+static inline void init_cachep_lock_keys(struct kmem_cache *cachep)
 {
	int node;
 
	for_each_node(node)
-		init_node_lock_keys(node);
+		init_lock_keys(cachep, node);
 }
 #else
-static void init_node_lock_keys(int q)
+static void init_node_lock_keys(int node)
 {
 }
 
-static inline void init_lock_keys(void)
+static void init_cachep_lock_keys(struct kmem_cache *cachep)
 {
 }
 
@@ -716,19 +726,79 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 }
 #endif
 
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
+static DEFINE_PER_CPU(struct list_head, slab_free_list);
+static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+# define slab_on_each_cpu(func, cp)	on_each_cpu(func, cp, 1)
+#else
 /*
- * Guard access to the cache-chain.
+ * execute func() for all CPUs. On PREEMPT_RT we don't actually have
+ * to run on the remote CPUs - we only have to take their CPU-locks.
+ * (This is a rare operation, so cacheline bouncing is not an issue.)
 */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
+static void
+slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg)
+{
+	unsigned int i;
 
-static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
+	get_cpu_light();
+	for_each_online_cpu(i)
+		func(arg, i);
+	put_cpu_light();
+}
+
+static void lock_slab_on(unsigned int cpu)
+{
+	local_lock_irq_on(slab_lock, cpu);
+}
+
+static void unlock_slab_on(unsigned int cpu)
+{
+	local_unlock_irq_on(slab_lock, cpu);
+}
+#endif
+
+static void free_delayed(struct list_head *h)
+{
+	while(!list_empty(h)) {
+		struct page *page = list_first_entry(h, struct page, lru);
+
+		list_del(&page->lru);
+		__free_pages(page, page->index);
+	}
+}
+
+static void unlock_l3_and_free_delayed(spinlock_t *list_lock)
+{
+	LIST_HEAD(tmp);
+
+	list_splice_init(&__get_cpu_var(slab_free_list), &tmp);
+	local_spin_unlock_irq(slab_lock, list_lock);
+	free_delayed(&tmp);
+}
+
+static void unlock_slab_and_free_delayed(unsigned long flags)
+{
+	LIST_HEAD(tmp);
+
+	list_splice_init(&__get_cpu_var(slab_free_list), &tmp);
+	local_unlock_irqrestore(slab_lock, flags);
+	free_delayed(&tmp);
+}
 
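The slab_free_list/free_delayed() pair above exists because kmem_freepages() can now be reached while slab_lock is held, and on RT handing pages back to the page allocator may itself sleep (pa_lock in the previous file is a sleeping lock there). The idiom the following hunks apply, condensed as a sketch:

/* Sketch of the park-then-free idiom used throughout the slab changes. */
	LIST_HEAD(tmp);

	local_lock_irq(slab_lock);
	/* ... free objects; empty slabs are parked on slab_free_list ... */
	list_splice_init(&__get_cpu_var(slab_free_list), &tmp);
	local_unlock_irq(slab_lock);
	free_delayed(&tmp);	/* the page allocator is safe to call here */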
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
	return cachep->array[smp_processor_id()];
 }
 
+static inline struct array_cache *cpu_cache_get_on_cpu(struct kmem_cache *cachep,
+						       int cpu)
+{
+	return cachep->array[cpu];
+}
+
 static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
 {
@@ -1077,9 +1147,10 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];
 
-		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
+		if (ac && ac->avail &&
+		    local_spin_trylock_irq(slab_lock, &ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
-			spin_unlock_irq(&ac->lock);
+			local_spin_unlock_irq(slab_lock, &ac->lock);
		}
	}
 }
@@ -1094,9 +1165,9 @@ static void drain_alien_cache(struct kmem_cache *cachep,
	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
-			spin_lock_irqsave(&ac->lock, flags);
+			local_spin_lock_irqsave(slab_lock, &ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
-			spin_unlock_irqrestore(&ac->lock, flags);
+			local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags);
		}
	}
 }
@@ -1175,11 +1246,11 @@ static int init_cache_nodelists_node(int node)
			cachep->nodelists[node] = l3;
		}
 
-		spin_lock_irq(&cachep->nodelists[node]->list_lock);
+		local_spin_lock_irq(slab_lock, &cachep->nodelists[node]->list_lock);
		cachep->nodelists[node]->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
-		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+		local_spin_unlock_irq(slab_lock, &cachep->nodelists[node]->list_lock);
	}
	return 0;
 }
@@ -1204,7 +1275,7 @@ static void __cpuinit cpuup_canceled(long cpu)
		if (!l3)
			goto free_array_cache;
 
-		spin_lock_irq(&l3->list_lock);
+		local_spin_lock_irq(slab_lock, &l3->list_lock);
 
		/* Free limit for this kmem_list3 */
		l3->free_limit -= cachep->batchcount;
@@ -1212,7 +1283,7 @@ static void __cpuinit cpuup_canceled(long cpu)
			free_block(cachep, nc->entry, nc->avail, node);
 
		if (!cpumask_empty(mask)) {
-			spin_unlock_irq(&l3->list_lock);
+			unlock_l3_and_free_delayed(&l3->list_lock);
			goto free_array_cache;
		}
 
@@ -1226,7 +1297,7 @@ static void __cpuinit cpuup_canceled(long cpu)
		alien = l3->alien;
		l3->alien = NULL;
 
-		spin_unlock_irq(&l3->list_lock);
+		unlock_l3_and_free_delayed(&l3->list_lock);
 
		kfree(shared);
		if (alien) {
@@ -1300,7 +1371,7 @@ static int __cpuinit cpuup_prepare(long cpu)
		l3 = cachep->nodelists[node];
		BUG_ON(!l3);
 
-		spin_lock_irq(&l3->list_lock);
+		local_spin_lock_irq(slab_lock, &l3->list_lock);
		if (!l3->shared) {
			/*
			 * We are serialised from CPU_DEAD or
@@ -1315,7 +1386,7 @@ static int __cpuinit cpuup_prepare(long cpu)
			alien = NULL;
		}
 #endif
-		spin_unlock_irq(&l3->list_lock);
+		local_spin_unlock_irq(slab_lock, &l3->list_lock);
		kfree(shared);
		free_alien_cache(alien);
		if (cachep->flags & SLAB_DEBUG_OBJECTS)
@@ -1506,6 +1577,10 @@ void __init kmem_cache_init(void)
	if (num_possible_nodes() == 1)
		use_alien_caches = 0;
 
+	local_irq_lock_init(slab_lock);
+	for_each_possible_cpu(i)
+		INIT_LIST_HEAD(&per_cpu(slab_free_list, i));
+
	for (i = 0; i < NUM_INIT_LISTS; i++) {
		kmem_list3_init(&initkmem_list3[i]);
		if (i < MAX_NUMNODES)
@@ -1687,14 +1762,13 @@ void __init kmem_cache_init_late(void)
 
	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&cache_chain_mutex);
-	list_for_each_entry(cachep, &cache_chain, next)
+	list_for_each_entry(cachep, &cache_chain, next) {
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
+		init_cachep_lock_keys(cachep);
+	}
	mutex_unlock(&cache_chain_mutex);
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
	/* Done! */
	g_cpucache_up = FULL;
 
@@ -1834,12 +1908,14 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 /*
  * Interface to system's page release.
 */
-static void kmem_freepages(struct kmem_cache *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, void *addr, bool delayed)
 {
	unsigned long i = (1 << cachep->gfporder);
-	struct page *page = virt_to_page(addr);
+	struct page *page, *basepage = virt_to_page(addr);
	const unsigned long nr_freed = i;
 
+	page = basepage;
+
	kmemcheck_free_shadow(page, cachep->gfporder);
 
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
@@ -1855,7 +1931,13 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
	}
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
-	free_pages((unsigned long)addr, cachep->gfporder);
+
+	if (!delayed) {
+		free_pages((unsigned long)addr, cachep->gfporder);
+	} else {
+		basepage->index = cachep->gfporder;
+		list_add(&basepage->lru, &__get_cpu_var(slab_free_list));
+	}
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
@@ -1863,7 +1945,7 @@ static void kmem_rcu_free(struct rcu_head *head)
	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
	struct kmem_cache *cachep = slab_rcu->cachep;
 
-	kmem_freepages(cachep, slab_rcu->addr);
+	kmem_freepages(cachep, slab_rcu->addr, false);
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->slabp_cache, slab_rcu);
 }
@@ -2082,7 +2164,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
 * Before calling the slab must have been unlinked from the cache. The
 * cache-lock is not held/needed.
 */
-static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp,
+			 bool delayed)
 {
	void *addr = slabp->s_mem - slabp->colouroff;
 
@@ -2095,7 +2178,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
		slab_rcu->addr = addr;
		call_rcu(&slab_rcu->head, kmem_rcu_free);
	} else {
-		kmem_freepages(cachep, addr);
+		kmem_freepages(cachep, addr, delayed);
		if (OFF_SLAB(cachep))
			kmem_cache_free(cachep->slabp_cache, slabp);
	}
@@ -2544,6 +2627,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
		slab_set_debugobj_lock_classes(cachep);
	}
 
+	init_cachep_lock_keys(cachep);
+
	/* cache setup completed, link it into the list */
	list_add(&cachep->next, &cache_chain);
 oops:
@@ -2561,7 +2646,7 @@ EXPORT_SYMBOL(kmem_cache_create);
 #if DEBUG
 static void check_irq_off(void)
 {
-	BUG_ON(!irqs_disabled());
+	BUG_ON_NONRT(!irqs_disabled());
 }
 
 static void check_irq_on(void)
@@ -2596,26 +2681,43 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
			struct array_cache *ac,
			int force, int node);
 
-static void do_drain(void *arg)
+static void __do_drain(void *arg, unsigned int cpu)
 {
	struct kmem_cache *cachep = arg;
	struct array_cache *ac;
-	int node = numa_mem_id();
+	int node = cpu_to_mem(cpu);
 
-	check_irq_off();
-	ac = cpu_cache_get(cachep);
+	ac = cpu_cache_get_on_cpu(cachep, cpu);
	spin_lock(&cachep->nodelists[node]->list_lock);
	free_block(cachep, ac->entry, ac->avail, node);
	spin_unlock(&cachep->nodelists[node]->list_lock);
	ac->avail = 0;
 }
 
+#ifndef CONFIG_PREEMPT_RT_BASE
+static void do_drain(void *arg)
+{
+	__do_drain(arg, smp_processor_id());
+}
+#else
+static void do_drain(void *arg, int cpu)
+{
+	LIST_HEAD(tmp);
+
+	lock_slab_on(cpu);
+	__do_drain(arg, cpu);
+	list_splice_init(&per_cpu(slab_free_list, cpu), &tmp);
+	unlock_slab_on(cpu);
+	free_delayed(&tmp);
+}
+#endif
+
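Note why the RT do_drain() variant needs no IPI: lock_slab_on(cpu) takes the slab_lock instance of a remote CPU. That would be unsound on !RT, where the lock doubles as an interrupts-off section, but on RT each per-CPU instance is an ordinary sleeping lock, so any CPU may acquire it and then manipulate that CPU's array caches in mutual exclusion with their owner. slab_on_each_cpu() above relies on exactly this property, and the same trick reappears in do_ccupdate_local() further down.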
 static void drain_cpu_caches(struct kmem_cache *cachep)
 {
	struct kmem_list3 *l3;
	int node;
 
-	on_each_cpu(do_drain, cachep, 1);
+	slab_on_each_cpu(do_drain, cachep);
	check_irq_on();
	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
@@ -2646,10 +2748,10 @@ static int drain_freelist(struct kmem_cache *cache,
	nr_freed = 0;
	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
 
-		spin_lock_irq(&l3->list_lock);
+		local_spin_lock_irq(slab_lock, &l3->list_lock);
		p = l3->slabs_free.prev;
		if (p == &l3->slabs_free) {
-			spin_unlock_irq(&l3->list_lock);
+			local_spin_unlock_irq(slab_lock, &l3->list_lock);
			goto out;
		}
 
@@ -2663,8 +2765,8 @@ static int drain_freelist(struct kmem_cache *cache,
		 * to the cache.
		 */
		l3->free_objects -= cache->num;
-		spin_unlock_irq(&l3->list_lock);
-		slab_destroy(cache, slabp);
+		local_spin_unlock_irq(slab_lock, &l3->list_lock);
+		slab_destroy(cache, slabp, false);
		nr_freed++;
	}
 out:
@@ -2958,7 +3060,7 @@ static int cache_grow(struct kmem_cache *cachep,
	offset *= cachep->colour_off;
 
	if (local_flags & __GFP_WAIT)
-		local_irq_enable();
+		local_unlock_irq(slab_lock);
 
	/*
	 * The test for missing atomic flag is performed here, rather than
@@ -2988,7 +3090,7 @@ static int cache_grow(struct kmem_cache *cachep,
	cache_init_objs(cachep, slabp);
 
	if (local_flags & __GFP_WAIT)
-		local_irq_disable();
+		local_lock_irq(slab_lock);
	check_irq_off();
	spin_lock(&l3->list_lock);
 
@@ -2999,10 +3101,10 @@ static int cache_grow(struct kmem_cache *cachep,
	spin_unlock(&l3->list_lock);
	return 1;
 opps1:
-	kmem_freepages(cachep, objp);
+	kmem_freepages(cachep, objp, false);
 failed:
	if (local_flags & __GFP_WAIT)
-		local_irq_disable();
+		local_lock_irq(slab_lock);
	return 0;
 }
 
@@ -3396,11 +3498,11 @@ retry:
	 * set and go into memory reserves if necessary.
	 */
	if (local_flags & __GFP_WAIT)
-		local_irq_enable();
+		local_unlock_irq(slab_lock);
	kmem_flagcheck(cache, flags);
	obj = kmem_getpages(cache, local_flags, numa_mem_id());
	if (local_flags & __GFP_WAIT)
-		local_irq_disable();
+		local_lock_irq(slab_lock);
	if (obj) {
		/*
		 * Insert into the appropriate per node queues
@@ -3518,7 +3620,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
		return NULL;
 
	cache_alloc_debugcheck_before(cachep, flags);
-	local_irq_save(save_flags);
+	local_lock_irqsave(slab_lock, save_flags);
 
	if (nodeid == NUMA_NO_NODE)
		nodeid = slab_node;
@@ -3543,7 +3645,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
	/* ___cache_alloc_node can fall back to other nodes */
	ptr = ____cache_alloc_node(cachep, flags, nodeid);
  out:
-	local_irq_restore(save_flags);
+	local_unlock_irqrestore(slab_lock, save_flags);
	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
				 flags);
@@ -3603,9 +3705,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
		return NULL;
 
	cache_alloc_debugcheck_before(cachep, flags);
-	local_irq_save(save_flags);
+	local_lock_irqsave(slab_lock, save_flags);
	objp = __do_cache_alloc(cachep, flags);
-	local_irq_restore(save_flags);
+	local_unlock_irqrestore(slab_lock, save_flags);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
				 flags);
@@ -3653,7 +3755,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
				 * a different cache, refer to comments before
				 * alloc_slabmgmt.
				 */
-				slab_destroy(cachep, slabp);
+				slab_destroy(cachep, slabp, true);
			} else {
				list_add(&slabp->list, &l3->slabs_free);
			}
@@ -3915,12 +4017,12 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
	unsigned long flags;
 
-	local_irq_save(flags);
	debug_check_no_locks_freed(objp, obj_size(cachep));
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, obj_size(cachep));
+	local_lock_irqsave(slab_lock, flags);
	__cache_free(cachep, objp, __builtin_return_address(0));
-	local_irq_restore(flags);
+	unlock_slab_and_free_delayed(flags);
 
	trace_kmem_cache_free(_RET_IP_, objp);
 }
@@ -3944,13 +4046,13 @@ void kfree(const void *objp)
 
	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;
-	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	debug_check_no_locks_freed(objp, obj_size(c));
	debug_check_no_obj_freed(objp, obj_size(c));
+	local_lock_irqsave(slab_lock, flags);
	__cache_free(c, (void *)objp, __builtin_return_address(0));
-	local_irq_restore(flags);
+	unlock_slab_and_free_delayed(flags);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3993,7 +4095,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
		if (l3) {
			struct array_cache *shared = l3->shared;
 
-			spin_lock_irq(&l3->list_lock);
+			local_spin_lock_irq(slab_lock, &l3->list_lock);
 
			if (shared)
				free_block(cachep, shared->entry,
@@ -4006,7 +4108,8 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
			}
			l3->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
-			spin_unlock_irq(&l3->list_lock);
+			unlock_l3_and_free_delayed(&l3->list_lock);
+
			kfree(shared);
			free_alien_cache(new_alien);
			continue;
@@ -4053,17 +4156,30 @@ struct ccupdate_struct {
	struct array_cache *new[0];
 };
 
-static void do_ccupdate_local(void *info)
+static void __do_ccupdate_local(void *info, int cpu)
 {
	struct ccupdate_struct *new = info;
	struct array_cache *old;
 
-	check_irq_off();
-	old = cpu_cache_get(new->cachep);
+	old = cpu_cache_get_on_cpu(new->cachep, cpu);
+
+	new->cachep->array[cpu] = new->new[cpu];
+	new->new[cpu] = old;
+}
 
-	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
-	new->new[smp_processor_id()] = old;
+#ifndef CONFIG_PREEMPT_RT_BASE
+static void do_ccupdate_local(void *info)
+{
+	__do_ccupdate_local(info, smp_processor_id());
 }
+#else
+static void do_ccupdate_local(void *info, int cpu)
+{
+	lock_slab_on(cpu);
+	__do_ccupdate_local(info, cpu);
+	unlock_slab_on(cpu);
+}
+#endif
 
 /* Always called with the cache_chain_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
@@ -4089,7 +4205,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
	}
	new->cachep = cachep;
 
-	on_each_cpu(do_ccupdate_local, (void *)new, 1);
+	slab_on_each_cpu(do_ccupdate_local, (void *)new);
 
	check_irq_on();
	cachep->batchcount = batchcount;
@@ -4100,9 +4216,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
		struct array_cache *ccold = new->new[i];
		if (!ccold)
			continue;
-		spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+		local_spin_lock_irq(slab_lock,
+				    &cachep->nodelists[cpu_to_mem(i)]->list_lock);
		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
-		spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+
+		unlock_l3_and_free_delayed(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
		kfree(ccold);
	}
	kfree(new);
@@ -4178,7 +4296,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
	if (ac->touched && !force) {
		ac->touched = 0;
	} else {
-		spin_lock_irq(&l3->list_lock);
+		local_spin_lock_irq(slab_lock, &l3->list_lock);
		if (ac->avail) {
			tofree = force ? ac->avail : (ac->limit + 4) / 5;
			if (tofree > ac->avail)
@@ -4188,7 +4306,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
			memmove(ac->entry, &(ac->entry[tofree]),
					sizeof(void *) * ac->avail);
		}
-		spin_unlock_irq(&l3->list_lock);
+		local_spin_unlock_irq(slab_lock, &l3->list_lock);
	}
 }
 
@@ -4327,7 +4445,7 @@ static int s_show(struct seq_file *m, void *p)
			continue;
 
		check_irq_on();
-		spin_lock_irq(&l3->list_lock);
+		local_spin_lock_irq(slab_lock, &l3->list_lock);
 
		list_for_each_entry(slabp, &l3->slabs_full, list) {
			if (slabp->inuse != cachep->num && !error)
@@ -4352,7 +4470,7 @@ static int s_show(struct seq_file *m, void *p)
		if (l3->shared)
			shared_avail += l3->shared->avail;
 
-		spin_unlock_irq(&l3->list_lock);
+		local_spin_unlock_irq(slab_lock, &l3->list_lock);
	}
	num_slabs += active_slabs;
	num_objs = num_slabs * cachep->num;
@@ -4581,13 +4699,13 @@ static int leaks_show(struct seq_file *m, void *p)
			continue;
 
		check_irq_on();
-		spin_lock_irq(&l3->list_lock);
+		local_spin_lock_irq(slab_lock, &l3->list_lock);
 
		list_for_each_entry(slabp, &l3->slabs_full, list)
			handle_slab(n, cachep, slabp);
		list_for_each_entry(slabp, &l3->slabs_partial, list)
			handle_slab(n, cachep, slabp);
-		spin_unlock_irq(&l3->list_lock);
+		local_spin_unlock_irq(slab_lock, &l3->list_lock);
	}
	name = cachep->name;
	if (n[0] == n[1]) {
diff --git a/mm/slub.c b/mm/slub.c
index aff06374dd5c..9308c8a2865b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1266,6 +1266,14 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
 
 #endif /* CONFIG_SLUB_DEBUG */
 
+static void setup_object(struct kmem_cache *s, struct page *page,
+				void *object)
+{
+	setup_object_debug(s, page, object);
+	if (unlikely(s->ctor))
+		s->ctor(object);
+}
+
 /*
  * Slab allocation and freeing
 */
@@ -1287,6 +1295,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
	struct page *page;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;
+	void *start, *last, *p;
+	int idx, order;
 
	flags &= gfp_allowed_mask;
 
@@ -1309,17 +1319,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(flags, node, oo);
-
-		if (page)
-			stat(s, ORDER_FALLBACK);
+		if (unlikely(!page))
+			goto out;
+		stat(s, ORDER_FALLBACK);
	}
 
-	if (flags & __GFP_WAIT)
-		local_irq_disable();
-
-	if (!page)
-		return NULL;
-
	if (kmemcheck_enabled
		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
		int pages = 1 << oo_order(oo);
@@ -1337,37 +1341,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
	}
 
	page->objects = oo_objects(oo);
-	mod_zone_page_state(page_zone(page),
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		1 << oo_order(oo));
-
-	return page;
-}
-
-static void setup_object(struct kmem_cache *s, struct page *page,
-				void *object)
-{
-	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor))
-		s->ctor(object);
-}
-
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
-	struct page *page;
-	void *start;
-	void *last;
-	void *p;
-
-	BUG_ON(flags & GFP_SLAB_BUG_MASK);
-
-	page = allocate_slab(s,
-		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-	if (!page)
-		goto out;
-
-	inc_slabs_node(s, page_to_nid(page), page->objects);
	page->slab = s;
	page->flags |= 1 << PG_slab;
 
@@ -1388,10 +1361,34 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
	page->freelist = start;
	page->inuse = page->objects;
	page->frozen = 1;
+
 out:
+	if (flags & __GFP_WAIT)
+		local_irq_disable();
+	if (!page)
+		return NULL;
+
+	mod_zone_page_state(page_zone(page),
+		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+		1 << oo_order(oo));
+
+	inc_slabs_node(s, page_to_nid(page), page->objects);
+
	return page;
 }
 
+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+		BUG();
+	}
+
+	return allocate_slab(s,
+		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+}
+
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
	int order = compound_order(page);
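The allocate_slab()/new_slab() reshuffle above changes interrupt handling, not behaviour: there is now a single exit at out: where interrupts are re-disabled for __GFP_WAIT callers, and statistics are only accounted once a page actually exists. The underlying pattern, sketched:

/* Sketch of the irq-window pattern the slub hunk enforces. */
	if (flags & __GFP_WAIT)
		local_irq_enable();	/* the allocation below may sleep */
	page = alloc_slab_page(flags, node, oo);
	/* ... slab setup that needs no irq protection ... */
out:
	if (flags & __GFP_WAIT)
		local_irq_disable();	/* restore the caller's irq-off state */
	return page;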
diff --git a/mm/swap.c b/mm/swap.c
index a8feea68a606..08b301177dc2 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,6 +31,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/hugetlb.h>
+#include <linux/locallock.h>
 
 #include "internal.h"
 
@@ -41,6 +42,9 @@ static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs.  But it gets used by networking.
@@ -344,11 +348,11 @@ void rotate_reclaimable_page(struct page *page)
		unsigned long flags;
 
		page_cache_get(page);
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
	}
 }
 
@@ -404,12 +408,13 @@ static void activate_page_drain(int cpu)
 void activate_page(struct page *page)
 {
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       activate_page_pvecs);
 
		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
+		put_locked_var(swapvec_lock, activate_page_pvecs);
	}
 }
 
@@ -449,12 +454,12 @@ EXPORT_SYMBOL(mark_page_accessed);
 
 void __lru_cache_add(struct page *page, enum lru_list lru)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvecs)[lru];
 
	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec, lru);
-	put_cpu_var(lru_add_pvecs);
+	put_locked_var(swapvec_lock, lru_add_pvecs);
 }
 EXPORT_SYMBOL(__lru_cache_add);
 
@@ -589,9 +594,9 @@ void lru_add_drain_cpu(int cpu)
		unsigned long flags;
 
		/* No harm done if a racing interrupt already did this */
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
		pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
	}
 
	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
@@ -619,18 +624,19 @@ void deactivate_page(struct page *page)
		return;
 
	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       lru_deactivate_pvecs);
 
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-		put_cpu_var(lru_deactivate_pvecs);
+		put_locked_var(swapvec_lock, lru_deactivate_pvecs);
	}
 }
 
 void lru_add_drain(void)
 {
-	lru_add_drain_cpu(get_cpu());
-	put_cpu();
+	lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
+	local_unlock_cpu(swapvec_lock);
 }
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
@@ -837,6 +843,15 @@ unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
 }
 EXPORT_SYMBOL(pagevec_lookup_tag);
 
+/* Early setup for the local locks */
+static int __init swap_init_locks(void)
+{
+	local_irq_lock_init(rotate_lock);
+	local_irq_lock_init(swapvec_lock);
+	return 1;
+}
+early_initcall(swap_init_locks);
+
 /*
  * Perform any setup for the swap system
 */
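get_locked_var()/put_locked_var() are the local-lock counterparts of get_cpu_var()/put_cpu_var(). A sketch, assuming they follow the same locallock.h scheme as the rest of the series: the named lock is taken before the per-CPU variable is dereferenced, which on !RT degrades to preemption disabling and on RT to the per-CPU sleeping lock, so the pagevec manipulation stays serialized without making the section non-preemptible.

/* Sketch of the accessor pair used for the pagevec conversions above. */
#define get_locked_var(lvar, var)		\
	({					\
		local_lock(lvar);		\
		&__get_cpu_var(var);		\
	})
#define put_locked_var(lvar, var)	local_unlock(lvar)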
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ad9d90064a4b..86de9999ccb9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -788,7 +788,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
-	int node, err;
+	int node, err, cpu;
 
	node = numa_node_id();
 
@@ -827,12 +827,13 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
	BUG_ON(err);
	radix_tree_preload_end();
 
-	vbq = &get_cpu_var(vmap_block_queue);
+	cpu = get_cpu_light();
+	vbq = &__get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
-	put_cpu_var(vmap_block_queue);
+	put_cpu_light();
 
	return vb;
 }
@@ -906,7 +907,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;
-	int purge = 0;
+	int purge = 0, cpu;
 
	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
 
 again:
	rcu_read_lock();
-	vbq = &get_cpu_var(vmap_block_queue);
+	cpu = get_cpu_light();
+	vbq = &__get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;
 
@@ -951,7 +953,7 @@ next:
	if (purge)
		purge_fragmented_blocks_thiscpu();
 
-	put_cpu_var(vmap_block_queue);
+	put_cpu_light();
	rcu_read_unlock();
 
	if (!addr) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7db1b9bab492..172212f6f213 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -216,6 +216,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
	long x;
	long t;
 
+	preempt_disable_rt();
	x = delta + __this_cpu_read(*p);
 
	t = __this_cpu_read(pcp->stat_threshold);
@@ -225,6 +226,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
		x = 0;
	}
	__this_cpu_write(*p, x);
+	preempt_enable_rt();
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
 
@@ -257,6 +259,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;
 
+	preempt_disable_rt();
	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
@@ -265,6 +268,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
+	preempt_enable_rt();
 }
 
 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -279,6 +283,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;
 
+	preempt_disable_rt();
	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < - t)) {
@@ -287,6 +292,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
+	preempt_enable_rt();
 }
 
 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
diff --git a/net/core/dev.c b/net/core/dev.c
|
|
index 901495216f85..46f960a4c83b 100644
|
|
--- a/net/core/dev.c
|
|
+++ b/net/core/dev.c
|
|
@@ -220,14 +220,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
|
|
static inline void rps_lock(struct softnet_data *sd)
|
|
{
|
|
#ifdef CONFIG_RPS
|
|
- spin_lock(&sd->input_pkt_queue.lock);
|
|
+ raw_spin_lock(&sd->input_pkt_queue.raw_lock);
|
|
#endif
|
|
}
|
|
|
|
static inline void rps_unlock(struct softnet_data *sd)
|
|
{
|
|
#ifdef CONFIG_RPS
|
|
- spin_unlock(&sd->input_pkt_queue.lock);
|
|
+ raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
|
|
#endif
|
|
}
|
|
|
|
@@ -1823,6 +1823,7 @@ static inline void __netif_reschedule(struct Qdisc *q)
|
|
sd->output_queue_tailp = &q->next_sched;
|
|
raise_softirq_irqoff(NET_TX_SOFTIRQ);
|
|
local_irq_restore(flags);
|
|
+ preempt_check_resched_rt();
|
|
}
|
|
|
|
void __netif_schedule(struct Qdisc *q)
|
|
@@ -1844,6 +1845,7 @@ void dev_kfree_skb_irq(struct sk_buff *skb)
|
|
sd->completion_queue = skb;
|
|
raise_softirq_irqoff(NET_TX_SOFTIRQ);
|
|
local_irq_restore(flags);
|
|
+ preempt_check_resched_rt();
|
|
}
|
|
}
|
|
EXPORT_SYMBOL(dev_kfree_skb_irq);
|
|
@@ -2904,6 +2906,7 @@ enqueue:
|
|
rps_unlock(sd);
|
|
|
|
local_irq_restore(flags);
|
|
+ preempt_check_resched_rt();
|
|
|
|
atomic_long_inc(&skb->dev->rx_dropped);
|
|
kfree_skb(skb);
|
|
@@ -2941,7 +2944,7 @@ int netif_rx(struct sk_buff *skb)
|
|
struct rps_dev_flow voidflow, *rflow = &voidflow;
|
|
int cpu;
|
|
|
|
- preempt_disable();
|
|
+ migrate_disable();
|
|
rcu_read_lock();
|
|
|
|
cpu = get_rps_cpu(skb->dev, skb, &rflow);
|
|
@@ -2951,13 +2954,13 @@ int netif_rx(struct sk_buff *skb)
|
|
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
|
|
|
|
rcu_read_unlock();
|
|
- preempt_enable();
|
|
+ migrate_enable();
|
|
} else
|
|
#endif
|
|
{
|
|
unsigned int qtail;
|
|
- ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
|
|
- put_cpu();
|
|
+ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
|
|
+ put_cpu_light();
|
|
}
|
|
return ret;
|
|
}
|
|
@@ -2967,16 +2970,46 @@ int netif_rx_ni(struct sk_buff *skb)
|
|
{
|
|
int err;
|
|
|
|
- preempt_disable();
|
|
+ migrate_disable();
|
|
err = netif_rx(skb);
|
|
if (local_softirq_pending())
|
|
- do_softirq();
|
|
- preempt_enable();
|
|
+ thread_do_softirq();
|
|
+ migrate_enable();
|
|
|
|
return err;
|
|
}
|
|
EXPORT_SYMBOL(netif_rx_ni);
|
|
|
|
+#ifdef CONFIG_PREEMPT_RT_FULL
|
|
+/*
|
|
+ * RT runs ksoftirqd as a real time thread and the root_lock is a
|
|
+ * "sleeping spinlock". If the trylock fails then we can go into an
|
|
+ * infinite loop when ksoftirqd preempted the task which actually
|
|
+ * holds the lock, because we requeue q and raise NET_TX softirq
|
|
+ * causing ksoftirqd to loop forever.
|
|
+ *
|
|
+ * It's safe to use spin_lock on RT here as softirqs run in thread
|
|
+ * context and cannot deadlock against the thread which is holding
|
|
+ * root_lock.
|
|
+ *
|
|
+ * On !RT the trylock might fail, but there we bail out from the
|
|
+ * softirq loop after 10 attempts which we can't do on RT. And the
|
|
+ * task holding root_lock cannot be preempted, so the only downside of
|
|
+ * that trylock is that we need 10 loops to decide that we should have
|
|
+ * given up in the first one :)
|
|
+ */
|
|
+static inline int take_root_lock(spinlock_t *lock)
|
|
+{
|
|
+ spin_lock(lock);
|
|
+ return 1;
|
|
+}
|
|
+#else
|
|
+static inline int take_root_lock(spinlock_t *lock)
|
|
+{
|
|
+ return spin_trylock(lock);
|
|
+}
|
|
+#endif
|
|
+
|
|
static void net_tx_action(struct softirq_action *h)
|
|
{
|
|
struct softnet_data *sd = &__get_cpu_var(softnet_data);
@@ -3015,7 +3048,7 @@ static void net_tx_action(struct softirq_action *h)
head = head->next_sched;

root_lock = qdisc_lock(q);
-			if (spin_trylock(root_lock)) {
+			if (take_root_lock(root_lock)) {
smp_mb__before_clear_bit();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
@@ -3344,7 +3377,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
-			kfree_skb(skb);
+			__skb_queue_tail(&sd->tofree_queue, skb);
input_queue_head_incr(sd);
}
}
@@ -3353,10 +3386,13 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
-			kfree_skb(skb);
+			__skb_queue_tail(&sd->tofree_queue, skb);
input_queue_head_incr(sd);
}
}
+
+	if (!skb_queue_empty(&sd->tofree_queue))
+		raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

static int napi_gro_complete(struct sk_buff *skb)
@@ -3695,6 +3731,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
} else
#endif
local_irq_enable();
+	preempt_check_resched_rt();
}

static int process_backlog(struct napi_struct *napi, int quota)
@@ -3767,6 +3804,7 @@ void __napi_schedule(struct napi_struct *n)
local_irq_save(flags);
____napi_schedule(&__get_cpu_var(softnet_data), n);
local_irq_restore(flags);
+	preempt_check_resched_rt();
}
EXPORT_SYMBOL(__napi_schedule);

@@ -3841,10 +3879,17 @@ static void net_rx_action(struct softirq_action *h)
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
+	struct sk_buff *skb;
void *have;

local_irq_disable();

+	while ((skb = __skb_dequeue(&sd->tofree_queue))) {
+		local_irq_enable();
+		kfree_skb(skb);
+		local_irq_disable();
+	}
+
while (!list_empty(&sd->poll_list)) {
struct napi_struct *n;
int work, weight;
@@ -6259,6 +6304,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,

raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
+	preempt_check_resched_rt();

/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
@@ -6269,6 +6315,9 @@ static int dev_cpu_callback(struct notifier_block *nfb,
netif_rx(skb);
input_queue_head_incr(oldsd);
}
+	while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
+		kfree_skb(skb);
+	}

return NOTIFY_OK;
}
@@ -6533,8 +6582,9 @@ static int __init net_dev_init(void)
struct softnet_data *sd = &per_cpu(softnet_data, i);

memset(sd, 0, sizeof(*sd));
-		skb_queue_head_init(&sd->input_pkt_queue);
-		skb_queue_head_init(&sd->process_queue);
+		skb_queue_head_init_raw(&sd->input_pkt_queue);
+		skb_queue_head_init_raw(&sd->process_queue);
+		skb_queue_head_init_raw(&sd->tofree_queue);
sd->completion_queue = NULL;
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue = NULL;
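
Note on the net/core/dev.c changes: they share one theme, namely that work
mainline does with preemption or interrupts disabled is reshaped so it stays
preemptible on RT. netif_rx_ni() trades preempt_disable() for
migrate_disable(), which pins the task to its CPU but leaves it preemptible,
and runs pending softirqs in thread context via thread_do_softirq(). The new
tofree_queue serves flush_backlog(), which runs in a context that must not
sleep: instead of calling kfree_skb() there, skbs are parked on the queue
(initialized with skb_queue_head_init_raw() so its lock stays a raw,
non-sleeping spinlock) and freed later from net_rx_action(). A minimal sketch
of that deferred-free pattern; the function names are hypothetical, the queue
helpers are the kernel's own:

  /* Context that must not sleep: park the skb instead of freeing it. */
  static void defer_skb_free(struct softnet_data *sd, struct sk_buff *skb)
  {
          __skb_queue_tail(&sd->tofree_queue, skb);
          raise_softirq_irqoff(NET_RX_SOFTIRQ);  /* let softirq free it */
  }

  /* Softirq/thread context: sleeping locks are fine here on RT. */
  static void drain_deferred_skbs(struct softnet_data *sd)
  {
          struct sk_buff *skb;

          while ((skb = __skb_dequeue(&sd->tofree_queue)))
                  kfree_skb(skb);
  }
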
diff --git a/net/core/sock.c b/net/core/sock.c
index 832cf043a8f7..6ab07d7867f1 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2138,12 +2138,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
-	spin_unlock(&sk->sk_lock.slock);
+	spin_unlock_bh(&sk->sk_lock.slock);
/*
* The sk_lock has mutex_lock() semantics here:
*/
mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
-	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

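Note: on mainline, spin_unlock() followed by local_bh_enable() and a single
spin_unlock_bh() are equivalent. Folding the two calls into one keeps the
lock release and the BH re-enable paired as a single operation, which is
friendlier to RT, where the slock becomes a sleeping lock and softirqs run
in threads. Schematically:

  spin_unlock(&sk->sk_lock.slock);    /* before: unlock ...          */
  local_bh_enable();                  /* ... re-enable BH separately */

  spin_unlock_bh(&sk->sk_lock.slock); /* after: both in one call     */
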
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 2cb2bf845641..9a37732e8f32 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -69,6 +69,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
+#include <linux/sysrq.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
@@ -799,6 +800,30 @@ out_err:
}

/*
+ * 32bit and 64bit have different timestamp length, so we check for
+ * the cookie at offset 20 and verify it is repeated at offset 50
+ */
+#define CO_POS0 20
+#define CO_POS1 50
+#define CO_SIZE sizeof(int)
+#define ICMP_SYSRQ_SIZE 57
+
+/*
+ * We got an ICMP_SYSRQ_SIZE sized ping request. Check for the cookie
+ * pattern and if it matches send the next byte as a trigger to sysrq.
+ */
+static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
+{
+	int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
+	char *p = skb->data;
+
+	if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) &&
+	    !memcmp(&cookie, p + CO_POS1, CO_SIZE) &&
+	    p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE])
+		handle_sysrq(p[CO_POS0 + CO_SIZE]);
+}
+
+/*
* Handle ICMP_ECHO ("ping") requests.
*
* RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
@@ -825,6 +850,11 @@ static void icmp_echo(struct sk_buff *skb)
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb);
+
+		if (skb->len == ICMP_SYSRQ_SIZE &&
+		    net->ipv4.sysctl_icmp_echo_sysrq) {
+			icmp_check_sysrq(net, skb);
+		}
}
}

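Note: together with the sysctl entry added below in
net/ipv4/sysctl_net_ipv4.c, this gives a machine whose console is wedged a
network escape hatch. Write a non-zero cookie value to
/proc/sys/net/ipv4/icmp_echo_sysrq, then send it a ping whose 57-byte
payload carries htonl(cookie) at offsets 20 and 50, each copy followed by
the same sysrq command byte. A user-space sketch of building such a payload
(the helper name is hypothetical; the offsets mirror the #defines above):

  #include <arpa/inet.h>
  #include <string.h>

  #define CO_POS0         20
  #define CO_POS1         50
  #define CO_SIZE         sizeof(int)
  #define ICMP_SYSRQ_SIZE 57

  /* Fill a ping payload that icmp_check_sysrq() will accept. */
  static void build_sysrq_payload(unsigned char buf[ICMP_SYSRQ_SIZE],
                                  int sysctl_cookie, char sysrq_key)
  {
          int cookie = htonl(sysctl_cookie);

          memset(buf, 0, ICMP_SYSRQ_SIZE);
          memcpy(buf + CO_POS0, &cookie, CO_SIZE);
          buf[CO_POS0 + CO_SIZE] = sysrq_key;  /* e.g. 't': task dump */
          memcpy(buf + CO_POS1, &cookie, CO_SIZE);
          buf[CO_POS1 + CO_SIZE] = sysrq_key;  /* must repeat exactly */
  }
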
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6c34bc98bce7..bfe4de48f444 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -253,7 +253,7 @@ struct rt_hash_bucket {
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
-	defined(CONFIG_PROVE_LOCKING)
+	defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_PREEMPT_RT_FULL)
/*
* Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
* The size of this table is a power of two and depends on the number of CPUS.
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 086c97327b9b..38d0c7e747a1 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -729,6 +729,13 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec
},
{
+		.procname	= "icmp_echo_sysrq",
+		.data		= &init_net.ipv4.sysctl_icmp_echo_sysrq,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{
.procname = "icmp_ignore_bogus_error_responses",
.data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
.maxlen = sizeof(int),
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6202d0504122..c83eddda1ead 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3071,7 +3071,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

-	WARN_ON_ONCE(softirq_count() == 0);
+	WARN_ON_ONCE_NONRT(softirq_count() == 0);

if (WARN_ON(status->band < 0 ||
status->band >= IEEE80211_NUM_BANDS))
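
Note: ieee80211_rx() asserts that it runs in BH context. On RT softirqs run
in threads, so softirq_count() can legitimately read zero here, and the
check is demoted to WARN_ON_ONCE_NONRT(). Presumably the macro compiles to
the usual WARN_ON_ONCE() on mainline and to nothing on PREEMPT_RT_FULL,
along these lines:

  #ifdef CONFIG_PREEMPT_RT_FULL
  # define WARN_ON_ONCE_NONRT(condition) do { } while (0)
  #else
  # define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
  #endif
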
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e1b7e051332e..151061b2e474 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -20,11 +20,17 @@
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/locallock.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "nf_internals.h"

+#ifdef CONFIG_PREEMPT_RT_BASE
+DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
+EXPORT_PER_CPU_SYMBOL(xt_write_lock);
+#endif
+
static DEFINE_MUTEX(afinfo_mutex);

const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
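
Note: xt_write_lock is an RT "local lock", a per-CPU lock standing in for
sections that mainline protects by merely disabling preemption or
interrupts. On RT it is a real, sleeping lock, so the protected section
stays preemptible. A hypothetical usage sketch, assuming the locallock.h
API that DEFINE_LOCAL_IRQ_LOCK() comes from:

  #include <linux/locallock.h>

  DEFINE_LOCAL_IRQ_LOCK(my_pcpu_lock);    /* name hypothetical */

  static void update_this_cpus_data(void)
  {
          local_lock_irq(my_pcpu_lock);    /* !RT: disables interrupts  */
          /* ... modify this CPU's state ... */
          local_unlock_irq(my_pcpu_lock);  /* RT: per-CPU sleeping lock */
  }
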
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index dbe1715c629f..af701f316bdc 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -88,6 +88,7 @@
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
+#include <linux/delay.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
@@ -680,7 +681,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
-			cpu_relax();
+			cpu_chill();
}
}

@@ -925,7 +926,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
-			cpu_relax();
+			cpu_chill();
}
}
prb_close_block(pkc, pbd, po, status);
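
Note: both hunks replace a cpu_relax() busy-wait with cpu_chill(). If the
task that is about to clear blk_fill_in_prog has been preempted by this
(higher-priority) spinner, spinning would livelock an RT kernel, so
cpu_chill() gives up the CPU for real. The <linux/delay.h> include added
above fits the 3.x-era definition, roughly:

  #ifdef CONFIG_PREEMPT_RT_FULL
  # define cpu_chill()    msleep(1)     /* let the holder run */
  #else
  # define cpu_chill()    cpu_relax()
  #endif
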
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index e8fdb172adbb..5a44c6e77cd8 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>
+#include <linux/delay.h>

#include "rds.h"
#include "ib.h"
@@ -286,7 +287,7 @@ static inline void wait_clean_list_grace(void)
for_each_online_cpu(cpu) {
flag = &per_cpu(clean_list_grace, cpu);
while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
-			cpu_relax();
+			cpu_chill();
}
}

diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 67fc573e013a..455d21a8b0e0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -848,7 +848,7 @@ void dev_deactivate_many(struct list_head *head)
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, unreg_list)
while (some_qdisc_is_busy(dev))
-			yield();
+			msleep(1);
}

void dev_deactivate(struct net_device *dev)
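
Note: the yield() to msleep(1) switch is the same medicine as cpu_chill()
above. For a SCHED_FIFO caller, yield() only cedes the CPU to tasks of equal
or higher priority, so a lower-priority task keeping the qdisc busy could be
starved indefinitely; msleep(1) blocks unconditionally and lets it finish.
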
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index aec7dbb9d3dd..6686f46d673e 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -323,9 +323,9 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
if (!svc_xprt_has_something_to_do(xprt))
return;

-	cpu = get_cpu();
+	cpu = get_cpu_light();
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
-	put_cpu();
+	put_cpu_light();

spin_lock_bh(&pool->sp_lock);

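Note: get_cpu() disables preemption, and on RT even short non-preemptible
sections are avoided when pinning the task to its current CPU is all that
is needed, which is the case for svc_pool_for_cpu(). The _light variants
are presumed to be defined along these lines in the RT series:

  #ifdef CONFIG_PREEMPT_RT_FULL
  # define get_cpu_light()  ({ migrate_disable(); smp_processor_id(); })
  # define put_cpu_light()  migrate_enable()
  #else
  # define get_cpu_light()  get_cpu()
  # define put_cpu_light()  put_cpu()
  #endif
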
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index f221ddf69080..5f440097f6d2 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -4,7 +4,8 @@ TARGET=$1
ARCH=$2
SMP=$3
PREEMPT=$4
-CC=$5
+RT=$5
+CC=$6

vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }

@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
CONFIG_FLAGS=""
if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
+if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"

# Truncate to maximum length
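
Note: with this change mkcompile_h receives the RT flag as its fifth
argument (the compiler moves to the sixth) and appends "RT" to CONFIG_FLAGS,
so a kernel built from this tree advertises itself in its version banner;
uname -v on such a kernel then reports something like "#1 SMP PREEMPT RT ..."
together with the build timestamp.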