1 diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
2 index 3a3b30ac2a75..9e0745cafbd8 100644
3 --- a/Documentation/sysrq.txt
4 +++ b/Documentation/sysrq.txt
5 @@ -59,10 +59,17 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
6 On other - If you know of the key combos for other architectures, please
7 let me know so I can add them to this section.
9 -On all - write a character to /proc/sysrq-trigger. e.g.:
11 +On all - write a character to /proc/sysrq-trigger, e.g.:
12 echo t > /proc/sysrq-trigger
14 +On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
15 + echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
16 + Send an ICMP echo request with this pattern plus the particular
17 + SysRq command key. Example:
18 + # ping -c1 -s57 -p0102030468
19 + will trigger the SysRq-H (help) command.
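          The trailing byte of the pattern selects the SysRq command key: in the
          example above, 0x68 is ASCII 'h' (help). As a sketch, assuming the same
          example cookie and a target host that has it configured, the task list
          (SysRq-T, ASCII 0x74) could be requested the same way:

          # ping -c1 -s57 -p0102030474 <target host>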
22 * What are the 'command' keys?
23 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 'b' - Will immediately reboot the system without syncing or unmounting
25 diff --git a/Documentation/trace/histograms.txt b/Documentation/trace/histograms.txt
27 index 000000000000..6f2aeabf7faa
29 +++ b/Documentation/trace/histograms.txt
31 + Using the Linux Kernel Latency Histograms
34 +This document gives a short explanation of how to enable, configure, and use
35 +latency histograms. Latency histograms are primarily relevant in the
36 +context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
37 +and are used in the quality management of the Linux real-time capabilities.
41 +* Purpose of latency histograms
43 +A latency histogram continuously accumulates the frequencies of latency
44 +data. There are two types of histograms:
45 +- potential sources of latencies
46 +- effective latencies
49 +* Potential sources of latencies
51 +Potential sources of latencies are code segments where interrupts,
52 +preemption or both are disabled (aka critical sections). To create
53 +histograms of potential sources of latency, the kernel stores the time
54 +stamp at the start of a critical section, determines the time elapsed
55 +when the end of the section is reached, and increments the frequency
56 +counter of that latency value - irrespective of whether any concurrently
57 +running process is affected by latency or not.
58 +- Configuration items (in the Kernel hacking/Tracers submenu)
59 + CONFIG_INTERRUPT_OFF_LATENCY
60 + CONFIG_PREEMPT_OFF_LATENCY
63 +* Effective latencies
65 +Effective latencies are those that actually occur during wakeup of a process. To
66 +determine effective latencies, the kernel stores the time stamp when a
67 +process is scheduled to be woken up, and determines the duration of the
68 +wakeup time shortly before control is passed over to this process. Note
69 +that the apparent latency in user space may be somewhat longer, since the
70 +process may be interrupted after control is passed over to it but before
71 +the execution in user space takes place. Simply measuring the interval
72 +between enqueuing and wakeup may also not be appropriate in cases when a
73 +process is scheduled as a result of a timer expiration. The timer may have
74 +missed its deadline, e.g. due to disabled interrupts, but this latency
75 +would not be registered. Therefore, the offsets of missed timers are
76 +recorded in a separate histogram. If both wakeup latency and missed timer
77 +offsets are configured and enabled, a third histogram may be enabled that
78 +records the overall latency as a sum of the timer latency, if any, and the
79 +wakeup latency. This histogram is called "timerandwakeup".
80 +- Configuration items (in the Kernel hacking/Tracers submenu)
81 + CONFIG_WAKEUP_LATENCY
82 + CONFIG_MISSED_TIMER_OFFSETS
87 +The interface to the administration of the latency histograms is located
88 +in the debugfs file system. To mount it, either enter
90 +mount -t sysfs nodev /sys
91 +mount -t debugfs nodev /sys/kernel/debug
93 +from the shell command line, or add
95 +nodev /sys sysfs defaults 0 0
96 +nodev /sys/kernel/debug debugfs defaults 0 0
98 +to the file /etc/fstab. All latency histogram related files are then
99 +available in the directory /sys/kernel/debug/tracing/latency_hist. A
100 +particular histogram type is enabled by writing non-zero to the related
101 +variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
102 +Select "preemptirqsoff" for the histograms of potential sources of
103 +latencies and "wakeup" for histograms of effective latencies etc. The
104 +histogram data - one per CPU - are available in the files
106 +/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
107 +/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
108 +/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
109 +/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
110 +/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
111 +/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
112 +/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
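
A minimal sketch of enabling and inspecting the histograms, assuming debugfs is
mounted as described above and that the switches in the enable directory are
named after these histogram directories:

	cd /sys/kernel/debug/tracing/latency_hist
	echo 1 > enable/preemptirqsoff        # potential sources of latencies
	echo 1 > enable/wakeup                # effective (wakeup) latencies
	echo 1 > enable/missed_timer_offsets  # offsets of missed timers
	echo 1 > enable/timerandwakeup        # needs wakeup and missed_timer_offsets
	head preemptirqsoff/CPU0              # inspect one per-CPU histogram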
114 +The histograms are reset by writing non-zero to the file "reset" in a
115 +particular latency directory. To reset all latency data, use
119 +TRACINGDIR=/sys/kernel/debug/tracing
120 +HISTDIR=$TRACINGDIR/latency_hist
125 + for i in `find . | grep /reset$`
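
A complete variant of the reset loop, under the same assumptions about the
directory layout:

	TRACINGDIR=/sys/kernel/debug/tracing
	HISTDIR=$TRACINGDIR/latency_hist

	if test -d $HISTDIR
	then
		cd $HISTDIR
		for i in `find . | grep /reset$`
		do
			echo 1 > $i          # non-zero resets this histogram
		done
	fi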
134 +Latency data are stored with a resolution of one microsecond. The
135 +maximum latency is 10,240 microseconds. The data are only valid if the
136 +overflow register is empty. Every output line contains the latency in
137 +microseconds in the first column and the number of samples in the second
138 +column. To display only lines with a positive latency count, use, for example,
141 +grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
143 +#Minimum latency: 0 microseconds.
144 +#Average latency: 0 microseconds.
145 +#Maximum latency: 25 microseconds.
146 +#Total samples: 3104770694
147 +#There are 0 samples greater or equal than 10240 microseconds
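
To get a quick overview of all CPUs, the summary lines of the per-CPU files can
be collected in one go; for example, to print each CPU's recorded maximum:

	grep '^#Maximum latency' /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU*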
176 +* Wakeup latency of a selected process
178 +To only collect wakeup latency data of a particular process, write the
179 +PID of the requested process to
181 +/sys/kernel/debug/tracing/latency_hist/wakeup/pid
183 +PIDs are not considered if this variable is set to 0.
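
For example, to restrict the wakeup histograms to one task and later lift the
restriction again (4711 stands in for the PID of the task of interest):

	echo 4711 > /sys/kernel/debug/tracing/latency_hist/wakeup/pid
	echo 0 > /sys/kernel/debug/tracing/latency_hist/wakeup/pid   # back to all tasks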
186 +* Details of the process with the highest wakeup latency so far
188 +Selected data of the process that suffered from the highest wakeup
189 +latency that occurred in a particular CPU are available in the file
191 +/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
193 +In addition, other relevant system data at the time when the
194 +latency occurred are given.
196 +The format of the data is (all in one line):
197 +<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
198 +<- <PID> <Priority> <Command> <Timestamp>
200 +The value of <Timeroffset> is only relevant in the combined timer
201 +and wakeup latency recording. In the wakeup recording, it is
202 +always 0; in the missed_timer_offsets recording, it is the same as <Latency>.
205 +When retrospectively searching for the origin of a latency and
206 +tracing was not enabled, it may be helpful to know the name and
207 +some basic data of the task that (finally) was switching to the
208 +late real-time task. In addition to the victim's data, the data of the
209 +possible culprit are therefore displayed after the victim's data.
212 +Finally, the timestamp of the time when the latency occurred
213 +in <seconds>.<microseconds> after the most recent system boot is provided.
216 +These data are also reset when the wakeup histogram is reset.
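
Assuming each of these files holds one record in the format shown above, the
worst wakeup latency recorded on any CPU can be picked out by sorting on the
third field (the latency), e.g.:

	cat /sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPU* | \
		sort -n -k3 | tail -1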
217 diff --git a/MAINTAINERS b/MAINTAINERS
218 index 63cefa62324c..be0ea1e5c4cc 100644
221 @@ -5196,6 +5196,23 @@ F: fs/fuse/
222 F: include/uapi/linux/fuse.h
223 F: Documentation/filesystems/fuse.txt
226 +M: Thomas Gleixner <tglx@linutronix.de>
227 +M: Ingo Molnar <mingo@redhat.com>
228 +R: Peter Zijlstra <peterz@infradead.org>
229 +R: Darren Hart <dvhart@infradead.org>
230 +L: linux-kernel@vger.kernel.org
231 +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
234 +F: kernel/futex_compat.c
235 +F: include/asm-generic/futex.h
236 +F: include/linux/futex.h
237 +F: include/uapi/linux/futex.h
238 +F: tools/testing/selftests/futex/
239 +F: tools/perf/bench/futex*
240 +F: Documentation/*futex*
242 FUTURE DOMAIN TMC-16x0 SCSI DRIVER (16-bit)
243 M: Rik Faith <faith@cs.unc.edu>
244 L: linux-scsi@vger.kernel.org
245 diff --git a/arch/Kconfig b/arch/Kconfig
246 index 659bdd079277..099fc0f5155e 100644
249 @@ -9,6 +9,7 @@ config OPROFILE
250 tristate "OProfile system profiling"
252 depends on HAVE_OPROFILE
253 + depends on !PREEMPT_RT_FULL
255 select RING_BUFFER_ALLOW_SWAP
257 @@ -52,6 +53,7 @@ config KPROBES
259 bool "Optimize very unlikely/likely branches"
260 depends on HAVE_ARCH_JUMP_LABEL
261 + depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
263 This option enables a transparent branch optimization that
264 makes certain almost-always-true or almost-always-false branch
265 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
266 index b5d529fdffab..5715844e83e3 100644
267 --- a/arch/arm/Kconfig
268 +++ b/arch/arm/Kconfig
269 @@ -36,7 +36,7 @@ config ARM
270 select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
271 select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
272 select HAVE_ARCH_HARDENED_USERCOPY
273 - select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
274 + select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
275 select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
276 select HAVE_ARCH_MMAP_RND_BITS if MMU
277 select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
278 @@ -75,6 +75,7 @@ config ARM
279 select HAVE_PERF_EVENTS
280 select HAVE_PERF_REGS
281 select HAVE_PERF_USER_STACK_DUMP
282 + select HAVE_PREEMPT_LAZY
283 select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
284 select HAVE_REGS_AND_STACK_ACCESS_API
285 select HAVE_SYSCALL_TRACEPOINTS
286 diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
287 index e53638c8ed8a..6095a1649865 100644
288 --- a/arch/arm/include/asm/irq.h
289 +++ b/arch/arm/include/asm/irq.h
294 +#include <linux/cpumask.h>
298 extern void migrate_irqs(void);
299 diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
300 index 12ebfcc1d539..c962084605bc 100644
301 --- a/arch/arm/include/asm/switch_to.h
302 +++ b/arch/arm/include/asm/switch_to.h
305 #include <linux/thread_info.h>
307 +#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
308 +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
311 +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
315 * For v7 SMP cores running a preemptible kernel we may be pre-empted
316 * during a TLB maintenance operation, so execute an inner-shareable dsb
317 @@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
318 #define switch_to(prev,next,last) \
320 __complete_pending_tlbi(); \
321 + switch_kmaps(prev, next); \
322 last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
325 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
326 index 776757d1604a..1f36a4eccc72 100644
327 --- a/arch/arm/include/asm/thread_info.h
328 +++ b/arch/arm/include/asm/thread_info.h
329 @@ -49,6 +49,7 @@ struct cpu_context_save {
331 unsigned long flags; /* low level flags */
332 int preempt_count; /* 0 => preemptable, <0 => bug */
333 + int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
334 mm_segment_t addr_limit; /* address limit */
335 struct task_struct *task; /* main task structure */
337 @@ -142,7 +143,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
338 #define TIF_SYSCALL_TRACE 4 /* syscall trace active */
339 #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
340 #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
341 -#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
342 +#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
343 +#define TIF_NEED_RESCHED_LAZY 7
345 #define TIF_NOHZ 12 /* in adaptive nohz mode */
346 #define TIF_USING_IWMMXT 17
347 @@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
348 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
349 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
350 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
351 +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
352 #define _TIF_UPROBE (1 << TIF_UPROBE)
353 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
354 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
355 @@ -167,7 +170,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
356 * Change these and you break ASM code in entry-common.S
358 #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
359 - _TIF_NOTIFY_RESUME | _TIF_UPROBE)
360 + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
361 + _TIF_NEED_RESCHED_LAZY)
363 #endif /* __KERNEL__ */
364 #endif /* __ASM_ARM_THREAD_INFO_H */
365 diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
366 index 608008229c7d..3866da3f7bb7 100644
367 --- a/arch/arm/kernel/asm-offsets.c
368 +++ b/arch/arm/kernel/asm-offsets.c
369 @@ -65,6 +65,7 @@ int main(void)
371 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
372 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
373 + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
374 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
375 DEFINE(TI_TASK, offsetof(struct thread_info, task));
376 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
377 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
378 index 9f157e7c51e7..468e224d76aa 100644
379 --- a/arch/arm/kernel/entry-armv.S
380 +++ b/arch/arm/kernel/entry-armv.S
381 @@ -220,11 +220,18 @@ ENDPROC(__dabt_svc)
383 #ifdef CONFIG_PREEMPT
384 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
385 - ldr r0, [tsk, #TI_FLAGS] @ get flags
386 teq r8, #0 @ if preempt count != 0
387 + bne 1f @ return from exception
388 + ldr r0, [tsk, #TI_FLAGS] @ get flags
389 + tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
390 + blne svc_preempt @ preempt!
392 + ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
393 + teq r8, #0 @ if preempt lazy count != 0
394 movne r0, #0 @ force flags to 0
395 - tst r0, #_TIF_NEED_RESCHED
396 + tst r0, #_TIF_NEED_RESCHED_LAZY
401 svc_exit r5, irq = 1 @ return from exception
402 @@ -239,8 +246,14 @@ ENDPROC(__irq_svc)
403 1: bl preempt_schedule_irq @ irq en/disable is done inside
404 ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
405 tst r0, #_TIF_NEED_RESCHED
407 + tst r0, #_TIF_NEED_RESCHED_LAZY
410 + ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
411 + teq r0, #0 @ if preempt lazy count != 0
418 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
419 index 10c3283d6c19..8872937862cc 100644
420 --- a/arch/arm/kernel/entry-common.S
421 +++ b/arch/arm/kernel/entry-common.S
424 disable_irq_notrace @ disable interrupts
425 ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
426 - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
427 + tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
428 + bne fast_work_pending
429 + tst r1, #_TIF_SECCOMP
430 bne fast_work_pending
432 /* perform architecture specific actions before user return */
433 @@ -62,8 +64,11 @@ ENDPROC(ret_fast_syscall)
434 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
435 disable_irq_notrace @ disable interrupts
436 ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
437 - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
438 + tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
440 + tst r1, #_TIF_SECCOMP
444 ENDPROC(ret_fast_syscall)
446 diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
447 index 69bda1a5707e..1f665acaa6a9 100644
448 --- a/arch/arm/kernel/patch.c
449 +++ b/arch/arm/kernel/patch.c
450 @@ -15,7 +15,7 @@ struct patch {
454 -static DEFINE_SPINLOCK(patch_lock);
455 +static DEFINE_RAW_SPINLOCK(patch_lock);
457 static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
458 __acquires(&patch_lock)
459 @@ -32,7 +32,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
463 - spin_lock_irqsave(&patch_lock, *flags);
464 + raw_spin_lock_irqsave(&patch_lock, *flags);
466 __acquire(&patch_lock);
468 @@ -47,7 +47,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
469 clear_fixmap(fixmap);
472 - spin_unlock_irqrestore(&patch_lock, *flags);
473 + raw_spin_unlock_irqrestore(&patch_lock, *flags);
475 __release(&patch_lock);
477 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
478 index 91d2d5b01414..750550098b59 100644
479 --- a/arch/arm/kernel/process.c
480 +++ b/arch/arm/kernel/process.c
481 @@ -322,6 +322,30 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
486 + * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
487 + * initialized by pgtable_page_ctor() then a coredump of the vector page will fail.
490 +static int __init vectors_user_mapping_init_page(void)
493 + unsigned long addr = 0xffff0000;
498 + pgd = pgd_offset_k(addr);
499 + pud = pud_offset(pgd, addr);
500 + pmd = pmd_offset(pud, addr);
501 + page = pmd_page(*(pmd));
503 + pgtable_page_ctor(page);
507 +late_initcall(vectors_user_mapping_init_page);
509 #ifdef CONFIG_KUSER_HELPERS
511 * The vectors page is always readable from user space for the
512 diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
513 index 7b8f2141427b..96541e00b74a 100644
514 --- a/arch/arm/kernel/signal.c
515 +++ b/arch/arm/kernel/signal.c
516 @@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
518 trace_hardirqs_off();
520 - if (likely(thread_flags & _TIF_NEED_RESCHED)) {
521 + if (likely(thread_flags & (_TIF_NEED_RESCHED |
522 + _TIF_NEED_RESCHED_LAZY))) {
525 if (unlikely(!user_mode(regs)))
526 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
527 index 7dd14e8395e6..4cd7e3d98035 100644
528 --- a/arch/arm/kernel/smp.c
529 +++ b/arch/arm/kernel/smp.c
530 @@ -234,8 +234,6 @@ int __cpu_disable(void)
532 local_flush_tlb_all();
534 - clear_tasks_mm_cpumask(cpu);
539 @@ -251,6 +249,9 @@ void __cpu_die(unsigned int cpu)
540 pr_err("CPU%u: cpu didn't die\n", cpu);
544 + clear_tasks_mm_cpumask(cpu);
546 pr_notice("CPU%u: shutdown\n", cpu);
549 diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
550 index 0bee233fef9a..314cfb232a63 100644
551 --- a/arch/arm/kernel/unwind.c
552 +++ b/arch/arm/kernel/unwind.c
553 @@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
554 static const struct unwind_idx *__origin_unwind_idx;
555 extern const struct unwind_idx __stop_unwind_idx[];
557 -static DEFINE_SPINLOCK(unwind_lock);
558 +static DEFINE_RAW_SPINLOCK(unwind_lock);
559 static LIST_HEAD(unwind_tables);
561 /* Convert a prel31 symbol to an absolute address */
562 @@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
563 /* module unwind tables */
564 struct unwind_table *table;
566 - spin_lock_irqsave(&unwind_lock, flags);
567 + raw_spin_lock_irqsave(&unwind_lock, flags);
568 list_for_each_entry(table, &unwind_tables, list) {
569 if (addr >= table->begin_addr &&
570 addr < table->end_addr) {
571 @@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
575 - spin_unlock_irqrestore(&unwind_lock, flags);
576 + raw_spin_unlock_irqrestore(&unwind_lock, flags);
579 pr_debug("%s: idx = %p\n", __func__, idx);
580 @@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
581 tab->begin_addr = text_addr;
582 tab->end_addr = text_addr + text_size;
584 - spin_lock_irqsave(&unwind_lock, flags);
585 + raw_spin_lock_irqsave(&unwind_lock, flags);
586 list_add_tail(&tab->list, &unwind_tables);
587 - spin_unlock_irqrestore(&unwind_lock, flags);
588 + raw_spin_unlock_irqrestore(&unwind_lock, flags);
592 @@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
596 - spin_lock_irqsave(&unwind_lock, flags);
597 + raw_spin_lock_irqsave(&unwind_lock, flags);
598 list_del(&tab->list);
599 - spin_unlock_irqrestore(&unwind_lock, flags);
600 + raw_spin_unlock_irqrestore(&unwind_lock, flags);
604 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
605 index 19b5f5c1c0ff..82aa639e6737 100644
606 --- a/arch/arm/kvm/arm.c
607 +++ b/arch/arm/kvm/arm.c
608 @@ -619,7 +619,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
609 * involves poking the GIC, which must be done in a
610 * non-preemptible context.
614 kvm_pmu_flush_hwstate(vcpu);
615 kvm_timer_flush_hwstate(vcpu);
616 kvm_vgic_flush_hwstate(vcpu);
617 @@ -640,7 +640,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
618 kvm_pmu_sync_hwstate(vcpu);
619 kvm_timer_sync_hwstate(vcpu);
620 kvm_vgic_sync_hwstate(vcpu);
626 @@ -696,7 +696,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
628 kvm_vgic_sync_hwstate(vcpu);
633 ret = handle_exit(vcpu, run, ret);
635 diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
636 index 98ffe1e62ad5..df9769ddece5 100644
637 --- a/arch/arm/mach-exynos/platsmp.c
638 +++ b/arch/arm/mach-exynos/platsmp.c
639 @@ -229,7 +229,7 @@ static void __iomem *scu_base_addr(void)
640 return (void __iomem *)(S5P_VA_SCU);
643 -static DEFINE_SPINLOCK(boot_lock);
644 +static DEFINE_RAW_SPINLOCK(boot_lock);
646 static void exynos_secondary_init(unsigned int cpu)
648 @@ -242,8 +242,8 @@ static void exynos_secondary_init(unsigned int cpu)
650 * Synchronise with the boot thread.
652 - spin_lock(&boot_lock);
653 - spin_unlock(&boot_lock);
654 + raw_spin_lock(&boot_lock);
655 + raw_spin_unlock(&boot_lock);
658 int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
659 @@ -307,7 +307,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
660 * Set synchronisation state between this boot processor
661 * and the secondary one
663 - spin_lock(&boot_lock);
664 + raw_spin_lock(&boot_lock);
667 * The secondary processor is waiting to be released from
668 @@ -334,7 +334,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
671 printk(KERN_ERR "cpu1 power enable failed");
672 - spin_unlock(&boot_lock);
673 + raw_spin_unlock(&boot_lock);
677 @@ -380,7 +380,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
678 * calibrations, then wait for it to finish
681 - spin_unlock(&boot_lock);
682 + raw_spin_unlock(&boot_lock);
684 return pen_release != -1 ? ret : 0;
686 diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c
687 index 4b653a8cb75c..b03d5a922cb1 100644
688 --- a/arch/arm/mach-hisi/platmcpm.c
689 +++ b/arch/arm/mach-hisi/platmcpm.c
692 static void __iomem *sysctrl, *fabric;
693 static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
694 -static DEFINE_SPINLOCK(boot_lock);
695 +static DEFINE_RAW_SPINLOCK(boot_lock);
696 static u32 fabric_phys_addr;
698 * [0]: bootwrapper physical address
699 @@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
700 if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
703 - spin_lock_irq(&boot_lock);
704 + raw_spin_lock_irq(&boot_lock);
706 if (hip04_cpu_table[cluster][cpu])
708 @@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
711 hip04_cpu_table[cluster][cpu]++;
712 - spin_unlock_irq(&boot_lock);
713 + raw_spin_unlock_irq(&boot_lock);
717 @@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu)
718 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
719 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
721 - spin_lock(&boot_lock);
722 + raw_spin_lock(&boot_lock);
723 hip04_cpu_table[cluster][cpu]--;
724 if (hip04_cpu_table[cluster][cpu] == 1) {
725 /* A power_up request went ahead of us. */
726 - spin_unlock(&boot_lock);
727 + raw_spin_unlock(&boot_lock);
729 } else if (hip04_cpu_table[cluster][cpu] > 1) {
730 pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
731 @@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu)
734 last_man = hip04_cluster_is_down(cluster);
735 - spin_unlock(&boot_lock);
736 + raw_spin_unlock(&boot_lock);
738 /* Since it's Cortex A15, disable L2 prefetching. */
740 @@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu)
741 cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
743 count = TIMEOUT_MSEC / POLL_MSEC;
744 - spin_lock_irq(&boot_lock);
745 + raw_spin_lock_irq(&boot_lock);
746 for (tries = 0; tries < count; tries++) {
747 if (hip04_cpu_table[cluster][cpu])
749 @@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
750 data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
751 if (data & CORE_WFI_STATUS(cpu))
753 - spin_unlock_irq(&boot_lock);
754 + raw_spin_unlock_irq(&boot_lock);
755 /* Wait for clean L2 when the whole cluster is down. */
757 - spin_lock_irq(&boot_lock);
758 + raw_spin_lock_irq(&boot_lock);
762 @@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
764 if (hip04_cluster_is_down(cluster))
765 hip04_set_snoop_filter(cluster, 0);
766 - spin_unlock_irq(&boot_lock);
767 + raw_spin_unlock_irq(&boot_lock);
770 - spin_unlock_irq(&boot_lock);
771 + raw_spin_unlock_irq(&boot_lock);
775 diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
776 index b4de3da6dffa..b52893319d75 100644
777 --- a/arch/arm/mach-omap2/omap-smp.c
778 +++ b/arch/arm/mach-omap2/omap-smp.c
779 @@ -64,7 +64,7 @@ static const struct omap_smp_config omap5_cfg __initconst = {
780 .startup_addr = omap5_secondary_startup,
783 -static DEFINE_SPINLOCK(boot_lock);
784 +static DEFINE_RAW_SPINLOCK(boot_lock);
786 void __iomem *omap4_get_scu_base(void)
788 @@ -131,8 +131,8 @@ static void omap4_secondary_init(unsigned int cpu)
790 * Synchronise with the boot thread.
792 - spin_lock(&boot_lock);
793 - spin_unlock(&boot_lock);
794 + raw_spin_lock(&boot_lock);
795 + raw_spin_unlock(&boot_lock);
798 static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
799 @@ -146,7 +146,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
800 * Set synchronisation state between this boot processor
801 * and the secondary one
803 - spin_lock(&boot_lock);
804 + raw_spin_lock(&boot_lock);
807 * Update the AuxCoreBoot0 with boot state for secondary core.
808 @@ -223,7 +223,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
809 * Now the secondary core is starting up let it run its
810 * calibrations, then wait for it to finish
812 - spin_unlock(&boot_lock);
813 + raw_spin_unlock(&boot_lock);
817 diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
818 index 0875b99add18..18b6d98d2581 100644
819 --- a/arch/arm/mach-prima2/platsmp.c
820 +++ b/arch/arm/mach-prima2/platsmp.c
823 static void __iomem *clk_base;
825 -static DEFINE_SPINLOCK(boot_lock);
826 +static DEFINE_RAW_SPINLOCK(boot_lock);
828 static void sirfsoc_secondary_init(unsigned int cpu)
830 @@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu)
832 * Synchronise with the boot thread.
834 - spin_lock(&boot_lock);
835 - spin_unlock(&boot_lock);
836 + raw_spin_lock(&boot_lock);
837 + raw_spin_unlock(&boot_lock);
840 static const struct of_device_id clk_ids[] = {
841 @@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
842 /* make sure write buffer is drained */
845 - spin_lock(&boot_lock);
846 + raw_spin_lock(&boot_lock);
849 * The secondary processor is waiting to be released from
850 @@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
851 * now the secondary core is starting up let it run its
852 * calibrations, then wait for it to finish
854 - spin_unlock(&boot_lock);
855 + raw_spin_unlock(&boot_lock);
857 return pen_release != -1 ? -ENOSYS : 0;
859 diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
860 index 5494c9e0c909..e8ce157d3548 100644
861 --- a/arch/arm/mach-qcom/platsmp.c
862 +++ b/arch/arm/mach-qcom/platsmp.c
865 extern void secondary_startup_arm(void);
867 -static DEFINE_SPINLOCK(boot_lock);
868 +static DEFINE_RAW_SPINLOCK(boot_lock);
870 #ifdef CONFIG_HOTPLUG_CPU
871 static void qcom_cpu_die(unsigned int cpu)
872 @@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu)
874 * Synchronise with the boot thread.
876 - spin_lock(&boot_lock);
877 - spin_unlock(&boot_lock);
878 + raw_spin_lock(&boot_lock);
879 + raw_spin_unlock(&boot_lock);
882 static int scss_release_secondary(unsigned int cpu)
883 @@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
884 * set synchronisation state between this boot processor
885 * and the secondary one
887 - spin_lock(&boot_lock);
888 + raw_spin_lock(&boot_lock);
891 * Send the secondary CPU a soft interrupt, thereby causing
892 @@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
893 * now the secondary core is starting up let it run its
894 * calibrations, then wait for it to finish
896 - spin_unlock(&boot_lock);
897 + raw_spin_unlock(&boot_lock);
901 diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
902 index 8d1e2d551786..7fa56cc78118 100644
903 --- a/arch/arm/mach-spear/platsmp.c
904 +++ b/arch/arm/mach-spear/platsmp.c
905 @@ -32,7 +32,7 @@ static void write_pen_release(int val)
906 sync_cache_w(&pen_release);
909 -static DEFINE_SPINLOCK(boot_lock);
910 +static DEFINE_RAW_SPINLOCK(boot_lock);
912 static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
914 @@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu)
916 * Synchronise with the boot thread.
918 - spin_lock(&boot_lock);
919 - spin_unlock(&boot_lock);
920 + raw_spin_lock(&boot_lock);
921 + raw_spin_unlock(&boot_lock);
924 static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
925 @@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
926 * set synchronisation state between this boot processor
927 * and the secondary one
929 - spin_lock(&boot_lock);
930 + raw_spin_lock(&boot_lock);
933 * The secondary processor is waiting to be released from
934 @@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
935 * now the secondary core is starting up let it run its
936 * calibrations, then wait for it to finish
938 - spin_unlock(&boot_lock);
939 + raw_spin_unlock(&boot_lock);
941 return pen_release != -1 ? -ENOSYS : 0;
943 diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
944 index ea5a2277ee46..b988e081ac79 100644
945 --- a/arch/arm/mach-sti/platsmp.c
946 +++ b/arch/arm/mach-sti/platsmp.c
947 @@ -35,7 +35,7 @@ static void write_pen_release(int val)
948 sync_cache_w(&pen_release);
951 -static DEFINE_SPINLOCK(boot_lock);
952 +static DEFINE_RAW_SPINLOCK(boot_lock);
954 static void sti_secondary_init(unsigned int cpu)
956 @@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu)
958 * Synchronise with the boot thread.
960 - spin_lock(&boot_lock);
961 - spin_unlock(&boot_lock);
962 + raw_spin_lock(&boot_lock);
963 + raw_spin_unlock(&boot_lock);
966 static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
967 @@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
968 * set synchronisation state between this boot processor
969 * and the secondary one
971 - spin_lock(&boot_lock);
972 + raw_spin_lock(&boot_lock);
975 * The secondary processor is waiting to be released from
976 @@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
977 * now the secondary core is starting up let it run its
978 * calibrations, then wait for it to finish
980 - spin_unlock(&boot_lock);
981 + raw_spin_unlock(&boot_lock);
983 return pen_release != -1 ? -ENOSYS : 0;
985 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
986 index 0122ad1a6027..926b1be48043 100644
987 --- a/arch/arm/mm/fault.c
988 +++ b/arch/arm/mm/fault.c
989 @@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
990 if (addr < TASK_SIZE)
991 return do_page_fault(addr, fsr, regs);
993 + if (interrupts_enabled(regs))
994 + local_irq_enable();
999 @@ -497,6 +500,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
1001 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1003 + if (interrupts_enabled(regs))
1004 + local_irq_enable();
1006 do_bad_area(addr, fsr, regs);
1009 diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
1010 index d02f8187b1cc..542692dbd40a 100644
1011 --- a/arch/arm/mm/highmem.c
1012 +++ b/arch/arm/mm/highmem.c
1013 @@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
1017 +static unsigned int fixmap_idx(int type)
1019 + return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
1022 void *kmap(struct page *page)
1025 @@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap);
1027 void *kmap_atomic(struct page *page)
1029 + pte_t pte = mk_pte(page, kmap_prot);
1031 unsigned long vaddr;
1035 - preempt_disable();
1036 + preempt_disable_nort();
1037 pagefault_disable();
1038 if (!PageHighMem(page))
1039 return page_address(page);
1040 @@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page)
1042 type = kmap_atomic_idx_push();
1044 - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
1045 + idx = fixmap_idx(type);
1046 vaddr = __fix_to_virt(idx);
1047 #ifdef CONFIG_DEBUG_HIGHMEM
1049 @@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page)
1050 * in place, so the contained TLB flush ensures the TLB is updated
1051 * with the new mapping.
1053 - set_fixmap_pte(idx, mk_pte(page, kmap_prot));
1054 +#ifdef CONFIG_PREEMPT_RT_FULL
1055 + current->kmap_pte[type] = pte;
1057 + set_fixmap_pte(idx, pte);
1059 return (void *)vaddr;
1061 @@ -106,44 +115,75 @@ void __kunmap_atomic(void *kvaddr)
1063 if (kvaddr >= (void *)FIXADDR_START) {
1064 type = kmap_atomic_idx();
1065 - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
1066 + idx = fixmap_idx(type);
1068 if (cache_is_vivt())
1069 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
1070 +#ifdef CONFIG_PREEMPT_RT_FULL
1071 + current->kmap_pte[type] = __pte(0);
1073 #ifdef CONFIG_DEBUG_HIGHMEM
1074 BUG_ON(vaddr != __fix_to_virt(idx));
1075 - set_fixmap_pte(idx, __pte(0));
1077 (void) idx; /* to kill a warning */
1079 + set_fixmap_pte(idx, __pte(0));
1080 kmap_atomic_idx_pop();
1081 } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
1082 /* this address was obtained through kmap_high_get() */
1083 kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
1087 + preempt_enable_nort();
1089 EXPORT_SYMBOL(__kunmap_atomic);
1091 void *kmap_atomic_pfn(unsigned long pfn)
1093 + pte_t pte = pfn_pte(pfn, kmap_prot);
1094 unsigned long vaddr;
1096 struct page *page = pfn_to_page(pfn);
1098 - preempt_disable();
1099 + preempt_disable_nort();
1100 pagefault_disable();
1101 if (!PageHighMem(page))
1102 return page_address(page);
1104 type = kmap_atomic_idx_push();
1105 - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
1106 + idx = fixmap_idx(type);
1107 vaddr = __fix_to_virt(idx);
1108 #ifdef CONFIG_DEBUG_HIGHMEM
1109 BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
1111 - set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
1112 +#ifdef CONFIG_PREEMPT_RT_FULL
1113 + current->kmap_pte[type] = pte;
1115 + set_fixmap_pte(idx, pte);
1117 return (void *)vaddr;
1119 +#if defined CONFIG_PREEMPT_RT_FULL
1120 +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
1125 + * Clear @prev's kmap_atomic mappings
1127 + for (i = 0; i < prev_p->kmap_idx; i++) {
1128 + int idx = fixmap_idx(i);
1130 + set_fixmap_pte(idx, __pte(0));
1133 + * Restore @next_p's kmap_atomic mappings
1135 + for (i = 0; i < next_p->kmap_idx; i++) {
1136 + int idx = fixmap_idx(i);
1138 + if (!pte_none(next_p->kmap_pte[i]))
1139 + set_fixmap_pte(idx, next_p->kmap_pte[i]);
1143 diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
1144 index c2366510187a..6b60f582b738 100644
1145 --- a/arch/arm/plat-versatile/platsmp.c
1146 +++ b/arch/arm/plat-versatile/platsmp.c
1147 @@ -32,7 +32,7 @@ static void write_pen_release(int val)
1148 sync_cache_w(&pen_release);
1151 -static DEFINE_SPINLOCK(boot_lock);
1152 +static DEFINE_RAW_SPINLOCK(boot_lock);
1154 void versatile_secondary_init(unsigned int cpu)
1156 @@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu)
1158 * Synchronise with the boot thread.
1160 - spin_lock(&boot_lock);
1161 - spin_unlock(&boot_lock);
1162 + raw_spin_lock(&boot_lock);
1163 + raw_spin_unlock(&boot_lock);
1166 int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
1167 @@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
1168 * Set synchronisation state between this boot processor
1169 * and the secondary one
1171 - spin_lock(&boot_lock);
1172 + raw_spin_lock(&boot_lock);
1175 * This is really belt and braces; we hold unintended secondary
1176 @@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
1177 * now the secondary core is starting up let it run its
1178 * calibrations, then wait for it to finish
1180 - spin_unlock(&boot_lock);
1181 + raw_spin_unlock(&boot_lock);
1183 return pen_release != -1 ? -ENOSYS : 0;
1185 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
1186 index cf57a7799a0f..78d1b49fbed5 100644
1187 --- a/arch/arm64/Kconfig
1188 +++ b/arch/arm64/Kconfig
1189 @@ -91,6 +91,7 @@ config ARM64
1190 select HAVE_PERF_EVENTS
1191 select HAVE_PERF_REGS
1192 select HAVE_PERF_USER_STACK_DUMP
1193 + select HAVE_PREEMPT_LAZY
1194 select HAVE_REGS_AND_STACK_ACCESS_API
1195 select HAVE_RCU_TABLE_FREE
1196 select HAVE_SYSCALL_TRACEPOINTS
1197 @@ -704,7 +705,7 @@ config XEN_DOM0
1200 bool "Xen guest support on ARM64"
1201 - depends on ARM64 && OF
1202 + depends on ARM64 && OF && !PREEMPT_RT_FULL
1206 diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
1207 index e9ea5a6bd449..6c500ad63c6a 100644
1208 --- a/arch/arm64/include/asm/thread_info.h
1209 +++ b/arch/arm64/include/asm/thread_info.h
1210 @@ -49,6 +49,7 @@ struct thread_info {
1211 mm_segment_t addr_limit; /* address limit */
1212 struct task_struct *task; /* main task structure */
1213 int preempt_count; /* 0 => preemptable, <0 => bug */
1214 + int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
1218 @@ -112,6 +113,7 @@ static inline struct thread_info *current_thread_info(void)
1219 #define TIF_NEED_RESCHED 1
1220 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1221 #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
1222 +#define TIF_NEED_RESCHED_LAZY 4
1224 #define TIF_SYSCALL_TRACE 8
1225 #define TIF_SYSCALL_AUDIT 9
1226 @@ -127,6 +129,7 @@ static inline struct thread_info *current_thread_info(void)
1227 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
1228 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
1229 #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
1230 +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
1231 #define _TIF_NOHZ (1 << TIF_NOHZ)
1232 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
1233 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
1234 @@ -135,7 +138,9 @@ static inline struct thread_info *current_thread_info(void)
1235 #define _TIF_32BIT (1 << TIF_32BIT)
1237 #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
1238 - _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
1239 + _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
1240 + _TIF_NEED_RESCHED_LAZY)
1241 +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
1243 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1244 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
1245 diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
1246 index c58ddf8c4062..a8f2f7c1fe12 100644
1247 --- a/arch/arm64/kernel/asm-offsets.c
1248 +++ b/arch/arm64/kernel/asm-offsets.c
1249 @@ -38,6 +38,7 @@ int main(void)
1251 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
1252 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
1253 + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
1254 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
1255 DEFINE(TI_TASK, offsetof(struct thread_info, task));
1256 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
1257 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
1258 index 79b0fe24d5b7..f3c959ade308 100644
1259 --- a/arch/arm64/kernel/entry.S
1260 +++ b/arch/arm64/kernel/entry.S
1261 @@ -428,11 +428,16 @@ ENDPROC(el1_sync)
1263 #ifdef CONFIG_PREEMPT
1264 ldr w24, [tsk, #TI_PREEMPT] // get preempt count
1265 - cbnz w24, 1f // preempt count != 0
1266 + cbnz w24, 2f // preempt count != 0
1267 ldr x0, [tsk, #TI_FLAGS] // get flags
1268 - tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
1270 + tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
1272 + ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count
1273 + cbnz w24, 2f // preempt lazy count != 0
1274 + tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
1279 #ifdef CONFIG_TRACE_IRQFLAGS
1280 bl trace_hardirqs_on
1281 @@ -446,6 +451,7 @@ ENDPROC(el1_irq)
1282 1: bl preempt_schedule_irq // irq en/disable is done inside
1283 ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
1284 tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
1285 + tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling?
1289 diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
1290 index 404dd67080b9..639dc6d12e72 100644
1291 --- a/arch/arm64/kernel/signal.c
1292 +++ b/arch/arm64/kernel/signal.c
1293 @@ -409,7 +409,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
1295 trace_hardirqs_off();
1297 - if (thread_flags & _TIF_NEED_RESCHED) {
1298 + if (thread_flags & _TIF_NEED_RESCHED_MASK) {
1302 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
1303 index 5e844f68e847..dc613cc10f54 100644
1304 --- a/arch/mips/Kconfig
1305 +++ b/arch/mips/Kconfig
1306 @@ -2516,7 +2516,7 @@ config MIPS_ASID_BITS_VARIABLE
1309 bool "High Memory Support"
1310 - depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
1311 + depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
1313 config CPU_SUPPORTS_HIGHMEM
1315 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
1316 index 8f01f21e78f1..619485b777d2 100644
1317 --- a/arch/powerpc/Kconfig
1318 +++ b/arch/powerpc/Kconfig
1319 @@ -52,10 +52,11 @@ config LOCKDEP_SUPPORT
1321 config RWSEM_GENERIC_SPINLOCK
1323 + default y if PREEMPT_RT_FULL
1325 config RWSEM_XCHGADD_ALGORITHM
1328 + default y if !PREEMPT_RT_FULL
1330 config GENERIC_LOCKBREAK
1332 @@ -134,6 +135,7 @@ config PPC
1333 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
1334 select GENERIC_STRNCPY_FROM_USER
1335 select GENERIC_STRNLEN_USER
1336 + select HAVE_PREEMPT_LAZY
1337 select HAVE_MOD_ARCH_SPECIFIC
1338 select MODULES_USE_ELF_RELA
1339 select CLONE_BACKWARDS
1340 @@ -321,7 +323,7 @@ menu "Kernel options"
1343 bool "High memory support"
1345 + depends on PPC32 && !PREEMPT_RT_FULL
1347 source kernel/Kconfig.hz
1348 source kernel/Kconfig.preempt
1349 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
1350 index 87e4b2d8dcd4..981e501a4359 100644
1351 --- a/arch/powerpc/include/asm/thread_info.h
1352 +++ b/arch/powerpc/include/asm/thread_info.h
1353 @@ -43,6 +43,8 @@ struct thread_info {
1354 int cpu; /* cpu we're on */
1355 int preempt_count; /* 0 => preemptable,
1357 + int preempt_lazy_count; /* 0 => preemptable,
1359 unsigned long local_flags; /* private flags for thread */
1360 #ifdef CONFIG_LIVEPATCH
1361 unsigned long *livepatch_sp;
1362 @@ -88,8 +90,7 @@ static inline struct thread_info *current_thread_info(void)
1363 #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
1364 #define TIF_SIGPENDING 1 /* signal pending */
1365 #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
1366 -#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
1367 - TIF_NEED_RESCHED */
1368 +#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */
1369 #define TIF_32BIT 4 /* 32 bit binary */
1370 #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
1371 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
1372 @@ -107,6 +108,8 @@ static inline struct thread_info *current_thread_info(void)
1373 #if defined(CONFIG_PPC64)
1374 #define TIF_ELF2ABI 18 /* function descriptors must die! */
1376 +#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling
1377 + TIF_NEED_RESCHED */
1379 /* as above, but as bit values */
1380 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
1381 @@ -125,14 +128,16 @@ static inline struct thread_info *current_thread_info(void)
1382 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
1383 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
1384 #define _TIF_NOHZ (1<<TIF_NOHZ)
1385 +#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
1386 #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1387 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
1390 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
1391 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
1393 + _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY)
1394 #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
1395 +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
1397 /* Bits in local_flags */
1398 /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
1399 diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
1400 index c833d88c423d..96e9fbc3f684 100644
1401 --- a/arch/powerpc/kernel/asm-offsets.c
1402 +++ b/arch/powerpc/kernel/asm-offsets.c
1403 @@ -156,6 +156,7 @@ int main(void)
1404 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
1405 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
1406 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
1407 + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
1408 DEFINE(TI_TASK, offsetof(struct thread_info, task));
1409 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
1411 diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
1412 index 3841d749a430..6dbaeff192b9 100644
1413 --- a/arch/powerpc/kernel/entry_32.S
1414 +++ b/arch/powerpc/kernel/entry_32.S
1415 @@ -835,7 +835,14 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
1416 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
1418 andi. r8,r8,_TIF_NEED_RESCHED
1420 + lwz r0,TI_PREEMPT_LAZY(r9)
1421 + cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
1423 + lwz r0,TI_FLAGS(r9)
1424 + andi. r0,r0,_TIF_NEED_RESCHED_LAZY
1428 andi. r0,r3,MSR_EE /* interrupts off? */
1429 beq restore /* don't schedule if so */
1430 @@ -846,11 +853,11 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
1432 bl trace_hardirqs_off
1434 -1: bl preempt_schedule_irq
1435 +2: bl preempt_schedule_irq
1436 CURRENT_THREAD_INFO(r9, r1)
1438 - andi. r0,r3,_TIF_NEED_RESCHED
1440 + andi. r0,r3,_TIF_NEED_RESCHED_MASK
1442 #ifdef CONFIG_TRACE_IRQFLAGS
1443 /* And now, to properly rebalance the above, we tell lockdep they
1444 * are being turned back on, which will happen when we return
1445 @@ -1171,7 +1178,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
1446 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1448 do_work: /* r10 contains MSR_KERNEL here */
1449 - andi. r0,r9,_TIF_NEED_RESCHED
1450 + andi. r0,r9,_TIF_NEED_RESCHED_MASK
1453 do_resched: /* r10 contains MSR_KERNEL here */
1454 @@ -1192,7 +1199,7 @@ do_resched: /* r10 contains MSR_KERNEL here */
1455 MTMSRD(r10) /* disable interrupts */
1456 CURRENT_THREAD_INFO(r9, r1)
1458 - andi. r0,r9,_TIF_NEED_RESCHED
1459 + andi. r0,r9,_TIF_NEED_RESCHED_MASK
1461 andi. r0,r9,_TIF_USER_WORK_MASK
1463 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
1464 index 767ef6d68c9e..2cb4d5552319 100644
1465 --- a/arch/powerpc/kernel/entry_64.S
1466 +++ b/arch/powerpc/kernel/entry_64.S
1467 @@ -656,7 +656,7 @@ _GLOBAL(ret_from_except_lite)
1471 -1: andi. r0,r4,_TIF_NEED_RESCHED
1472 +1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
1474 bl restore_interrupts
1476 @@ -718,10 +718,18 @@ _GLOBAL(ret_from_except_lite)
1478 #ifdef CONFIG_PREEMPT
1479 /* Check if we need to preempt */
1480 - andi. r0,r4,_TIF_NEED_RESCHED
1482 - /* Check that preempt_count() == 0 and interrupts are enabled */
1483 lwz r8,TI_PREEMPT(r9)
1484 + cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
1486 + andi. r0,r4,_TIF_NEED_RESCHED
1489 + andi. r0,r4,_TIF_NEED_RESCHED_LAZY
1491 + lwz r8,TI_PREEMPT_LAZY(r9)
1493 + /* Check that preempt_count() == 0 and interrupts are enabled */
1498 @@ -738,7 +746,7 @@ _GLOBAL(ret_from_except_lite)
1499 /* Re-test flags and eventually loop */
1500 CURRENT_THREAD_INFO(r9, r1)
1502 - andi. r0,r4,_TIF_NEED_RESCHED
1503 + andi. r0,r4,_TIF_NEED_RESCHED_MASK
1507 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
1508 index 3c05c311e35e..f83f6ac1274d 100644
1509 --- a/arch/powerpc/kernel/irq.c
1510 +++ b/arch/powerpc/kernel/irq.c
1511 @@ -638,6 +638,7 @@ void irq_ctx_init(void)
1515 +#ifndef CONFIG_PREEMPT_RT_FULL
1516 void do_softirq_own_stack(void)
1518 struct thread_info *curtp, *irqtp;
1519 @@ -655,6 +656,7 @@ void do_softirq_own_stack(void)
1521 set_bits(irqtp->flags, &curtp->flags);
1525 irq_hw_number_t virq_to_hw(unsigned int virq)
1527 diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
1528 index 030d72df5dd5..b471a709e100 100644
1529 --- a/arch/powerpc/kernel/misc_32.S
1530 +++ b/arch/powerpc/kernel/misc_32.S
1532 * We store the saved ksp_limit in the unused part
1533 * of the STACK_FRAME_OVERHEAD
1535 +#ifndef CONFIG_PREEMPT_RT_FULL
1536 _GLOBAL(call_do_softirq)
1539 @@ -57,6 +58,7 @@ _GLOBAL(call_do_softirq)
1540 stw r10,THREAD+KSP_LIMIT(r2)
1546 * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
1547 diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
1548 index 4cefe6888b18..cb2ee4be999a 100644
1549 --- a/arch/powerpc/kernel/misc_64.S
1550 +++ b/arch/powerpc/kernel/misc_64.S
1555 +#ifndef CONFIG_PREEMPT_RT_FULL
1556 _GLOBAL(call_do_softirq)
1559 @@ -41,6 +42,7 @@ _GLOBAL(call_do_softirq)
1565 _GLOBAL(call_do_irq)
1567 diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
1568 index 029be26b5a17..9528089ea142 100644
1569 --- a/arch/powerpc/kvm/Kconfig
1570 +++ b/arch/powerpc/kvm/Kconfig
1571 @@ -175,6 +175,7 @@ config KVM_E500MC
1573 bool "KVM in-kernel MPIC emulation"
1574 depends on KVM && E500
1575 + depends on !PREEMPT_RT_FULL
1576 select HAVE_KVM_IRQCHIP
1577 select HAVE_KVM_IRQFD
1578 select HAVE_KVM_IRQ_ROUTING
1579 diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
1580 index e48462447ff0..2670cee66064 100644
1581 --- a/arch/powerpc/platforms/ps3/device-init.c
1582 +++ b/arch/powerpc/platforms/ps3/device-init.c
1583 @@ -752,7 +752,7 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev,
1585 pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
1587 - res = wait_event_interruptible(dev->done.wait,
1588 + res = swait_event_interruptible(dev->done.wait,
1589 dev->done.done || kthread_should_stop());
1590 if (kthread_should_stop())
1592 diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
1593 index 6c0378c0b8b5..abd58b4dff97 100644
1594 --- a/arch/sh/kernel/irq.c
1595 +++ b/arch/sh/kernel/irq.c
1596 @@ -147,6 +147,7 @@ void irq_ctx_exit(int cpu)
1597 hardirq_ctx[cpu] = NULL;
1600 +#ifndef CONFIG_PREEMPT_RT_FULL
1601 void do_softirq_own_stack(void)
1603 struct thread_info *curctx;
1604 @@ -174,6 +175,7 @@ void do_softirq_own_stack(void)
1605 "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
1610 static inline void handle_one_irq(unsigned int irq)
1612 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
1613 index 165ecdd24d22..b68a464a22be 100644
1614 --- a/arch/sparc/Kconfig
1615 +++ b/arch/sparc/Kconfig
1616 @@ -194,12 +194,10 @@ config NR_CPUS
1617 source kernel/Kconfig.hz
1619 config RWSEM_GENERIC_SPINLOCK
1621 - default y if SPARC32
1622 + def_bool PREEMPT_RT_FULL
1624 config RWSEM_XCHGADD_ALGORITHM
1626 - default y if SPARC64
1627 + def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
1629 config GENERIC_HWEIGHT
1631 diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
1632 index 34a7930b76ef..773740521008 100644
1633 --- a/arch/sparc/kernel/irq_64.c
1634 +++ b/arch/sparc/kernel/irq_64.c
1635 @@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
1636 set_irq_regs(old_regs);
1639 +#ifndef CONFIG_PREEMPT_RT_FULL
1640 void do_softirq_own_stack(void)
1642 void *orig_sp, *sp = softirq_stack[smp_processor_id()];
1643 @@ -868,6 +869,7 @@ void do_softirq_own_stack(void)
1644 __asm__ __volatile__("mov %0, %%sp"
1649 #ifdef CONFIG_HOTPLUG_CPU
1650 void fixup_irqs(void)
1651 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
1652 index bada636d1065..f8a995c90c01 100644
1653 --- a/arch/x86/Kconfig
1654 +++ b/arch/x86/Kconfig
1655 @@ -17,6 +17,7 @@ config X86_64
1659 + select HAVE_PREEMPT_LAZY
1660 select ACPI_LEGACY_TABLES_LOOKUP if ACPI
1661 select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
1663 @@ -232,8 +233,11 @@ config ARCH_MAY_HAVE_PC_FDC
1665 depends on ISA_DMA_API
1667 +config RWSEM_GENERIC_SPINLOCK
1668 + def_bool PREEMPT_RT_FULL
1670 config RWSEM_XCHGADD_ALGORITHM
1672 + def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
1674 config GENERIC_CALIBRATE_DELAY
1676 @@ -897,7 +901,7 @@ config IOMMU_HELPER
1678 bool "Enable Maximum number of SMP Processors and NUMA Nodes"
1679 depends on X86_64 && SMP && DEBUG_KERNEL
1680 - select CPUMASK_OFFSTACK
1681 + select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
1683 Enable maximum number of CPUS and NUMA Nodes for this architecture.
1685 diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
1686 index aa8b0672f87a..2429414bfc71 100644
1687 --- a/arch/x86/crypto/aesni-intel_glue.c
1688 +++ b/arch/x86/crypto/aesni-intel_glue.c
1689 @@ -372,14 +372,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
1690 err = blkcipher_walk_virt(desc, &walk);
1691 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1693 - kernel_fpu_begin();
1694 while ((nbytes = walk.nbytes)) {
1695 + kernel_fpu_begin();
1696 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1697 - nbytes & AES_BLOCK_MASK);
1698 + nbytes & AES_BLOCK_MASK);
1700 nbytes &= AES_BLOCK_SIZE - 1;
1701 err = blkcipher_walk_done(desc, &walk, nbytes);
1707 @@ -396,14 +396,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
1708 err = blkcipher_walk_virt(desc, &walk);
1709 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1711 - kernel_fpu_begin();
1712 while ((nbytes = walk.nbytes)) {
1713 + kernel_fpu_begin();
1714 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1715 nbytes & AES_BLOCK_MASK);
1717 nbytes &= AES_BLOCK_SIZE - 1;
1718 err = blkcipher_walk_done(desc, &walk, nbytes);
1724 @@ -420,14 +420,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
1725 err = blkcipher_walk_virt(desc, &walk);
1726 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1728 - kernel_fpu_begin();
1729 while ((nbytes = walk.nbytes)) {
1730 + kernel_fpu_begin();
1731 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1732 nbytes & AES_BLOCK_MASK, walk.iv);
1734 nbytes &= AES_BLOCK_SIZE - 1;
1735 err = blkcipher_walk_done(desc, &walk, nbytes);
1741 @@ -444,14 +444,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
1742 err = blkcipher_walk_virt(desc, &walk);
1743 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1745 - kernel_fpu_begin();
1746 while ((nbytes = walk.nbytes)) {
1747 + kernel_fpu_begin();
1748 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1749 nbytes & AES_BLOCK_MASK, walk.iv);
1751 nbytes &= AES_BLOCK_SIZE - 1;
1752 err = blkcipher_walk_done(desc, &walk, nbytes);
1758 @@ -503,18 +503,20 @@ static int ctr_crypt(struct blkcipher_desc *desc,
1759 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
1760 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1762 - kernel_fpu_begin();
1763 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
1764 + kernel_fpu_begin();
1765 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1766 nbytes & AES_BLOCK_MASK, walk.iv);
1768 nbytes &= AES_BLOCK_SIZE - 1;
1769 err = blkcipher_walk_done(desc, &walk, nbytes);
1772 + kernel_fpu_begin();
1773 ctr_crypt_final(ctx, &walk);
1775 err = blkcipher_walk_done(desc, &walk, 0);
1781 diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
1782 index 8648158f3916..d7699130ee36 100644
1783 --- a/arch/x86/crypto/cast5_avx_glue.c
1784 +++ b/arch/x86/crypto/cast5_avx_glue.c
1785 @@ -59,7 +59,7 @@ static inline void cast5_fpu_end(bool fpu_enabled)
1786 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
1789 - bool fpu_enabled = false;
1791 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
1792 const unsigned int bsize = CAST5_BLOCK_SIZE;
1793 unsigned int nbytes;
1794 @@ -75,7 +75,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
1795 u8 *wsrc = walk->src.virt.addr;
1796 u8 *wdst = walk->dst.virt.addr;
1798 - fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
1799 + fpu_enabled = cast5_fpu_begin(false, nbytes);
1801 /* Process multi-block batch */
1802 if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
1803 @@ -103,10 +103,9 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
1804 } while (nbytes >= bsize);
1807 + cast5_fpu_end(fpu_enabled);
1808 err = blkcipher_walk_done(desc, walk, nbytes);
1811 - cast5_fpu_end(fpu_enabled);
1815 @@ -227,7 +226,7 @@ static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
1816 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1817 struct scatterlist *src, unsigned int nbytes)
1819 - bool fpu_enabled = false;
1821 struct blkcipher_walk walk;
1824 @@ -236,12 +235,11 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1825 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1827 while ((nbytes = walk.nbytes)) {
1828 - fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
1829 + fpu_enabled = cast5_fpu_begin(false, nbytes);
1830 nbytes = __cbc_decrypt(desc, &walk);
1831 + cast5_fpu_end(fpu_enabled);
1832 err = blkcipher_walk_done(desc, &walk, nbytes);
1835 - cast5_fpu_end(fpu_enabled);
1839 @@ -311,7 +309,7 @@ static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
1840 static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1841 struct scatterlist *src, unsigned int nbytes)
1843 - bool fpu_enabled = false;
1845 struct blkcipher_walk walk;
1848 @@ -320,13 +318,12 @@ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1849 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1851 while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
1852 - fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
1853 + fpu_enabled = cast5_fpu_begin(false, nbytes);
1854 nbytes = __ctr_crypt(desc, &walk);
1855 + cast5_fpu_end(fpu_enabled);
1856 err = blkcipher_walk_done(desc, &walk, nbytes);
1859 - cast5_fpu_end(fpu_enabled);
1862 ctr_crypt_final(desc, &walk);
1863 err = blkcipher_walk_done(desc, &walk, 0);
1864 diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
1865 index 6a85598931b5..3a506ce7ed93 100644
1866 --- a/arch/x86/crypto/glue_helper.c
1867 +++ b/arch/x86/crypto/glue_helper.c
1868 @@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
1869 void *ctx = crypto_blkcipher_ctx(desc->tfm);
1870 const unsigned int bsize = 128 / 8;
1871 unsigned int nbytes, i, func_bytes;
1872 - bool fpu_enabled = false;
1876 err = blkcipher_walk_virt(desc, walk);
1877 @@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
1878 u8 *wdst = walk->dst.virt.addr;
1880 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
1881 - desc, fpu_enabled, nbytes);
1882 + desc, false, nbytes);
1884 for (i = 0; i < gctx->num_funcs; i++) {
1885 func_bytes = bsize * gctx->funcs[i].num_blocks;
1886 @@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
1890 + glue_fpu_end(fpu_enabled);
1891 err = blkcipher_walk_done(desc, walk, nbytes);
1894 - glue_fpu_end(fpu_enabled);
1898 @@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
1899 struct scatterlist *src, unsigned int nbytes)
1901 const unsigned int bsize = 128 / 8;
1902 - bool fpu_enabled = false;
1904 struct blkcipher_walk walk;
1907 @@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
1909 while ((nbytes = walk.nbytes)) {
1910 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
1911 - desc, fpu_enabled, nbytes);
1912 + desc, false, nbytes);
1913 nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
1914 + glue_fpu_end(fpu_enabled);
1915 err = blkcipher_walk_done(desc, &walk, nbytes);
1918 - glue_fpu_end(fpu_enabled);
1921 EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
1922 @@ -277,7 +277,7 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
1923 struct scatterlist *src, unsigned int nbytes)
1925 const unsigned int bsize = 128 / 8;
1926 - bool fpu_enabled = false;
1928 struct blkcipher_walk walk;
1931 @@ -286,13 +286,12 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
1933 while ((nbytes = walk.nbytes) >= bsize) {
1934 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
1935 - desc, fpu_enabled, nbytes);
1936 + desc, false, nbytes);
1937 nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
1938 + glue_fpu_end(fpu_enabled);
1939 err = blkcipher_walk_done(desc, &walk, nbytes);
1942 - glue_fpu_end(fpu_enabled);
1945 glue_ctr_crypt_final_128bit(
1946 gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
1947 @@ -347,7 +346,7 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
1948 void *tweak_ctx, void *crypt_ctx)
1950 const unsigned int bsize = 128 / 8;
1951 - bool fpu_enabled = false;
1953 struct blkcipher_walk walk;
1956 @@ -360,21 +359,21 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
1958 /* set minimum length to bsize, for tweak_fn */
1959 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
1960 - desc, fpu_enabled,
1962 nbytes < bsize ? bsize : nbytes);
1964 /* calculate first value of T */
1965 tweak_fn(tweak_ctx, walk.iv, walk.iv);
1966 + glue_fpu_end(fpu_enabled);
1969 + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
1970 + desc, false, nbytes);
1971 nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
1973 + glue_fpu_end(fpu_enabled);
1974 err = blkcipher_walk_done(desc, &walk, nbytes);
1975 nbytes = walk.nbytes;
1978 - glue_fpu_end(fpu_enabled);
1982 EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
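Editor's note on the three crypto glue changes above: each hunk moves the FPU begin/end (or the fpu_begin helper) inside the walk loop, so the FPU-owned region (which also implies a preemption-disabled region) covers only one chunk of the request instead of the whole request. A minimal sketch of that pattern, assuming a hypothetical process_chunk() SIMD helper rather than the actual AES-NI/CAST5 routines:

    /*
     * Sketch only: keep each kernel_fpu_begin()/kernel_fpu_end() section
     * short so preemption is re-enabled between chunks (relevant on RT).
     */
    #include <linux/types.h>
    #include <linux/kernel.h>
    #include <asm/fpu/api.h>

    static void process_chunk(u8 *dst, const u8 *src, unsigned int n)
    {
            /* stand-in for the real SIMD transform */
    }

    static void crypt_in_chunks(u8 *dst, const u8 *src, unsigned int len,
                                unsigned int chunk)
    {
            while (len) {
                    unsigned int n = min(len, chunk);

                    kernel_fpu_begin();     /* FPU usable, preemption off */
                    process_chunk(dst, src, n);
                    kernel_fpu_end();       /* preemption possible again */

                    dst += n;
                    src += n;
                    len -= n;
            }
    }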
1983 diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
1984 index bdd9cc59d20f..56d01a339ba4 100644
1985 --- a/arch/x86/entry/common.c
1986 +++ b/arch/x86/entry/common.c
1987 @@ -129,7 +129,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
1989 #define EXIT_TO_USERMODE_LOOP_FLAGS \
1990 (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
1991 - _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
1992 + _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY)
1994 static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
1996 @@ -145,9 +145,16 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
1997 /* We have work to do. */
2000 - if (cached_flags & _TIF_NEED_RESCHED)
2001 + if (cached_flags & _TIF_NEED_RESCHED_MASK)
2004 +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
2005 + if (unlikely(current->forced_info.si_signo)) {
2006 + struct task_struct *t = current;
2007 + force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
2008 + t->forced_info.si_signo = 0;
2011 if (cached_flags & _TIF_UPROBE)
2012 uprobe_notify_resume(regs);
2014 diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
2015 index edba8606b99a..4a3389535fc6 100644
2016 --- a/arch/x86/entry/entry_32.S
2017 +++ b/arch/x86/entry/entry_32.S
2018 @@ -308,8 +308,25 @@ END(ret_from_exception)
2019 ENTRY(resume_kernel)
2020 DISABLE_INTERRUPTS(CLBR_ANY)
2022 + # preempt count == 0 + NEED_RS set?
2023 cmpl $0, PER_CPU_VAR(__preempt_count)
2024 +#ifndef CONFIG_PREEMPT_LAZY
2029 + # at least preempt count == 0 ?
2030 + cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
2033 + movl PER_CPU_VAR(current_task), %ebp
2034 + cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
2037 + testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
2041 testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
2043 call preempt_schedule_irq
2044 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
2045 index ef766a358b37..28401f826ab1 100644
2046 --- a/arch/x86/entry/entry_64.S
2047 +++ b/arch/x86/entry/entry_64.S
2048 @@ -546,7 +546,23 @@ GLOBAL(retint_user)
2049 bt $9, EFLAGS(%rsp) /* were interrupts off? */
2051 0: cmpl $0, PER_CPU_VAR(__preempt_count)
2052 +#ifndef CONFIG_PREEMPT_LAZY
2055 + jz do_preempt_schedule_irq
2057 + # at least preempt count == 0 ?
2058 + cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
2061 + movq PER_CPU_VAR(current_task), %rcx
2062 + cmpl $0, TASK_TI_preempt_lazy_count(%rcx)
2065 + bt $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
2067 +do_preempt_schedule_irq:
2069 call preempt_schedule_irq
2072 @@ -894,6 +910,7 @@ EXPORT_SYMBOL(native_load_gs_index)
2076 +#ifndef CONFIG_PREEMPT_RT_FULL
2077 /* Call softirq on interrupt stack. Interrupts are off. */
2078 ENTRY(do_softirq_own_stack)
2080 @@ -906,6 +923,7 @@ ENTRY(do_softirq_own_stack)
2081 decl PER_CPU_VAR(irq_count)
2083 END(do_softirq_own_stack)
2087 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
2088 diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
2089 index 17f218645701..11bd1b7ee6eb 100644
2090 --- a/arch/x86/include/asm/preempt.h
2091 +++ b/arch/x86/include/asm/preempt.h
2092 @@ -79,17 +79,46 @@ static __always_inline void __preempt_count_sub(int val)
2093 * a decrement which hits zero means we have no preempt_count and should
2096 -static __always_inline bool __preempt_count_dec_and_test(void)
2097 +static __always_inline bool ____preempt_count_dec_and_test(void)
2099 GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
2102 +static __always_inline bool __preempt_count_dec_and_test(void)
2104 + if (____preempt_count_dec_and_test())
2106 +#ifdef CONFIG_PREEMPT_LAZY
2107 + if (current_thread_info()->preempt_lazy_count)
2109 + return test_thread_flag(TIF_NEED_RESCHED_LAZY);
2116 * Returns true when we need to resched and can (barring IRQ state).
2118 static __always_inline bool should_resched(int preempt_offset)
2120 +#ifdef CONFIG_PREEMPT_LAZY
2123 + tmp = raw_cpu_read_4(__preempt_count);
2124 + if (tmp == preempt_offset)
2127 + /* preempt count == 0 ? */
2128 + tmp &= ~PREEMPT_NEED_RESCHED;
2131 + if (current_thread_info()->preempt_lazy_count)
2133 + return test_thread_flag(TIF_NEED_RESCHED_LAZY);
2135 return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
2139 #ifdef CONFIG_PREEMPT
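Since several context lines of the preempt.h hunk above are missing, here is a rough sketch of the decision the patched helpers implement, not the literal code: a hard NEED_RESCHED always requests a reschedule, while a lazy request is honoured only when the task holds no lazy-preempt sections.

    /* Rough sketch of the lazy-preemption resched check (simplified). */
    #include <linux/thread_info.h>
    #include <linux/sched.h>

    static bool want_resched(void)
    {
            if (test_thread_flag(TIF_NEED_RESCHED))
                    return true;            /* hard request always wins */

            if (current_thread_info()->preempt_lazy_count)
                    return false;           /* inside a lazy-preempt section */

            return test_thread_flag(TIF_NEED_RESCHED_LAZY);
    }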
2140 diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
2141 index 8af22be0fe61..d1328789b759 100644
2142 --- a/arch/x86/include/asm/signal.h
2143 +++ b/arch/x86/include/asm/signal.h
2144 @@ -27,6 +27,19 @@ typedef struct {
2145 #define SA_IA32_ABI 0x02000000u
2146 #define SA_X32_ABI 0x01000000u
2149 + * Because some traps use the IST stack, we must keep preemption
2150 + * disabled while calling do_trap(), but do_trap() may call
2151 + * force_sig_info() which will grab the signal spin_locks for the
2152 + * task, which in PREEMPT_RT_FULL are mutexes. By defining
2153 + * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
2154 + * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
2157 +#if defined(CONFIG_PREEMPT_RT_FULL)
2158 +#define ARCH_RT_DELAYS_SIGNAL_SEND
2161 #ifndef CONFIG_COMPAT
2162 typedef sigset_t compat_sigset_t;
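The comment above describes the delayed-send mechanism whose consumer appears in the exit_to_usermode_loop() hunk earlier in this series. A hedged sketch of the two halves of that pattern, with details simplified (forced_info is the task_struct field the patch adds):

    #include <linux/sched.h>
    #include <linux/signal.h>

    /* Producer: called from atomic (IST) context, must not take sleeping locks. */
    static void queue_delayed_signal(struct task_struct *t, struct siginfo *info)
    {
            t->forced_info = *info;                 /* stash the pending signal */
            set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
    }

    /* Consumer: runs on the return-to-userspace path, sleeping locks allowed. */
    static void deliver_delayed_signal(void)
    {
            struct task_struct *t = current;

            if (unlikely(t->forced_info.si_signo)) {
                    force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
                    t->forced_info.si_signo = 0;
            }
    }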
2164 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
2165 index 58505f01962f..02fa39652cd6 100644
2166 --- a/arch/x86/include/asm/stackprotector.h
2167 +++ b/arch/x86/include/asm/stackprotector.h
2170 static __always_inline void boot_init_stack_canary(void)
2173 + u64 uninitialized_var(canary);
2176 #ifdef CONFIG_X86_64
2177 @@ -70,8 +70,15 @@ static __always_inline void boot_init_stack_canary(void)
2178 * of randomness. The TSC only matters for very early init,
2179 * there it already has some randomness on most systems. Later
2180 * on during the bootup the random pool has true entropy too.
2182 + * For preempt-rt we need to weaken the randomness a bit, as
2183 + * we can't call into the random generator from atomic context
2184 + * due to locking constraints. We just leave the canary
2185 + * uninitialized and use the TSC-based randomness on top of it.
2187 +#ifndef CONFIG_PREEMPT_RT_FULL
2188 get_random_bytes(&canary, sizeof(canary));
2191 canary += tsc + (tsc << 32UL);
2193 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
2194 index ad6f5eb07a95..5ceb3a1c2b1a 100644
2195 --- a/arch/x86/include/asm/thread_info.h
2196 +++ b/arch/x86/include/asm/thread_info.h
2197 @@ -54,11 +54,14 @@ struct task_struct;
2199 struct thread_info {
2200 unsigned long flags; /* low level flags */
2201 + int preempt_lazy_count; /* 0 => lazy preemptable
2205 #define INIT_THREAD_INFO(tsk) \
2208 + .preempt_lazy_count = 0, \
2211 #define init_stack (init_thread_union.stack)
2212 @@ -67,6 +70,10 @@ struct thread_info {
2214 #include <asm/asm-offsets.h>
2216 +#define GET_THREAD_INFO(reg) \
2217 + _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
2218 + _ASM_SUB $(THREAD_SIZE),reg ;
2223 @@ -85,6 +92,7 @@ struct thread_info {
2224 #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
2225 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
2226 #define TIF_SECCOMP 8 /* secure computing */
2227 +#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
2228 #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
2229 #define TIF_UPROBE 12 /* breakpointed or singlestepping */
2230 #define TIF_NOTSC 16 /* TSC is not accessible in userland */
2231 @@ -108,6 +116,7 @@ struct thread_info {
2232 #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
2233 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
2234 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2235 +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
2236 #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
2237 #define _TIF_UPROBE (1 << TIF_UPROBE)
2238 #define _TIF_NOTSC (1 << TIF_NOTSC)
2239 @@ -143,6 +152,8 @@ struct thread_info {
2240 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
2241 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
2243 +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
2245 #define STACK_WARN (THREAD_SIZE/8)
2248 diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
2249 index 57ab86d94d64..35d25e27180f 100644
2250 --- a/arch/x86/include/asm/uv/uv_bau.h
2251 +++ b/arch/x86/include/asm/uv/uv_bau.h
2252 @@ -624,9 +624,9 @@ struct bau_control {
2253 cycles_t send_message;
2254 cycles_t period_end;
2255 cycles_t period_time;
2256 - spinlock_t uvhub_lock;
2257 - spinlock_t queue_lock;
2258 - spinlock_t disable_lock;
2259 + raw_spinlock_t uvhub_lock;
2260 + raw_spinlock_t queue_lock;
2261 + raw_spinlock_t disable_lock;
2264 int max_concurr_const;
2265 @@ -815,15 +815,15 @@ static inline int atom_asr(short i, struct atomic_short *v)
2266 * to be lowered below the current 'v'. atomic_add_unless can only stop
2269 -static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
2270 +static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
2273 + raw_spin_lock(lock);
2274 if (atomic_read(v) >= u) {
2275 - spin_unlock(lock);
2276 + raw_spin_unlock(lock);
2280 - spin_unlock(lock);
2281 + raw_spin_unlock(lock);
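Editor's note: on PREEMPT_RT a plain spinlock_t becomes a sleeping lock, so locks that are taken from truly atomic contexts, as in the BAU code above, are converted to raw_spinlock_t, which keeps spinning semantics. A minimal illustration of the raw variant, using a hypothetical example_lock:

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(example_lock);

    static void example_atomic_section(void)
    {
            unsigned long flags;

            /* raw_spinlock_t spins even on PREEMPT_RT; keep the section short */
            raw_spin_lock_irqsave(&example_lock, flags);
            /* ... touch shared state that is reached from atomic context ... */
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }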
2285 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
2286 index 931ced8ca345..167975ac8af7 100644
2287 --- a/arch/x86/kernel/acpi/boot.c
2288 +++ b/arch/x86/kernel/acpi/boot.c
2289 @@ -87,7 +87,9 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
2293 +#ifdef CONFIG_X86_IO_APIC
2294 static DEFINE_MUTEX(acpi_ioapic_lock);
2297 /* --------------------------------------------------------------------------
2298 Boot-time Configuration
2299 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
2300 index 7249f1500bcb..a79d5c224004 100644
2301 --- a/arch/x86/kernel/apic/io_apic.c
2302 +++ b/arch/x86/kernel/apic/io_apic.c
2303 @@ -1712,7 +1712,8 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
2304 static inline bool ioapic_irqd_mask(struct irq_data *data)
2306 /* If we are moving the irq we need to mask it */
2307 - if (unlikely(irqd_is_setaffinity_pending(data))) {
2308 + if (unlikely(irqd_is_setaffinity_pending(data) &&
2309 + !irqd_irq_inprogress(data))) {
2310 mask_ioapic_irq(data);
2313 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
2314 index c62e015b126c..0cc71257fca6 100644
2315 --- a/arch/x86/kernel/asm-offsets.c
2316 +++ b/arch/x86/kernel/asm-offsets.c
2317 @@ -36,6 +36,7 @@ void common(void) {
2320 OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
2321 + OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count);
2322 OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
2325 @@ -91,4 +92,5 @@ void common(void) {
2328 DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
2329 + DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
2331 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
2332 index 22cda29d654e..57c85e3af092 100644
2333 --- a/arch/x86/kernel/cpu/mcheck/mce.c
2334 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
2336 #include <linux/debugfs.h>
2337 #include <linux/irq_work.h>
2338 #include <linux/export.h>
2339 +#include <linux/jiffies.h>
2340 +#include <linux/swork.h>
2341 #include <linux/jump_label.h>
2343 #include <asm/processor.h>
2344 @@ -1307,7 +1309,7 @@ void mce_log_therm_throt_event(__u64 status)
2345 static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
2347 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
2348 -static DEFINE_PER_CPU(struct timer_list, mce_timer);
2349 +static DEFINE_PER_CPU(struct hrtimer, mce_timer);
2351 static unsigned long mce_adjust_timer_default(unsigned long interval)
2353 @@ -1316,32 +1318,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
2355 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
2357 -static void __restart_timer(struct timer_list *t, unsigned long interval)
2358 +static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval)
2360 - unsigned long when = jiffies + interval;
2361 - unsigned long flags;
2363 - local_irq_save(flags);
2365 - if (timer_pending(t)) {
2366 - if (time_before(when, t->expires))
2367 - mod_timer(t, when);
2369 - t->expires = round_jiffies(when);
2370 - add_timer_on(t, smp_processor_id());
2373 - local_irq_restore(flags);
2375 + return HRTIMER_NORESTART;
2376 + hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval)));
2377 + return HRTIMER_RESTART;
2380 -static void mce_timer_fn(unsigned long data)
2381 +static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
2383 - struct timer_list *t = this_cpu_ptr(&mce_timer);
2384 - int cpu = smp_processor_id();
2387 - WARN_ON(cpu != data);
2389 iv = __this_cpu_read(mce_next_interval);
2391 if (mce_available(this_cpu_ptr(&cpu_info))) {
2392 @@ -1364,7 +1352,7 @@ static void mce_timer_fn(unsigned long data)
2395 __this_cpu_write(mce_next_interval, iv);
2396 - __restart_timer(t, iv);
2397 + return __restart_timer(timer, iv);
2401 @@ -1372,7 +1360,7 @@ static void mce_timer_fn(unsigned long data)
2403 void mce_timer_kick(unsigned long interval)
2405 - struct timer_list *t = this_cpu_ptr(&mce_timer);
2406 + struct hrtimer *t = this_cpu_ptr(&mce_timer);
2407 unsigned long iv = __this_cpu_read(mce_next_interval);
2409 __restart_timer(t, interval);
2410 @@ -1387,7 +1375,7 @@ static void mce_timer_delete_all(void)
2413 for_each_online_cpu(cpu)
2414 - del_timer_sync(&per_cpu(mce_timer, cpu));
2415 + hrtimer_cancel(&per_cpu(mce_timer, cpu));
2418 static void mce_do_trigger(struct work_struct *work)
2419 @@ -1397,6 +1385,56 @@ static void mce_do_trigger(struct work_struct *work)
2421 static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
2423 +static void __mce_notify_work(struct swork_event *event)
2425 + /* Not more than two messages every minute */
2426 + static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
2428 + /* wake processes polling /dev/mcelog */
2429 + wake_up_interruptible(&mce_chrdev_wait);
2432 + * There is no risk of missing notifications because
2433 + * work_pending is always cleared before the function is
2436 + if (mce_helper[0] && !work_pending(&mce_trigger_work))
2437 + schedule_work(&mce_trigger_work);
2439 + if (__ratelimit(&ratelimit))
2440 + pr_info(HW_ERR "Machine check events logged\n");
2443 +#ifdef CONFIG_PREEMPT_RT_FULL
2444 +static bool notify_work_ready __read_mostly;
2445 +static struct swork_event notify_work;
2447 +static int mce_notify_work_init(void)
2451 + err = swork_get();
2455 + INIT_SWORK(&notify_work, __mce_notify_work);
2456 + notify_work_ready = true;
2460 +static void mce_notify_work(void)
2462 + if (notify_work_ready)
2463 + swork_queue(&notify_work);
2466 +static void mce_notify_work(void)
2468 + __mce_notify_work(NULL);
2470 +static inline int mce_notify_work_init(void) { return 0; }
2474 * Notify the user(s) about new machine check events.
2475 * Can be called from interrupt context, but not from machine check/NMI
2476 @@ -1404,19 +1442,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
2478 int mce_notify_irq(void)
2480 - /* Not more than two messages every minute */
2481 - static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
2483 if (test_and_clear_bit(0, &mce_need_notify)) {
2484 - /* wake processes polling /dev/mcelog */
2485 - wake_up_interruptible(&mce_chrdev_wait);
2487 - if (mce_helper[0])
2488 - schedule_work(&mce_trigger_work);
2490 - if (__ratelimit(&ratelimit))
2491 - pr_info(HW_ERR "Machine check events logged\n");
2493 + mce_notify_work();
2497 @@ -1722,7 +1749,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
2501 -static void mce_start_timer(unsigned int cpu, struct timer_list *t)
2502 +static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
2504 unsigned long iv = check_interval * HZ;
2506 @@ -1731,16 +1758,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
2508 per_cpu(mce_next_interval, cpu) = iv;
2510 - t->expires = round_jiffies(jiffies + iv);
2511 - add_timer_on(t, cpu);
2512 + hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
2513 + 0, HRTIMER_MODE_REL_PINNED);
2516 static void __mcheck_cpu_init_timer(void)
2518 - struct timer_list *t = this_cpu_ptr(&mce_timer);
2519 + struct hrtimer *t = this_cpu_ptr(&mce_timer);
2520 unsigned int cpu = smp_processor_id();
2522 - setup_pinned_timer(t, mce_timer_fn, cpu);
2523 + hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2524 + t->function = mce_timer_fn;
2525 mce_start_timer(cpu, t);
2528 @@ -2465,6 +2493,8 @@ static void mce_disable_cpu(void *h)
2529 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2532 + hrtimer_cancel(this_cpu_ptr(&mce_timer));
2534 if (!(action & CPU_TASKS_FROZEN))
2537 @@ -2487,6 +2517,7 @@ static void mce_reenable_cpu(void *h)
2539 wrmsrl(msr_ops.ctl(i), b->ctl);
2541 + __mcheck_cpu_init_timer();
2544 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
2545 @@ -2494,7 +2525,6 @@ static int
2546 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2548 unsigned int cpu = (unsigned long)hcpu;
2549 - struct timer_list *t = &per_cpu(mce_timer, cpu);
2551 switch (action & ~CPU_TASKS_FROZEN) {
2553 @@ -2514,11 +2544,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2555 case CPU_DOWN_PREPARE:
2556 smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
2557 - del_timer_sync(t);
2559 case CPU_DOWN_FAILED:
2560 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
2561 - mce_start_timer(cpu, t);
2565 @@ -2557,6 +2585,10 @@ static __init int mcheck_init_device(void)
2569 + err = mce_notify_work_init();
2573 if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
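The MCE poller conversion above replaces the per-CPU timer_list with an hrtimer whose callback re-arms itself. A minimal self-contained sketch of that hrtimer idiom, assuming an illustrative 100 ms period rather than the MCE check_interval logic:

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer poll_timer;

    static enum hrtimer_restart poll_fn(struct hrtimer *timer)
    {
            /* ... do the periodic work ... */

            /* Re-arm relative to now, as __restart_timer() does above. */
            hrtimer_forward_now(timer, ms_to_ktime(100));
            return HRTIMER_RESTART;
    }

    static void poll_start(void)
    {
            hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            poll_timer.function = poll_fn;
            hrtimer_start(&poll_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
    }

    static void poll_stop(void)
    {
            hrtimer_cancel(&poll_timer);    /* waits for a running callback */
    }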
2576 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
2577 index 1f38d9a4d9de..053bf3b2ef39 100644
2578 --- a/arch/x86/kernel/irq_32.c
2579 +++ b/arch/x86/kernel/irq_32.c
2580 @@ -127,6 +127,7 @@ void irq_ctx_init(int cpu)
2581 cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
2584 +#ifndef CONFIG_PREEMPT_RT_FULL
2585 void do_softirq_own_stack(void)
2587 struct irq_stack *irqstk;
2588 @@ -143,6 +144,7 @@ void do_softirq_own_stack(void)
2590 call_on_stack(__do_softirq, isp);
2594 bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
2596 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
2597 index bd7be8efdc4c..b3b0a7f7b1ca 100644
2598 --- a/arch/x86/kernel/process_32.c
2599 +++ b/arch/x86/kernel/process_32.c
2601 #include <linux/uaccess.h>
2602 #include <linux/io.h>
2603 #include <linux/kdebug.h>
2604 +#include <linux/highmem.h>
2606 #include <asm/pgtable.h>
2607 #include <asm/ldt.h>
2608 @@ -195,6 +196,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
2610 EXPORT_SYMBOL_GPL(start_thread);
2612 +#ifdef CONFIG_PREEMPT_RT_FULL
2613 +static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
2618 + * Clear @prev_p's kmap_atomic mappings
2620 + for (i = 0; i < prev_p->kmap_idx; i++) {
2621 + int idx = i + KM_TYPE_NR * smp_processor_id();
2622 + pte_t *ptep = kmap_pte - idx;
2624 + kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
2627 + * Restore @next_p's kmap_atomic mappings
2629 + for (i = 0; i < next_p->kmap_idx; i++) {
2630 + int idx = i + KM_TYPE_NR * smp_processor_id();
2632 + if (!pte_none(next_p->kmap_pte[i]))
2633 + set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
2638 +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
2643 * switch_to(x,y) should switch tasks from x to y.
2644 @@ -271,6 +301,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
2645 task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
2646 __switch_to_xtra(prev_p, next_p, tss);
2648 + switch_kmaps(prev_p, next_p);
2651 * Leave lazy mode, flushing any hypercalls made here.
2652 * This must be done before restoring TLS segments so
2653 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
2654 index 3f05c044720b..fe68afd37162 100644
2655 --- a/arch/x86/kvm/lapic.c
2656 +++ b/arch/x86/kvm/lapic.c
2657 @@ -1939,6 +1939,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
2658 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2659 HRTIMER_MODE_ABS_PINNED);
2660 apic->lapic_timer.timer.function = apic_timer_fn;
2661 + apic->lapic_timer.timer.irqsafe = 1;
2664 * APIC is created enabled. This will prevent kvm_lapic_set_base from
2665 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2666 index 81bba3c2137d..fcb84512e85d 100644
2667 --- a/arch/x86/kvm/x86.c
2668 +++ b/arch/x86/kvm/x86.c
2669 @@ -5958,6 +5958,13 @@ int kvm_arch_init(void *opaque)
2673 +#ifdef CONFIG_PREEMPT_RT_FULL
2674 + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
2675 + printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
2676 + return -EOPNOTSUPP;
2680 r = kvm_mmu_module_init();
2682 goto out_free_percpu;
2683 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
2684 index 6d18b70ed5a9..f752724c22e8 100644
2685 --- a/arch/x86/mm/highmem_32.c
2686 +++ b/arch/x86/mm/highmem_32.c
2687 @@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap);
2689 void *kmap_atomic_prot(struct page *page, pgprot_t prot)
2691 + pte_t pte = mk_pte(page, prot);
2692 unsigned long vaddr;
2695 - preempt_disable();
2696 + preempt_disable_nort();
2697 pagefault_disable();
2699 if (!PageHighMem(page))
2700 @@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
2701 idx = type + KM_TYPE_NR*smp_processor_id();
2702 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
2703 BUG_ON(!pte_none(*(kmap_pte-idx)));
2704 - set_pte(kmap_pte-idx, mk_pte(page, prot));
2705 +#ifdef CONFIG_PREEMPT_RT_FULL
2706 + current->kmap_pte[type] = pte;
2708 + set_pte(kmap_pte-idx, pte);
2709 arch_flush_lazy_mmu_mode();
2711 return (void *)vaddr;
2712 @@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr)
2713 * is a bad idea also, in case the page changes cacheability
2714 * attributes or becomes a protected page in a hypervisor.
2716 +#ifdef CONFIG_PREEMPT_RT_FULL
2717 + current->kmap_pte[type] = __pte(0);
2719 kpte_clear_flush(kmap_pte-idx, vaddr);
2720 kmap_atomic_idx_pop();
2721 arch_flush_lazy_mmu_mode();
2722 @@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr)
2727 + preempt_enable_nort();
2729 EXPORT_SYMBOL(__kunmap_atomic);
2731 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
2732 index ada98b39b8ad..585f6829653b 100644
2733 --- a/arch/x86/mm/iomap_32.c
2734 +++ b/arch/x86/mm/iomap_32.c
2735 @@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free);
2737 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
2739 + pte_t pte = pfn_pte(pfn, prot);
2740 unsigned long vaddr;
2743 @@ -65,7 +66,12 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
2744 type = kmap_atomic_idx_push();
2745 idx = type + KM_TYPE_NR * smp_processor_id();
2746 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
2747 - set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
2748 + WARN_ON(!pte_none(*(kmap_pte - idx)));
2750 +#ifdef CONFIG_PREEMPT_RT_FULL
2751 + current->kmap_pte[type] = pte;
2753 + set_pte(kmap_pte - idx, pte);
2754 arch_flush_lazy_mmu_mode();
2756 return (void *)vaddr;
2757 @@ -113,6 +119,9 @@ iounmap_atomic(void __iomem *kvaddr)
2758 * is a bad idea also, in case the page changes cacheability
2759 * attributes or becomes a protected page in a hypervisor.
2761 +#ifdef CONFIG_PREEMPT_RT_FULL
2762 + current->kmap_pte[type] = __pte(0);
2764 kpte_clear_flush(kmap_pte-idx, vaddr);
2765 kmap_atomic_idx_pop();
2767 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
2768 index e3353c97d086..01664968555c 100644
2769 --- a/arch/x86/mm/pageattr.c
2770 +++ b/arch/x86/mm/pageattr.c
2771 @@ -214,7 +214,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
2772 int in_flags, struct page **pages)
2774 unsigned int i, level;
2775 +#ifdef CONFIG_PREEMPT
2777 + * Avoid wbinvd() because it causes latencies on all CPUs,
2778 + * regardless of any CPU isolation that may be in effect.
2780 + unsigned long do_wbinvd = 0;
2782 unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
2785 BUG_ON(irqs_disabled());
2787 diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
2788 index 9e42842e924a..5398f97172f9 100644
2789 --- a/arch/x86/platform/uv/tlb_uv.c
2790 +++ b/arch/x86/platform/uv/tlb_uv.c
2791 @@ -748,9 +748,9 @@ static void destination_plugged(struct bau_desc *bau_desc,
2793 quiesce_local_uvhub(hmaster);
2795 - spin_lock(&hmaster->queue_lock);
2796 + raw_spin_lock(&hmaster->queue_lock);
2797 reset_with_ipi(&bau_desc->distribution, bcp);
2798 - spin_unlock(&hmaster->queue_lock);
2799 + raw_spin_unlock(&hmaster->queue_lock);
2801 end_uvhub_quiesce(hmaster);
2803 @@ -770,9 +770,9 @@ static void destination_timeout(struct bau_desc *bau_desc,
2805 quiesce_local_uvhub(hmaster);
2807 - spin_lock(&hmaster->queue_lock);
2808 + raw_spin_lock(&hmaster->queue_lock);
2809 reset_with_ipi(&bau_desc->distribution, bcp);
2810 - spin_unlock(&hmaster->queue_lock);
2811 + raw_spin_unlock(&hmaster->queue_lock);
2813 end_uvhub_quiesce(hmaster);
2815 @@ -793,7 +793,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
2818 hmaster = bcp->uvhub_master;
2819 - spin_lock(&hmaster->disable_lock);
2820 + raw_spin_lock(&hmaster->disable_lock);
2821 if (!bcp->baudisabled) {
2822 stat->s_bau_disabled++;
2824 @@ -806,7 +806,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
2828 - spin_unlock(&hmaster->disable_lock);
2829 + raw_spin_unlock(&hmaster->disable_lock);
2832 static void count_max_concurr(int stat, struct bau_control *bcp,
2833 @@ -869,7 +869,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
2835 static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
2837 - spinlock_t *lock = &hmaster->uvhub_lock;
2838 + raw_spinlock_t *lock = &hmaster->uvhub_lock;
2841 v = &hmaster->active_descriptor_count;
2842 @@ -1002,7 +1002,7 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
2843 struct bau_control *hmaster;
2845 hmaster = bcp->uvhub_master;
2846 - spin_lock(&hmaster->disable_lock);
2847 + raw_spin_lock(&hmaster->disable_lock);
2848 if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
2849 stat->s_bau_reenabled++;
2850 for_each_present_cpu(tcpu) {
2851 @@ -1014,10 +1014,10 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
2852 tbcp->period_giveups = 0;
2855 - spin_unlock(&hmaster->disable_lock);
2856 + raw_spin_unlock(&hmaster->disable_lock);
2859 - spin_unlock(&hmaster->disable_lock);
2860 + raw_spin_unlock(&hmaster->disable_lock);
2864 @@ -1940,9 +1940,9 @@ static void __init init_per_cpu_tunables(void)
2865 bcp->cong_reps = congested_reps;
2866 bcp->disabled_period = sec_2_cycles(disabled_period);
2867 bcp->giveup_limit = giveup_limit;
2868 - spin_lock_init(&bcp->queue_lock);
2869 - spin_lock_init(&bcp->uvhub_lock);
2870 - spin_lock_init(&bcp->disable_lock);
2871 + raw_spin_lock_init(&bcp->queue_lock);
2872 + raw_spin_lock_init(&bcp->uvhub_lock);
2873 + raw_spin_lock_init(&bcp->disable_lock);
2877 diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
2878 index b333fc45f9ec..8b85916e6986 100644
2879 --- a/arch/x86/platform/uv/uv_time.c
2880 +++ b/arch/x86/platform/uv/uv_time.c
2881 @@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
2883 /* There is one of these allocated per node */
2884 struct uv_rtc_timer_head {
2886 + raw_spinlock_t lock;
2887 /* next cpu waiting for timer, local node relative: */
2889 /* number of cpus on this node: */
2890 @@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers(void)
2891 uv_rtc_deallocate_timers();
2894 - spin_lock_init(&head->lock);
2895 + raw_spin_lock_init(&head->lock);
2896 head->ncpus = uv_blade_nr_possible_cpus(bid);
2897 head->next_cpu = -1;
2898 blade_info[bid] = head;
2899 @@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
2900 unsigned long flags;
2903 - spin_lock_irqsave(&head->lock, flags);
2904 + raw_spin_lock_irqsave(&head->lock, flags);
2906 next_cpu = head->next_cpu;
2908 @@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
2909 if (uv_setup_intr(cpu, expires)) {
2911 uv_rtc_find_next_timer(head, pnode);
2912 - spin_unlock_irqrestore(&head->lock, flags);
2913 + raw_spin_unlock_irqrestore(&head->lock, flags);
2918 - spin_unlock_irqrestore(&head->lock, flags);
2919 + raw_spin_unlock_irqrestore(&head->lock, flags);
2923 @@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
2924 unsigned long flags;
2927 - spin_lock_irqsave(&head->lock, flags);
2928 + raw_spin_lock_irqsave(&head->lock, flags);
2930 if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
2932 @@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
2933 uv_rtc_find_next_timer(head, pnode);
2936 - spin_unlock_irqrestore(&head->lock, flags);
2937 + raw_spin_unlock_irqrestore(&head->lock, flags);
2941 @@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, int force)
2942 static cycle_t uv_read_rtc(struct clocksource *cs)
2944 unsigned long offset;
2947 + preempt_disable();
2948 if (uv_get_min_hub_revision_id() == 1)
2951 offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
2953 - return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
2954 + cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
2961 diff --git a/block/blk-core.c b/block/blk-core.c
2962 index d1f2801ce836..6f945bb0fa1a 100644
2963 --- a/block/blk-core.c
2964 +++ b/block/blk-core.c
2965 @@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
2967 INIT_LIST_HEAD(&rq->queuelist);
2968 INIT_LIST_HEAD(&rq->timeout_list);
2969 +#ifdef CONFIG_PREEMPT_RT_FULL
2970 + INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
2974 rq->__sector = (sector_t) -1;
2975 @@ -233,7 +236,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
2977 void blk_start_queue(struct request_queue *q)
2979 - WARN_ON(!irqs_disabled());
2980 + WARN_ON_NONRT(!irqs_disabled());
2982 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
2984 @@ -659,7 +662,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
2988 - ret = wait_event_interruptible(q->mq_freeze_wq,
2989 + ret = swait_event_interruptible(q->mq_freeze_wq,
2990 !atomic_read(&q->mq_freeze_depth) ||
2991 blk_queue_dying(q));
2992 if (blk_queue_dying(q))
2993 @@ -679,7 +682,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
2994 struct request_queue *q =
2995 container_of(ref, struct request_queue, q_usage_counter);
2997 - wake_up_all(&q->mq_freeze_wq);
2998 + swake_up_all(&q->mq_freeze_wq);
3001 static void blk_rq_timed_out_timer(unsigned long data)
3002 @@ -748,7 +751,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
3003 q->bypass_depth = 1;
3004 __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
3006 - init_waitqueue_head(&q->mq_freeze_wq);
3007 + init_swait_queue_head(&q->mq_freeze_wq);
3010 * Init percpu_ref in atomic mode so that it's faster to shutdown.
3011 @@ -3200,7 +3203,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
3012 blk_run_queue_async(q);
3015 - spin_unlock(q->queue_lock);
3016 + spin_unlock_irq(q->queue_lock);
3019 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
3020 @@ -3248,7 +3251,6 @@ EXPORT_SYMBOL(blk_check_plugged);
3021 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3023 struct request_queue *q;
3024 - unsigned long flags;
3028 @@ -3268,11 +3270,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3033 - * Save and disable interrupts here, to avoid doing it for every
3034 - * queue lock we have to take.
3036 - local_irq_save(flags);
3037 while (!list_empty(&list)) {
3038 rq = list_entry_rq(list.next);
3039 list_del_init(&rq->queuelist);
3040 @@ -3285,7 +3282,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3041 queue_unplugged(q, depth, from_schedule);
3044 - spin_lock(q->queue_lock);
3045 + spin_lock_irq(q->queue_lock);
3049 @@ -3312,8 +3309,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3052 queue_unplugged(q, depth, from_schedule);
3054 - local_irq_restore(flags);
3057 void blk_finish_plug(struct blk_plug *plug)
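The freeze/unfreeze paths above move from regular waitqueues to simple waitqueues (swait), whose wake-ups stay cheap and are usable from the contexts RT cares about. A minimal sketch of the swait API with a hypothetical ready flag:

    #include <linux/swait.h>

    static DECLARE_SWAIT_QUEUE_HEAD(example_wq);
    static bool example_ready;

    static int example_wait(void)
    {
            /* sleeps until example_ready is set; -ERESTARTSYS on a signal */
            return swait_event_interruptible(example_wq, example_ready);
    }

    static void example_wake(void)
    {
            example_ready = true;
            swake_up_all(&example_wq);      /* wake every swait sleeper */
    }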
3058 diff --git a/block/blk-ioc.c b/block/blk-ioc.c
3059 index 381cb50a673c..dc8785233d94 100644
3060 --- a/block/blk-ioc.c
3061 +++ b/block/blk-ioc.c
3063 #include <linux/bio.h>
3064 #include <linux/blkdev.h>
3065 #include <linux/slab.h>
3066 +#include <linux/delay.h>
3070 @@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_struct *work)
3071 spin_unlock(q->queue_lock);
3073 spin_unlock_irqrestore(&ioc->lock, flags);
3076 spin_lock_irqsave_nested(&ioc->lock, flags, 1);
3079 @@ -187,7 +188,7 @@ void put_io_context_active(struct io_context *ioc)
3080 spin_unlock(icq->q->queue_lock);
3082 spin_unlock_irqrestore(&ioc->lock, flags);
3088 diff --git a/block/blk-mq.c b/block/blk-mq.c
3089 index 7b597ec4e9c5..48c9652a701c 100644
3090 --- a/block/blk-mq.c
3091 +++ b/block/blk-mq.c
3092 @@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
3094 static void blk_mq_freeze_queue_wait(struct request_queue *q)
3096 - wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
3097 + swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
3101 @@ -110,7 +110,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
3102 WARN_ON_ONCE(freeze_depth < 0);
3103 if (!freeze_depth) {
3104 percpu_ref_reinit(&q->q_usage_counter);
3105 - wake_up_all(&q->mq_freeze_wq);
3106 + swake_up_all(&q->mq_freeze_wq);
3109 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
3110 @@ -129,7 +129,7 @@ void blk_mq_wake_waiters(struct request_queue *q)
3111 * dying, we need to ensure that processes currently waiting on
3112 * the queue are notified as well.
3114 - wake_up_all(&q->mq_freeze_wq);
3115 + swake_up_all(&q->mq_freeze_wq);
3118 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
3119 @@ -177,6 +177,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
3123 +#ifdef CONFIG_PREEMPT_RT_FULL
3124 + INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
3126 INIT_LIST_HEAD(&rq->timeout_list);
3129 @@ -345,6 +348,17 @@ void blk_mq_end_request(struct request *rq, int error)
3131 EXPORT_SYMBOL(blk_mq_end_request);
3133 +#ifdef CONFIG_PREEMPT_RT_FULL
3135 +void __blk_mq_complete_request_remote_work(struct work_struct *work)
3137 + struct request *rq = container_of(work, struct request, work);
3139 + rq->q->softirq_done_fn(rq);
3144 static void __blk_mq_complete_request_remote(void *data)
3146 struct request *rq = data;
3147 @@ -352,6 +366,8 @@ static void __blk_mq_complete_request_remote(void *data)
3148 rq->q->softirq_done_fn(rq);
3153 static void blk_mq_ipi_complete_request(struct request *rq)
3155 struct blk_mq_ctx *ctx = rq->mq_ctx;
3156 @@ -363,19 +379,23 @@ static void blk_mq_ipi_complete_request(struct request *rq)
3161 + cpu = get_cpu_light();
3162 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
3163 shared = cpus_share_cache(cpu, ctx->cpu);
3165 if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
3166 +#ifdef CONFIG_PREEMPT_RT_FULL
3167 + schedule_work_on(ctx->cpu, &rq->work);
3169 rq->csd.func = __blk_mq_complete_request_remote;
3172 smp_call_function_single_async(ctx->cpu, &rq->csd);
3175 rq->q->softirq_done_fn(rq);
3181 static void __blk_mq_complete_request(struct request *rq)
3182 @@ -906,14 +926,14 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
3185 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
3186 - int cpu = get_cpu();
3187 + int cpu = get_cpu_light();
3188 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
3189 __blk_mq_run_hw_queue(hctx);
3199 kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
3200 diff --git a/block/blk-mq.h b/block/blk-mq.h
3201 index e5d25249028c..1e846b842eab 100644
3202 --- a/block/blk-mq.h
3203 +++ b/block/blk-mq.h
3204 @@ -72,12 +72,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
3206 static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
3208 - return __blk_mq_get_ctx(q, get_cpu());
3209 + return __blk_mq_get_ctx(q, get_cpu_light());
3212 static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
3218 struct blk_mq_alloc_data {
3219 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
3220 index 06cf9807f49a..c40342643ca0 100644
3221 --- a/block/blk-softirq.c
3222 +++ b/block/blk-softirq.c
3223 @@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
3224 raise_softirq_irqoff(BLOCK_SOFTIRQ);
3226 local_irq_restore(flags);
3227 + preempt_check_resched_rt();
3231 @@ -89,6 +90,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
3232 this_cpu_ptr(&blk_cpu_done));
3233 raise_softirq_irqoff(BLOCK_SOFTIRQ);
3235 + preempt_check_resched_rt();
3239 @@ -141,6 +143,7 @@ void __blk_complete_request(struct request *req)
3242 local_irq_restore(flags);
3243 + preempt_check_resched_rt();
3247 diff --git a/block/bounce.c b/block/bounce.c
3248 index 1cb5dd3a5da1..2f1ec8a67cbe 100644
3249 --- a/block/bounce.c
3250 +++ b/block/bounce.c
3251 @@ -55,11 +55,11 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
3252 unsigned long flags;
3255 - local_irq_save(flags);
3256 + local_irq_save_nort(flags);
3257 vto = kmap_atomic(to->bv_page);
3258 memcpy(vto + to->bv_offset, vfrom, to->bv_len);
3260 - local_irq_restore(flags);
3261 + local_irq_restore_nort(flags);
3264 #else /* CONFIG_HIGHMEM */
3265 diff --git a/crypto/algapi.c b/crypto/algapi.c
3266 index 1fad2a6b3bbb..ecb7315426a9 100644
3267 --- a/crypto/algapi.c
3268 +++ b/crypto/algapi.c
3269 @@ -719,13 +719,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
3271 int crypto_register_notifier(struct notifier_block *nb)
3273 - return blocking_notifier_chain_register(&crypto_chain, nb);
3274 + return srcu_notifier_chain_register(&crypto_chain, nb);
3276 EXPORT_SYMBOL_GPL(crypto_register_notifier);
3278 int crypto_unregister_notifier(struct notifier_block *nb)
3280 - return blocking_notifier_chain_unregister(&crypto_chain, nb);
3281 + return srcu_notifier_chain_unregister(&crypto_chain, nb);
3283 EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
3285 diff --git a/crypto/api.c b/crypto/api.c
3286 index bbc147cb5dec..bc1a848f02ec 100644
3289 @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list);
3290 DECLARE_RWSEM(crypto_alg_sem);
3291 EXPORT_SYMBOL_GPL(crypto_alg_sem);
3293 -BLOCKING_NOTIFIER_HEAD(crypto_chain);
3294 +SRCU_NOTIFIER_HEAD(crypto_chain);
3295 EXPORT_SYMBOL_GPL(crypto_chain);
3297 static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
3298 @@ -236,10 +236,10 @@ int crypto_probing_notify(unsigned long val, void *v)
3302 - ok = blocking_notifier_call_chain(&crypto_chain, val, v);
3303 + ok = srcu_notifier_call_chain(&crypto_chain, val, v);
3304 if (ok == NOTIFY_DONE) {
3305 request_module("cryptomgr");
3306 - ok = blocking_notifier_call_chain(&crypto_chain, val, v);
3307 + ok = srcu_notifier_call_chain(&crypto_chain, val, v);
3311 diff --git a/crypto/internal.h b/crypto/internal.h
3312 index 7eefcdb00227..0ecc7f5a2f40 100644
3313 --- a/crypto/internal.h
3314 +++ b/crypto/internal.h
3315 @@ -47,7 +47,7 @@ struct crypto_larval {
3317 extern struct list_head crypto_alg_list;
3318 extern struct rw_semaphore crypto_alg_sem;
3319 -extern struct blocking_notifier_head crypto_chain;
3320 +extern struct srcu_notifier_head crypto_chain;
3322 #ifdef CONFIG_PROC_FS
3323 void __init crypto_init_proc(void);
3324 @@ -146,7 +146,7 @@ static inline int crypto_is_moribund(struct crypto_alg *alg)
3326 static inline void crypto_notify(unsigned long val, void *v)
3328 - blocking_notifier_call_chain(&crypto_chain, val, v);
3329 + srcu_notifier_call_chain(&crypto_chain, val, v);
3332 #endif /* _CRYPTO_INTERNAL_H */
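The crypto notifier chain is switched from a blocking notifier (rwsem-protected) to an SRCU notifier, so readers of the chain no longer contend on a sleeping lock when notifications fire. A small sketch of the SRCU notifier API, using hypothetical names:

    #include <linux/notifier.h>

    SRCU_NOTIFIER_HEAD(example_chain);

    static int example_event(struct notifier_block *nb, unsigned long val, void *v)
    {
            /* ... react to the event ... */
            return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_event,
    };

    static void example_use(void)
    {
            srcu_notifier_chain_register(&example_chain, &example_nb);
            srcu_notifier_call_chain(&example_chain, 0, NULL);
            srcu_notifier_chain_unregister(&example_chain, &example_nb);
    }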
3333 diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
3334 index 750fa824d42c..441edf51484a 100644
3335 --- a/drivers/acpi/acpica/acglobal.h
3336 +++ b/drivers/acpi/acpica/acglobal.h
3337 @@ -116,7 +116,7 @@ ACPI_GLOBAL(u8, acpi_gbl_global_lock_pending);
3340 ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
3341 -ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
3342 +ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
3343 ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
3345 /* Mutex for _OSI support */
3346 diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
3347 index 3b7fb99362b6..696bf8e62afb 100644
3348 --- a/drivers/acpi/acpica/hwregs.c
3349 +++ b/drivers/acpi/acpica/hwregs.c
3350 @@ -363,14 +363,14 @@ acpi_status acpi_hw_clear_acpi_status(void)
3351 ACPI_BITMASK_ALL_FIXED_STATUS,
3352 ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
3354 - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
3355 + raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
3357 /* Clear the fixed events in PM1 A/B */
3359 status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
3360 ACPI_BITMASK_ALL_FIXED_STATUS);
3362 - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
3363 + raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
3365 if (ACPI_FAILURE(status)) {
3367 diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
3368 index 98c26ff39409..6e236f2ea791 100644
3369 --- a/drivers/acpi/acpica/hwxface.c
3370 +++ b/drivers/acpi/acpica/hwxface.c
3371 @@ -373,7 +373,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
3372 return_ACPI_STATUS(AE_BAD_PARAMETER);
3375 - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
3376 + raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
3379 * At this point, we know that the parent register is one of the
3380 @@ -434,7 +434,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
3384 - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
3385 + raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
3386 return_ACPI_STATUS(status);
3389 diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
3390 index 15073375bd00..357e7ca5a587 100644
3391 --- a/drivers/acpi/acpica/utmutex.c
3392 +++ b/drivers/acpi/acpica/utmutex.c
3393 @@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(void)
3394 return_ACPI_STATUS (status);
3397 - status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
3398 + status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
3399 if (ACPI_FAILURE (status)) {
3400 return_ACPI_STATUS (status);
3402 @@ -145,7 +145,7 @@ void acpi_ut_mutex_terminate(void)
3403 /* Delete the spinlocks */
3405 acpi_os_delete_lock(acpi_gbl_gpe_lock);
3406 - acpi_os_delete_lock(acpi_gbl_hardware_lock);
3407 + acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
3408 acpi_os_delete_lock(acpi_gbl_reference_count_lock);
3410 /* Delete the reader/writer lock */
3411 diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
3412 index 051b6158d1b7..7ad293bef6ed 100644
3413 --- a/drivers/ata/libata-sff.c
3414 +++ b/drivers/ata/libata-sff.c
3415 @@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
3416 unsigned long flags;
3417 unsigned int consumed;
3419 - local_irq_save(flags);
3420 + local_irq_save_nort(flags);
3421 consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
3422 - local_irq_restore(flags);
3423 + local_irq_restore_nort(flags);
3427 @@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3428 unsigned long flags;
3430 /* FIXME: use a bounce buffer */
3431 - local_irq_save(flags);
3432 + local_irq_save_nort(flags);
3433 buf = kmap_atomic(page);
3435 /* do the actual data transfer */
3436 @@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3440 - local_irq_restore(flags);
3441 + local_irq_restore_nort(flags);
3443 buf = page_address(page);
3444 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
3445 @@ -864,7 +864,7 @@ static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3446 unsigned long flags;
3448 /* FIXME: use bounce buffer */
3449 - local_irq_save(flags);
3450 + local_irq_save_nort(flags);
3451 buf = kmap_atomic(page);
3453 /* do the actual data transfer */
3454 @@ -872,7 +872,7 @@ static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3458 - local_irq_restore(flags);
3459 + local_irq_restore_nort(flags);
3461 buf = page_address(page);
3462 consumed = ap->ops->sff_data_xfer(dev, buf + offset,
3463 diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
3464 index 4b5cd3a7b2b6..fa8329ad79fd 100644
3465 --- a/drivers/block/zram/zcomp.c
3466 +++ b/drivers/block/zram/zcomp.c
3467 @@ -118,12 +118,19 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
3469 struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
3471 - return *get_cpu_ptr(comp->stream);
3472 + struct zcomp_strm *zstrm;
3474 + zstrm = *this_cpu_ptr(comp->stream);
3475 + spin_lock(&zstrm->zcomp_lock);
3479 void zcomp_stream_put(struct zcomp *comp)
3481 - put_cpu_ptr(comp->stream);
3482 + struct zcomp_strm *zstrm;
3484 + zstrm = *this_cpu_ptr(comp->stream);
3485 + spin_unlock(&zstrm->zcomp_lock);
3488 int zcomp_compress(struct zcomp_strm *zstrm,
3489 @@ -174,6 +181,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp,
3490 pr_err("Can't allocate a compression stream\n");
3493 + spin_lock_init(&zstrm->zcomp_lock);
3494 *per_cpu_ptr(comp->stream, cpu) = zstrm;
3497 diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
3498 index 478cac2ed465..f7a6efdc3285 100644
3499 --- a/drivers/block/zram/zcomp.h
3500 +++ b/drivers/block/zram/zcomp.h
3501 @@ -14,6 +14,7 @@ struct zcomp_strm {
3502 /* compression/decompression buffer */
3504 struct crypto_comp *tfm;
3505 + spinlock_t zcomp_lock;
3508 /* dynamic per-device compression frontend */
3509 diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
3510 index c9914d653968..2038d138f286 100644
3511 --- a/drivers/block/zram/zram_drv.c
3512 +++ b/drivers/block/zram/zram_drv.c
3513 @@ -528,6 +528,8 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
3517 + zram_meta_init_table_locks(meta, disksize);
3522 @@ -575,28 +577,28 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
3523 struct zram_meta *meta = zram->meta;
3524 unsigned long handle;
3526 + struct zcomp_strm *zstrm;
3528 - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
3529 + zram_lock_table(&meta->table[index]);
3530 handle = meta->table[index].handle;
3531 size = zram_get_obj_size(meta, index);
3533 if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
3534 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3535 + zram_unlock_table(&meta->table[index]);
3536 memset(mem, 0, PAGE_SIZE);
3540 + zstrm = zcomp_stream_get(zram->comp);
3541 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
3542 if (size == PAGE_SIZE) {
3543 memcpy(mem, cmem, PAGE_SIZE);
3545 - struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
3547 ret = zcomp_decompress(zstrm, cmem, size, mem);
3548 - zcomp_stream_put(zram->comp);
3550 zs_unmap_object(meta->mem_pool, handle);
3551 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3552 + zcomp_stream_put(zram->comp);
3553 + zram_unlock_table(&meta->table[index]);
3555 /* Should NEVER happen. Return bio error if it does. */
3556 if (unlikely(ret)) {
3557 @@ -616,14 +618,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
3558 struct zram_meta *meta = zram->meta;
3559 page = bvec->bv_page;
3561 - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
3562 + zram_lock_table(&meta->table[index]);
3563 if (unlikely(!meta->table[index].handle) ||
3564 zram_test_flag(meta, index, ZRAM_ZERO)) {
3565 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3566 + zram_unlock_table(&meta->table[index]);
3567 handle_zero_page(bvec);
3570 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3571 + zram_unlock_table(&meta->table[index]);
3573 if (is_partial_io(bvec))
3574 /* Use a temporary buffer to decompress the page */
3575 @@ -700,10 +702,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
3577 kunmap_atomic(user_mem);
3578 /* Free memory associated with this sector now. */
3579 - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
3580 + zram_lock_table(&meta->table[index]);
3581 zram_free_page(zram, index);
3582 zram_set_flag(meta, index, ZRAM_ZERO);
3583 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3584 + zram_unlock_table(&meta->table[index]);
3586 atomic64_inc(&zram->stats.zero_pages);
3588 @@ -794,12 +796,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
3589 * Free memory associated with this sector
3590 * before overwriting unused sectors.
3592 - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
3593 + zram_lock_table(&meta->table[index]);
3594 zram_free_page(zram, index);
3596 meta->table[index].handle = handle;
3597 zram_set_obj_size(meta, index, clen);
3598 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3599 + zram_unlock_table(&meta->table[index]);
3602 atomic64_add(clen, &zram->stats.compr_data_size);
3603 @@ -842,9 +844,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
3606 while (n >= PAGE_SIZE) {
3607 - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
3608 + zram_lock_table(&meta->table[index]);
3609 zram_free_page(zram, index);
3610 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3611 + zram_unlock_table(&meta->table[index]);
3612 atomic64_inc(&zram->stats.notify_free);
3615 @@ -973,9 +975,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
3616 zram = bdev->bd_disk->private_data;
3619 - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
3620 + zram_lock_table(&meta->table[index]);
3621 zram_free_page(zram, index);
3622 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3623 + zram_unlock_table(&meta->table[index]);
3624 atomic64_inc(&zram->stats.notify_free);
3627 diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
3628 index 74fcf10da374..fd4020c99b9e 100644
3629 --- a/drivers/block/zram/zram_drv.h
3630 +++ b/drivers/block/zram/zram_drv.h
3631 @@ -73,6 +73,9 @@ enum zram_pageflags {
3632 struct zram_table_entry {
3633 unsigned long handle;
3634 unsigned long value;
3635 +#ifdef CONFIG_PREEMPT_RT_BASE
3641 @@ -120,4 +123,42 @@ struct zram {
3643 bool claim; /* Protected by bdev->bd_mutex */
3646 +#ifndef CONFIG_PREEMPT_RT_BASE
3647 +static inline void zram_lock_table(struct zram_table_entry *table)
3649 + bit_spin_lock(ZRAM_ACCESS, &table->value);
3652 +static inline void zram_unlock_table(struct zram_table_entry *table)
3654 + bit_spin_unlock(ZRAM_ACCESS, &table->value);
3657 +static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { }
3658 +#else /* CONFIG_PREEMPT_RT_BASE */
3659 +static inline void zram_lock_table(struct zram_table_entry *table)
3661 + spin_lock(&table->lock);
3662 + __set_bit(ZRAM_ACCESS, &table->value);
3665 +static inline void zram_unlock_table(struct zram_table_entry *table)
3667 + __clear_bit(ZRAM_ACCESS, &table->value);
3668 + spin_unlock(&table->lock);
3671 +static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
3673 + size_t num_pages = disksize >> PAGE_SHIFT;
3676 + for (index = 0; index < num_pages; index++) {
3677 + spinlock_t *lock = &meta->table[index].lock;
3678 + spin_lock_init(lock);
3681 +#endif /* CONFIG_PREEMPT_RT_BASE */
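The helpers above exist because bit_spin_lock() is a busy-waiting lock that PREEMPT_RT cannot turn into a sleeping lock, so each table entry gains a real spinlock and the callers in zram_drv.c go through zram_lock_table()/zram_unlock_table() instead. A minimal usage sketch, not part of the patch; demo_read_entry() and its locals are hypothetical stand-ins for the real read/write paths:

	static int demo_read_entry(struct zram_meta *meta, u32 index)
	{
		unsigned long handle;

		/* spinlock on RT, bit spinlock otherwise - same call either way */
		zram_lock_table(&meta->table[index]);
		handle = meta->table[index].handle;
		zram_unlock_table(&meta->table[index]);

		return handle ? 0 : -ENOENT;
	}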
3684 diff --git a/drivers/char/random.c b/drivers/char/random.c
3685 index 08d1dd58c0d2..25ee319dc8e3 100644
3686 --- a/drivers/char/random.c
3687 +++ b/drivers/char/random.c
3689 #include <linux/syscalls.h>
3690 #include <linux/completion.h>
3691 #include <linux/uuid.h>
3692 +#include <linux/locallock.h>
3693 #include <crypto/chacha20.h>
3695 #include <asm/processor.h>
3696 @@ -1028,8 +1029,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
3698 long delta, delta2, delta3;
3700 - preempt_disable();
3702 sample.jiffies = jiffies;
3703 sample.cycles = random_get_entropy();
3705 @@ -1070,7 +1069,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
3707 credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
3712 void add_input_randomness(unsigned int type, unsigned int code,
3713 @@ -1123,28 +1121,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
3714 return *(ptr + f->reg_idx++);
3717 -void add_interrupt_randomness(int irq, int irq_flags)
3718 +void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
3720 struct entropy_store *r;
3721 struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
3722 - struct pt_regs *regs = get_irq_regs();
3723 unsigned long now = jiffies;
3724 cycles_t cycles = random_get_entropy();
3725 __u32 c_high, j_high;
3731 - cycles = get_reg(fast_pool, regs);
3732 + cycles = get_reg(fast_pool, NULL);
3733 c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
3734 j_high = (sizeof(now) > 4) ? now >> 32 : 0;
3735 fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
3736 fast_pool->pool[1] ^= now ^ c_high;
3737 - ip = regs ? instruction_pointer(regs) : _RET_IP_;
3740 fast_pool->pool[2] ^= ip;
3741 fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
3742 - get_reg(fast_pool, regs);
3743 + get_reg(fast_pool, NULL);
3745 fast_mix(fast_pool);
3746 add_interrupt_bench(cycles);
3747 @@ -2056,6 +2053,7 @@ struct batched_entropy {
3748 * goal of being quite fast and not depleting entropy.
3750 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
3751 +static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_long_lock);
3752 unsigned long get_random_long(void)
3755 @@ -2064,13 +2062,13 @@ unsigned long get_random_long(void)
3756 if (arch_get_random_long(&ret))
3759 - batch = &get_cpu_var(batched_entropy_long);
3760 + batch = &get_locked_var(batched_entropy_long_lock, batched_entropy_long);
3761 if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
3762 extract_crng((u8 *)batch->entropy_long);
3763 batch->position = 0;
3765 ret = batch->entropy_long[batch->position++];
3766 - put_cpu_var(batched_entropy_long);
3767 + put_locked_var(batched_entropy_long_lock, batched_entropy_long);
3770 EXPORT_SYMBOL(get_random_long);
3771 @@ -2082,6 +2080,8 @@ unsigned int get_random_int(void)
3774 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int);
3775 +static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_int_lock);
3777 unsigned int get_random_int(void)
3780 @@ -2090,13 +2090,13 @@ unsigned int get_random_int(void)
3781 if (arch_get_random_int(&ret))
3784 - batch = &get_cpu_var(batched_entropy_int);
3785 + batch = &get_locked_var(batched_entropy_int_lock, batched_entropy_int);
3786 if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
3787 extract_crng((u8 *)batch->entropy_int);
3788 batch->position = 0;
3790 ret = batch->entropy_int[batch->position++];
3791 - put_cpu_var(batched_entropy_int);
3792 + put_locked_var(batched_entropy_int_lock, batched_entropy_int);
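The conversion above replaces get_cpu_var()/put_cpu_var(), which work by disabling preemption, with a local lock so that RT can keep the section preemptible while still serializing per-CPU access. A rough sketch of the pattern in isolation, assuming <linux/locallock.h>; my_count and my_count_lock are invented names:

	static DEFINE_PER_CPU(int, my_count);
	static DEFINE_LOCAL_IRQ_LOCK(my_count_lock);

	static void bump_count(void)
	{
		int *p;

		/* take the local lock, then access this CPU's instance */
		p = &get_locked_var(my_count_lock, my_count);
		(*p)++;
		put_locked_var(my_count_lock, my_count);
	}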
3796 diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
3797 index 4da2af9694a2..5b6f57f500b8 100644
3798 --- a/drivers/clocksource/tcb_clksrc.c
3799 +++ b/drivers/clocksource/tcb_clksrc.c
3801 * this 32 bit free-running counter. the second channel is not used.
3803 * - The third channel may be used to provide a 16-bit clockevent
3804 - * source, used in either periodic or oneshot mode. This runs
3805 - * at 32 KiHZ, and can handle delays of up to two seconds.
3806 + * source, used in either periodic or oneshot mode.
3808 * A boot clocksource and clockevent source are also currently needed,
3809 * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
3810 @@ -74,6 +73,8 @@ static struct clocksource clksrc = {
3811 struct tc_clkevt_device {
3812 struct clock_event_device clkevt;
3819 @@ -82,15 +83,26 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
3820 return container_of(clkevt, struct tc_clkevt_device, clkevt);
3823 -/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
3824 - * because using one of the divided clocks would usually mean the
3825 - * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
3827 - * A divided clock could be good for high resolution timers, since
3828 - * 30.5 usec resolution can seem "low".
3830 static u32 timer_clock;
3832 +static void tc_clk_disable(struct clock_event_device *d)
3834 + struct tc_clkevt_device *tcd = to_tc_clkevt(d);
3836 + clk_disable(tcd->clk);
3837 + tcd->clk_enabled = false;
3840 +static void tc_clk_enable(struct clock_event_device *d)
3842 + struct tc_clkevt_device *tcd = to_tc_clkevt(d);
3844 + if (tcd->clk_enabled)
3846 + clk_enable(tcd->clk);
3847 + tcd->clk_enabled = true;
3850 static int tc_shutdown(struct clock_event_device *d)
3852 struct tc_clkevt_device *tcd = to_tc_clkevt(d);
3853 @@ -98,8 +110,14 @@ static int tc_shutdown(struct clock_event_device *d)
3855 __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
3856 __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
3860 +static int tc_shutdown_clk_off(struct clock_event_device *d)
3863 if (!clockevent_state_detached(d))
3864 - clk_disable(tcd->clk);
3865 + tc_clk_disable(d);
3869 @@ -112,9 +130,9 @@ static int tc_set_oneshot(struct clock_event_device *d)
3870 if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
3873 - clk_enable(tcd->clk);
3876 - /* slow clock, count up to RC, then irq and stop */
3877 + /* count up to RC, then irq and stop */
3878 __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
3879 ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
3880 __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
3881 @@ -134,12 +152,12 @@ static int tc_set_periodic(struct clock_event_device *d)
3882 /* By not making the gentime core emulate periodic mode on top
3883 * of oneshot, we get lower overhead and improved accuracy.
3885 - clk_enable(tcd->clk);
3888 - /* slow clock, count up to RC, then irq and restart */
3889 + /* count up to RC, then irq and restart */
3890 __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
3891 regs + ATMEL_TC_REG(2, CMR));
3892 - __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
3893 + __raw_writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
3895 /* Enable clock and interrupts on RC compare */
3896 __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
3897 @@ -166,9 +184,13 @@ static struct tc_clkevt_device clkevt = {
3898 .features = CLOCK_EVT_FEAT_PERIODIC |
3899 CLOCK_EVT_FEAT_ONESHOT,
3900 /* Should be lower than at91rm9200's system timer */
3901 +#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
3906 .set_next_event = tc_next_event,
3907 - .set_state_shutdown = tc_shutdown,
3908 + .set_state_shutdown = tc_shutdown_clk_off,
3909 .set_state_periodic = tc_set_periodic,
3910 .set_state_oneshot = tc_set_oneshot,
3912 @@ -188,8 +210,9 @@ static irqreturn_t ch2_irq(int irq, void *handle)
3916 -static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
3917 +static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
3919 + unsigned divisor = atmel_tc_divisors[divisor_idx];
3921 struct clk *t2_clk = tc->clk[2];
3922 int irq = tc->irq[2];
3923 @@ -210,7 +233,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
3924 clkevt.regs = tc->regs;
3925 clkevt.clk = t2_clk;
3927 - timer_clock = clk32k_divisor_idx;
3928 + timer_clock = divisor_idx;
3930 + clkevt.freq = 32768;
3932 + clkevt.freq = clk_get_rate(t2_clk) / divisor;
3934 clkevt.clkevt.cpumask = cpumask_of(0);
3936 @@ -221,7 +248,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
3940 - clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
3941 + clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
3945 @@ -358,7 +385,11 @@ static int __init tcb_clksrc_init(void)
3946 goto err_disable_t1;
3948 /* channel 2: periodic and oneshot timer support */
3949 +#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
3950 ret = setup_clkevents(tc, clk32k_divisor_idx);
3952 + ret = setup_clkevents(tc, best_divisor_idx);
3955 goto err_unregister_clksrc;
3957 diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
3958 index 6555821bbdae..93288849b2bd 100644
3959 --- a/drivers/clocksource/timer-atmel-pit.c
3960 +++ b/drivers/clocksource/timer-atmel-pit.c
3961 @@ -46,6 +46,7 @@ struct pit_data {
3965 + bool irq_requested;
3969 @@ -96,15 +97,29 @@ static int pit_clkevt_shutdown(struct clock_event_device *dev)
3971 /* disable irq, leaving the clocksource active */
3972 pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN);
3973 + if (data->irq_requested) {
3974 + free_irq(data->irq, data);
3975 + data->irq_requested = false;
3980 +static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id);
3982 * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
3984 static int pit_clkevt_set_periodic(struct clock_event_device *dev)
3986 struct pit_data *data = clkevt_to_pit_data(dev);
3989 + ret = request_irq(data->irq, at91sam926x_pit_interrupt,
3990 + IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
3991 + "at91_tick", data);
3993 + panic(pr_fmt("Unable to setup IRQ\n"));
3995 + data->irq_requested = true;
3997 /* update clocksource counter */
3998 data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
3999 @@ -230,15 +245,6 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
4003 - /* Set up irq handler */
4004 - ret = request_irq(data->irq, at91sam926x_pit_interrupt,
4005 - IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
4006 - "at91_tick", data);
4008 - pr_err("Unable to setup IRQ\n");
4012 /* Set up and register clockevents */
4013 data->clkevt.name = "pit";
4014 data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC;
4015 diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
4016 index e90ab5b63a90..9e124087c55f 100644
4017 --- a/drivers/clocksource/timer-atmel-st.c
4018 +++ b/drivers/clocksource/timer-atmel-st.c
4019 @@ -115,18 +115,29 @@ static void clkdev32k_disable_and_flush_irq(void)
4020 last_crtr = read_CRTR();
4023 +static int atmel_st_irq;
4025 static int clkevt32k_shutdown(struct clock_event_device *evt)
4027 clkdev32k_disable_and_flush_irq();
4029 regmap_write(regmap_st, AT91_ST_IER, irqmask);
4030 + free_irq(atmel_st_irq, regmap_st);
4034 static int clkevt32k_set_oneshot(struct clock_event_device *dev)
4038 clkdev32k_disable_and_flush_irq();
4040 + ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt,
4041 + IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
4042 + "at91_tick", regmap_st);
4044 + panic(pr_fmt("Unable to setup IRQ\n"));
4047 * ALM for oneshot irqs, set by next_event()
4048 * before 32 seconds have passed.
4049 @@ -139,8 +150,16 @@ static int clkevt32k_set_oneshot(struct clock_event_device *dev)
4051 static int clkevt32k_set_periodic(struct clock_event_device *dev)
4055 clkdev32k_disable_and_flush_irq();
4057 + ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt,
4058 + IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
4059 + "at91_tick", regmap_st);
4061 + panic(pr_fmt("Unable to setup IRQ\n"));
4063 /* PIT for periodic irqs; fixed rate of 1/HZ */
4064 irqmask = AT91_ST_PITS;
4065 regmap_write(regmap_st, AT91_ST_PIMR, timer_latch);
4066 @@ -198,7 +217,7 @@ static int __init atmel_st_timer_init(struct device_node *node)
4069 unsigned int sclk_rate, val;
4073 regmap_st = syscon_node_to_regmap(node);
4074 if (IS_ERR(regmap_st)) {
4075 @@ -212,21 +231,12 @@ static int __init atmel_st_timer_init(struct device_node *node)
4076 regmap_read(regmap_st, AT91_ST_SR, &val);
4078 /* Get the interrupts property */
4079 - irq = irq_of_parse_and_map(node, 0);
4081 + atmel_st_irq = irq_of_parse_and_map(node, 0);
4082 + if (!atmel_st_irq) {
4083 pr_err("Unable to get IRQ from DT\n");
4087 - /* Make IRQs happen for the system timer */
4088 - ret = request_irq(irq, at91rm9200_timer_interrupt,
4089 - IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
4090 - "at91_tick", regmap_st);
4092 - pr_err("Unable to setup IRQ\n");
4096 sclk = of_clk_get(node, 0);
4098 pr_err("Unable to get slow clock\n");
4099 diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
4100 index a782ce87715c..19d265948526 100644
4101 --- a/drivers/connector/cn_proc.c
4102 +++ b/drivers/connector/cn_proc.c
4104 #include <linux/pid_namespace.h>
4106 #include <linux/cn_proc.h>
4107 +#include <linux/locallock.h>
4110 * Size of a cn_msg followed by a proc_event structure. Since the
4111 @@ -54,10 +55,11 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
4113 /* proc_event_counts is used as the sequence number of the netlink message */
4114 static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
4115 +static DEFINE_LOCAL_IRQ_LOCK(send_msg_lock);
4117 static inline void send_msg(struct cn_msg *msg)
4119 - preempt_disable();
4120 + local_lock(send_msg_lock);
4122 msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
4123 ((struct proc_event *)msg->data)->cpu = smp_processor_id();
4124 @@ -70,7 +72,7 @@ static inline void send_msg(struct cn_msg *msg)
4126 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
4129 + local_unlock(send_msg_lock);
4132 void proc_fork_connector(struct task_struct *task)
4133 diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
4134 index adbd1de1cea5..1fac5074f2cf 100644
4135 --- a/drivers/cpufreq/Kconfig.x86
4136 +++ b/drivers/cpufreq/Kconfig.x86
4137 @@ -124,7 +124,7 @@ config X86_POWERNOW_K7_ACPI
4139 config X86_POWERNOW_K8
4140 tristate "AMD Opteron/Athlon64 PowerNow!"
4141 - depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
4142 + depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE
4144 This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
4145 Support for K10 and newer processors is now in acpi-cpufreq.
4146 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
4147 index 2117f172d7a2..96c15501b0c8 100644
4148 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
4149 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
4150 @@ -1489,7 +1489,9 @@ execbuf_submit(struct i915_execbuffer_params *params,
4154 +#ifndef CONFIG_PREEMPT_RT_BASE
4155 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
4158 i915_gem_execbuffer_move_to_active(vmas, params->request);
4160 diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
4161 index 755d78832a66..97fb03dc4971 100644
4162 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
4163 +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
4164 @@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4165 if (!mutex_is_locked(mutex))
4168 -#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
4169 +#if (defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)) && !defined(CONFIG_PREEMPT_RT_BASE)
4170 return mutex->owner == task;
4172 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4173 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
4174 index 02908e37c228..05c0480576e1 100644
4175 --- a/drivers/gpu/drm/i915/i915_irq.c
4176 +++ b/drivers/gpu/drm/i915/i915_irq.c
4177 @@ -812,6 +812,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
4178 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4180 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
4181 + preempt_disable_rt();
4183 /* Get optional system timestamp before query. */
4185 @@ -863,6 +864,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
4186 *etime = ktime_get();
4188 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
4189 + preempt_enable_rt();
4191 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4193 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
4194 index 5dc6082639db..c32458fb3be2 100644
4195 --- a/drivers/gpu/drm/i915/intel_display.c
4196 +++ b/drivers/gpu/drm/i915/intel_display.c
4197 @@ -12131,7 +12131,7 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
4198 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4199 struct intel_flip_work *work;
4201 - WARN_ON(!in_interrupt());
4202 + WARN_ON_NONRT(!in_interrupt());
4206 diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
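WARN_ON_NONRT(), and BUG_ON_NONRT() used further down, keep the assertion on mainline but drop it on RT, where the checked condition (here !in_interrupt(), elsewhere !irqs_disabled()) is legitimately false because the work runs in a thread. A hedged sketch of what the helpers amount to; the real definitions live in the -rt tree and may differ in detail:

	#ifdef CONFIG_PREEMPT_RT_BASE
	# define WARN_ON_NONRT(condition)	do { } while (0)
	# define BUG_ON_NONRT(condition)	do { } while (0)
	#else
	# define WARN_ON_NONRT(condition)	WARN_ON(condition)
	# define BUG_ON_NONRT(condition)	BUG_ON(condition)
	#endif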
4207 index dbed12c484c9..5c540b78e8b5 100644
4208 --- a/drivers/gpu/drm/i915/intel_sprite.c
4209 +++ b/drivers/gpu/drm/i915/intel_sprite.c
4211 #include <drm/drm_rect.h>
4212 #include <drm/drm_atomic.h>
4213 #include <drm/drm_plane_helper.h>
4214 +#include <linux/locallock.h>
4215 #include "intel_drv.h"
4216 #include "intel_frontbuffer.h"
4217 #include <drm/i915_drm.h>
4218 @@ -65,6 +66,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
4219 1000 * adjusted_mode->crtc_htotal);
4222 +static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock);
4225 * intel_pipe_update_start() - start update of a set of display registers
4226 * @crtc: the crtc of which the registers are going to be updated
4227 @@ -95,7 +98,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
4228 min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
4229 max = vblank_start - 1;
4231 - local_irq_disable();
4232 + local_lock_irq(pipe_update_lock);
4234 if (min <= 0 || max <= 0)
4236 @@ -125,11 +128,11 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
4240 - local_irq_enable();
4241 + local_unlock_irq(pipe_update_lock);
4243 timeout = schedule_timeout(timeout);
4245 - local_irq_disable();
4246 + local_lock_irq(pipe_update_lock);
4249 finish_wait(wq, &wait);
4250 @@ -181,7 +184,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
4251 crtc->base.state->event = NULL;
4254 - local_irq_enable();
4255 + local_unlock_irq(pipe_update_lock);
4257 if (crtc->debug.start_vbl_count &&
4258 crtc->debug.start_vbl_count != end_vbl_count) {
4259 diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
4260 index 192b2d3a79cb..d5372a207326 100644
4261 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
4262 +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
4263 @@ -23,7 +23,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4264 if (!mutex_is_locked(mutex))
4267 -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4268 +#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
4269 return mutex->owner == task;
4271 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4272 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
4273 index cdb8cb568c15..b6d7fd964cbc 100644
4274 --- a/drivers/gpu/drm/radeon/radeon_display.c
4275 +++ b/drivers/gpu/drm/radeon/radeon_display.c
4276 @@ -1845,6 +1845,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
4277 struct radeon_device *rdev = dev->dev_private;
4279 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
4280 + preempt_disable_rt();
4282 /* Get optional system timestamp before query. */
4284 @@ -1937,6 +1938,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
4285 *etime = ktime_get();
4287 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
4288 + preempt_enable_rt();
4290 /* Decode into vertical and horizontal scanout position. */
4291 *vpos = position & 0x1fff;
4292 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
4293 index 0276d2ef06ee..8868045eabde 100644
4294 --- a/drivers/hv/vmbus_drv.c
4295 +++ b/drivers/hv/vmbus_drv.c
4296 @@ -761,6 +761,8 @@ static void vmbus_isr(void)
4298 struct hv_message *msg;
4299 union hv_synic_event_flags *event;
4300 + struct pt_regs *regs = get_irq_regs();
4301 + u64 ip = regs ? instruction_pointer(regs) : 0;
4302 bool handled = false;
4304 page_addr = hv_context.synic_event_page[cpu];
4305 @@ -808,7 +810,7 @@ static void vmbus_isr(void)
4306 tasklet_schedule(hv_context.msg_dpc[cpu]);
4309 - add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
4310 + add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip);
4314 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
4315 index 36f76e28a0bf..394f142f90c7 100644
4316 --- a/drivers/ide/alim15x3.c
4317 +++ b/drivers/ide/alim15x3.c
4318 @@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)
4320 isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
4322 - local_irq_save(flags);
4323 + local_irq_save_nort(flags);
4325 if (m5229_revision < 0xC2) {
4327 @@ -325,7 +325,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)
4330 pci_dev_put(isa_dev);
4331 - local_irq_restore(flags);
4332 + local_irq_restore_nort(flags);
4336 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
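The *_nort() variants used here and in the gameport, network and SCSI hunks below behave like the plain local_irq_*() calls on a non-RT kernel but leave interrupts untouched on PREEMPT_RT_FULL, where the code only needs to keep out local reentry rather than hard interrupt delivery. Roughly, as a sketch only; the real definitions sit in the -rt tree's <linux/interrupt.h> and may differ:

	#ifdef CONFIG_PREEMPT_RT_FULL
	# define local_irq_disable_nort()	do { } while (0)
	# define local_irq_enable_nort()	do { } while (0)
	# define local_irq_save_nort(flags)	local_save_flags(flags)
	# define local_irq_restore_nort(flags)	(void)(flags)
	#else
	# define local_irq_disable_nort()	local_irq_disable()
	# define local_irq_enable_nort()	local_irq_enable()
	# define local_irq_save_nort(flags)	local_irq_save(flags)
	# define local_irq_restore_nort(flags)	local_irq_restore(flags)
	#endif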
4337 index 0ceae5cbd89a..c212e85d7f3e 100644
4338 --- a/drivers/ide/hpt366.c
4339 +++ b/drivers/ide/hpt366.c
4340 @@ -1236,7 +1236,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
4342 dma_old = inb(base + 2);
4344 - local_irq_save(flags);
4345 + local_irq_save_nort(flags);
4348 pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
4349 @@ -1247,7 +1247,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
4350 if (dma_new != dma_old)
4351 outb(dma_new, base + 2);
4353 - local_irq_restore(flags);
4354 + local_irq_restore_nort(flags);
4356 printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
4357 hwif->name, base, base + 7);
4358 diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
4359 index 19763977568c..4169433faab5 100644
4360 --- a/drivers/ide/ide-io-std.c
4361 +++ b/drivers/ide/ide-io-std.c
4362 @@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
4363 unsigned long uninitialized_var(flags);
4365 if ((io_32bit & 2) && !mmio) {
4366 - local_irq_save(flags);
4367 + local_irq_save_nort(flags);
4368 ata_vlb_sync(io_ports->nsect_addr);
4371 @@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
4372 insl(data_addr, buf, words);
4374 if ((io_32bit & 2) && !mmio)
4375 - local_irq_restore(flags);
4376 + local_irq_restore_nort(flags);
4378 if (((len + 1) & 3) < 2)
4380 @@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
4381 unsigned long uninitialized_var(flags);
4383 if ((io_32bit & 2) && !mmio) {
4384 - local_irq_save(flags);
4385 + local_irq_save_nort(flags);
4386 ata_vlb_sync(io_ports->nsect_addr);
4389 @@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
4390 outsl(data_addr, buf, words);
4392 if ((io_32bit & 2) && !mmio)
4393 - local_irq_restore(flags);
4394 + local_irq_restore_nort(flags);
4396 if (((len + 1) & 3) < 2)
4398 diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
4399 index 669ea1e45795..e12e43e62245 100644
4400 --- a/drivers/ide/ide-io.c
4401 +++ b/drivers/ide/ide-io.c
4402 @@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long data)
4403 /* disable_irq_nosync ?? */
4404 disable_irq(hwif->irq);
4405 /* local CPU only, as if we were handling an interrupt */
4406 - local_irq_disable();
4407 + local_irq_disable_nort();
4408 if (hwif->polling) {
4409 startstop = handler(drive);
4410 } else if (drive_is_ready(drive)) {
4411 diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
4412 index 376f2dc410c5..f014dd1b73dc 100644
4413 --- a/drivers/ide/ide-iops.c
4414 +++ b/drivers/ide/ide-iops.c
4415 @@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
4416 if ((stat & ATA_BUSY) == 0)
4419 - local_irq_restore(flags);
4420 + local_irq_restore_nort(flags);
4425 - local_irq_restore(flags);
4426 + local_irq_restore_nort(flags);
4429 * Allow status to settle, then read it again.
4430 diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
4431 index 0b63facd1d87..4ceba37afc0c 100644
4432 --- a/drivers/ide/ide-probe.c
4433 +++ b/drivers/ide/ide-probe.c
4434 @@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
4437 /* local CPU only; some systems need this */
4438 - local_irq_save(flags);
4439 + local_irq_save_nort(flags);
4440 /* read 512 bytes of id info */
4441 hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
4442 - local_irq_restore(flags);
4443 + local_irq_restore_nort(flags);
4445 drive->dev_flags |= IDE_DFLAG_ID_READ;
4447 diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
4448 index a716693417a3..be0568c722d6 100644
4449 --- a/drivers/ide/ide-taskfile.c
4450 +++ b/drivers/ide/ide-taskfile.c
4451 @@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
4453 page_is_high = PageHighMem(page);
4455 - local_irq_save(flags);
4456 + local_irq_save_nort(flags);
4458 buf = kmap_atomic(page) + offset;
4460 @@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
4464 - local_irq_restore(flags);
4465 + local_irq_restore_nort(flags);
4469 @@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
4472 if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
4473 - local_irq_disable();
4474 + local_irq_disable_nort();
4476 ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
4478 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
4479 index fddff403d5d2..cca1bb4fbfe3 100644
4480 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
4481 +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
4482 @@ -902,7 +902,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
4484 ipoib_dbg_mcast(priv, "restarting multicast task\n");
4486 - local_irq_save(flags);
4487 + local_irq_save_nort(flags);
4488 netif_addr_lock(dev);
4489 spin_lock(&priv->lock);
4491 @@ -984,7 +984,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
4493 spin_unlock(&priv->lock);
4494 netif_addr_unlock(dev);
4495 - local_irq_restore(flags);
4496 + local_irq_restore_nort(flags);
4499 * make sure the in-flight joins have finished before we attempt
4500 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
4501 index 4a2a9e370be7..e970d9afd179 100644
4502 --- a/drivers/input/gameport/gameport.c
4503 +++ b/drivers/input/gameport/gameport.c
4504 @@ -91,13 +91,13 @@ static int gameport_measure_speed(struct gameport *gameport)
4507 for (i = 0; i < 50; i++) {
4508 - local_irq_save(flags);
4509 + local_irq_save_nort(flags);
4510 t1 = ktime_get_ns();
4511 for (t = 0; t < 50; t++)
4512 gameport_read(gameport);
4513 t2 = ktime_get_ns();
4514 t3 = ktime_get_ns();
4515 - local_irq_restore(flags);
4516 + local_irq_restore_nort(flags);
4518 t = (t2 - t1) - (t3 - t2);
4520 @@ -124,12 +124,12 @@ static int old_gameport_measure_speed(struct gameport *gameport)
4523 for(i = 0; i < 50; i++) {
4524 - local_irq_save(flags);
4525 + local_irq_save_nort(flags);
4527 for (t = 0; t < 50; t++) gameport_read(gameport);
4530 - local_irq_restore(flags);
4531 + local_irq_restore_nort(flags);
4533 if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
4535 @@ -148,11 +148,11 @@ static int old_gameport_measure_speed(struct gameport *gameport)
4538 for(i = 0; i < 50; i++) {
4539 - local_irq_save(flags);
4540 + local_irq_save_nort(flags);
4542 for (t = 0; t < 50; t++) gameport_read(gameport);
4544 - local_irq_restore(flags);
4545 + local_irq_restore_nort(flags);
4547 if (t2 - t1 < tx) tx = t2 - t1;
4549 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
4550 index 11a13b5be73a..baaed0ac274b 100644
4551 --- a/drivers/iommu/amd_iommu.c
4552 +++ b/drivers/iommu/amd_iommu.c
4553 @@ -1923,10 +1923,10 @@ static int __attach_device(struct iommu_dev_data *dev_data,
4557 - * Must be called with IRQs disabled. Warn here to detect early
4559 + * Must be called with IRQs disabled on a non RT kernel. Warn here to
4560 + * detect early when its not.
4562 - WARN_ON(!irqs_disabled());
4563 + WARN_ON_NONRT(!irqs_disabled());
4566 spin_lock(&domain->lock);
4567 @@ -2094,10 +2094,10 @@ static void __detach_device(struct iommu_dev_data *dev_data)
4568 struct protection_domain *domain;
4571 - * Must be called with IRQs disabled. Warn here to detect early
4573 + * Must be called with IRQs disabled on a non-RT kernel. Warn here to
4574 + * detect early when it's not.
4576 - WARN_ON(!irqs_disabled());
4577 + WARN_ON_NONRT(!irqs_disabled());
4579 if (WARN_ON(!dev_data->domain))
4581 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
4582 index 87fcbf71b85a..674c82b61f36 100644
4583 --- a/drivers/iommu/intel-iommu.c
4584 +++ b/drivers/iommu/intel-iommu.c
4585 @@ -479,7 +479,7 @@ struct deferred_flush_data {
4586 struct deferred_flush_table *tables;
4589 -DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
4590 +static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
4592 /* bitmap for indexing intel_iommus */
4593 static int g_num_of_iommus;
4594 @@ -3719,10 +3719,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
4595 struct intel_iommu *iommu;
4596 struct deferred_flush_entry *entry;
4597 struct deferred_flush_data *flush_data;
4598 - unsigned int cpuid;
4600 - cpuid = get_cpu();
4601 - flush_data = per_cpu_ptr(&deferred_flush, cpuid);
4602 + flush_data = raw_cpu_ptr(&deferred_flush);
4604 /* Flush all CPUs' entries to avoid deferring too much. If
4605 * this becomes a bottleneck, can just flush us, and rely on
4606 @@ -3755,8 +3753,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
4609 spin_unlock_irqrestore(&flush_data->lock, flags);
4614 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
4615 diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
4616 index e23001bfcfee..359d5d169ec0 100644
4617 --- a/drivers/iommu/iova.c
4618 +++ b/drivers/iommu/iova.c
4620 #include <linux/slab.h>
4621 #include <linux/smp.h>
4622 #include <linux/bitops.h>
4623 +#include <linux/cpu.h>
4625 static bool iova_rcache_insert(struct iova_domain *iovad,
4627 @@ -420,10 +421,8 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
4629 /* Try replenishing IOVAs by flushing rcache. */
4630 flushed_rcache = true;
4631 - preempt_disable();
4632 for_each_online_cpu(cpu)
4633 free_cpu_cached_iovas(cpu, iovad);
4638 @@ -751,7 +750,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
4639 bool can_insert = false;
4640 unsigned long flags;
4642 - cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
4643 + cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
4644 spin_lock_irqsave(&cpu_rcache->lock, flags);
4646 if (!iova_magazine_full(cpu_rcache->loaded)) {
4647 @@ -781,7 +780,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
4648 iova_magazine_push(cpu_rcache->loaded, iova_pfn);
4650 spin_unlock_irqrestore(&cpu_rcache->lock, flags);
4651 - put_cpu_ptr(rcache->cpu_rcaches);
4654 iova_magazine_free_pfns(mag_to_free, iovad);
4655 @@ -815,7 +813,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
4656 bool has_pfn = false;
4657 unsigned long flags;
4659 - cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
4660 + cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
4661 spin_lock_irqsave(&cpu_rcache->lock, flags);
4663 if (!iova_magazine_empty(cpu_rcache->loaded)) {
4664 @@ -837,7 +835,6 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
4665 iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
4667 spin_unlock_irqrestore(&cpu_rcache->lock, flags);
4668 - put_cpu_ptr(rcache->cpu_rcaches);
4672 diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
4673 index 3f9ddb9fafa7..09da5b6b44a1 100644
4674 --- a/drivers/leds/trigger/Kconfig
4675 +++ b/drivers/leds/trigger/Kconfig
4676 @@ -69,7 +69,7 @@ config LEDS_TRIGGER_BACKLIGHT
4678 config LEDS_TRIGGER_CPU
4679 bool "LED CPU Trigger"
4680 - depends on LEDS_TRIGGERS
4681 + depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE
4683 This allows LEDs to be controlled by active CPUs. This shows
4684 the active CPUs across an array of LEDs so you can see which
4685 diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
4686 index 4d200883c505..98b64ed5cb81 100644
4687 --- a/drivers/md/bcache/Kconfig
4688 +++ b/drivers/md/bcache/Kconfig
4692 tristate "Block device as cache"
4693 + depends on !PREEMPT_RT_FULL
4695 Allows a block device to be used as cache for other devices; uses
4696 a btree for indexing and the layout is optimized for SSDs.
4697 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
4698 index ba7c4c685db3..834ec328f217 100644
4699 --- a/drivers/md/dm-rq.c
4700 +++ b/drivers/md/dm-rq.c
4701 @@ -842,7 +842,7 @@ static void dm_old_request_fn(struct request_queue *q)
4702 /* Establish tio->ti before queuing work (map_tio_request) */
4704 kthread_queue_work(&md->kworker, &tio->work);
4705 - BUG_ON(!irqs_disabled());
4706 + BUG_ON_NONRT(!irqs_disabled());
4710 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
4711 index f34ad2be66a1..123469f7b560 100644
4712 --- a/drivers/md/raid5.c
4713 +++ b/drivers/md/raid5.c
4714 @@ -1928,8 +1928,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
4715 struct raid5_percpu *percpu;
4719 + cpu = get_cpu_light();
4720 percpu = per_cpu_ptr(conf->percpu, cpu);
4721 + spin_lock(&percpu->lock);
4722 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
4723 ops_run_biofill(sh);
4725 @@ -1985,7 +1986,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
4726 if (test_and_clear_bit(R5_Overlap, &dev->flags))
4727 wake_up(&sh->raid_conf->wait_for_overlap);
4730 + spin_unlock(&percpu->lock);
4734 static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
4735 @@ -6393,6 +6395,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
4739 + spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
4743 @@ -6403,7 +6406,6 @@ static int raid5_alloc_percpu(struct r5conf *conf)
4744 conf->percpu = alloc_percpu(struct raid5_percpu);
4748 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
4750 conf->scribble_disks = max(conf->raid_disks,
4751 diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
4752 index 57ec49f0839e..0739604990b7 100644
4753 --- a/drivers/md/raid5.h
4754 +++ b/drivers/md/raid5.h
4755 @@ -504,6 +504,7 @@ struct r5conf {
4756 int recovery_disabled;
4757 /* per cpu variables */
4758 struct raid5_percpu {
4759 + spinlock_t lock; /* Protection for -RT */
4760 struct page *spare_page; /* Used when checking P/Q in raid6 */
4761 struct flex_array *scribble; /* space for constructing buffer
4762 * lists and performing address
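get_cpu() would keep preemption disabled across the whole raid_run_ops() pass, which RT cannot tolerate; get_cpu_light() only pins the task to its CPU while the new per-CPU spinlock serializes users of the scratch buffers. A sketch of the combined pattern with invented names (demo_percpu, demo_use); put_cpu_light() is assumed here as the -rt counterpart of get_cpu_light() and is not visible in the hunk above:

	struct demo_percpu {
		spinlock_t lock;	/* protection for -RT, as in raid5_percpu */
		void *scratch;
	};
	static DEFINE_PER_CPU(struct demo_percpu, demo_percpu);

	static void demo_use(void)
	{
		struct demo_percpu *p;
		int cpu;

		cpu = get_cpu_light();		/* stays preemptible on RT */
		p = per_cpu_ptr(&demo_percpu, cpu);
		spin_lock(&p->lock);		/* keep other tasks on this CPU out */
		/* ... operate on p->scratch ... */
		spin_unlock(&p->lock);
		put_cpu_light();
	}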
4763 diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
4764 index 64971baf11fa..215e91e36198 100644
4765 --- a/drivers/misc/Kconfig
4766 +++ b/drivers/misc/Kconfig
4767 @@ -54,6 +54,7 @@ config AD525X_DPOT_SPI
4769 bool "Atmel AT32/AT91 Timer/Counter Library"
4770 depends on (AVR32 || ARCH_AT91)
4771 + default y if PREEMPT_RT_FULL
4773 Select this if you want a library to allocate the Timer/Counter
4774 blocks found on many Atmel processors. This facilitates using
4775 @@ -69,8 +70,7 @@ config ATMEL_TCB_CLKSRC
4776 are combined to make a single 32-bit timer.
4778 When GENERIC_CLOCKEVENTS is defined, the third timer channel
4779 - may be used as a clock event device supporting oneshot mode
4780 - (delays of up to two seconds) based on the 32 KiHz clock.
4781 + may be used as a clock event device supporting oneshot mode.
4783 config ATMEL_TCB_CLKSRC_BLOCK
4785 @@ -84,6 +84,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
4786 TC can be used for other purposes, such as PWM generation and
4789 +config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
4790 + bool "TC Block use 32 KiHz clock"
4791 + depends on ATMEL_TCB_CLKSRC
4792 + default y if !PREEMPT_RT_FULL
4794 + Select this to use the 32 KiHz base clock rate as the TC block
4795 + clock source for clock events.
4799 tristate "Dummy IRQ handler"
4801 diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
4802 index df990bb8c873..1a162709a85e 100644
4803 --- a/drivers/mmc/host/mmci.c
4804 +++ b/drivers/mmc/host/mmci.c
4805 @@ -1147,15 +1147,12 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
4806 struct sg_mapping_iter *sg_miter = &host->sg_miter;
4807 struct variant_data *variant = host->variant;
4808 void __iomem *base = host->base;
4809 - unsigned long flags;
4812 status = readl(base + MMCISTATUS);
4814 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
4816 - local_irq_save(flags);
4819 unsigned int remain, len;
4821 @@ -1195,8 +1192,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
4823 sg_miter_stop(sg_miter);
4825 - local_irq_restore(flags);
4828 * If we have less than the fifo 'half-full' threshold to transfer,
4829 * trigger a PIO interrupt as soon as any data is available.
4830 diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
4831 index 9133e7926da5..63afb921ed40 100644
4832 --- a/drivers/net/ethernet/3com/3c59x.c
4833 +++ b/drivers/net/ethernet/3com/3c59x.c
4834 @@ -842,9 +842,9 @@ static void poll_vortex(struct net_device *dev)
4836 struct vortex_private *vp = netdev_priv(dev);
4837 unsigned long flags;
4838 - local_irq_save(flags);
4839 + local_irq_save_nort(flags);
4840 (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
4841 - local_irq_restore(flags);
4842 + local_irq_restore_nort(flags);
4846 @@ -1910,12 +1910,12 @@ static void vortex_tx_timeout(struct net_device *dev)
4847 * Block interrupts because vortex_interrupt does a bare spin_lock()
4849 unsigned long flags;
4850 - local_irq_save(flags);
4851 + local_irq_save_nort(flags);
4852 if (vp->full_bus_master_tx)
4853 boomerang_interrupt(dev->irq, dev);
4855 vortex_interrupt(dev->irq, dev);
4856 - local_irq_restore(flags);
4857 + local_irq_restore_nort(flags);
4861 diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
4862 index da4c2d8a4173..1420dfb56bac 100644
4863 --- a/drivers/net/ethernet/realtek/8139too.c
4864 +++ b/drivers/net/ethernet/realtek/8139too.c
4865 @@ -2233,7 +2233,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
4866 struct rtl8139_private *tp = netdev_priv(dev);
4867 const int irq = tp->pci_dev->irq;
4870 + disable_irq_nosync(irq);
4871 rtl8139_interrupt(irq, dev);
4874 diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
4875 index bca6935a94db..d7a35ee34d03 100644
4876 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
4877 +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
4878 @@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
4879 while (!ctx->done.done && msecs--)
4882 - wait_event_interruptible(ctx->done.wait,
4883 + swait_event_interruptible(ctx->done.wait,
4887 diff --git a/drivers/pci/access.c b/drivers/pci/access.c
4888 index d11cdbb8fba3..223bbb9acb03 100644
4889 --- a/drivers/pci/access.c
4890 +++ b/drivers/pci/access.c
4891 @@ -672,7 +672,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
4892 WARN_ON(!dev->block_cfg_access);
4894 dev->block_cfg_access = 0;
4895 - wake_up_all(&pci_cfg_wait);
4896 + wake_up_all_locked(&pci_cfg_wait);
4897 raw_spin_unlock_irqrestore(&pci_lock, flags);
4899 EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
4900 diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
4901 index bedce3453dd3..faf038978650 100644
4902 --- a/drivers/pinctrl/qcom/pinctrl-msm.c
4903 +++ b/drivers/pinctrl/qcom/pinctrl-msm.c
4904 @@ -61,7 +61,7 @@ struct msm_pinctrl {
4905 struct notifier_block restart_nb;
4909 + raw_spinlock_t lock;
4911 DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
4912 DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
4913 @@ -153,14 +153,14 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
4914 if (WARN_ON(i == g->nfuncs))
4917 - spin_lock_irqsave(&pctrl->lock, flags);
4918 + raw_spin_lock_irqsave(&pctrl->lock, flags);
4920 val = readl(pctrl->regs + g->ctl_reg);
4922 val |= i << g->mux_bit;
4923 writel(val, pctrl->regs + g->ctl_reg);
4925 - spin_unlock_irqrestore(&pctrl->lock, flags);
4926 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4930 @@ -323,14 +323,14 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
4932 case PIN_CONFIG_OUTPUT:
4933 /* set output value */
4934 - spin_lock_irqsave(&pctrl->lock, flags);
4935 + raw_spin_lock_irqsave(&pctrl->lock, flags);
4936 val = readl(pctrl->regs + g->io_reg);
4938 val |= BIT(g->out_bit);
4940 val &= ~BIT(g->out_bit);
4941 writel(val, pctrl->regs + g->io_reg);
4942 - spin_unlock_irqrestore(&pctrl->lock, flags);
4943 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4947 @@ -351,12 +351,12 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
4951 - spin_lock_irqsave(&pctrl->lock, flags);
4952 + raw_spin_lock_irqsave(&pctrl->lock, flags);
4953 val = readl(pctrl->regs + g->ctl_reg);
4954 val &= ~(mask << bit);
4956 writel(val, pctrl->regs + g->ctl_reg);
4957 - spin_unlock_irqrestore(&pctrl->lock, flags);
4958 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4962 @@ -384,13 +384,13 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
4964 g = &pctrl->soc->groups[offset];
4966 - spin_lock_irqsave(&pctrl->lock, flags);
4967 + raw_spin_lock_irqsave(&pctrl->lock, flags);
4969 val = readl(pctrl->regs + g->ctl_reg);
4970 val &= ~BIT(g->oe_bit);
4971 writel(val, pctrl->regs + g->ctl_reg);
4973 - spin_unlock_irqrestore(&pctrl->lock, flags);
4974 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4978 @@ -404,7 +404,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
4980 g = &pctrl->soc->groups[offset];
4982 - spin_lock_irqsave(&pctrl->lock, flags);
4983 + raw_spin_lock_irqsave(&pctrl->lock, flags);
4985 val = readl(pctrl->regs + g->io_reg);
4987 @@ -417,7 +417,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
4988 val |= BIT(g->oe_bit);
4989 writel(val, pctrl->regs + g->ctl_reg);
4991 - spin_unlock_irqrestore(&pctrl->lock, flags);
4992 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4996 @@ -443,7 +443,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
4998 g = &pctrl->soc->groups[offset];
5000 - spin_lock_irqsave(&pctrl->lock, flags);
5001 + raw_spin_lock_irqsave(&pctrl->lock, flags);
5003 val = readl(pctrl->regs + g->io_reg);
5005 @@ -452,7 +452,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
5006 val &= ~BIT(g->out_bit);
5007 writel(val, pctrl->regs + g->io_reg);
5009 - spin_unlock_irqrestore(&pctrl->lock, flags);
5010 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
5013 #ifdef CONFIG_DEBUG_FS
5014 @@ -571,7 +571,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
5016 g = &pctrl->soc->groups[d->hwirq];
5018 - spin_lock_irqsave(&pctrl->lock, flags);
5019 + raw_spin_lock_irqsave(&pctrl->lock, flags);
5021 val = readl(pctrl->regs + g->intr_cfg_reg);
5022 val &= ~BIT(g->intr_enable_bit);
5023 @@ -579,7 +579,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
5025 clear_bit(d->hwirq, pctrl->enabled_irqs);
5027 - spin_unlock_irqrestore(&pctrl->lock, flags);
5028 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
5031 static void msm_gpio_irq_unmask(struct irq_data *d)
5032 @@ -592,7 +592,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
5034 g = &pctrl->soc->groups[d->hwirq];
5036 - spin_lock_irqsave(&pctrl->lock, flags);
5037 + raw_spin_lock_irqsave(&pctrl->lock, flags);
5039 val = readl(pctrl->regs + g->intr_cfg_reg);
5040 val |= BIT(g->intr_enable_bit);
5041 @@ -600,7 +600,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
5043 set_bit(d->hwirq, pctrl->enabled_irqs);
5045 - spin_unlock_irqrestore(&pctrl->lock, flags);
5046 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
5049 static void msm_gpio_irq_ack(struct irq_data *d)
5050 @@ -613,7 +613,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
5052 g = &pctrl->soc->groups[d->hwirq];
5054 - spin_lock_irqsave(&pctrl->lock, flags);
5055 + raw_spin_lock_irqsave(&pctrl->lock, flags);
5057 val = readl(pctrl->regs + g->intr_status_reg);
5058 if (g->intr_ack_high)
5059 @@ -625,7 +625,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
5060 if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
5061 msm_gpio_update_dual_edge_pos(pctrl, g, d);
5063 - spin_unlock_irqrestore(&pctrl->lock, flags);
5064 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
5067 static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
5068 @@ -638,7 +638,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
5070 g = &pctrl->soc->groups[d->hwirq];
5072 - spin_lock_irqsave(&pctrl->lock, flags);
5073 + raw_spin_lock_irqsave(&pctrl->lock, flags);
5076 * For hw without possibility of detecting both edges
5077 @@ -712,7 +712,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
5078 if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
5079 msm_gpio_update_dual_edge_pos(pctrl, g, d);
5081 - spin_unlock_irqrestore(&pctrl->lock, flags);
5082 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
5084 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
5085 irq_set_handler_locked(d, handle_level_irq);
5086 @@ -728,11 +728,11 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
5087 struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
5088 unsigned long flags;
5090 - spin_lock_irqsave(&pctrl->lock, flags);
5091 + raw_spin_lock_irqsave(&pctrl->lock, flags);
5093 irq_set_irq_wake(pctrl->irq, on);
5095 - spin_unlock_irqrestore(&pctrl->lock, flags);
5096 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
5100 @@ -878,7 +878,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
5101 pctrl->soc = soc_data;
5102 pctrl->chip = msm_gpio_template;
5104 - spin_lock_init(&pctrl->lock);
5105 + raw_spin_lock_init(&pctrl->lock);
5107 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5108 pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
5109 diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
5110 index 9bd41a35a78a..8e2d436c2e3f 100644
5111 --- a/drivers/scsi/fcoe/fcoe.c
5112 +++ b/drivers/scsi/fcoe/fcoe.c
5113 @@ -1455,11 +1455,11 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
5114 static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
5116 struct fcoe_percpu_s *fps;
5118 + int rc, cpu = get_cpu_light();
5120 - fps = &get_cpu_var(fcoe_percpu);
5121 + fps = &per_cpu(fcoe_percpu, cpu);
5122 rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
5123 - put_cpu_var(fcoe_percpu);
5128 @@ -1646,11 +1646,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
5132 - stats = per_cpu_ptr(lport->stats, get_cpu());
5133 + stats = per_cpu_ptr(lport->stats, get_cpu_light());
5134 stats->InvalidCRCCount++;
5135 if (stats->InvalidCRCCount < 5)
5136 printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
5142 @@ -1693,7 +1693,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
5144 hp = (struct fcoe_hdr *) skb_network_header(skb);
5146 - stats = per_cpu_ptr(lport->stats, get_cpu());
5147 + stats = per_cpu_ptr(lport->stats, get_cpu_light());
5148 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
5149 if (stats->ErrorFrames < 5)
5150 printk(KERN_WARNING "fcoe: FCoE version "
5151 @@ -1725,13 +1725,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
5154 if (!fcoe_filter_frames(lport, fp)) {
5157 fc_exch_recv(lport, fp);
5161 stats->ErrorFrames++;
5167 diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
5168 index dcf36537a767..1a1f2e46452c 100644
5169 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
5170 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
5171 @@ -834,7 +834,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
5173 INIT_LIST_HEAD(&del_list);
5175 - stats = per_cpu_ptr(fip->lp->stats, get_cpu());
5176 + stats = per_cpu_ptr(fip->lp->stats, get_cpu_light());
5178 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
5179 deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
5180 @@ -870,7 +870,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
5181 sel_time = fcf->time;
5187 list_for_each_entry_safe(fcf, next, &del_list, list) {
5188 /* Removes fcf from current list */
5189 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
5190 index 16ca31ad5ec0..c3987347e762 100644
5191 --- a/drivers/scsi/libfc/fc_exch.c
5192 +++ b/drivers/scsi/libfc/fc_exch.c
5193 @@ -814,10 +814,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
5195 memset(ep, 0, sizeof(*ep));
5198 + cpu = get_cpu_light();
5199 pool = per_cpu_ptr(mp->pool, cpu);
5200 spin_lock_bh(&pool->lock);
5204 /* peek cache of free slot */
5205 if (pool->left != FC_XID_UNKNOWN) {
5206 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
5207 index 87f5e694dbed..23c0a50fb6aa 100644
5208 --- a/drivers/scsi/libsas/sas_ata.c
5209 +++ b/drivers/scsi/libsas/sas_ata.c
5210 @@ -190,7 +190,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
5211 /* TODO: audit callers to ensure they are ready for qc_issue to
5212 * unconditionally re-enable interrupts
5214 - local_irq_save(flags);
5215 + local_irq_save_nort(flags);
5216 spin_unlock(ap->lock);
5218 /* If the device fell off, no sense in issuing commands */
5219 @@ -252,7 +252,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
5222 spin_lock(ap->lock);
5223 - local_irq_restore(flags);
5224 + local_irq_restore_nort(flags);
5228 diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
5229 index edc48f3b8230..ee5c6f9dfb6f 100644
5230 --- a/drivers/scsi/qla2xxx/qla_inline.h
5231 +++ b/drivers/scsi/qla2xxx/qla_inline.h
5232 @@ -59,12 +59,12 @@ qla2x00_poll(struct rsp_que *rsp)
5234 unsigned long flags;
5235 struct qla_hw_data *ha = rsp->hw;
5236 - local_irq_save(flags);
5237 + local_irq_save_nort(flags);
5238 if (IS_P3P_TYPE(ha))
5239 qla82xx_poll(0, rsp);
5241 ha->isp_ops->intr_handler(0, rsp);
5242 - local_irq_restore(flags);
5243 + local_irq_restore_nort(flags);
5246 static inline uint8_t *
5247 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
5248 index 068c4e47fac9..a2090f640397 100644
5249 --- a/drivers/scsi/qla2xxx/qla_isr.c
5250 +++ b/drivers/scsi/qla2xxx/qla_isr.c
5251 @@ -3125,7 +3125,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
5254 kref_get(&qentry->irq_notify.kref);
5255 +#ifdef CONFIG_PREEMPT_RT_BASE
5256 + swork_queue(&qentry->irq_notify.swork);
5258 schedule_work(&qentry->irq_notify.work);
5263 diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
5264 index 95f4c1bcdb4c..0be934799bff 100644
5265 --- a/drivers/thermal/x86_pkg_temp_thermal.c
5266 +++ b/drivers/thermal/x86_pkg_temp_thermal.c
5268 #include <linux/pm.h>
5269 #include <linux/thermal.h>
5270 #include <linux/debugfs.h>
5271 +#include <linux/swork.h>
5272 #include <asm/cpu_device_id.h>
5273 #include <asm/mce.h>
5275 @@ -353,7 +354,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
5279 -static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
5280 +static void platform_thermal_notify_work(struct swork_event *event)
5282 unsigned long flags;
5283 int cpu = smp_processor_id();
5284 @@ -370,7 +371,7 @@ static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
5285 pkg_work_scheduled[phy_id]) {
5286 disable_pkg_thres_interrupt();
5287 spin_unlock_irqrestore(&pkg_work_lock, flags);
5291 pkg_work_scheduled[phy_id] = 1;
5292 spin_unlock_irqrestore(&pkg_work_lock, flags);
5293 @@ -379,9 +380,48 @@ static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
5294 schedule_delayed_work_on(cpu,
5295 &per_cpu(pkg_temp_thermal_threshold_work, cpu),
5296 msecs_to_jiffies(notify_delay_ms));
5299 +#ifdef CONFIG_PREEMPT_RT_FULL
5300 +static struct swork_event notify_work;
5302 +static int thermal_notify_work_init(void)
5306 + err = swork_get();
5310 + INIT_SWORK(&notify_work, platform_thermal_notify_work);
5314 +static void thermal_notify_work_cleanup(void)
5319 +static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
5321 + swork_queue(&notify_work);
5325 +#else /* !CONFIG_PREEMPT_RT_FULL */
5327 +static int thermal_notify_work_init(void) { return 0; }
5329 +static void thermal_notify_work_cleanup(void) { }
5331 +static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
5333 + platform_thermal_notify_work(NULL);
5337 +#endif /* CONFIG_PREEMPT_RT_FULL */
5339 static int find_siblings_cpu(int cpu)
5342 @@ -585,6 +625,9 @@ static int __init pkg_temp_thermal_init(void)
5343 if (!x86_match_cpu(pkg_temp_thermal_ids))
5346 + if (!thermal_notify_work_init())
5349 spin_lock_init(&pkg_work_lock);
5350 platform_thermal_package_notify =
5351 pkg_temp_thermal_platform_thermal_notify;
5352 @@ -609,7 +652,7 @@ static int __init pkg_temp_thermal_init(void)
5353 kfree(pkg_work_scheduled);
5354 platform_thermal_package_notify = NULL;
5355 platform_thermal_package_rate_control = NULL;
5357 + thermal_notify_work_cleanup();
5361 @@ -634,6 +677,7 @@ static void __exit pkg_temp_thermal_exit(void)
5362 mutex_unlock(&phy_dev_list_mutex);
5363 platform_thermal_package_notify = NULL;
5364 platform_thermal_package_rate_control = NULL;
5365 + thermal_notify_work_cleanup();
5366 for_each_online_cpu(i)
5367 cancel_delayed_work_sync(
5368 &per_cpu(pkg_temp_thermal_threshold_work, i));
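On RT the thermal notification arrives in hard interrupt context, where queueing regular (delayed) work can end up taking sleeping locks, so the patch defers it through swork, the simple kthread-backed work mechanism from the -rt tree. A minimal usage sketch with hypothetical names (demo_event, demo_handler, demo_init, demo_notify); only swork_get(), INIT_SWORK() and swork_queue() from the hunk above are assumed:

	#include <linux/swork.h>

	static struct swork_event demo_event;

	static void demo_handler(struct swork_event *event)
	{
		/* runs in task context and may sleep */
	}

	static int __init demo_init(void)
	{
		int err = swork_get();	/* bring up the swork worker thread */

		if (err)
			return err;
		INIT_SWORK(&demo_event, demo_handler);
		return 0;
	}

	static void demo_notify(void)	/* callable from hard interrupt context */
	{
		swork_queue(&demo_event);
	}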
5369 diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
5370 index e8819aa20415..dd7f9bf45d6c 100644
5371 --- a/drivers/tty/serial/8250/8250_core.c
5372 +++ b/drivers/tty/serial/8250/8250_core.c
5373 @@ -58,7 +58,16 @@ static struct uart_driver serial8250_reg;
5375 static unsigned int skip_txen_test; /* force skip of txen test at init time */
5377 -#define PASS_LIMIT 512
5378 +/*
5379 + * On -rt we can have more delays, and legitimately
5380 + * so - so don't drop work spuriously and spam the
5381 + * hardware with irqs
5382 + */
5383 +#ifdef CONFIG_PREEMPT_RT_FULL
5384 +# define PASS_LIMIT 1000000
5386 +# define PASS_LIMIT 512
5389 #include <asm/serial.h>
5391 diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
5392 index 080d5a59d0a7..eecc4f111473 100644
5393 --- a/drivers/tty/serial/8250/8250_port.c
5394 +++ b/drivers/tty/serial/8250/8250_port.c
5396 #include <linux/nmi.h>
5397 #include <linux/mutex.h>
5398 #include <linux/slab.h>
5399 +#include <linux/kdb.h>
5400 #include <linux/uaccess.h>
5401 #include <linux/pm_runtime.h>
5402 #include <linux/timer.h>
5403 @@ -3144,9 +3145,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
5405 serial8250_rpm_get(up);
5408 + if (port->sysrq || oops_in_progress)
5410 - else if (oops_in_progress)
5411 + else if (in_kdb_printk())
5412 locked = spin_trylock_irqsave(&port->lock, flags);
5414 spin_lock_irqsave(&port->lock, flags);
5415 diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
5416 index e2c33b9528d8..53af53c43e8c 100644
5417 --- a/drivers/tty/serial/amba-pl011.c
5418 +++ b/drivers/tty/serial/amba-pl011.c
5419 @@ -2194,13 +2194,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
5421 clk_enable(uap->clk);
5423 - local_irq_save(flags);
5425 + * local_irq_save(flags);
5427 + * This local_irq_save() is nonsense. If we come in via sysrq
5428 +	 * handling then interrupts are already disabled. Aside from
5429 + * that the port.sysrq check is racy on SMP regardless.
5431 if (uap->port.sysrq)
5433 else if (oops_in_progress)
5434 - locked = spin_trylock(&uap->port.lock);
5435 + locked = spin_trylock_irqsave(&uap->port.lock, flags);
5437 - spin_lock(&uap->port.lock);
5438 + spin_lock_irqsave(&uap->port.lock, flags);
5441 * First save the CR then disable the interrupts
5442 @@ -2224,8 +2230,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
5443 pl011_write(old_cr, uap, REG_CR);
5446 - spin_unlock(&uap->port.lock);
5447 - local_irq_restore(flags);
5448 + spin_unlock_irqrestore(&uap->port.lock, flags);
5450 clk_disable(uap->clk);
5452 diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
5453 index 44e5b5bf713b..400140d1dfff 100644
5454 --- a/drivers/tty/serial/omap-serial.c
5455 +++ b/drivers/tty/serial/omap-serial.c
5456 @@ -1257,13 +1257,10 @@ serial_omap_console_write(struct console *co, const char *s,
5458 pm_runtime_get_sync(up->dev);
5460 - local_irq_save(flags);
5461 - if (up->port.sysrq)
5463 - else if (oops_in_progress)
5464 - locked = spin_trylock(&up->port.lock);
5465 + if (up->port.sysrq || oops_in_progress)
5466 + locked = spin_trylock_irqsave(&up->port.lock, flags);
5468 - spin_lock(&up->port.lock);
5469 + spin_lock_irqsave(&up->port.lock, flags);
5472 * First save the IER then disable the interrupts
5473 @@ -1292,8 +1289,7 @@ serial_omap_console_write(struct console *co, const char *s,
5474 pm_runtime_mark_last_busy(up->dev);
5475 pm_runtime_put_autosuspend(up->dev);
5477 - spin_unlock(&up->port.lock);
5478 - local_irq_restore(flags);
5479 + spin_unlock_irqrestore(&up->port.lock, flags);
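
The pl011 and omap hunks above converge on one locking idiom for console output: take the port lock with the irqsave variants instead of an explicit local_irq_save(), and only trylock when printing from sysrq or oops context so the console cannot deadlock against an interrupted lock holder. Reduced to its skeleton (following the omap variant; the actual character output is elided):

    static void example_console_write(struct uart_port *port,
                                      const char *s, unsigned int count)
    {
            unsigned long flags;
            int locked = 1;

            if (port->sysrq || oops_in_progress)
                    /* we may already hold the lock - do not deadlock the oops */
                    locked = spin_trylock_irqsave(&port->lock, flags);
            else
                    spin_lock_irqsave(&port->lock, flags);

            /* ... write the characters out ... */

            if (locked)
                    spin_unlock_irqrestore(&port->lock, flags);
    }
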
5483 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
5484 index 3b9735abf2e0..73ba3239869e 100644
5485 --- a/drivers/usb/core/hcd.c
5486 +++ b/drivers/usb/core/hcd.c
5487 @@ -1764,9 +1764,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
5488 * and no one may trigger the above deadlock situation when
5489 * running complete() in tasklet.
5491 - local_irq_save(flags);
5492 + local_irq_save_nort(flags);
5494 - local_irq_restore(flags);
5495 + local_irq_restore_nort(flags);
5497 usb_anchor_resume_wakeups(anchor);
5498 atomic_dec(&urb->use_count);
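
local_irq_save_nort()/local_irq_restore_nort() are defined in the irqflags.h hunk near the end of this series: on mainline they are the ordinary interrupt-disabling forms, on RT they merely save the flags so the section stays preemptible and sleeping locks taken inside it remain legal. They are a drop-in replacement at the call site:

    unsigned long flags;

    local_irq_save_nort(flags);     /* irqs off on !RT, preemptible on RT */
    /* ... short section, e.g. running the URB completion handler ... */
    local_irq_restore_nort(flags);
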
5499 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
5500 index 89081b834615..90b231b7ad0a 100644
5501 --- a/drivers/usb/gadget/function/f_fs.c
5502 +++ b/drivers/usb/gadget/function/f_fs.c
5503 @@ -1593,7 +1593,7 @@ static void ffs_data_put(struct ffs_data *ffs)
5504 pr_info("%s(): freeing\n", __func__);
5505 ffs_data_clear(ffs);
5506 BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
5507 - waitqueue_active(&ffs->ep0req_completion.wait));
5508 + swait_active(&ffs->ep0req_completion.wait));
5509 kfree(ffs->dev_name);
5512 diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
5513 index 1468d8f085a3..6aae3ae25c18 100644
5514 --- a/drivers/usb/gadget/legacy/inode.c
5515 +++ b/drivers/usb/gadget/legacy/inode.c
5516 @@ -346,7 +346,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
5517 spin_unlock_irq (&epdata->dev->lock);
5519 if (likely (value == 0)) {
5520 - value = wait_event_interruptible (done.wait, done.done);
5521 + value = swait_event_interruptible (done.wait, done.done);
5523 spin_lock_irq (&epdata->dev->lock);
5524 if (likely (epdata->ep != NULL)) {
5525 @@ -355,7 +355,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
5526 usb_ep_dequeue (epdata->ep, epdata->req);
5527 spin_unlock_irq (&epdata->dev->lock);
5529 - wait_event (done.wait, done.done);
5530 + swait_event (done.wait, done.done);
5531 if (epdata->status == -ECONNRESET)
5532 epdata->status = -EINTR;
5534 diff --git a/fs/aio.c b/fs/aio.c
5535 index 428484f2f841..2b02e2eb2158 100644
5539 #include <linux/ramfs.h>
5540 #include <linux/percpu-refcount.h>
5541 #include <linux/mount.h>
5542 +#include <linux/swork.h>
5544 #include <asm/kmap_types.h>
5545 #include <asm/uaccess.h>
5546 @@ -115,7 +116,7 @@ struct kioctx {
5547 struct page **ring_pages;
5550 - struct work_struct free_work;
5551 + struct swork_event free_work;
5554 * signals when all in-flight requests are done
5555 @@ -258,6 +259,7 @@ static int __init aio_setup(void)
5557 .kill_sb = kill_anon_super,
5559 + BUG_ON(swork_get());
5560 aio_mnt = kern_mount(&aio_fs);
5561 if (IS_ERR(aio_mnt))
5562 panic("Failed to create aio fs mount.");
5563 @@ -581,9 +583,9 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
5564 return cancel(&kiocb->common);
5567 -static void free_ioctx(struct work_struct *work)
5568 +static void free_ioctx(struct swork_event *sev)
5570 - struct kioctx *ctx = container_of(work, struct kioctx, free_work);
5571 + struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
5573 pr_debug("freeing %p\n", ctx);
5575 @@ -602,8 +604,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
5576 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
5577 complete(&ctx->rq_wait->comp);
5579 - INIT_WORK(&ctx->free_work, free_ioctx);
5580 - schedule_work(&ctx->free_work);
5581 + INIT_SWORK(&ctx->free_work, free_ioctx);
5582 + swork_queue(&ctx->free_work);
5586 @@ -611,9 +613,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
5587 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
5588 * now it's safe to cancel any that need to be.
5590 -static void free_ioctx_users(struct percpu_ref *ref)
5591 +static void free_ioctx_users_work(struct swork_event *sev)
5593 - struct kioctx *ctx = container_of(ref, struct kioctx, users);
5594 + struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
5595 struct aio_kiocb *req;
5597 spin_lock_irq(&ctx->ctx_lock);
5598 @@ -632,6 +634,14 @@ static void free_ioctx_users(struct percpu_ref *ref)
5599 percpu_ref_put(&ctx->reqs);
5602 +static void free_ioctx_users(struct percpu_ref *ref)
5604 + struct kioctx *ctx = container_of(ref, struct kioctx, users);
5606 + INIT_SWORK(&ctx->free_work, free_ioctx_users_work);
5607 + swork_queue(&ctx->free_work);
5610 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
5613 diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
5614 index a1fba4285277..3796769b4cd1 100644
5615 --- a/fs/autofs4/autofs_i.h
5616 +++ b/fs/autofs4/autofs_i.h
5618 #include <linux/sched.h>
5619 #include <linux/mount.h>
5620 #include <linux/namei.h>
5621 +#include <linux/delay.h>
5622 #include <asm/current.h>
5623 #include <linux/uaccess.h>
5625 diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
5626 index d8e6d421c27f..2e689ab1306b 100644
5627 --- a/fs/autofs4/expire.c
5628 +++ b/fs/autofs4/expire.c
5629 @@ -148,7 +148,7 @@ static struct dentry *get_next_positive_dentry(struct dentry *prev,
5630 parent = p->d_parent;
5631 if (!spin_trylock(&parent->d_lock)) {
5632 spin_unlock(&p->d_lock);
5637 spin_unlock(&p->d_lock);
5638 diff --git a/fs/buffer.c b/fs/buffer.c
5639 index b205a629001d..5646afc022ba 100644
5642 @@ -301,8 +301,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
5643 * decide that the page is now completely done.
5645 first = page_buffers(page);
5646 - local_irq_save(flags);
5647 - bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
5648 + flags = bh_uptodate_lock_irqsave(first);
5649 clear_buffer_async_read(bh);
5652 @@ -315,8 +314,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
5654 tmp = tmp->b_this_page;
5655 } while (tmp != bh);
5656 - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
5657 - local_irq_restore(flags);
5658 + bh_uptodate_unlock_irqrestore(first, flags);
5661 * If none of the buffers had errors and they are all
5662 @@ -328,9 +326,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
5666 - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
5667 - local_irq_restore(flags);
5669 + bh_uptodate_unlock_irqrestore(first, flags);
5673 @@ -358,8 +354,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
5676 first = page_buffers(page);
5677 - local_irq_save(flags);
5678 - bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
5679 + flags = bh_uptodate_lock_irqsave(first);
5681 clear_buffer_async_write(bh);
5683 @@ -371,15 +366,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
5685 tmp = tmp->b_this_page;
5687 - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
5688 - local_irq_restore(flags);
5689 + bh_uptodate_unlock_irqrestore(first, flags);
5690 end_page_writeback(page);
5694 - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
5695 - local_irq_restore(flags);
5697 + bh_uptodate_unlock_irqrestore(first, flags);
5699 EXPORT_SYMBOL(end_buffer_async_write);
5701 @@ -3383,6 +3375,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
5702 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
5704 INIT_LIST_HEAD(&ret->b_assoc_buffers);
5705 + buffer_head_init_locks(ret);
5707 __this_cpu_inc(bh_accounting.nr);
5709 diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
5710 index a27fc8791551..791aecb7c1ac 100644
5711 --- a/fs/cifs/readdir.c
5712 +++ b/fs/cifs/readdir.c
5713 @@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
5714 struct inode *inode;
5715 struct super_block *sb = parent->d_sb;
5716 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
5717 - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
5718 + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
5720 cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
5722 diff --git a/fs/dcache.c b/fs/dcache.c
5723 index 4485a48f4091..691039a6a872 100644
5727 #include <linux/mm.h>
5728 #include <linux/fs.h>
5729 #include <linux/fsnotify.h>
5730 +#include <linux/delay.h>
5731 #include <linux/slab.h>
5732 #include <linux/init.h>
5733 #include <linux/hash.h>
5734 @@ -750,6 +751,8 @@ static inline bool fast_dput(struct dentry *dentry)
5736 void dput(struct dentry *dentry)
5738 + struct dentry *parent;
5740 if (unlikely(!dentry))
5743 @@ -788,9 +791,18 @@ void dput(struct dentry *dentry)
5747 - dentry = dentry_kill(dentry);
5750 + parent = dentry_kill(dentry);
5754 + if (parent == dentry) {
5755 + /* the task with the highest priority won't schedule */
5756 + r = cond_resched();
5765 @@ -2324,7 +2336,7 @@ void d_delete(struct dentry * dentry)
5766 if (dentry->d_lockref.count == 1) {
5767 if (!spin_trylock(&inode->i_lock)) {
5768 spin_unlock(&dentry->d_lock);
5773 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
5774 @@ -2384,21 +2396,24 @@ static inline void end_dir_add(struct inode *dir, unsigned n)
5776 static void d_wait_lookup(struct dentry *dentry)
5778 - if (d_in_lookup(dentry)) {
5779 - DECLARE_WAITQUEUE(wait, current);
5780 - add_wait_queue(dentry->d_wait, &wait);
5782 - set_current_state(TASK_UNINTERRUPTIBLE);
5783 - spin_unlock(&dentry->d_lock);
5785 - spin_lock(&dentry->d_lock);
5786 - } while (d_in_lookup(dentry));
5788 + struct swait_queue __wait;
5790 + if (!d_in_lookup(dentry))
5793 + INIT_LIST_HEAD(&__wait.task_list);
5795 + prepare_to_swait(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
5796 + spin_unlock(&dentry->d_lock);
5798 + spin_lock(&dentry->d_lock);
5799 + } while (d_in_lookup(dentry));
5800 + finish_swait(dentry->d_wait, &__wait);
5803 struct dentry *d_alloc_parallel(struct dentry *parent,
5804 const struct qstr *name,
5805 - wait_queue_head_t *wq)
5806 + struct swait_queue_head *wq)
5808 unsigned int hash = name->hash;
5809 struct hlist_bl_head *b = in_lookup_hash(parent, hash);
5810 @@ -2507,7 +2522,7 @@ void __d_lookup_done(struct dentry *dentry)
5812 dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
5813 __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
5814 - wake_up_all(dentry->d_wait);
5815 + swake_up_all(dentry->d_wait);
5816 dentry->d_wait = NULL;
5818 INIT_HLIST_NODE(&dentry->d_u.d_alias);
5819 @@ -3604,6 +3619,11 @@ EXPORT_SYMBOL(d_genocide);
5821 void __init vfs_caches_init_early(void)
5825 + for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
5826 + INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
5828 dcache_init_early();
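
With the dentry lookup waitqueues switched to simple waitqueues, every caller that hands an on-stack waitqueue head to d_alloc_parallel() declares the swait variant instead - the cifs, fuse, nfs and proc hunks in this series all make exactly that substitution. The caller-side shape, sketched with placeholder parent/qname variables:

    DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
    struct dentry *dentry;

    dentry = d_alloc_parallel(parent, &qname, &wq);
    if (IS_ERR(dentry))
            return PTR_ERR(dentry);
    if (d_in_lookup(dentry)) {
            /* we own the in-lookup dentry: instantiate it, then ... */
            d_lookup_done(dentry);  /* wakes waiters via swake_up_all() */
    }
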
5831 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
5832 index 10db91218933..42af0a06f657 100644
5833 --- a/fs/eventpoll.c
5834 +++ b/fs/eventpoll.c
5835 @@ -510,12 +510,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
5837 static void ep_poll_safewake(wait_queue_head_t *wq)
5839 - int this_cpu = get_cpu();
5840 + int this_cpu = get_cpu_light();
5842 ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
5843 ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
5849 static void ep_remove_wait_queue(struct eppoll_entry *pwq)
5850 diff --git a/fs/exec.c b/fs/exec.c
5851 index 67e86571685a..fe14cdd84016 100644
5854 @@ -1017,12 +1017,14 @@ static int exec_mmap(struct mm_struct *mm)
5858 + preempt_disable_rt();
5859 active_mm = tsk->active_mm;
5861 tsk->active_mm = mm;
5862 activate_mm(active_mm, mm);
5863 tsk->mm->vmacache_seqnum = 0;
5864 vmacache_flush(tsk);
5865 + preempt_enable_rt();
5868 up_read(&old_mm->mmap_sem);
5869 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
5870 index 642c57b8de7b..8494b9308333 100644
5873 @@ -1191,7 +1191,7 @@ static int fuse_direntplus_link(struct file *file,
5874 struct inode *dir = d_inode(parent);
5875 struct fuse_conn *fc;
5876 struct inode *inode;
5877 - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
5878 + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
5882 diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
5883 index 684996c8a3a4..6e18a06aaabe 100644
5884 --- a/fs/jbd2/checkpoint.c
5885 +++ b/fs/jbd2/checkpoint.c
5886 @@ -116,6 +116,8 @@ void __jbd2_log_wait_for_space(journal_t *journal)
5887 nblocks = jbd2_space_needed(journal);
5888 while (jbd2_log_space_left(journal) < nblocks) {
5889 write_unlock(&journal->j_state_lock);
5890 + if (current->plug)
5892 mutex_lock(&journal->j_checkpoint_mutex);
5895 diff --git a/fs/locks.c b/fs/locks.c
5896 index 22c5b4aa4961..269c6a44449a 100644
5899 @@ -935,7 +935,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
5903 - percpu_down_read_preempt_disable(&file_rwsem);
5904 + percpu_down_read(&file_rwsem);
5905 spin_lock(&ctx->flc_lock);
5906 if (request->fl_flags & FL_ACCESS)
5908 @@ -976,7 +976,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
5911 spin_unlock(&ctx->flc_lock);
5912 - percpu_up_read_preempt_enable(&file_rwsem);
5913 + percpu_up_read(&file_rwsem);
5915 locks_free_lock(new_fl);
5916 locks_dispose_list(&dispose);
5917 @@ -1013,7 +1013,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
5918 new_fl2 = locks_alloc_lock();
5921 - percpu_down_read_preempt_disable(&file_rwsem);
5922 + percpu_down_read(&file_rwsem);
5923 spin_lock(&ctx->flc_lock);
5925 * New lock request. Walk all POSIX locks and look for conflicts. If
5926 @@ -1185,7 +1185,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
5929 spin_unlock(&ctx->flc_lock);
5930 - percpu_up_read_preempt_enable(&file_rwsem);
5931 + percpu_up_read(&file_rwsem);
5933 * Free any unused locks.
5935 @@ -1460,7 +1460,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
5939 - percpu_down_read_preempt_disable(&file_rwsem);
5940 + percpu_down_read(&file_rwsem);
5941 spin_lock(&ctx->flc_lock);
5943 time_out_leases(inode, &dispose);
5944 @@ -1512,13 +1512,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
5945 locks_insert_block(fl, new_fl);
5946 trace_break_lease_block(inode, new_fl);
5947 spin_unlock(&ctx->flc_lock);
5948 - percpu_up_read_preempt_enable(&file_rwsem);
5949 + percpu_up_read(&file_rwsem);
5951 locks_dispose_list(&dispose);
5952 error = wait_event_interruptible_timeout(new_fl->fl_wait,
5953 !new_fl->fl_next, break_time);
5955 - percpu_down_read_preempt_disable(&file_rwsem);
5956 + percpu_down_read(&file_rwsem);
5957 spin_lock(&ctx->flc_lock);
5958 trace_break_lease_unblock(inode, new_fl);
5959 locks_delete_block(new_fl);
5960 @@ -1535,7 +1535,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
5963 spin_unlock(&ctx->flc_lock);
5964 - percpu_up_read_preempt_enable(&file_rwsem);
5965 + percpu_up_read(&file_rwsem);
5966 locks_dispose_list(&dispose);
5967 locks_free_lock(new_fl);
5969 @@ -1609,7 +1609,7 @@ int fcntl_getlease(struct file *filp)
5971 ctx = smp_load_acquire(&inode->i_flctx);
5972 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
5973 - percpu_down_read_preempt_disable(&file_rwsem);
5974 + percpu_down_read(&file_rwsem);
5975 spin_lock(&ctx->flc_lock);
5976 time_out_leases(inode, &dispose);
5977 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
5978 @@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp)
5981 spin_unlock(&ctx->flc_lock);
5982 - percpu_up_read_preempt_enable(&file_rwsem);
5983 + percpu_up_read(&file_rwsem);
5985 locks_dispose_list(&dispose);
5987 @@ -1694,7 +1694,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
5991 - percpu_down_read_preempt_disable(&file_rwsem);
5992 + percpu_down_read(&file_rwsem);
5993 spin_lock(&ctx->flc_lock);
5994 time_out_leases(inode, &dispose);
5995 error = check_conflicting_open(dentry, arg, lease->fl_flags);
5996 @@ -1765,7 +1765,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
5997 lease->fl_lmops->lm_setup(lease, priv);
5999 spin_unlock(&ctx->flc_lock);
6000 - percpu_up_read_preempt_enable(&file_rwsem);
6001 + percpu_up_read(&file_rwsem);
6002 locks_dispose_list(&dispose);
6004 inode_unlock(inode);
6005 @@ -1788,7 +1788,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
6009 - percpu_down_read_preempt_disable(&file_rwsem);
6010 + percpu_down_read(&file_rwsem);
6011 spin_lock(&ctx->flc_lock);
6012 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
6013 if (fl->fl_file == filp &&
6014 @@ -1801,7 +1801,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
6016 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
6017 spin_unlock(&ctx->flc_lock);
6018 - percpu_up_read_preempt_enable(&file_rwsem);
6019 + percpu_up_read(&file_rwsem);
6020 locks_dispose_list(&dispose);
6023 @@ -2532,13 +2532,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
6024 if (list_empty(&ctx->flc_lease))
6027 - percpu_down_read_preempt_disable(&file_rwsem);
6028 + percpu_down_read(&file_rwsem);
6029 spin_lock(&ctx->flc_lock);
6030 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
6031 if (filp == fl->fl_file)
6032 lease_modify(fl, F_UNLCK, &dispose);
6033 spin_unlock(&ctx->flc_lock);
6034 - percpu_up_read_preempt_enable(&file_rwsem);
6035 + percpu_up_read(&file_rwsem);
6037 locks_dispose_list(&dispose);
6039 diff --git a/fs/namei.c b/fs/namei.c
6040 index d5e5140c1045..150fbdd8e04c 100644
6043 @@ -1626,7 +1626,7 @@ static struct dentry *lookup_slow(const struct qstr *name,
6045 struct dentry *dentry = ERR_PTR(-ENOENT), *old;
6046 struct inode *inode = dir->d_inode;
6047 - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
6048 + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
6050 inode_lock_shared(inode);
6051 /* Don't go there if it's already dead */
6052 @@ -3083,7 +3083,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
6053 struct dentry *dentry;
6054 int error, create_error = 0;
6055 umode_t mode = op->mode;
6056 - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
6057 + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
6059 if (unlikely(IS_DEADDIR(dir_inode)))
6061 diff --git a/fs/namespace.c b/fs/namespace.c
6062 index 5e35057f07ac..843d274ba167 100644
6063 --- a/fs/namespace.c
6064 +++ b/fs/namespace.c
6066 #include <linux/mnt_namespace.h>
6067 #include <linux/user_namespace.h>
6068 #include <linux/namei.h>
6069 +#include <linux/delay.h>
6070 #include <linux/security.h>
6071 #include <linux/idr.h>
6072 #include <linux/init.h> /* init_rootfs */
6073 @@ -356,8 +357,11 @@ int __mnt_want_write(struct vfsmount *m)
6074 * incremented count after it has set MNT_WRITE_HOLD.
6077 - while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
6079 + while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
6082 + preempt_disable();
6085 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
6086 * be set to match its requirements. So we must not load that until
6087 diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
6088 index dff600ae0d74..d726d2e09353 100644
6089 --- a/fs/nfs/delegation.c
6090 +++ b/fs/nfs/delegation.c
6091 @@ -150,11 +150,11 @@ static int nfs_delegation_claim_opens(struct inode *inode,
6093 /* Block nfs4_proc_unlck */
6094 mutex_lock(&sp->so_delegreturn_mutex);
6095 - seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
6096 + seq = read_seqbegin(&sp->so_reclaim_seqlock);
6097 err = nfs4_open_delegation_recall(ctx, state, stateid, type);
6099 err = nfs_delegation_claim_locks(ctx, state, stateid);
6100 - if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
6101 + if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq))
6103 mutex_unlock(&sp->so_delegreturn_mutex);
6104 put_nfs_open_context(ctx);
6105 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
6106 index 53e02b8bd9bd..a66e7d77cfbb 100644
6109 @@ -485,7 +485,7 @@ static
6110 void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
6112 struct qstr filename = QSTR_INIT(entry->name, entry->len);
6113 - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
6114 + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
6115 struct dentry *dentry;
6116 struct dentry *alias;
6117 struct inode *dir = d_inode(parent);
6118 @@ -1487,7 +1487,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
6119 struct file *file, unsigned open_flags,
6120 umode_t mode, int *opened)
6122 - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
6123 + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
6124 struct nfs_open_context *ctx;
6126 struct iattr attr = { .ia_valid = ATTR_OPEN };
6127 @@ -1802,7 +1802,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
6129 trace_nfs_rmdir_enter(dir, dentry);
6130 if (d_really_is_positive(dentry)) {
6131 +#ifdef CONFIG_PREEMPT_RT_BASE
6132 + down(&NFS_I(d_inode(dentry))->rmdir_sem);
6134 down_write(&NFS_I(d_inode(dentry))->rmdir_sem);
6136 error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
6137 /* Ensure the VFS deletes this inode */
6139 @@ -1812,7 +1816,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
6141 nfs_dentry_handle_enoent(dentry);
6143 +#ifdef CONFIG_PREEMPT_RT_BASE
6144 + up(&NFS_I(d_inode(dentry))->rmdir_sem);
6146 up_write(&NFS_I(d_inode(dentry))->rmdir_sem);
6149 error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
6150 trace_nfs_rmdir_exit(dir, dentry, error);
6151 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
6152 index bf4ec5ecc97e..36cd5fc9192c 100644
6153 --- a/fs/nfs/inode.c
6154 +++ b/fs/nfs/inode.c
6155 @@ -1957,7 +1957,11 @@ static void init_once(void *foo)
6156 nfsi->nrequests = 0;
6157 nfsi->commit_info.ncommit = 0;
6158 atomic_set(&nfsi->commit_info.rpcs_out, 0);
6159 +#ifdef CONFIG_PREEMPT_RT_BASE
6160 + sema_init(&nfsi->rmdir_sem, 1);
6162 init_rwsem(&nfsi->rmdir_sem);
6164 nfs4_init_once(nfsi);
6167 diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
6168 index 1452177c822d..f43b01d54c59 100644
6169 --- a/fs/nfs/nfs4_fs.h
6170 +++ b/fs/nfs/nfs4_fs.h
6171 @@ -111,7 +111,7 @@ struct nfs4_state_owner {
6172 unsigned long so_flags;
6173 struct list_head so_states;
6174 struct nfs_seqid_counter so_seqid;
6175 - seqcount_t so_reclaim_seqcount;
6176 + seqlock_t so_reclaim_seqlock;
6177 struct mutex so_delegreturn_mutex;
6180 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
6181 index fc9b04941739..7c9bc1c7efe7 100644
6182 --- a/fs/nfs/nfs4proc.c
6183 +++ b/fs/nfs/nfs4proc.c
6184 @@ -2697,7 +2697,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
6188 - seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
6189 + seq = raw_seqcount_begin(&sp->so_reclaim_seqlock.seqcount);
6191 ret = _nfs4_proc_open(opendata);
6193 @@ -2735,7 +2735,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
6195 if (d_inode(dentry) == state->inode) {
6196 nfs_inode_attach_open_context(ctx);
6197 - if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
6198 + if (read_seqretry(&sp->so_reclaim_seqlock, seq))
6199 nfs4_schedule_stateid_recovery(server, state);
6202 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
6203 index 0959c9661662..dabd834d7686 100644
6204 --- a/fs/nfs/nfs4state.c
6205 +++ b/fs/nfs/nfs4state.c
6206 @@ -488,7 +488,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
6207 nfs4_init_seqid_counter(&sp->so_seqid);
6208 atomic_set(&sp->so_count, 1);
6209 INIT_LIST_HEAD(&sp->so_lru);
6210 - seqcount_init(&sp->so_reclaim_seqcount);
6211 + seqlock_init(&sp->so_reclaim_seqlock);
6212 mutex_init(&sp->so_delegreturn_mutex);
6215 @@ -1497,8 +1497,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
6216 * recovering after a network partition or a reboot from a
6217 * server that doesn't support a grace period.
6219 +#ifdef CONFIG_PREEMPT_RT_FULL
6220 + write_seqlock(&sp->so_reclaim_seqlock);
6222 + write_seqcount_begin(&sp->so_reclaim_seqlock.seqcount);
6224 spin_lock(&sp->so_lock);
6225 - raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
6227 list_for_each_entry(state, &sp->so_states, open_states) {
6228 if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
6229 @@ -1567,14 +1571,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
6230 spin_lock(&sp->so_lock);
6233 - raw_write_seqcount_end(&sp->so_reclaim_seqcount);
6234 spin_unlock(&sp->so_lock);
6235 +#ifdef CONFIG_PREEMPT_RT_FULL
6236 + write_sequnlock(&sp->so_reclaim_seqlock);
6238 + write_seqcount_end(&sp->so_reclaim_seqlock.seqcount);
6242 nfs4_put_open_state(state);
6243 - spin_lock(&sp->so_lock);
6244 - raw_write_seqcount_end(&sp->so_reclaim_seqcount);
6245 - spin_unlock(&sp->so_lock);
6246 +#ifdef CONFIG_PREEMPT_RT_FULL
6247 + write_sequnlock(&sp->so_reclaim_seqlock);
6249 + write_seqcount_end(&sp->so_reclaim_seqlock.seqcount);
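
The reclaim serialisation is now a full seqlock_t: readers use read_seqbegin()/read_seqretry(), while the writer takes the whole seqlock on RT (so it stays preemptible against readers) but only the embedded seqcount on !RT, preserving the old behaviour there. A generic sketch of that split, with an illustrative example_lock:

    static DEFINE_SEQLOCK(example_lock);

    static bool example_read_side(void)
    {
            unsigned int seq = read_seqbegin(&example_lock);

            /* ... speculative, lockless read side ... */
            return !read_seqretry(&example_lock, seq);      /* false => redo */
    }

    static void example_write_side(void)
    {
    #ifdef CONFIG_PREEMPT_RT_FULL
            write_seqlock(&example_lock);
    #else
            write_seqcount_begin(&example_lock.seqcount);
    #endif
            /* ... the reclaim pass ... */
    #ifdef CONFIG_PREEMPT_RT_FULL
            write_sequnlock(&example_lock);
    #else
            write_seqcount_end(&example_lock.seqcount);
    #endif
    }
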
6254 diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
6255 index 191aa577dd1f..58990c8f52e0 100644
6256 --- a/fs/nfs/unlink.c
6257 +++ b/fs/nfs/unlink.c
6259 #include <linux/sunrpc/clnt.h>
6260 #include <linux/nfs_fs.h>
6261 #include <linux/sched.h>
6262 -#include <linux/wait.h>
6263 +#include <linux/swait.h>
6264 #include <linux/namei.h>
6265 #include <linux/fsnotify.h>
6267 @@ -51,6 +51,29 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
6268 rpc_restart_call_prepare(task);
6271 +#ifdef CONFIG_PREEMPT_RT_BASE
6272 +static void nfs_down_anon(struct semaphore *sema)
6277 +static void nfs_up_anon(struct semaphore *sema)
6283 +static void nfs_down_anon(struct rw_semaphore *rwsem)
6285 + down_read_non_owner(rwsem);
6288 +static void nfs_up_anon(struct rw_semaphore *rwsem)
6290 + up_read_non_owner(rwsem);
6295 * nfs_async_unlink_release - Release the sillydelete data.
6296 * @task: rpc_task of the sillydelete
6297 @@ -64,7 +87,7 @@ static void nfs_async_unlink_release(void *calldata)
6298 struct dentry *dentry = data->dentry;
6299 struct super_block *sb = dentry->d_sb;
6301 - up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
6302 + nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
6303 d_lookup_done(dentry);
6304 nfs_free_unlinkdata(data);
6306 @@ -117,10 +140,10 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
6307 struct inode *dir = d_inode(dentry->d_parent);
6308 struct dentry *alias;
6310 - down_read_non_owner(&NFS_I(dir)->rmdir_sem);
6311 + nfs_down_anon(&NFS_I(dir)->rmdir_sem);
6312 alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq);
6313 if (IS_ERR(alias)) {
6314 - up_read_non_owner(&NFS_I(dir)->rmdir_sem);
6315 + nfs_up_anon(&NFS_I(dir)->rmdir_sem);
6318 if (!d_in_lookup(alias)) {
6319 @@ -142,7 +165,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
6321 spin_unlock(&alias->d_lock);
6323 - up_read_non_owner(&NFS_I(dir)->rmdir_sem);
6324 + nfs_up_anon(&NFS_I(dir)->rmdir_sem);
6326 * If we'd displaced old cached devname, free it. At that
6327 * point dentry is definitely not a root, so we won't need
6328 @@ -182,7 +205,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name)
6331 data->res.dir_attr = &data->dir_attr;
6332 - init_waitqueue_head(&data->wq);
6333 + init_swait_queue_head(&data->wq);
6336 spin_lock(&dentry->d_lock);
6337 diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
6338 index fe251f187ff8..e89da4fb14c2 100644
6339 --- a/fs/ntfs/aops.c
6340 +++ b/fs/ntfs/aops.c
6341 @@ -92,13 +92,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
6343 if (file_ofs < init_size)
6344 ofs = init_size - file_ofs;
6345 - local_irq_save(flags);
6346 + local_irq_save_nort(flags);
6347 kaddr = kmap_atomic(page);
6348 memset(kaddr + bh_offset(bh) + ofs, 0,
6350 flush_dcache_page(page);
6351 kunmap_atomic(kaddr);
6352 - local_irq_restore(flags);
6353 + local_irq_restore_nort(flags);
6356 clear_buffer_uptodate(bh);
6357 @@ -107,8 +107,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
6358 "0x%llx.", (unsigned long long)bh->b_blocknr);
6360 first = page_buffers(page);
6361 - local_irq_save(flags);
6362 - bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
6363 + flags = bh_uptodate_lock_irqsave(first);
6364 clear_buffer_async_read(bh);
6367 @@ -123,8 +122,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
6369 tmp = tmp->b_this_page;
6370 } while (tmp != bh);
6371 - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
6372 - local_irq_restore(flags);
6373 + bh_uptodate_unlock_irqrestore(first, flags);
6375 * If none of the buffers had errors then we can set the page uptodate,
6376 * but we first have to perform the post read mst fixups, if the
6377 @@ -145,13 +143,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
6378 recs = PAGE_SIZE / rec_size;
6379 /* Should have been verified before we got here... */
6381 - local_irq_save(flags);
6382 + local_irq_save_nort(flags);
6383 kaddr = kmap_atomic(page);
6384 for (i = 0; i < recs; i++)
6385 post_read_mst_fixup((NTFS_RECORD*)(kaddr +
6386 i * rec_size), rec_size);
6387 kunmap_atomic(kaddr);
6388 - local_irq_restore(flags);
6389 + local_irq_restore_nort(flags);
6390 flush_dcache_page(page);
6391 if (likely(page_uptodate && !PageError(page)))
6392 SetPageUptodate(page);
6393 @@ -159,9 +157,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
6397 - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
6398 - local_irq_restore(flags);
6400 + bh_uptodate_unlock_irqrestore(first, flags);
6404 diff --git a/fs/proc/base.c b/fs/proc/base.c
6405 index ca651ac00660..41d9dc789285 100644
6406 --- a/fs/proc/base.c
6407 +++ b/fs/proc/base.c
6408 @@ -1834,7 +1834,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
6410 child = d_hash_and_lookup(dir, &qname);
6412 - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
6413 + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
6414 child = d_alloc_parallel(dir, &qname, &wq);
6416 goto end_instantiate;
6417 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
6418 index d4e37acd4821..000cea46434a 100644
6419 --- a/fs/proc/proc_sysctl.c
6420 +++ b/fs/proc/proc_sysctl.c
6421 @@ -632,7 +632,7 @@ static bool proc_sys_fill_cache(struct file *file,
6423 child = d_lookup(dir, &qname);
6425 - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
6426 + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
6427 child = d_alloc_parallel(dir, &qname, &wq);
6430 diff --git a/fs/timerfd.c b/fs/timerfd.c
6431 index ab8dd1538381..5580853f57dd 100644
6434 @@ -471,7 +471,10 @@ static int do_timerfd_settime(int ufd, int flags,
6437 spin_unlock_irq(&ctx->wqh.lock);
6440 + hrtimer_wait_for_timer(&ctx->t.alarm.timer);
6442 + hrtimer_wait_for_timer(&ctx->t.tmr);
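
hrtimer_wait_for_timer() (declared in the hrtimer.h hunk below) exists because on RT the expiry callback may run in a preemptible softirq thread, so spinning until the callback finishes could deadlock; on !RT it degrades to cpu_relax(). The timerfd change above is the typical cancel-and-rearm pattern, sketched here with illustrative example_lock/example_timer names:

    for (;;) {
            spin_lock_irq(&example_lock);
            if (hrtimer_try_to_cancel(&example_timer) >= 0)
                    break;          /* cancelled or not queued; lock still held */
            spin_unlock_irq(&example_lock);
            /* callback in flight - wait for it outside the lock, then retry */
            hrtimer_wait_for_timer(&example_timer);
    }
    /* ... re-arm under example_lock, then spin_unlock_irq() ... */
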
6446 diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
6447 index e861a24f06f2..b5c97d3059c7 100644
6448 --- a/include/acpi/platform/aclinux.h
6449 +++ b/include/acpi/platform/aclinux.h
6452 #define acpi_cache_t struct kmem_cache
6453 #define acpi_spinlock spinlock_t *
6454 +#define acpi_raw_spinlock raw_spinlock_t *
6455 #define acpi_cpu_flags unsigned long
6457 /* Use native linux version of acpi_os_allocate_zeroed */
6458 @@ -151,6 +152,20 @@
6459 #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
6460 #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
6462 +#define acpi_os_create_raw_lock(__handle) \
6464 + raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
6467 + *(__handle) = lock; \
6468 + raw_spin_lock_init(*(__handle)); \
6470 + lock ? AE_OK : AE_NO_MEMORY; \
6473 +#define acpi_os_delete_raw_lock(__handle) kfree(__handle)
6477 * OSL interfaces used by debugger/disassembler
6479 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
6480 index 6f96247226a4..fa53a21263c2 100644
6481 --- a/include/asm-generic/bug.h
6482 +++ b/include/asm-generic/bug.h
6483 @@ -215,6 +215,20 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
6484 # define WARN_ON_SMP(x) ({0;})
6487 +#ifdef CONFIG_PREEMPT_RT_BASE
6488 +# define BUG_ON_RT(c) BUG_ON(c)
6489 +# define BUG_ON_NONRT(c) do { } while (0)
6490 +# define WARN_ON_RT(condition) WARN_ON(condition)
6491 +# define WARN_ON_NONRT(condition) do { } while (0)
6492 +# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
6494 +# define BUG_ON_RT(c) do { } while (0)
6495 +# define BUG_ON_NONRT(c) BUG_ON(c)
6496 +# define WARN_ON_RT(condition) do { } while (0)
6497 +# define WARN_ON_NONRT(condition) WARN_ON(condition)
6498 +# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
6501 #endif /* __ASSEMBLY__ */
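
These helpers let assertions that only hold for one preemption model compile away on the other: the *_NONRT variants fire only without PREEMPT_RT_BASE, the *_RT variants only with it. A typical (illustrative) use is checking interrupt state that RT deliberately no longer guarantees:

    /* mainline runs this path with interrupts off; RT legitimately does not */
    WARN_ON_NONRT(!irqs_disabled());
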
6504 diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
6505 index 535ab2e13d2e..cfc246899473 100644
6506 --- a/include/linux/blk-mq.h
6507 +++ b/include/linux/blk-mq.h
6508 @@ -209,7 +209,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
6509 return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
6513 +void __blk_mq_complete_request_remote_work(struct work_struct *work);
6514 int blk_mq_request_started(struct request *rq);
6515 void blk_mq_start_request(struct request *rq);
6516 void blk_mq_end_request(struct request *rq, int error);
6517 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
6518 index f6a816129856..ec7a4676f8a8 100644
6519 --- a/include/linux/blkdev.h
6520 +++ b/include/linux/blkdev.h
6521 @@ -89,6 +89,7 @@ struct request {
6522 struct list_head queuelist;
6524 struct call_single_data csd;
6525 + struct work_struct work;
6529 @@ -467,7 +468,7 @@ struct request_queue {
6530 struct throtl_data *td;
6532 struct rcu_head rcu_head;
6533 - wait_queue_head_t mq_freeze_wq;
6534 + struct swait_queue_head mq_freeze_wq;
6535 struct percpu_ref q_usage_counter;
6536 struct list_head all_q_node;
6538 diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
6539 index 8fdcb783197d..d07dbeec7bc1 100644
6540 --- a/include/linux/bottom_half.h
6541 +++ b/include/linux/bottom_half.h
6544 #include <linux/preempt.h>
6546 +#ifdef CONFIG_PREEMPT_RT_FULL
6548 +extern void __local_bh_disable(void);
6549 +extern void _local_bh_enable(void);
6550 +extern void __local_bh_enable(void);
6552 +static inline void local_bh_disable(void)
6554 + __local_bh_disable();
6557 +static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
6559 + __local_bh_disable();
6562 +static inline void local_bh_enable(void)
6564 + __local_bh_enable();
6567 +static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
6569 + __local_bh_enable();
6572 +static inline void local_bh_enable_ip(unsigned long ip)
6574 + __local_bh_enable();
6579 #ifdef CONFIG_TRACE_IRQFLAGS
6580 extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
6582 @@ -30,5 +63,6 @@ static inline void local_bh_enable(void)
6584 __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
6588 #endif /* _LINUX_BH_H */
6589 diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
6590 index ebbacd14d450..be5e87f6360a 100644
6591 --- a/include/linux/buffer_head.h
6592 +++ b/include/linux/buffer_head.h
6593 @@ -75,8 +75,50 @@ struct buffer_head {
6594 struct address_space *b_assoc_map; /* mapping this buffer is
6596 atomic_t b_count; /* users using this buffer_head */
6597 +#ifdef CONFIG_PREEMPT_RT_BASE
6598 + spinlock_t b_uptodate_lock;
6599 +#if IS_ENABLED(CONFIG_JBD2)
6600 + spinlock_t b_state_lock;
6601 + spinlock_t b_journal_head_lock;
6606 +static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
6608 + unsigned long flags;
6610 +#ifndef CONFIG_PREEMPT_RT_BASE
6611 + local_irq_save(flags);
6612 + bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
6614 + spin_lock_irqsave(&bh->b_uptodate_lock, flags);
6620 +bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
6622 +#ifndef CONFIG_PREEMPT_RT_BASE
6623 + bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
6624 + local_irq_restore(flags);
6626 + spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
6630 +static inline void buffer_head_init_locks(struct buffer_head *bh)
6632 +#ifdef CONFIG_PREEMPT_RT_BASE
6633 + spin_lock_init(&bh->b_uptodate_lock);
6634 +#if IS_ENABLED(CONFIG_JBD2)
6635 + spin_lock_init(&bh->b_state_lock);
6636 + spin_lock_init(&bh->b_journal_head_lock);
6642 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
6643 * and buffer_foo() functions.
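
These are the helpers the fs/buffer.c and fs/ntfs/aops.c hunks earlier switch to: on mainline they keep the bit-spinlock-under-irq-off scheme, on RT they take a real per-buffer spinlock so the end_io paths stay preemptible. The caller side, reduced to the shape used in end_buffer_async_read():

    struct buffer_head *first = page_buffers(page);
    unsigned long flags;

    flags = bh_uptodate_lock_irqsave(first);
    /* ... walk the buffers of the page, clear the async bits ... */
    bh_uptodate_unlock_irqrestore(first, flags);
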
6644 diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
6645 index 5b17de62c962..56027cc01a56 100644
6646 --- a/include/linux/cgroup-defs.h
6647 +++ b/include/linux/cgroup-defs.h
6649 #include <linux/percpu-refcount.h>
6650 #include <linux/percpu-rwsem.h>
6651 #include <linux/workqueue.h>
6652 +#include <linux/swork.h>
6654 #ifdef CONFIG_CGROUPS
6656 @@ -137,6 +138,7 @@ struct cgroup_subsys_state {
6657 /* percpu_ref killing and RCU release */
6658 struct rcu_head rcu_head;
6659 struct work_struct destroy_work;
6660 + struct swork_event destroy_swork;
6664 diff --git a/include/linux/completion.h b/include/linux/completion.h
6665 index 5d5aaae3af43..3bca1590e29f 100644
6666 --- a/include/linux/completion.h
6667 +++ b/include/linux/completion.h
6669 * Atomic wait-for-completion handler data structures.
6670 * See kernel/sched/completion.c for details.
6673 -#include <linux/wait.h>
6674 +#include <linux/swait.h>
6677 * struct completion - structure used to maintain state for a "completion"
6682 - wait_queue_head_t wait;
6683 + struct swait_queue_head wait;
6686 #define COMPLETION_INITIALIZER(work) \
6687 - { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
6688 + { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
6690 #define COMPLETION_INITIALIZER_ONSTACK(work) \
6691 ({ init_completion(&work); work; })
6692 @@ -73,7 +72,7 @@ struct completion {
6693 static inline void init_completion(struct completion *x)
6696 - init_waitqueue_head(&x->wait);
6697 + init_swait_queue_head(&x->wait);
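
Moving struct completion onto a simple waitqueue does not change the caller-visible API - complete() and the wait_for_completion*() family keep their prototypes - it only constrains the wakeup path, which now runs under a raw lock. For reference, the usual pairing is unchanged:

    static DECLARE_COMPLETION(example_done);

    /* waiter */
    wait_for_completion(&example_done);

    /* signaller - with swait this only takes a raw lock internally */
    complete(&example_done);
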
6701 diff --git a/include/linux/cpu.h b/include/linux/cpu.h
6702 index e571128ad99a..5e52d28c20c1 100644
6703 --- a/include/linux/cpu.h
6704 +++ b/include/linux/cpu.h
6705 @@ -182,6 +182,8 @@ extern void get_online_cpus(void);
6706 extern void put_online_cpus(void);
6707 extern void cpu_hotplug_disable(void);
6708 extern void cpu_hotplug_enable(void);
6709 +extern void pin_current_cpu(void);
6710 +extern void unpin_current_cpu(void);
6711 #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
6712 #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
6713 #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
6714 @@ -199,6 +201,8 @@ static inline void cpu_hotplug_done(void) {}
6715 #define put_online_cpus() do { } while (0)
6716 #define cpu_hotplug_disable() do { } while (0)
6717 #define cpu_hotplug_enable() do { } while (0)
6718 +static inline void pin_current_cpu(void) { }
6719 +static inline void unpin_current_cpu(void) { }
6720 #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
6721 #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
6722 /* These aren't inline functions due to a GCC bug. */
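
pin_current_cpu()/unpin_current_cpu() are the RT-side hotplug pinning hooks added next to the existing get/put_online_cpus() API: they keep the current task on its CPU across a short section without disabling preemption, and compile to no-ops when hotplug is not configured. Intended pairing (purely illustrative):

    pin_current_cpu();              /* the CPU cannot be unplugged under us */
    /* ... per-CPU work that must not migrate, but may be preempted ... */
    unpin_current_cpu();
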
6723 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
6724 index 5beed7b30561..61cab7ef458e 100644
6725 --- a/include/linux/dcache.h
6726 +++ b/include/linux/dcache.h
6728 #include <linux/rcupdate.h>
6729 #include <linux/lockref.h>
6730 #include <linux/stringhash.h>
6731 +#include <linux/wait.h>
6735 @@ -100,7 +101,7 @@ struct dentry {
6738 struct list_head d_lru; /* LRU list */
6739 - wait_queue_head_t *d_wait; /* in-lookup ones only */
6740 + struct swait_queue_head *d_wait; /* in-lookup ones only */
6742 struct list_head d_child; /* child of parent list */
6743 struct list_head d_subdirs; /* our children */
6744 @@ -230,7 +231,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op
6745 extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
6746 extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
6747 extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
6748 - wait_queue_head_t *);
6749 + struct swait_queue_head *);
6750 extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
6751 extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
6752 extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
6753 diff --git a/include/linux/delay.h b/include/linux/delay.h
6754 index a6ecb34cf547..37caab306336 100644
6755 --- a/include/linux/delay.h
6756 +++ b/include/linux/delay.h
6757 @@ -52,4 +52,10 @@ static inline void ssleep(unsigned int seconds)
6758 msleep(seconds * 1000);
6761 +#ifdef CONFIG_PREEMPT_RT_FULL
6762 +extern void cpu_chill(void);
6764 +# define cpu_chill() cpu_relax()
6767 #endif /* defined(_LINUX_DELAY_H) */
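
cpu_chill() is the RT replacement for cpu_relax()-style retry spinning when the loop is waiting on progress of another, possibly lower-priority, task (see the dcache, autofs and namespace hunks above): on RT it sleeps briefly instead of burning the CPU, on !RT it is plain cpu_relax(). The canonical shape, with illustrative flag names:

    /* wait for another task to clear a transient busy flag */
    while (READ_ONCE(example_flags) & EXAMPLE_BUSY)
            cpu_chill();            /* brief sleep on RT, cpu_relax() on !RT */
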
6768 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
6769 index bb3f3297062a..a117a33ef72c 100644
6770 --- a/include/linux/highmem.h
6771 +++ b/include/linux/highmem.h
6773 #include <linux/mm.h>
6774 #include <linux/uaccess.h>
6775 #include <linux/hardirq.h>
6776 +#include <linux/sched.h>
6778 #include <asm/cacheflush.h>
6780 @@ -65,7 +66,7 @@ static inline void kunmap(struct page *page)
6782 static inline void *kmap_atomic(struct page *page)
6784 - preempt_disable();
6785 + preempt_disable_nort();
6786 pagefault_disable();
6787 return page_address(page);
6789 @@ -74,7 +75,7 @@ static inline void *kmap_atomic(struct page *page)
6790 static inline void __kunmap_atomic(void *addr)
6794 + preempt_enable_nort();
6797 #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
6798 @@ -86,32 +87,51 @@ static inline void __kunmap_atomic(void *addr)
6800 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
6802 +#ifndef CONFIG_PREEMPT_RT_FULL
6803 DECLARE_PER_CPU(int, __kmap_atomic_idx);
6806 static inline int kmap_atomic_idx_push(void)
6808 +#ifndef CONFIG_PREEMPT_RT_FULL
6809 int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
6811 -#ifdef CONFIG_DEBUG_HIGHMEM
6812 +# ifdef CONFIG_DEBUG_HIGHMEM
6813 WARN_ON_ONCE(in_irq() && !irqs_disabled());
6814 BUG_ON(idx >= KM_TYPE_NR);
6819 + current->kmap_idx++;
6820 + BUG_ON(current->kmap_idx > KM_TYPE_NR);
6821 + return current->kmap_idx - 1;
6825 static inline int kmap_atomic_idx(void)
6827 +#ifndef CONFIG_PREEMPT_RT_FULL
6828 return __this_cpu_read(__kmap_atomic_idx) - 1;
6830 + return current->kmap_idx - 1;
6834 static inline void kmap_atomic_idx_pop(void)
6836 -#ifdef CONFIG_DEBUG_HIGHMEM
6837 +#ifndef CONFIG_PREEMPT_RT_FULL
6838 +# ifdef CONFIG_DEBUG_HIGHMEM
6839 int idx = __this_cpu_dec_return(__kmap_atomic_idx);
6844 __this_cpu_dec(__kmap_atomic_idx);
6847 + current->kmap_idx--;
6848 +# ifdef CONFIG_DEBUG_HIGHMEM
6849 + BUG_ON(current->kmap_idx < 0);
6854 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
6855 index 5e00f80b1535..a34e10b55cde 100644
6856 --- a/include/linux/hrtimer.h
6857 +++ b/include/linux/hrtimer.h
6858 @@ -87,6 +87,9 @@ enum hrtimer_restart {
6859 * @function: timer expiry callback function
6860 * @base: pointer to the timer base (per cpu and per clock)
6861 * @state: state information (See bit values above)
6862 + * @cb_entry: list entry to defer timers from hardirq context
6863 + * @irqsafe: timer can run in hardirq context
6864 + * @praecox: timer expiry time if expired at the time of programming
6865 * @is_rel: Set if the timer was armed relative
6866 * @start_pid: timer statistics field to store the pid of the task which
6868 @@ -103,6 +106,11 @@ struct hrtimer {
6869 enum hrtimer_restart (*function)(struct hrtimer *);
6870 struct hrtimer_clock_base *base;
6872 + struct list_head cb_entry;
6874 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
6878 #ifdef CONFIG_TIMER_STATS
6880 @@ -123,11 +131,7 @@ struct hrtimer_sleeper {
6881 struct task_struct *task;
6884 -#ifdef CONFIG_64BIT
6885 # define HRTIMER_CLOCK_BASE_ALIGN 64
6887 -# define HRTIMER_CLOCK_BASE_ALIGN 32
6891 * struct hrtimer_clock_base - the timer base for a specific clock
6892 @@ -136,6 +140,7 @@ struct hrtimer_sleeper {
6893 * timer to a base on another cpu.
6894 * @clockid: clock id for per_cpu support
6895 * @active: red black tree root node for the active timers
6896 + * @expired: list head for deferred timers.
6897 * @get_time: function to retrieve the current time of the clock
6898 * @offset: offset of this clock to the monotonic base
6900 @@ -144,6 +149,7 @@ struct hrtimer_clock_base {
6903 struct timerqueue_head active;
6904 + struct list_head expired;
6905 ktime_t (*get_time)(void);
6907 } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
6908 @@ -187,6 +193,7 @@ struct hrtimer_cpu_base {
6909 raw_spinlock_t lock;
6911 struct hrtimer *running;
6912 + struct hrtimer *running_soft;
6914 unsigned int active_bases;
6915 unsigned int clock_was_set_seq;
6916 @@ -203,6 +210,9 @@ struct hrtimer_cpu_base {
6917 unsigned int nr_hangs;
6918 unsigned int max_hang_time;
6920 +#ifdef CONFIG_PREEMPT_RT_BASE
6921 + wait_queue_head_t wait;
6923 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
6924 } ____cacheline_aligned;
6926 @@ -412,6 +422,13 @@ static inline void hrtimer_restart(struct hrtimer *timer)
6927 hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
6930 +/* Softirq preemption could deadlock timer removal */
6931 +#ifdef CONFIG_PREEMPT_RT_BASE
6932 + extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
6934 +# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
6938 extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
6940 @@ -436,9 +453,15 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
6941 * Helper function to check, whether the timer is running the callback
6944 -static inline int hrtimer_callback_running(struct hrtimer *timer)
6945 +static inline int hrtimer_callback_running(const struct hrtimer *timer)
6947 - return timer->base->cpu_base->running == timer;
6948 + if (timer->base->cpu_base->running == timer)
6950 +#ifdef CONFIG_PREEMPT_RT_BASE
6951 + if (timer->base->cpu_base->running_soft == timer)
6957 /* Forward a hrtimer so it expires after now: */
6958 diff --git a/include/linux/idr.h b/include/linux/idr.h
6959 index 083d61e92706..5899796f50cb 100644
6960 --- a/include/linux/idr.h
6961 +++ b/include/linux/idr.h
6962 @@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp);
6963 * Each idr_preload() should be matched with an invocation of this
6964 * function. See idr_preload() for details.
6966 +#ifdef CONFIG_PREEMPT_RT_FULL
6967 +void idr_preload_end(void);
6969 static inline void idr_preload_end(void)
6976 * idr_find - return pointer for given id
6977 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
6978 index 325f649d77ff..a56e263f5005 100644
6979 --- a/include/linux/init_task.h
6980 +++ b/include/linux/init_task.h
6981 @@ -150,6 +150,12 @@ extern struct task_group root_task_group;
6982 # define INIT_PERF_EVENTS(tsk)
6985 +#ifdef CONFIG_PREEMPT_RT_BASE
6986 +# define INIT_TIMER_LIST .posix_timer_list = NULL,
6988 +# define INIT_TIMER_LIST
6991 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
6992 # define INIT_VTIME(tsk) \
6993 .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \
6994 @@ -164,6 +170,7 @@ extern struct task_group root_task_group;
6995 #ifdef CONFIG_RT_MUTEXES
6996 # define INIT_RT_MUTEXES(tsk) \
6997 .pi_waiters = RB_ROOT, \
6998 + .pi_top_task = NULL, \
6999 .pi_waiters_leftmost = NULL,
7001 # define INIT_RT_MUTEXES(tsk)
7002 @@ -250,6 +257,7 @@ extern struct task_group root_task_group;
7003 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
7004 .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
7005 .timer_slack_ns = 50000, /* 50 usec default slack */ \
7008 [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
7009 [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
7010 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
7011 index 72f0721f75e7..480972ae47d3 100644
7012 --- a/include/linux/interrupt.h
7013 +++ b/include/linux/interrupt.h
7015 #include <linux/hrtimer.h>
7016 #include <linux/kref.h>
7017 #include <linux/workqueue.h>
7018 +#include <linux/swork.h>
7020 #include <linux/atomic.h>
7021 #include <asm/ptrace.h>
7023 * interrupt handler after suspending interrupts. For system
7024 * wakeup devices users need to implement wakeup detection in
7025 * their interrupt handlers.
7026 + * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
7028 #define IRQF_SHARED 0x00000080
7029 #define IRQF_PROBE_SHARED 0x00000100
7031 #define IRQF_NO_THREAD 0x00010000
7032 #define IRQF_EARLY_RESUME 0x00020000
7033 #define IRQF_COND_SUSPEND 0x00040000
7034 +#define IRQF_NO_SOFTIRQ_CALL 0x00080000
7036 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
7038 @@ -196,7 +199,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
7039 #ifdef CONFIG_LOCKDEP
7040 # define local_irq_enable_in_hardirq() do { } while (0)
7042 -# define local_irq_enable_in_hardirq() local_irq_enable()
7043 +# define local_irq_enable_in_hardirq() local_irq_enable_nort()
7046 extern void disable_irq_nosync(unsigned int irq);
7047 @@ -216,6 +219,7 @@ extern void resume_device_irqs(void);
7048 * struct irq_affinity_notify - context for notification of IRQ affinity changes
7049 * @irq: Interrupt to which notification applies
7050 * @kref: Reference count, for internal use
7051 + * @swork: Swork item, for internal use
7052 * @work: Work item, for internal use
7053 * @notify: Function to be called on change. This will be
7054 * called in process context.
7055 @@ -227,7 +231,11 @@ extern void resume_device_irqs(void);
7056 struct irq_affinity_notify {
7059 +#ifdef CONFIG_PREEMPT_RT_BASE
7060 + struct swork_event swork;
7062 struct work_struct work;
7064 void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
7065 void (*release)(struct kref *ref);
7067 @@ -406,9 +414,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
7070 #ifdef CONFIG_IRQ_FORCED_THREADING
7071 +# ifndef CONFIG_PREEMPT_RT_BASE
7072 extern bool force_irqthreads;
7074 +# define force_irqthreads (true)
7077 -#define force_irqthreads (0)
7078 +#define force_irqthreads (false)
7081 #ifndef __ARCH_SET_SOFTIRQ_PENDING
7082 @@ -465,9 +477,10 @@ struct softirq_action
7083 void (*action)(struct softirq_action *);
7086 +#ifndef CONFIG_PREEMPT_RT_FULL
7087 asmlinkage void do_softirq(void);
7088 asmlinkage void __do_softirq(void);
7090 +static inline void thread_do_softirq(void) { do_softirq(); }
7091 #ifdef __ARCH_HAS_DO_SOFTIRQ
7092 void do_softirq_own_stack(void);
7094 @@ -476,13 +489,25 @@ static inline void do_softirq_own_stack(void)
7099 +extern void thread_do_softirq(void);
7102 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
7103 extern void softirq_init(void);
7104 extern void __raise_softirq_irqoff(unsigned int nr);
7105 +#ifdef CONFIG_PREEMPT_RT_FULL
7106 +extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
7108 +static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
7110 + __raise_softirq_irqoff(nr);
7114 extern void raise_softirq_irqoff(unsigned int nr);
7115 extern void raise_softirq(unsigned int nr);
7116 +extern void softirq_check_pending_idle(void);
7118 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
7120 @@ -504,8 +529,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
7121 to be executed on some cpu at least once after this.
7122 * If the tasklet is already scheduled, but its execution is still not
7123 started, it will be executed only once.
7124 - * If this tasklet is already running on another CPU (or schedule is called
7125 - from tasklet itself), it is rescheduled for later.
7126 + * If this tasklet is already running on another CPU, it is rescheduled
7128 + * Schedule must not be called from the tasklet itself (a lockup occurs)
7129 * Tasklet is strictly serialized wrt itself, but not
7130 wrt another tasklets. If client needs some intertask synchronization,
7131 he makes it with spinlocks.
7132 @@ -530,27 +556,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
7135 TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
7136 - TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
7137 + TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
7138 + TASKLET_STATE_PENDING /* Tasklet is pending */
7142 +#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
7143 +#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
7144 +#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
7146 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
7147 static inline int tasklet_trylock(struct tasklet_struct *t)
7149 return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
7152 +static inline int tasklet_tryunlock(struct tasklet_struct *t)
7154 + return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
7157 static inline void tasklet_unlock(struct tasklet_struct *t)
7159 smp_mb__before_atomic();
7160 clear_bit(TASKLET_STATE_RUN, &(t)->state);
7163 -static inline void tasklet_unlock_wait(struct tasklet_struct *t)
7165 - while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
7167 +extern void tasklet_unlock_wait(struct tasklet_struct *t);
7170 #define tasklet_trylock(t) 1
7171 +#define tasklet_tryunlock(t) 1
7172 #define tasklet_unlock_wait(t) do { } while (0)
7173 #define tasklet_unlock(t) do { } while (0)
7175 @@ -599,12 +634,7 @@ static inline void tasklet_disable(struct tasklet_struct *t)
7179 -static inline void tasklet_enable(struct tasklet_struct *t)
7181 - smp_mb__before_atomic();
7182 - atomic_dec(&t->count);
7185 +extern void tasklet_enable(struct tasklet_struct *t);
7186 extern void tasklet_kill(struct tasklet_struct *t);
7187 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
7188 extern void tasklet_init(struct tasklet_struct *t,
7189 @@ -635,6 +665,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
7190 tasklet_kill(&ttimer->tasklet);
7193 +#ifdef CONFIG_PREEMPT_RT_FULL
7194 +extern void softirq_early_init(void);
7196 +static inline void softirq_early_init(void) { }
7200 * Autoprobing for irqs:
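
IRQF_NO_SOFTIRQ_CALL, added above, tells the RT irq thread not to process pending softirqs in its own context after the handler has run, which keeps a latency-critical handler from being charged with unrelated softirq work. It is passed like any other IRQF_ flag; the handler and device below are illustrative:

    static irqreturn_t example_thread_fn(int irq, void *dev)
    {
            /* latency-critical handling; softirq processing is not done here */
            return IRQ_HANDLED;
    }

    err = request_threaded_irq(irq, NULL, example_thread_fn,
                               IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
                               "example", dev);
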
7202 diff --git a/include/linux/irq.h b/include/linux/irq.h
7203 index 39e3254e5769..8ebac94fbb9f 100644
7204 --- a/include/linux/irq.h
7205 +++ b/include/linux/irq.h
7206 @@ -72,6 +72,7 @@ enum irqchip_irq_state;
7207 * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
7208 * it from the spurious interrupt detection
7209 * mechanism and from core side polling.
7210 + * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
7211 * IRQ_DISABLE_UNLAZY - Disable lazy irq disable
7214 @@ -99,13 +100,14 @@ enum {
7215 IRQ_PER_CPU_DEVID = (1 << 17),
7216 IRQ_IS_POLLED = (1 << 18),
7217 IRQ_DISABLE_UNLAZY = (1 << 19),
7218 + IRQ_NO_SOFTIRQ_CALL = (1 << 20),
7221 #define IRQF_MODIFY_MASK \
7222 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
7223 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
7224 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
7225 - IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
7226 + IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)
7228 #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
7230 diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
7231 index 47b9ebd4a74f..2543aab05daa 100644
7232 --- a/include/linux/irq_work.h
7233 +++ b/include/linux/irq_work.h
7235 #define IRQ_WORK_BUSY 2UL
7236 #define IRQ_WORK_FLAGS 3UL
7237 #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
7238 +#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */
7241 unsigned long flags;
7242 @@ -51,4 +52,10 @@ static inline bool irq_work_needs_cpu(void) { return false; }
7243 static inline void irq_work_run(void) { }
7246 +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
7247 +void irq_work_tick_soft(void);
7249 +static inline void irq_work_tick_soft(void) { }
7252 #endif /* _LINUX_IRQ_WORK_H */
7253 diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
7254 index c9be57931b58..eeeb540971ae 100644
7255 --- a/include/linux/irqdesc.h
7256 +++ b/include/linux/irqdesc.h
7257 @@ -66,6 +66,7 @@ struct irq_desc {
7258 unsigned int irqs_unhandled;
7259 atomic_t threads_handled;
7260 int threads_handled_last;
7262 raw_spinlock_t lock;
7263 struct cpumask *percpu_enabled;
7264 const struct cpumask *percpu_affinity;
7265 diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
7266 index 5dd1272d1ab2..9b77034f7c5e 100644
7267 --- a/include/linux/irqflags.h
7268 +++ b/include/linux/irqflags.h
7270 # define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
7271 # define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
7272 # define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
7273 -# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
7274 -# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
7275 # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
7277 # define trace_hardirqs_on() do { } while (0)
7279 # define trace_softirqs_enabled(p) 0
7280 # define trace_hardirq_enter() do { } while (0)
7281 # define trace_hardirq_exit() do { } while (0)
7282 +# define INIT_TRACE_IRQFLAGS
7285 +#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
7286 +# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
7287 +# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
7289 # define lockdep_softirq_enter() do { } while (0)
7290 # define lockdep_softirq_exit() do { } while (0)
7291 -# define INIT_TRACE_IRQFLAGS
7294 #if defined(CONFIG_IRQSOFF_TRACER) || \
7295 @@ -148,4 +152,23 @@
7297 #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
7300 + * local_irq* variants depending on RT/!RT
7302 +#ifdef CONFIG_PREEMPT_RT_FULL
7303 +# define local_irq_disable_nort() do { } while (0)
7304 +# define local_irq_enable_nort() do { } while (0)
7305 +# define local_irq_save_nort(flags) local_save_flags(flags)
7306 +# define local_irq_restore_nort(flags) (void)(flags)
7307 +# define local_irq_disable_rt() local_irq_disable()
7308 +# define local_irq_enable_rt() local_irq_enable()
7310 +# define local_irq_disable_nort() local_irq_disable()
7311 +# define local_irq_enable_nort() local_irq_enable()
7312 +# define local_irq_save_nort(flags) local_irq_save(flags)
7313 +# define local_irq_restore_nort(flags) local_irq_restore(flags)
7314 +# define local_irq_disable_rt() do { } while (0)
7315 +# define local_irq_enable_rt() do { } while (0)
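
A sketch (not part of the patch, hypothetical lock and counter names) of how
the _nort variants are meant to be used: a section that needs hard interrupts
disabled on mainline, but can stay preemptible on RT because a sleeping
spinlock already serializes it.

  #include <linux/irqflags.h>
  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(my_lock);        /* becomes a sleeping lock on RT */
  static unsigned long my_counter;

  static void my_update(void)
  {
          unsigned long flags;

          local_irq_save_nort(flags);     /* irqs off on !RT, flags only on RT */
          spin_lock(&my_lock);
          my_counter++;
          spin_unlock(&my_lock);
          local_irq_restore_nort(flags);
  }
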
7319 diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
7320 index dfaa1f4dcb0c..d57dd06544a1 100644
7321 --- a/include/linux/jbd2.h
7322 +++ b/include/linux/jbd2.h
7323 @@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
7325 static inline void jbd_lock_bh_state(struct buffer_head *bh)
7327 +#ifndef CONFIG_PREEMPT_RT_BASE
7328 bit_spin_lock(BH_State, &bh->b_state);
7330 + spin_lock(&bh->b_state_lock);
7334 static inline int jbd_trylock_bh_state(struct buffer_head *bh)
7336 +#ifndef CONFIG_PREEMPT_RT_BASE
7337 return bit_spin_trylock(BH_State, &bh->b_state);
7339 + return spin_trylock(&bh->b_state_lock);
7343 static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
7345 +#ifndef CONFIG_PREEMPT_RT_BASE
7346 return bit_spin_is_locked(BH_State, &bh->b_state);
7348 + return spin_is_locked(&bh->b_state_lock);
7352 static inline void jbd_unlock_bh_state(struct buffer_head *bh)
7354 +#ifndef CONFIG_PREEMPT_RT_BASE
7355 bit_spin_unlock(BH_State, &bh->b_state);
7357 + spin_unlock(&bh->b_state_lock);
7361 static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
7363 +#ifndef CONFIG_PREEMPT_RT_BASE
7364 bit_spin_lock(BH_JournalHead, &bh->b_state);
7366 + spin_lock(&bh->b_journal_head_lock);
7370 static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
7372 +#ifndef CONFIG_PREEMPT_RT_BASE
7373 bit_spin_unlock(BH_JournalHead, &bh->b_state);
7375 + spin_unlock(&bh->b_journal_head_lock);
7379 #define J_ASSERT(assert) BUG_ON(!(assert))
7380 diff --git a/include/linux/kdb.h b/include/linux/kdb.h
7381 index 410decacff8f..0861bebfc188 100644
7382 --- a/include/linux/kdb.h
7383 +++ b/include/linux/kdb.h
7384 @@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
7385 extern __printf(1, 2) int kdb_printf(const char *, ...);
7386 typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
7388 +#define in_kdb_printk() (kdb_trap_printk)
7389 extern void kdb_init(int level);
7391 /* Access to kdb specific polling devices */
7392 @@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
7393 extern int kdb_unregister(char *);
7394 #else /* ! CONFIG_KGDB_KDB */
7395 static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
7396 +#define in_kdb_printk() (0)
7397 static inline void kdb_init(int level) {}
7398 static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
7399 char *help, short minlen) { return 0; }
7400 diff --git a/include/linux/kernel.h b/include/linux/kernel.h
7401 index bc6ed52a39b9..7894d55e4998 100644
7402 --- a/include/linux/kernel.h
7403 +++ b/include/linux/kernel.h
7404 @@ -194,6 +194,9 @@ extern int _cond_resched(void);
7406 # define might_sleep() \
7407 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
7409 +# define might_sleep_no_state_check() \
7410 + do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
7411 # define sched_annotate_sleep() (current->task_state_change = 0)
7413 static inline void ___might_sleep(const char *file, int line,
7414 @@ -201,6 +204,7 @@ extern int _cond_resched(void);
7415 static inline void __might_sleep(const char *file, int line,
7416 int preempt_offset) { }
7417 # define might_sleep() do { might_resched(); } while (0)
7418 +# define might_sleep_no_state_check() do { might_resched(); } while (0)
7419 # define sched_annotate_sleep() do { } while (0)
7422 @@ -488,6 +492,7 @@ extern enum system_states {
7429 #define TAINT_PROPRIETARY_MODULE 0
7430 diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
7431 index cb483305e1f5..4e5062316bb6 100644
7432 --- a/include/linux/list_bl.h
7433 +++ b/include/linux/list_bl.h
7435 #define _LINUX_LIST_BL_H
7437 #include <linux/list.h>
7438 +#include <linux/spinlock.h>
7439 #include <linux/bit_spinlock.h>
7444 struct hlist_bl_head {
7445 struct hlist_bl_node *first;
7446 +#ifdef CONFIG_PREEMPT_RT_BASE
7447 + raw_spinlock_t lock;
7451 struct hlist_bl_node {
7452 struct hlist_bl_node *next, **pprev;
7454 -#define INIT_HLIST_BL_HEAD(ptr) \
7455 - ((ptr)->first = NULL)
7457 +#ifdef CONFIG_PREEMPT_RT_BASE
7458 +#define INIT_HLIST_BL_HEAD(h) \
7460 + (h)->first = NULL; \
7461 + raw_spin_lock_init(&(h)->lock); \
7464 +#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL
7467 static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
7469 @@ -118,12 +130,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
7471 static inline void hlist_bl_lock(struct hlist_bl_head *b)
7473 +#ifndef CONFIG_PREEMPT_RT_BASE
7474 bit_spin_lock(0, (unsigned long *)b);
7476 + raw_spin_lock(&b->lock);
7477 +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
7478 + __set_bit(0, (unsigned long *)b);
7483 static inline void hlist_bl_unlock(struct hlist_bl_head *b)
7485 +#ifndef CONFIG_PREEMPT_RT_BASE
7486 __bit_spin_unlock(0, (unsigned long *)b);
7488 +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
7489 + __clear_bit(0, (unsigned long *)b);
7491 + raw_spin_unlock(&b->lock);
7495 static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
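
Sketch only, not patch content: the hlist_bl locking interface whose
implementation is switched above stays the same for callers; on RT the bit
spinlock is replaced by the raw spinlock embedded in the head. Names are
made up.

  #include <linux/list_bl.h>

  static struct hlist_bl_head my_bucket;

  static void my_bucket_init(void)
  {
          INIT_HLIST_BL_HEAD(&my_bucket);
  }

  static void my_bucket_insert(struct hlist_bl_node *n)
  {
          hlist_bl_lock(&my_bucket);      /* bit spinlock on !RT, raw spinlock on RT */
          hlist_bl_add_head(n, &my_bucket);
          hlist_bl_unlock(&my_bucket);
  }
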
7496 diff --git a/include/linux/locallock.h b/include/linux/locallock.h
7497 new file mode 100644
7498 index 000000000000..845c77f1a5ca
7500 +++ b/include/linux/locallock.h
7502 +#ifndef _LINUX_LOCALLOCK_H
7503 +#define _LINUX_LOCALLOCK_H
7505 +#include <linux/percpu.h>
7506 +#include <linux/spinlock.h>
7508 +#ifdef CONFIG_PREEMPT_RT_BASE
7510 +#ifdef CONFIG_DEBUG_SPINLOCK
7511 +# define LL_WARN(cond) WARN_ON(cond)
7513 +# define LL_WARN(cond) do { } while (0)
7517 + * per cpu lock based substitute for local_irq_*()
7519 +struct local_irq_lock {
7521 + struct task_struct *owner;
7523 + unsigned long flags;
7526 +#define DEFINE_LOCAL_IRQ_LOCK(lvar) \
7527 + DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
7528 + .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
7530 +#define DECLARE_LOCAL_IRQ_LOCK(lvar) \
7531 + DECLARE_PER_CPU(struct local_irq_lock, lvar)
7533 +#define local_irq_lock_init(lvar) \
7536 + for_each_possible_cpu(__cpu) \
7537 + spin_lock_init(&per_cpu(lvar, __cpu).lock); \
7541 + * spin_lock|trylock|unlock_local flavours that do not disable migration,
7542 + * used for __local_lock|trylock|unlock where get_local_var/put_local_var
7543 + * already take care of the migrate_disable/enable.
7544 + * For CONFIG_PREEMPT_BASE they map to the normal spin_* calls.
7546 +#ifdef CONFIG_PREEMPT_RT_FULL
7547 +# define spin_lock_local(lock) rt_spin_lock__no_mg(lock)
7548 +# define spin_trylock_local(lock) rt_spin_trylock__no_mg(lock)
7549 +# define spin_unlock_local(lock) rt_spin_unlock__no_mg(lock)
7551 +# define spin_lock_local(lock) spin_lock(lock)
7552 +# define spin_trylock_local(lock) spin_trylock(lock)
7553 +# define spin_unlock_local(lock) spin_unlock(lock)
7556 +static inline void __local_lock(struct local_irq_lock *lv)
7558 + if (lv->owner != current) {
7559 + spin_lock_local(&lv->lock);
7560 + LL_WARN(lv->owner);
7561 + LL_WARN(lv->nestcnt);
7562 + lv->owner = current;
7567 +#define local_lock(lvar) \
7568 + do { __local_lock(&get_local_var(lvar)); } while (0)
7570 +#define local_lock_on(lvar, cpu) \
7571 + do { __local_lock(&per_cpu(lvar, cpu)); } while (0)
7573 +static inline int __local_trylock(struct local_irq_lock *lv)
7575 + if (lv->owner != current && spin_trylock_local(&lv->lock)) {
7576 + LL_WARN(lv->owner);
7577 + LL_WARN(lv->nestcnt);
7578 + lv->owner = current;
7585 +#define local_trylock(lvar) \
7588 + __locked = __local_trylock(&get_local_var(lvar)); \
7590 + put_local_var(lvar); \
7594 +static inline void __local_unlock(struct local_irq_lock *lv)
7596 + LL_WARN(lv->nestcnt == 0);
7597 + LL_WARN(lv->owner != current);
7598 + if (--lv->nestcnt)
7602 + spin_unlock_local(&lv->lock);
7605 +#define local_unlock(lvar) \
7607 + __local_unlock(this_cpu_ptr(&lvar)); \
7608 + put_local_var(lvar); \
7611 +#define local_unlock_on(lvar, cpu) \
7612 + do { __local_unlock(&per_cpu(lvar, cpu)); } while (0)
7614 +static inline void __local_lock_irq(struct local_irq_lock *lv)
7616 + spin_lock_irqsave(&lv->lock, lv->flags);
7617 + LL_WARN(lv->owner);
7618 + LL_WARN(lv->nestcnt);
7619 + lv->owner = current;
7623 +#define local_lock_irq(lvar) \
7624 + do { __local_lock_irq(&get_local_var(lvar)); } while (0)
7626 +#define local_lock_irq_on(lvar, cpu) \
7627 + do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
7629 +static inline void __local_unlock_irq(struct local_irq_lock *lv)
7631 + LL_WARN(!lv->nestcnt);
7632 + LL_WARN(lv->owner != current);
7635 + spin_unlock_irq(&lv->lock);
7638 +#define local_unlock_irq(lvar) \
7640 + __local_unlock_irq(this_cpu_ptr(&lvar)); \
7641 + put_local_var(lvar); \
7644 +#define local_unlock_irq_on(lvar, cpu) \
7646 + __local_unlock_irq(&per_cpu(lvar, cpu)); \
7649 +static inline int __local_lock_irqsave(struct local_irq_lock *lv)
7651 + if (lv->owner != current) {
7652 + __local_lock_irq(lv);
7660 +#define local_lock_irqsave(lvar, _flags) \
7662 + if (__local_lock_irqsave(&get_local_var(lvar))) \
7663 + put_local_var(lvar); \
7664 + _flags = __this_cpu_read(lvar.flags); \
7667 +#define local_lock_irqsave_on(lvar, _flags, cpu) \
7669 + __local_lock_irqsave(&per_cpu(lvar, cpu)); \
7670 + _flags = per_cpu(lvar, cpu).flags; \
7673 +static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
7674 + unsigned long flags)
7676 + LL_WARN(!lv->nestcnt);
7677 + LL_WARN(lv->owner != current);
7678 + if (--lv->nestcnt)
7682 + spin_unlock_irqrestore(&lv->lock, lv->flags);
7686 +#define local_unlock_irqrestore(lvar, flags) \
7688 + if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \
7689 + put_local_var(lvar); \
7692 +#define local_unlock_irqrestore_on(lvar, flags, cpu) \
7694 + __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \
7697 +#define local_spin_trylock_irq(lvar, lock) \
7700 + local_lock_irq(lvar); \
7701 + __locked = spin_trylock(lock); \
7703 + local_unlock_irq(lvar); \
7707 +#define local_spin_lock_irq(lvar, lock) \
7709 + local_lock_irq(lvar); \
7710 + spin_lock(lock); \
7713 +#define local_spin_unlock_irq(lvar, lock) \
7715 + spin_unlock(lock); \
7716 + local_unlock_irq(lvar); \
7719 +#define local_spin_lock_irqsave(lvar, lock, flags) \
7721 + local_lock_irqsave(lvar, flags); \
7722 + spin_lock(lock); \
7725 +#define local_spin_unlock_irqrestore(lvar, lock, flags) \
7727 + spin_unlock(lock); \
7728 + local_unlock_irqrestore(lvar, flags); \
7731 +#define get_locked_var(lvar, var) \
7733 + local_lock(lvar); \
7734 + this_cpu_ptr(&var); \
7737 +#define put_locked_var(lvar, var) local_unlock(lvar);
7739 +#define local_lock_cpu(lvar) \
7741 + local_lock(lvar); \
7742 + smp_processor_id(); \
7745 +#define local_unlock_cpu(lvar) local_unlock(lvar)
7747 +#else /* PREEMPT_RT_BASE */
7749 +#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar
7750 +#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar
7752 +static inline void local_irq_lock_init(int lvar) { }
7754 +#define local_lock(lvar) preempt_disable()
7755 +#define local_unlock(lvar) preempt_enable()
7756 +#define local_lock_irq(lvar) local_irq_disable()
7757 +#define local_lock_irq_on(lvar, cpu) local_irq_disable()
7758 +#define local_unlock_irq(lvar) local_irq_enable()
7759 +#define local_unlock_irq_on(lvar, cpu) local_irq_enable()
7760 +#define local_lock_irqsave(lvar, flags) local_irq_save(flags)
7761 +#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags)
7763 +#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock)
7764 +#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock)
7765 +#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock)
7766 +#define local_spin_lock_irqsave(lvar, lock, flags) \
7767 + spin_lock_irqsave(lock, flags)
7768 +#define local_spin_unlock_irqrestore(lvar, lock, flags) \
7769 + spin_unlock_irqrestore(lock, flags)
7771 +#define get_locked_var(lvar, var) get_cpu_var(var)
7772 +#define put_locked_var(lvar, var) put_cpu_var(var)
7774 +#define local_lock_cpu(lvar) get_cpu()
7775 +#define local_unlock_cpu(lvar) put_cpu()
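
A sketch (not part of the patch, names hypothetical) of the local-lock API
defined above: per-CPU data that mainline code would protect with
local_irq_save() is protected by a named local lock instead, which stays
preemptible on RT.

  #include <linux/locallock.h>
  #include <linux/percpu.h>

  static DEFINE_PER_CPU(unsigned int, my_cache_count);
  static DEFINE_LOCAL_IRQ_LOCK(my_cache_lock);

  static void my_cache_add(void)
  {
          unsigned long flags;

          /* local_irq_save() on !RT; per-CPU spinlock plus migrate_disable() on RT */
          local_lock_irqsave(my_cache_lock, flags);
          __this_cpu_inc(my_cache_count);
          local_unlock_irqrestore(my_cache_lock, flags);
  }
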
7780 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
7781 index 08d947fc4c59..705fb564a605 100644
7782 --- a/include/linux/mm_types.h
7783 +++ b/include/linux/mm_types.h
7785 #include <linux/completion.h>
7786 #include <linux/cpumask.h>
7787 #include <linux/uprobes.h>
7788 +#include <linux/rcupdate.h>
7789 #include <linux/page-flags-layout.h>
7790 #include <linux/workqueue.h>
7791 #include <asm/page.h>
7792 @@ -509,6 +510,9 @@ struct mm_struct {
7793 bool tlb_flush_pending;
7795 struct uprobes_state uprobes_state;
7796 +#ifdef CONFIG_PREEMPT_RT_BASE
7797 + struct rcu_head delayed_drop;
7799 #ifdef CONFIG_X86_INTEL_MPX
7800 /* address of the bounds directory */
7801 void __user *bd_addr;
7802 diff --git a/include/linux/module.h b/include/linux/module.h
7803 index 0c3207d26ac0..5944baaa3f28 100644
7804 --- a/include/linux/module.h
7805 +++ b/include/linux/module.h
7806 @@ -496,6 +496,7 @@ static inline int module_is_live(struct module *mod)
7807 struct module *__module_text_address(unsigned long addr);
7808 struct module *__module_address(unsigned long addr);
7809 bool is_module_address(unsigned long addr);
7810 +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
7811 bool is_module_percpu_address(unsigned long addr);
7812 bool is_module_text_address(unsigned long addr);
7814 @@ -663,6 +664,11 @@ static inline bool is_module_percpu_address(unsigned long addr)
7818 +static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
7823 static inline bool is_module_text_address(unsigned long addr)
7826 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
7827 index 2cb7531e7d7a..b3fdfc820216 100644
7828 --- a/include/linux/mutex.h
7829 +++ b/include/linux/mutex.h
7831 #include <asm/processor.h>
7832 #include <linux/osq_lock.h>
7834 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
7835 +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
7836 + , .dep_map = { .name = #lockname }
7838 +# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
7841 +#ifdef CONFIG_PREEMPT_RT_FULL
7842 +# include <linux/mutex_rt.h>
7846 * Simple, straightforward mutexes with strict semantics:
7848 @@ -99,13 +110,6 @@ do { \
7849 static inline void mutex_destroy(struct mutex *lock) {}
7852 -#ifdef CONFIG_DEBUG_LOCK_ALLOC
7853 -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
7854 - , .dep_map = { .name = #lockname }
7856 -# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
7859 #define __MUTEX_INITIALIZER(lockname) \
7860 { .count = ATOMIC_INIT(1) \
7861 , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
7862 @@ -173,6 +177,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
7863 extern int mutex_trylock(struct mutex *lock);
7864 extern void mutex_unlock(struct mutex *lock);
7866 +#endif /* !PREEMPT_RT_FULL */
7868 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
7870 #endif /* __LINUX_MUTEX_H */
7871 diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
7872 new file mode 100644
7873 index 000000000000..e0284edec655
7875 +++ b/include/linux/mutex_rt.h
7877 +#ifndef __LINUX_MUTEX_RT_H
7878 +#define __LINUX_MUTEX_RT_H
7880 +#ifndef __LINUX_MUTEX_H
7881 +#error "Please include mutex.h"
7884 +#include <linux/rtmutex.h>
7886 +/* FIXME: Just for __lockfunc */
7887 +#include <linux/spinlock.h>
7890 + struct rt_mutex lock;
7891 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
7892 + struct lockdep_map dep_map;
7896 +#define __MUTEX_INITIALIZER(mutexname) \
7898 + .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
7899 + __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
7902 +#define DEFINE_MUTEX(mutexname) \
7903 + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
7905 +extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
7906 +extern void __lockfunc _mutex_lock(struct mutex *lock);
7907 +extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
7908 +extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
7909 +extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
7910 +extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
7911 +extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
7912 +extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
7913 +extern int __lockfunc _mutex_trylock(struct mutex *lock);
7914 +extern void __lockfunc _mutex_unlock(struct mutex *lock);
7916 +#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
7917 +#define mutex_lock(l) _mutex_lock(l)
7918 +#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
7919 +#define mutex_lock_killable(l) _mutex_lock_killable(l)
7920 +#define mutex_trylock(l) _mutex_trylock(l)
7921 +#define mutex_unlock(l) _mutex_unlock(l)
7923 +#ifdef CONFIG_DEBUG_MUTEXES
7924 +#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
7926 +static inline void mutex_destroy(struct mutex *lock) {}
7929 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
7930 +# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
7931 +# define mutex_lock_interruptible_nested(l, s) \
7932 + _mutex_lock_interruptible_nested(l, s)
7933 +# define mutex_lock_killable_nested(l, s) \
7934 + _mutex_lock_killable_nested(l, s)
7936 +# define mutex_lock_nest_lock(lock, nest_lock) \
7938 + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
7939 + _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
7943 +# define mutex_lock_nested(l, s) _mutex_lock(l)
7944 +# define mutex_lock_interruptible_nested(l, s) \
7945 + _mutex_lock_interruptible(l)
7946 +# define mutex_lock_killable_nested(l, s) \
7947 + _mutex_lock_killable(l)
7948 +# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
7951 +# define mutex_init(mutex) \
7953 + static struct lock_class_key __key; \
7955 + rt_mutex_init(&(mutex)->lock); \
7956 + __mutex_do_init((mutex), #mutex, &__key); \
7959 +# define __mutex_init(mutex, name, key) \
7961 + rt_mutex_init(&(mutex)->lock); \
7962 + __mutex_do_init((mutex), name, key); \
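
Not part of the patch: the point of mutex_rt.h is that ordinary mutex users
need no source changes; the sketch below (hypothetical names) builds against
either implementation.

  #include <linux/mutex.h>

  static DEFINE_MUTEX(my_mutex);
  static int my_state;

  static void my_set_state(int v)
  {
          mutex_lock(&my_mutex);          /* plain mutex on !RT, PI-aware rtmutex on RT */
          my_state = v;
          mutex_unlock(&my_mutex);
  }
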
7966 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
7967 index bb9b102c15cd..a5b12b8ad196 100644
7968 --- a/include/linux/netdevice.h
7969 +++ b/include/linux/netdevice.h
7970 @@ -396,7 +396,19 @@ typedef enum rx_handler_result rx_handler_result_t;
7971 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
7973 void __napi_schedule(struct napi_struct *n);
7976 + * When PREEMPT_RT_FULL is defined, all device interrupt handlers
7977 + * run as threads and can also be preempted (without PREEMPT_RT,
7978 + * interrupt threads cannot be preempted). This means that a call to
7979 + * __napi_schedule_irqoff() from an interrupt handler can be preempted
7980 + * and can corrupt the napi->poll_list.
7982 +#ifdef CONFIG_PREEMPT_RT_FULL
7983 +#define __napi_schedule_irqoff(n) __napi_schedule(n)
7985 void __napi_schedule_irqoff(struct napi_struct *n);
7988 static inline bool napi_disable_pending(struct napi_struct *n)
7990 @@ -2463,14 +2475,53 @@ void netdev_freemem(struct net_device *dev);
7991 void synchronize_net(void);
7992 int init_dummy_netdev(struct net_device *dev);
7994 -DECLARE_PER_CPU(int, xmit_recursion);
7995 #define XMIT_RECURSION_LIMIT 10
7996 +#ifdef CONFIG_PREEMPT_RT_FULL
7997 +static inline int dev_recursion_level(void)
7999 + return current->xmit_recursion;
8002 +static inline int xmit_rec_read(void)
8004 + return current->xmit_recursion;
8007 +static inline void xmit_rec_inc(void)
8009 + current->xmit_recursion++;
8012 +static inline void xmit_rec_dec(void)
8014 + current->xmit_recursion--;
8019 +DECLARE_PER_CPU(int, xmit_recursion);
8021 static inline int dev_recursion_level(void)
8023 return this_cpu_read(xmit_recursion);
8026 +static inline int xmit_rec_read(void)
8028 + return __this_cpu_read(xmit_recursion);
8031 +static inline void xmit_rec_inc(void)
8033 + __this_cpu_inc(xmit_recursion);
8036 +static inline void xmit_rec_dec(void)
8038 + __this_cpu_dec(xmit_recursion);
8042 struct net_device *dev_get_by_index(struct net *net, int ifindex);
8043 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
8044 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
8045 @@ -2855,6 +2906,7 @@ struct softnet_data {
8046 unsigned int dropped;
8047 struct sk_buff_head input_pkt_queue;
8048 struct napi_struct backlog;
8049 + struct sk_buff_head tofree_queue;
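
Illustration only, simplified from how the networking core uses the accounting
above (not literal kernel code): the recursion counter moves from a per-CPU
variable to the task on RT, because the transmit path may be preempted and
migrated.

  #include <linux/netdevice.h>

  static int my_xmit(struct sk_buff *skb)
  {
          int ret;

          if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
                  return -ELOOP;          /* refuse to recurse any deeper */

          xmit_rec_inc();                 /* per-task on RT, per-CPU on !RT */
          ret = dev_queue_xmit(skb);
          xmit_rec_dec();

          return ret;
  }
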
8053 diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
8054 index 2ad1a2b289b5..b4d10155af54 100644
8055 --- a/include/linux/netfilter/x_tables.h
8056 +++ b/include/linux/netfilter/x_tables.h
8059 #include <linux/netdevice.h>
8060 #include <linux/static_key.h>
8061 +#include <linux/locallock.h>
8062 #include <uapi/linux/netfilter/x_tables.h>
8064 /* Test a struct->invflags and a boolean for inequality */
8065 @@ -300,6 +301,8 @@ void xt_free_table_info(struct xt_table_info *info);
8067 DECLARE_PER_CPU(seqcount_t, xt_recseq);
8069 +DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
8071 /* xt_tee_enabled - true if x_tables needs to handle reentrancy
8073 * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
8074 @@ -320,6 +323,9 @@ static inline unsigned int xt_write_recseq_begin(void)
8076 unsigned int addend;
8078 + /* RT protection */
8079 + local_lock(xt_write_lock);
8082 * Low order bit of sequence is set if we already
8083 * called xt_write_recseq_begin().
8084 @@ -350,6 +356,7 @@ static inline void xt_write_recseq_end(unsigned int addend)
8085 /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
8087 __this_cpu_add(xt_recseq.sequence, addend);
8088 + local_unlock(xt_write_lock);
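
Sketch of the writer-side pattern the hunk above touches (not verbatim kernel
code, counter handling simplified): the per-CPU sequence count is still taken,
and on RT the new xt_write_lock additionally keeps the section from being
preempted by another writer on the same CPU.

  #include <linux/netfilter/x_tables.h>

  static void my_count_packet(struct xt_counters *ctr, unsigned int len)
  {
          unsigned int addend;

          addend = xt_write_recseq_begin();       /* takes xt_write_lock on RT */
          ctr->bcnt += len;
          ctr->pcnt++;
          xt_write_recseq_end(addend);            /* drops xt_write_lock on RT */
  }
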
8092 diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
8093 index 810124b33327..d54ca43d571f 100644
8094 --- a/include/linux/nfs_fs.h
8095 +++ b/include/linux/nfs_fs.h
8096 @@ -165,7 +165,11 @@ struct nfs_inode {
8098 /* Readers: in-flight sillydelete RPC calls */
8099 /* Writers: rmdir */
8100 +#ifdef CONFIG_PREEMPT_RT_BASE
8101 + struct semaphore rmdir_sem;
8103 struct rw_semaphore rmdir_sem;
8106 #if IS_ENABLED(CONFIG_NFS_V4)
8107 struct nfs4_cached_acl *nfs4_acl;
8108 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
8109 index beb1e10f446e..ebaf2e7bfe29 100644
8110 --- a/include/linux/nfs_xdr.h
8111 +++ b/include/linux/nfs_xdr.h
8112 @@ -1490,7 +1490,7 @@ struct nfs_unlinkdata {
8113 struct nfs_removeargs args;
8114 struct nfs_removeres res;
8115 struct dentry *dentry;
8116 - wait_queue_head_t wq;
8117 + struct swait_queue_head wq;
8118 struct rpc_cred *cred;
8119 struct nfs_fattr dir_attr;
8121 diff --git a/include/linux/notifier.h b/include/linux/notifier.h
8122 index 4149868de4e6..babe5b9bcb91 100644
8123 --- a/include/linux/notifier.h
8124 +++ b/include/linux/notifier.h
8127 * Alan Cox <Alan.Cox@linux.org>
8131 #ifndef _LINUX_NOTIFIER_H
8132 #define _LINUX_NOTIFIER_H
8133 #include <linux/errno.h>
8135 * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
8136 * As compensation, srcu_notifier_chain_unregister() is rather expensive.
8137 * SRCU notifier chains should be used when the chain will be called very
8138 - * often but notifier_blocks will seldom be removed. Also, SRCU notifier
8139 - * chains are slightly more difficult to use because they require special
8140 - * runtime initialization.
8141 + * often but notifier_blocks will seldom be removed.
8144 struct notifier_block;
8145 @@ -90,7 +88,7 @@ struct srcu_notifier_head {
8146 (name)->head = NULL; \
8149 -/* srcu_notifier_heads must be initialized and cleaned up dynamically */
8150 +/* srcu_notifier_heads must be cleaned up dynamically */
8151 extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
8152 #define srcu_cleanup_notifier_head(name) \
8153 cleanup_srcu_struct(&(name)->srcu);
8154 @@ -103,7 +101,13 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
8156 #define RAW_NOTIFIER_INIT(name) { \
8158 -/* srcu_notifier_heads cannot be initialized statically */
8160 +#define SRCU_NOTIFIER_INIT(name, pcpu) \
8162 + .mutex = __MUTEX_INITIALIZER(name.mutex), \
8164 + .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
8167 #define ATOMIC_NOTIFIER_HEAD(name) \
8168 struct atomic_notifier_head name = \
8169 @@ -115,6 +119,18 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
8170 struct raw_notifier_head name = \
8171 RAW_NOTIFIER_INIT(name)
8173 +#define _SRCU_NOTIFIER_HEAD(name, mod) \
8174 + static DEFINE_PER_CPU(struct srcu_struct_array, \
8175 + name##_head_srcu_array); \
8176 + mod struct srcu_notifier_head name = \
8177 + SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
8179 +#define SRCU_NOTIFIER_HEAD(name) \
8180 + _SRCU_NOTIFIER_HEAD(name, )
8182 +#define SRCU_NOTIFIER_HEAD_STATIC(name) \
8183 + _SRCU_NOTIFIER_HEAD(name, static)
8187 extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
8188 @@ -184,12 +200,12 @@ static inline int notifier_to_errno(int ret)
8191 * Declared notifiers so far. I can imagine quite a few more chains
8192 - * over time (eg laptop power reset chains, reboot chain (to clean
8193 + * over time (eg laptop power reset chains, reboot chain (to clean
8194 * device units up), device [un]mount chain, module load/unload chain,
8195 - * low memory chain, screenblank chain (for plug in modular screenblankers)
8196 + * low memory chain, screenblank chain (for plug in modular screenblankers)
8197 * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
8201 /* CPU notfiers are defined in include/linux/cpu.h. */
8203 /* netdevice notifiers are defined in include/linux/netdevice.h */
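
Not in the patch: a sketch of what the new static initializer enables;
previously an SRCU notifier head always had to be set up at runtime with
srcu_init_notifier_head(). Names are hypothetical.

  #include <linux/notifier.h>

  SRCU_NOTIFIER_HEAD_STATIC(my_chain);    /* statically initialized, no init call needed */

  static int my_event_cb(struct notifier_block *nb, unsigned long action, void *data)
  {
          return NOTIFY_OK;
  }

  static struct notifier_block my_nb = {
          .notifier_call = my_event_cb,
  };

  static void my_notify_example(void)
  {
          srcu_notifier_chain_register(&my_chain, &my_nb);
          srcu_notifier_call_chain(&my_chain, 1, NULL);
          srcu_notifier_chain_unregister(&my_chain, &my_nb);
  }
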
8204 diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
8205 index 5b2e6159b744..ea940f451606 100644
8206 --- a/include/linux/percpu-rwsem.h
8207 +++ b/include/linux/percpu-rwsem.h
8209 #include <linux/atomic.h>
8210 #include <linux/rwsem.h>
8211 #include <linux/percpu.h>
8212 -#include <linux/wait.h>
8213 +#include <linux/swait.h>
8214 #include <linux/rcu_sync.h>
8215 #include <linux/lockdep.h>
8217 @@ -12,7 +12,7 @@ struct percpu_rw_semaphore {
8218 struct rcu_sync rss;
8219 unsigned int __percpu *read_count;
8220 struct rw_semaphore rw_sem;
8221 - wait_queue_head_t writer;
8222 + struct swait_queue_head writer;
8226 @@ -22,13 +22,13 @@ static struct percpu_rw_semaphore name = { \
8227 .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \
8228 .read_count = &__percpu_rwsem_rc_##name, \
8229 .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \
8230 - .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer), \
8231 + .writer = __SWAIT_QUEUE_HEAD_INITIALIZER(name.writer), \
8234 extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
8235 extern void __percpu_up_read(struct percpu_rw_semaphore *);
8237 -static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
8238 +static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
8242 @@ -46,16 +46,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
8243 __this_cpu_inc(*sem->read_count);
8244 if (unlikely(!rcu_sync_is_idle(&sem->rss)))
8245 __percpu_down_read(sem, false); /* Unconditional memory barrier */
8248 - * The barrier() prevents the compiler from
8249 + * The preempt_enable() prevents the compiler from
8250 * bleeding the critical section out.
8254 -static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
8256 - percpu_down_read_preempt_disable(sem);
8260 @@ -82,13 +76,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
8264 -static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
8265 +static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
8268 - * The barrier() prevents the compiler from
8269 - * bleeding the critical section out.
8272 + preempt_disable();
8274 * Same as in percpu_down_read().
8276 @@ -101,12 +91,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem
8277 rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
8280 -static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
8282 - preempt_disable();
8283 - percpu_up_read_preempt_enable(sem);
8286 extern void percpu_down_write(struct percpu_rw_semaphore *);
8287 extern void percpu_up_write(struct percpu_rw_semaphore *);
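
Sketch only (hypothetical names): with the change above,
percpu_down_read()/percpu_up_read() no longer bracket a preemption-disabled
region, so a read-side section may sleep on RT.

  #include <linux/percpu-rwsem.h>

  static DEFINE_STATIC_PERCPU_RWSEM(my_sem);

  static void my_reader(void)
  {
          percpu_down_read(&my_sem);      /* read side, now preemptible */
          /* read-side critical section */
          percpu_up_read(&my_sem);
  }

  static void my_writer(void)
  {
          percpu_down_write(&my_sem);     /* waits for all readers */
          /* exclusive section */
          percpu_up_write(&my_sem);
  }
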
8289 diff --git a/include/linux/percpu.h b/include/linux/percpu.h
8290 index 56939d3f6e53..b988bf40ad3e 100644
8291 --- a/include/linux/percpu.h
8292 +++ b/include/linux/percpu.h
8294 #define PERCPU_MODULE_RESERVE 0
8297 +#ifdef CONFIG_PREEMPT_RT_FULL
8299 +#define get_local_var(var) (*({ \
8300 + migrate_disable(); \
8301 + this_cpu_ptr(&var); }))
8303 +#define put_local_var(var) do { \
8305 + migrate_enable(); \
8308 +# define get_local_ptr(var) ({ \
8309 + migrate_disable(); \
8310 + this_cpu_ptr(var); })
8312 +# define put_local_ptr(var) do { \
8314 + migrate_enable(); \
8319 +#define get_local_var(var) get_cpu_var(var)
8320 +#define put_local_var(var) put_cpu_var(var)
8321 +#define get_local_ptr(var) get_cpu_ptr(var)
8322 +#define put_local_ptr(var) put_cpu_ptr(var)
8326 /* minimum unit size, also is the maximum supported allocation size */
8327 #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
8329 @@ -110,6 +139,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
8332 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
8333 +extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
8334 extern bool is_kernel_percpu_address(unsigned long addr);
8336 #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
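
Not part of the patch: get_local_var()/put_local_var() behave like
get_cpu_var()/put_cpu_var() on mainline, but on RT they only disable
migration, so the section stays preemptible. Hypothetical variable name.

  #include <linux/percpu.h>

  static DEFINE_PER_CPU(int, my_stat);

  static void my_stat_inc(void)
  {
          int *p;

          p = &get_local_var(my_stat);    /* preempt_disable() on !RT, migrate_disable() on RT */
          (*p)++;
          put_local_var(my_stat);
  }
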
8337 diff --git a/include/linux/pid.h b/include/linux/pid.h
8338 index 23705a53abba..2cc64b779f03 100644
8339 --- a/include/linux/pid.h
8340 +++ b/include/linux/pid.h
8342 #define _LINUX_PID_H
8344 #include <linux/rcupdate.h>
8345 +#include <linux/atomic.h>
8349 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
8350 index 75e4e30677f1..1cfb1cb72354 100644
8351 --- a/include/linux/preempt.h
8352 +++ b/include/linux/preempt.h
8354 #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
8355 #define NMI_OFFSET (1UL << NMI_SHIFT)
8357 -#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
8358 +#ifndef CONFIG_PREEMPT_RT_FULL
8359 +# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
8361 +# define SOFTIRQ_DISABLE_OFFSET (0)
8364 /* We use the MSB mostly because its available */
8365 #define PREEMPT_NEED_RESCHED 0x80000000
8367 #include <asm/preempt.h>
8369 #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
8370 -#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
8371 #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
8373 +#ifndef CONFIG_PREEMPT_RT_FULL
8374 +# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
8375 +# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
8377 +# define softirq_count() (0UL)
8378 +extern int in_serving_softirq(void);
8382 * Are we doing bottom half or hardware interrupt processing?
8384 #define in_irq() (hardirq_count())
8385 #define in_softirq() (softirq_count())
8386 #define in_interrupt() (irq_count())
8387 -#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
8390 * Are we in NMI context?
8393 * The preempt_count offset after spin_lock()
8395 +#if !defined(CONFIG_PREEMPT_RT_FULL)
8396 #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
8398 +#define PREEMPT_LOCK_OFFSET 0
8402 * The preempt_count offset needed for things like:
8403 @@ -140,6 +153,20 @@ extern void preempt_count_sub(int val);
8404 #define preempt_count_inc() preempt_count_add(1)
8405 #define preempt_count_dec() preempt_count_sub(1)
8407 +#ifdef CONFIG_PREEMPT_LAZY
8408 +#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
8409 +#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
8410 +#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
8411 +#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
8412 +#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
8414 +#define add_preempt_lazy_count(val) do { } while (0)
8415 +#define sub_preempt_lazy_count(val) do { } while (0)
8416 +#define inc_preempt_lazy_count() do { } while (0)
8417 +#define dec_preempt_lazy_count() do { } while (0)
8418 +#define preempt_lazy_count() (0)
8421 #ifdef CONFIG_PREEMPT_COUNT
8423 #define preempt_disable() \
8424 @@ -148,13 +175,25 @@ do { \
8428 +#define preempt_lazy_disable() \
8430 + inc_preempt_lazy_count(); \
8434 #define sched_preempt_enable_no_resched() \
8437 preempt_count_dec(); \
8440 -#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
8441 +#ifdef CONFIG_PREEMPT_RT_BASE
8442 +# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
8443 +# define preempt_check_resched_rt() preempt_check_resched()
8445 +# define preempt_enable_no_resched() preempt_enable()
8446 +# define preempt_check_resched_rt() barrier();
8449 #define preemptible() (preempt_count() == 0 && !irqs_disabled())
8451 @@ -179,6 +218,13 @@ do { \
8452 __preempt_schedule(); \
8455 +#define preempt_lazy_enable() \
8457 + dec_preempt_lazy_count(); \
8459 + preempt_check_resched(); \
8462 #else /* !CONFIG_PREEMPT */
8463 #define preempt_enable() \
8465 @@ -224,6 +270,7 @@ do { \
8466 #define preempt_disable_notrace() barrier()
8467 #define preempt_enable_no_resched_notrace() barrier()
8468 #define preempt_enable_notrace() barrier()
8469 +#define preempt_check_resched_rt() barrier()
8470 #define preemptible() 0
8472 #endif /* CONFIG_PREEMPT_COUNT */
8473 @@ -244,10 +291,31 @@ do { \
8475 #define preempt_fold_need_resched() \
8477 - if (tif_need_resched()) \
8478 + if (tif_need_resched_now()) \
8479 set_preempt_need_resched(); \
8482 +#ifdef CONFIG_PREEMPT_RT_FULL
8483 +# define preempt_disable_rt() preempt_disable()
8484 +# define preempt_enable_rt() preempt_enable()
8485 +# define preempt_disable_nort() barrier()
8486 +# define preempt_enable_nort() barrier()
8488 + extern void migrate_disable(void);
8489 + extern void migrate_enable(void);
8490 +# else /* CONFIG_SMP */
8491 +# define migrate_disable() barrier()
8492 +# define migrate_enable() barrier()
8493 +# endif /* CONFIG_SMP */
8495 +# define preempt_disable_rt() barrier()
8496 +# define preempt_enable_rt() barrier()
8497 +# define preempt_disable_nort() preempt_disable()
8498 +# define preempt_enable_nort() preempt_enable()
8499 +# define migrate_disable() preempt_disable()
8500 +# define migrate_enable() preempt_enable()
8503 #ifdef CONFIG_PREEMPT_NOTIFIERS
8505 struct preempt_notifier;
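
Illustrative sketch (hypothetical function) of the
migrate_disable()/migrate_enable() pair introduced above: on RT it pins the
task to its current CPU, so smp_processor_id() and per-CPU pointers remain
stable, without disabling preemption.

  #include <linux/preempt.h>
  #include <linux/smp.h>
  #include <linux/printk.h>

  static void my_log_cpu(void)
  {
          int cpu;

          migrate_disable();      /* stay on this CPU; still preemptible on RT */
          cpu = smp_processor_id();
          pr_info("running on CPU %d\n", cpu);
          migrate_enable();
  }
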
8506 diff --git a/include/linux/printk.h b/include/linux/printk.h
8507 index eac1af8502bb..37e647af0b0b 100644
8508 --- a/include/linux/printk.h
8509 +++ b/include/linux/printk.h
8510 @@ -126,9 +126,11 @@ struct va_format {
8511 #ifdef CONFIG_EARLY_PRINTK
8512 extern asmlinkage __printf(1, 2)
8513 void early_printk(const char *fmt, ...);
8514 +extern void printk_kill(void);
8516 static inline __printf(1, 2) __cold
8517 void early_printk(const char *s, ...) { }
8518 +static inline void printk_kill(void) { }
8521 #ifdef CONFIG_PRINTK_NMI
8522 diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
8523 index af3581b8a451..277295039c8f 100644
8524 --- a/include/linux/radix-tree.h
8525 +++ b/include/linux/radix-tree.h
8526 @@ -292,6 +292,8 @@ unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
8527 int radix_tree_preload(gfp_t gfp_mask);
8528 int radix_tree_maybe_preload(gfp_t gfp_mask);
8529 int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
8530 +void radix_tree_preload_end(void);
8532 void radix_tree_init(void);
8533 void *radix_tree_tag_set(struct radix_tree_root *root,
8534 unsigned long index, unsigned int tag);
8535 @@ -314,11 +316,6 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
8536 int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
8537 unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
8539 -static inline void radix_tree_preload_end(void)
8545 * struct radix_tree_iter - radix tree iterator state
8547 diff --git a/include/linux/random.h b/include/linux/random.h
8548 index 16ab429735a7..9d0fecb5b6c2 100644
8549 --- a/include/linux/random.h
8550 +++ b/include/linux/random.h
8551 @@ -31,7 +31,7 @@ static inline void add_latent_entropy(void) {}
8553 extern void add_input_randomness(unsigned int type, unsigned int code,
8554 unsigned int value) __latent_entropy;
8555 -extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
8556 +extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy;
8558 extern void get_random_bytes(void *buf, int nbytes);
8559 extern int add_random_ready_callback(struct random_ready_callback *rdy);
8560 diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
8561 index e585018498d5..25c64474fc27 100644
8562 --- a/include/linux/rbtree.h
8563 +++ b/include/linux/rbtree.h
8566 #include <linux/kernel.h>
8567 #include <linux/stddef.h>
8568 -#include <linux/rcupdate.h>
8569 +#include <linux/rcu_assign_pointer.h>
8572 unsigned long __rb_parent_color;
8573 diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
8574 index d076183e49be..36bfb4dd57ae 100644
8575 --- a/include/linux/rbtree_augmented.h
8576 +++ b/include/linux/rbtree_augmented.h
8579 #include <linux/compiler.h>
8580 #include <linux/rbtree.h>
8581 +#include <linux/rcupdate.h>
8584 * Please note - only struct rb_augment_callbacks and the prototypes for
8585 diff --git a/include/linux/rcu_assign_pointer.h b/include/linux/rcu_assign_pointer.h
8586 new file mode 100644
8587 index 000000000000..7066962a4379
8589 +++ b/include/linux/rcu_assign_pointer.h
8591 +#ifndef __LINUX_RCU_ASSIGN_POINTER_H__
8592 +#define __LINUX_RCU_ASSIGN_POINTER_H__
8593 +#include <linux/compiler.h>
8594 +#include <asm/barrier.h>
8597 + * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
8598 + * @v: The value to statically initialize with.
8600 +#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
8603 + * rcu_assign_pointer() - assign to RCU-protected pointer
8604 + * @p: pointer to assign to
8605 + * @v: value to assign (publish)
8607 + * Assigns the specified value to the specified RCU-protected
8608 + * pointer, ensuring that any concurrent RCU readers will see
8609 + * any prior initialization.
8611 + * Inserts memory barriers on architectures that require them
8612 + * (which is most of them), and also prevents the compiler from
8613 + * reordering the code that initializes the structure after the pointer
8614 + * assignment. More importantly, this call documents which pointers
8615 + * will be dereferenced by RCU read-side code.
8617 + * In some special cases, you may use RCU_INIT_POINTER() instead
8618 + * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
8619 + * to the fact that it does not constrain either the CPU or the compiler.
8620 + * That said, using RCU_INIT_POINTER() when you should have used
8621 + * rcu_assign_pointer() is a very bad thing that results in
8622 + * impossible-to-diagnose memory corruption. So please be careful.
8623 + * See the RCU_INIT_POINTER() comment header for details.
8625 + * Note that rcu_assign_pointer() evaluates each of its arguments only
8626 + * once, appearances notwithstanding. One of the "extra" evaluations
8627 + * is in typeof() and the other visible only to sparse (__CHECKER__),
8628 + * neither of which actually execute the argument. As with most cpp
8629 + * macros, this execute-arguments-only-once property is important, so
8630 + * please be careful when making changes to rcu_assign_pointer() and the
8631 + * other macros that it invokes.
8633 +#define rcu_assign_pointer(p, v) \
8635 + uintptr_t _r_a_p__v = (uintptr_t)(v); \
8637 + if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
8638 + WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
8640 + smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
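
Not part of the patch itself (the hunk only moves the existing definition into
its own header, which rbtree.h then includes instead of rcupdate.h): the usual
publish/read pattern, with hypothetical names; freeing of the old object via
call_rcu() is omitted for brevity.

  #include <linux/rcupdate.h>
  #include <linux/slab.h>

  struct my_cfg {
          int value;
  };

  static struct my_cfg __rcu *my_cfg_ptr;

  static int my_cfg_update(int value)
  {
          struct my_cfg *new = kmalloc(sizeof(*new), GFP_KERNEL);

          if (!new)
                  return -ENOMEM;
          new->value = value;
          rcu_assign_pointer(my_cfg_ptr, new);    /* publish after initialization */
          return 0;
  }

  static int my_cfg_read(void)
  {
          struct my_cfg *cfg;
          int v = 0;

          rcu_read_lock();
          cfg = rcu_dereference(my_cfg_ptr);
          if (cfg)
                  v = cfg->value;
          rcu_read_unlock();
          return v;
  }
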
8645 diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
8646 index 01f71e1d2e94..30cc001d0d5a 100644
8647 --- a/include/linux/rcupdate.h
8648 +++ b/include/linux/rcupdate.h
8650 #include <linux/compiler.h>
8651 #include <linux/ktime.h>
8652 #include <linux/irqflags.h>
8653 +#include <linux/rcu_assign_pointer.h>
8655 #include <asm/barrier.h>
8657 @@ -178,6 +179,9 @@ void call_rcu(struct rcu_head *head,
8659 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
8661 +#ifdef CONFIG_PREEMPT_RT_FULL
8662 +#define call_rcu_bh call_rcu
8665 * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
8666 * @head: structure to be used for queueing the RCU updates.
8667 @@ -201,6 +205,7 @@ void call_rcu(struct rcu_head *head,
8669 void call_rcu_bh(struct rcu_head *head,
8670 rcu_callback_t func);
8674 * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
8675 @@ -301,6 +306,11 @@ void synchronize_rcu(void);
8676 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
8678 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
8679 +#ifndef CONFIG_PREEMPT_RT_FULL
8680 +#define sched_rcu_preempt_depth() rcu_preempt_depth()
8682 +static inline int sched_rcu_preempt_depth(void) { return 0; }
8685 #else /* #ifdef CONFIG_PREEMPT_RCU */
8687 @@ -326,6 +336,8 @@ static inline int rcu_preempt_depth(void)
8691 +#define sched_rcu_preempt_depth() rcu_preempt_depth()
8693 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
8695 /* Internal to kernel */
8696 @@ -505,7 +517,14 @@ extern struct lockdep_map rcu_callback_map;
8697 int debug_lockdep_rcu_enabled(void);
8699 int rcu_read_lock_held(void);
8700 +#ifdef CONFIG_PREEMPT_RT_FULL
8701 +static inline int rcu_read_lock_bh_held(void)
8703 + return rcu_read_lock_held();
8706 int rcu_read_lock_bh_held(void);
8710 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
8711 @@ -626,54 +645,6 @@ static inline void rcu_preempt_sleep_check(void)
8715 - * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
8716 - * @v: The value to statically initialize with.
8718 -#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
8721 - * rcu_assign_pointer() - assign to RCU-protected pointer
8722 - * @p: pointer to assign to
8723 - * @v: value to assign (publish)
8725 - * Assigns the specified value to the specified RCU-protected
8726 - * pointer, ensuring that any concurrent RCU readers will see
8727 - * any prior initialization.
8729 - * Inserts memory barriers on architectures that require them
8730 - * (which is most of them), and also prevents the compiler from
8731 - * reordering the code that initializes the structure after the pointer
8732 - * assignment. More importantly, this call documents which pointers
8733 - * will be dereferenced by RCU read-side code.
8735 - * In some special cases, you may use RCU_INIT_POINTER() instead
8736 - * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
8737 - * to the fact that it does not constrain either the CPU or the compiler.
8738 - * That said, using RCU_INIT_POINTER() when you should have used
8739 - * rcu_assign_pointer() is a very bad thing that results in
8740 - * impossible-to-diagnose memory corruption. So please be careful.
8741 - * See the RCU_INIT_POINTER() comment header for details.
8743 - * Note that rcu_assign_pointer() evaluates each of its arguments only
8744 - * once, appearances notwithstanding. One of the "extra" evaluations
8745 - * is in typeof() and the other visible only to sparse (__CHECKER__),
8746 - * neither of which actually execute the argument. As with most cpp
8747 - * macros, this execute-arguments-only-once property is important, so
8748 - * please be careful when making changes to rcu_assign_pointer() and the
8749 - * other macros that it invokes.
8751 -#define rcu_assign_pointer(p, v) \
8753 - uintptr_t _r_a_p__v = (uintptr_t)(v); \
8755 - if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
8756 - WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
8758 - smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
8763 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
8764 * @p: The pointer to read
8766 @@ -951,10 +922,14 @@ static inline void rcu_read_unlock(void)
8767 static inline void rcu_read_lock_bh(void)
8770 +#ifdef CONFIG_PREEMPT_RT_FULL
8774 rcu_lock_acquire(&rcu_bh_lock_map);
8775 RCU_LOCKDEP_WARN(!rcu_is_watching(),
8776 "rcu_read_lock_bh() used illegally while idle");
8781 @@ -964,10 +939,14 @@ static inline void rcu_read_lock_bh(void)
8783 static inline void rcu_read_unlock_bh(void)
8785 +#ifdef CONFIG_PREEMPT_RT_FULL
8786 + rcu_read_unlock();
8788 RCU_LOCKDEP_WARN(!rcu_is_watching(),
8789 "rcu_read_unlock_bh() used illegally while idle");
8790 rcu_lock_release(&rcu_bh_lock_map);
8796 diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
8797 index 63a4e4cf40a5..08ab12df2863 100644
8798 --- a/include/linux/rcutree.h
8799 +++ b/include/linux/rcutree.h
8800 @@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu)
8801 rcu_note_context_switch();
8804 +#ifdef CONFIG_PREEMPT_RT_FULL
8805 +# define synchronize_rcu_bh synchronize_rcu
8807 void synchronize_rcu_bh(void);
8809 void synchronize_sched_expedited(void);
8810 void synchronize_rcu_expedited(void);
8812 @@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_expedited(void)
8815 void rcu_barrier(void);
8816 +#ifdef CONFIG_PREEMPT_RT_FULL
8817 +# define rcu_barrier_bh rcu_barrier
8819 void rcu_barrier_bh(void);
8821 void rcu_barrier_sched(void);
8822 unsigned long get_state_synchronize_rcu(void);
8823 void cond_synchronize_rcu(unsigned long oldstate);
8824 @@ -82,17 +90,14 @@ void cond_synchronize_sched(unsigned long oldstate);
8825 extern unsigned long rcutorture_testseq;
8826 extern unsigned long rcutorture_vernum;
8827 unsigned long rcu_batches_started(void);
8828 -unsigned long rcu_batches_started_bh(void);
8829 unsigned long rcu_batches_started_sched(void);
8830 unsigned long rcu_batches_completed(void);
8831 -unsigned long rcu_batches_completed_bh(void);
8832 unsigned long rcu_batches_completed_sched(void);
8833 unsigned long rcu_exp_batches_completed(void);
8834 unsigned long rcu_exp_batches_completed_sched(void);
8835 void show_rcu_gp_kthreads(void);
8837 void rcu_force_quiescent_state(void);
8838 -void rcu_bh_force_quiescent_state(void);
8839 void rcu_sched_force_quiescent_state(void);
8841 void rcu_idle_enter(void);
8842 @@ -109,6 +114,16 @@ extern int rcu_scheduler_active __read_mostly;
8844 bool rcu_is_watching(void);
8846 +#ifndef CONFIG_PREEMPT_RT_FULL
8847 +void rcu_bh_force_quiescent_state(void);
8848 +unsigned long rcu_batches_started_bh(void);
8849 +unsigned long rcu_batches_completed_bh(void);
8851 +# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
8852 +# define rcu_batches_completed_bh rcu_batches_completed
8853 +# define rcu_batches_started_bh rcu_batches_completed
8856 void rcu_all_qs(void);
8858 /* RCUtree hotplug events */
8859 diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
8860 index 1abba5ce2a2f..294a8b4875f1 100644
8861 --- a/include/linux/rtmutex.h
8862 +++ b/include/linux/rtmutex.h
8864 #define __LINUX_RT_MUTEX_H
8866 #include <linux/linkage.h>
8867 +#include <linux/spinlock_types_raw.h>
8868 #include <linux/rbtree.h>
8869 -#include <linux/spinlock_types.h>
8871 extern int max_lock_depth; /* for sysctl */
8873 +#ifdef CONFIG_DEBUG_MUTEXES
8874 +#include <linux/debug_locks.h>
8878 * The rt_mutex structure
8880 @@ -31,8 +35,8 @@ struct rt_mutex {
8881 struct rb_root waiters;
8882 struct rb_node *waiters_leftmost;
8883 struct task_struct *owner;
8884 -#ifdef CONFIG_DEBUG_RT_MUTEXES
8886 +#ifdef CONFIG_DEBUG_RT_MUTEXES
8887 const char *name, *file;
8890 @@ -55,22 +59,33 @@ struct hrtimer_sleeper;
8891 # define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
8894 +# define rt_mutex_init(mutex) \
8896 + raw_spin_lock_init(&(mutex)->wait_lock); \
8897 + __rt_mutex_init(mutex, #mutex); \
8900 #ifdef CONFIG_DEBUG_RT_MUTEXES
8901 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
8902 , .name = #mutexname, .file = __FILE__, .line = __LINE__
8903 -# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
8904 extern void rt_mutex_debug_task_free(struct task_struct *tsk);
8906 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
8907 -# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
8908 # define rt_mutex_debug_task_free(t) do { } while (0)
8911 -#define __RT_MUTEX_INITIALIZER(mutexname) \
8912 - { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
8913 +#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
8914 + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
8915 , .waiters = RB_ROOT \
8917 - __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
8918 + __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
8920 +#define __RT_MUTEX_INITIALIZER(mutexname) \
8921 + { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
8923 +#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
8924 + { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
8925 + , .save_state = 1 }
8927 #define DEFINE_RT_MUTEX(mutexname) \
8928 struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
8929 @@ -90,7 +105,9 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
8930 extern void rt_mutex_destroy(struct rt_mutex *lock);
8932 extern void rt_mutex_lock(struct rt_mutex *lock);
8933 +extern int rt_mutex_lock_state(struct rt_mutex *lock, int state);
8934 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
8935 +extern int rt_mutex_lock_killable(struct rt_mutex *lock);
8936 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
8937 struct hrtimer_sleeper *timeout);
8939 diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
8940 new file mode 100644
8941 index 000000000000..49ed2d45d3be
8943 +++ b/include/linux/rwlock_rt.h
8945 +#ifndef __LINUX_RWLOCK_RT_H
8946 +#define __LINUX_RWLOCK_RT_H
8948 +#ifndef __LINUX_SPINLOCK_H
8949 +#error Do not include directly. Use spinlock.h
8952 +#define rwlock_init(rwl) \
8954 + static struct lock_class_key __key; \
8956 + rt_mutex_init(&(rwl)->lock); \
8957 + __rt_rwlock_init(rwl, #rwl, &__key); \
8960 +extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
8961 +extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
8962 +extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
8963 +extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
8964 +extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
8965 +extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
8966 +extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
8967 +extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
8968 +extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
8969 +extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
8971 +#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
8972 +#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
8974 +#define write_trylock_irqsave(lock, flags) \
8975 + __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
8977 +#define read_lock_irqsave(lock, flags) \
8979 + typecheck(unsigned long, flags); \
8980 + flags = rt_read_lock_irqsave(lock); \
8983 +#define write_lock_irqsave(lock, flags) \
8985 + typecheck(unsigned long, flags); \
8986 + flags = rt_write_lock_irqsave(lock); \
8989 +#define read_lock(lock) rt_read_lock(lock)
8991 +#define read_lock_bh(lock) \
8993 + local_bh_disable(); \
8994 + rt_read_lock(lock); \
8997 +#define read_lock_irq(lock) read_lock(lock)
8999 +#define write_lock(lock) rt_write_lock(lock)
9001 +#define write_lock_bh(lock) \
9003 + local_bh_disable(); \
9004 + rt_write_lock(lock); \
9007 +#define write_lock_irq(lock) write_lock(lock)
9009 +#define read_unlock(lock) rt_read_unlock(lock)
9011 +#define read_unlock_bh(lock) \
9013 + rt_read_unlock(lock); \
9014 + local_bh_enable(); \
9017 +#define read_unlock_irq(lock) read_unlock(lock)
9019 +#define write_unlock(lock) rt_write_unlock(lock)
9021 +#define write_unlock_bh(lock) \
9023 + rt_write_unlock(lock); \
9024 + local_bh_enable(); \
9027 +#define write_unlock_irq(lock) write_unlock(lock)
9029 +#define read_unlock_irqrestore(lock, flags) \
9031 + typecheck(unsigned long, flags); \
9033 + rt_read_unlock(lock); \
9036 +#define write_unlock_irqrestore(lock, flags) \
9038 + typecheck(unsigned long, flags); \
9040 + rt_write_unlock(lock); \
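
Sketch only (hypothetical names): rwlock users are source-compatible; on RT
the macros above route the calls to the rt_read_*/rt_write_* functions backed
by an rtmutex.

  #include <linux/spinlock.h>

  static DEFINE_RWLOCK(my_rwlock);
  static int my_value;

  static int my_get(void)
  {
          int v;

          read_lock(&my_rwlock);          /* rt_read_lock() on RT */
          v = my_value;
          read_unlock(&my_rwlock);
          return v;
  }

  static void my_set(int v)
  {
          write_lock(&my_rwlock);         /* rt_write_lock() on RT */
          my_value = v;
          write_unlock(&my_rwlock);
  }
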
9044 diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
9045 index cc0072e93e36..5317cd957292 100644
9046 --- a/include/linux/rwlock_types.h
9047 +++ b/include/linux/rwlock_types.h
9049 #ifndef __LINUX_RWLOCK_TYPES_H
9050 #define __LINUX_RWLOCK_TYPES_H
9052 +#if !defined(__LINUX_SPINLOCK_TYPES_H)
9053 +# error "Do not include directly, include spinlock_types.h"
9057 * include/linux/rwlock_types.h - generic rwlock type definitions
9059 diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
9060 new file mode 100644
9061 index 000000000000..51b28d775fe1
9063 +++ b/include/linux/rwlock_types_rt.h
9065 +#ifndef __LINUX_RWLOCK_TYPES_RT_H
9066 +#define __LINUX_RWLOCK_TYPES_RT_H
9068 +#ifndef __LINUX_SPINLOCK_TYPES_H
9069 +#error "Do not include directly. Include spinlock_types.h instead"
9073 + * rwlocks - an rtmutex which allows single-reader recursion
9076 + struct rt_mutex lock;
9078 + unsigned int break_lock;
9079 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
9080 + struct lockdep_map dep_map;
9084 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
9085 +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
9087 +# define RW_DEP_MAP_INIT(lockname)
9090 +#define __RW_LOCK_UNLOCKED(name) \
9091 + { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
9092 + RW_DEP_MAP_INIT(name) }
9094 +#define DEFINE_RWLOCK(name) \
9095 + rwlock_t name = __RW_LOCK_UNLOCKED(name)
9098 diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
9099 index dd1d14250340..aa2ac1f65c2d 100644
9100 --- a/include/linux/rwsem.h
9101 +++ b/include/linux/rwsem.h
9103 #include <linux/osq_lock.h>
9106 +#ifdef CONFIG_PREEMPT_RT_FULL
9107 +#include <linux/rwsem_rt.h>
9108 +#else /* PREEMPT_RT_FULL */
9110 struct rw_semaphore;
9112 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
9113 @@ -106,6 +110,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
9114 return !list_empty(&sem->wait_list);
9117 +#endif /* !PREEMPT_RT_FULL */
9120 + * The functions below are the same for all rwsem implementations including
9121 + * the RT specific variant.
9127 diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
9128 new file mode 100644
9129 index 000000000000..2ffbf093ae92
9131 +++ b/include/linux/rwsem_rt.h
9133 +#ifndef _LINUX_RWSEM_RT_H
9134 +#define _LINUX_RWSEM_RT_H
9136 +#ifndef _LINUX_RWSEM_H
9137 +#error "Include rwsem.h"
9140 +#include <linux/rtmutex.h>
9141 +#include <linux/swait.h>
9143 +#define READER_BIAS (1U << 31)
9144 +#define WRITER_BIAS (1U << 30)
9146 +struct rw_semaphore {
9148 + struct rt_mutex rtmutex;
9149 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
9150 + struct lockdep_map dep_map;
9154 +#define __RWSEM_INITIALIZER(name) \
9156 + .readers = ATOMIC_INIT(READER_BIAS), \
9157 + .rtmutex = __RT_MUTEX_INITIALIZER(name.rtmutex), \
9158 + RW_DEP_MAP_INIT(name) \
9161 +#define DECLARE_RWSEM(lockname) \
9162 + struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
9164 +extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name,
9165 + struct lock_class_key *key);
9167 +#define __init_rwsem(sem, name, key) \
9169 + rt_mutex_init(&(sem)->rtmutex); \
9170 + __rwsem_init((sem), (name), (key)); \
9173 +#define init_rwsem(sem) \
9175 + static struct lock_class_key __key; \
9177 + __init_rwsem((sem), #sem, &__key); \
9180 +static inline int rwsem_is_locked(struct rw_semaphore *sem)
9182 + return atomic_read(&sem->readers) != READER_BIAS;
9185 +static inline int rwsem_is_contended(struct rw_semaphore *sem)
9187 + return atomic_read(&sem->readers) > 0;
9190 +extern void __down_read(struct rw_semaphore *sem);
9191 +extern int __down_read_trylock(struct rw_semaphore *sem);
9192 +extern void __down_write(struct rw_semaphore *sem);
9193 +extern int __must_check __down_write_killable(struct rw_semaphore *sem);
9194 +extern int __down_write_trylock(struct rw_semaphore *sem);
9195 +extern void __up_read(struct rw_semaphore *sem);
9196 +extern void __up_write(struct rw_semaphore *sem);
9197 +extern void __downgrade_write(struct rw_semaphore *sem);
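Illustrative sketch (not part of the patch; the semaphore name is hypothetical): the RT rw_semaphore keeps the regular rwsem API, so callers stay unchanged and only the backing implementation switches to the reader-count plus rtmutex scheme declared above.

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_rwsem);		/* hypothetical lock */

static void example_reader(void)
{
	down_read(&example_rwsem);		/* accounted in ->readers */
	/* shared read-side section */
	up_read(&example_rwsem);
}

static void example_writer(void)
{
	down_write(&example_rwsem);		/* takes the embedded ->rtmutex */
	/* exclusive write-side section */
	up_write(&example_rwsem);
}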
9200 diff --git a/include/linux/sched.h b/include/linux/sched.h
9201 index f425eb3318ab..4d779486ad6b 100644
9202 --- a/include/linux/sched.h
9203 +++ b/include/linux/sched.h
9204 @@ -26,6 +26,7 @@ struct sched_param {
9205 #include <linux/nodemask.h>
9206 #include <linux/mm_types.h>
9207 #include <linux/preempt.h>
9208 +#include <asm/kmap_types.h>
9210 #include <asm/page.h>
9211 #include <asm/ptrace.h>
9212 @@ -236,17 +237,13 @@ extern char ___assert_task_state[1 - 2*!!(
9214 /* Convenience macros for the sake of wake_up */
9215 #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
9216 -#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
9218 /* get_task_state() */
9219 #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
9220 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
9221 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
9223 -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
9224 #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
9225 -#define task_is_stopped_or_traced(task) \
9226 - ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
9227 #define task_contributes_to_load(task) \
9228 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
9229 (task->flags & PF_FROZEN) == 0 && \
9230 @@ -312,6 +309,11 @@ extern char ___assert_task_state[1 - 2*!!(
9234 +#define __set_current_state_no_track(state_value) \
9235 + do { current->state = (state_value); } while (0)
9236 +#define set_current_state_no_track(state_value) \
9237 + set_mb(current->state, (state_value))
9239 /* Task command name length */
9240 #define TASK_COMM_LEN 16
9242 @@ -1013,8 +1015,18 @@ struct wake_q_head {
9243 struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
9245 extern void wake_q_add(struct wake_q_head *head,
9246 - struct task_struct *task);
9247 -extern void wake_up_q(struct wake_q_head *head);
9248 + struct task_struct *task);
9249 +extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
9251 +static inline void wake_up_q(struct wake_q_head *head)
9253 + __wake_up_q(head, false);
9256 +static inline void wake_up_q_sleeper(struct wake_q_head *head)
9258 + __wake_up_q(head, true);
9262 * sched-domains (multiprocessor balancing) declarations:
9263 @@ -1481,6 +1493,7 @@ struct task_struct {
9264 struct thread_info thread_info;
9266 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
9267 + volatile long saved_state; /* saved state for "spinlock sleepers" */
9270 unsigned int flags; /* per process flags, defined below */
9271 @@ -1520,6 +1533,12 @@ struct task_struct {
9274 unsigned int policy;
9275 +#ifdef CONFIG_PREEMPT_RT_FULL
9276 + int migrate_disable;
9277 +# ifdef CONFIG_SCHED_DEBUG
9278 + int migrate_disable_atomic;
9281 int nr_cpus_allowed;
9282 cpumask_t cpus_allowed;
9284 @@ -1658,6 +1677,9 @@ struct task_struct {
9286 struct task_cputime cputime_expires;
9287 struct list_head cpu_timers[3];
9288 +#ifdef CONFIG_PREEMPT_RT_BASE
9289 + struct task_struct *posix_timer_list;
9292 /* process credentials */
9293 const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
9294 @@ -1689,10 +1711,15 @@ struct task_struct {
9295 /* signal handlers */
9296 struct signal_struct *signal;
9297 struct sighand_struct *sighand;
9298 + struct sigqueue *sigqueue_cache;
9300 sigset_t blocked, real_blocked;
9301 sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
9302 struct sigpending pending;
9303 +#ifdef CONFIG_PREEMPT_RT_FULL
9304 + /* TODO: move me into ->restart_block ? */
9305 + struct siginfo forced_info;
9308 unsigned long sas_ss_sp;
9310 @@ -1723,6 +1750,8 @@ struct task_struct {
9311 /* PI waiters blocked on a rt_mutex held by this task */
9312 struct rb_root pi_waiters;
9313 struct rb_node *pi_waiters_leftmost;
9314 + /* Updated under owner's pi_lock and rq lock */
9315 + struct task_struct *pi_top_task;
9316 /* Deadlock detection and priority inheritance handling */
9317 struct rt_mutex_waiter *pi_blocked_on;
9319 @@ -1921,6 +1950,12 @@ struct task_struct {
9320 /* bitmask and counter of trace recursion */
9321 unsigned long trace_recursion;
9322 #endif /* CONFIG_TRACING */
9323 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
9324 + u64 preempt_timestamp_hist;
9325 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
9326 + long timer_offset;
9330 /* Coverage collection mode enabled for this task (0 if disabled). */
9331 enum kcov_mode kcov_mode;
9332 @@ -1946,9 +1981,23 @@ struct task_struct {
9333 unsigned int sequential_io;
9334 unsigned int sequential_io_avg;
9336 +#ifdef CONFIG_PREEMPT_RT_BASE
9337 + struct rcu_head put_rcu;
9338 + int softirq_nestcnt;
9339 + unsigned int softirqs_raised;
9341 +#ifdef CONFIG_PREEMPT_RT_FULL
9342 +# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
9344 + pte_t kmap_pte[KM_TYPE_NR];
9347 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
9348 unsigned long task_state_change;
9350 +#ifdef CONFIG_PREEMPT_RT_FULL
9351 + int xmit_recursion;
9353 int pagefault_disabled;
9355 struct task_struct *oom_reaper_list;
9356 @@ -1988,14 +2037,6 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
9360 -/* Future-safe accessor for struct task_struct's cpus_allowed. */
9361 -#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
9363 -static inline int tsk_nr_cpus_allowed(struct task_struct *p)
9365 - return p->nr_cpus_allowed;
9368 #define TNF_MIGRATED 0x01
9369 #define TNF_NO_GROUP 0x02
9370 #define TNF_SHARED 0x04
9371 @@ -2211,6 +2252,15 @@ extern struct pid *cad_pid;
9372 extern void free_task(struct task_struct *tsk);
9373 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
9375 +#ifdef CONFIG_PREEMPT_RT_BASE
9376 +extern void __put_task_struct_cb(struct rcu_head *rhp);
9378 +static inline void put_task_struct(struct task_struct *t)
9380 + if (atomic_dec_and_test(&t->usage))
9381 + call_rcu(&t->put_rcu, __put_task_struct_cb);
9384 extern void __put_task_struct(struct task_struct *t);
9386 static inline void put_task_struct(struct task_struct *t)
9387 @@ -2218,6 +2268,7 @@ static inline void put_task_struct(struct task_struct *t)
9388 if (atomic_dec_and_test(&t->usage))
9389 __put_task_struct(t);
9393 struct task_struct *task_rcu_dereference(struct task_struct **ptask);
9394 struct task_struct *try_get_task_struct(struct task_struct **ptask);
9395 @@ -2259,6 +2310,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
9399 +#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
9400 #define PF_EXITING 0x00000004 /* getting shut down */
9401 #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
9402 #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
9403 @@ -2427,6 +2479,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
9405 extern int set_cpus_allowed_ptr(struct task_struct *p,
9406 const struct cpumask *new_mask);
9407 +int migrate_me(void);
9408 +void tell_sched_cpu_down_begin(int cpu);
9409 +void tell_sched_cpu_down_done(int cpu);
9412 static inline void do_set_cpus_allowed(struct task_struct *p,
9413 const struct cpumask *new_mask)
9414 @@ -2439,6 +2495,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
9418 +static inline int migrate_me(void) { return 0; }
9419 +static inline void tell_sched_cpu_down_begin(int cpu) { }
9420 +static inline void tell_sched_cpu_down_done(int cpu) { }
9423 #ifdef CONFIG_NO_HZ_COMMON
9424 @@ -2677,6 +2736,7 @@ extern void xtime_update(unsigned long ticks);
9426 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
9427 extern int wake_up_process(struct task_struct *tsk);
9428 +extern int wake_up_lock_sleeper(struct task_struct * tsk);
9429 extern void wake_up_new_task(struct task_struct *tsk);
9431 extern void kick_process(struct task_struct *tsk);
9432 @@ -2885,6 +2945,17 @@ static inline void mmdrop(struct mm_struct *mm)
9436 +#ifdef CONFIG_PREEMPT_RT_BASE
9437 +extern void __mmdrop_delayed(struct rcu_head *rhp);
9438 +static inline void mmdrop_delayed(struct mm_struct *mm)
9440 + if (atomic_dec_and_test(&mm->mm_count))
9441 + call_rcu(&mm->delayed_drop, __mmdrop_delayed);
9444 +# define mmdrop_delayed(mm) mmdrop(mm)
9447 static inline void mmdrop_async_fn(struct work_struct *work)
9449 struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
9450 @@ -3277,6 +3348,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
9451 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
9454 +#ifdef CONFIG_PREEMPT_LAZY
9455 +static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
9457 + set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
9460 +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
9462 + clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
9465 +static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
9467 + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
9470 +static inline int need_resched_lazy(void)
9472 + return test_thread_flag(TIF_NEED_RESCHED_LAZY);
9475 +static inline int need_resched_now(void)
9477 + return test_thread_flag(TIF_NEED_RESCHED);
9481 +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
9482 +static inline int need_resched_lazy(void) { return 0; }
9484 +static inline int need_resched_now(void)
9486 + return test_thread_flag(TIF_NEED_RESCHED);
9491 static inline int restart_syscall(void)
9493 set_tsk_thread_flag(current, TIF_SIGPENDING);
9494 @@ -3308,6 +3416,51 @@ static inline int signal_pending_state(long state, struct task_struct *p)
9495 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
9498 +static inline bool __task_is_stopped_or_traced(struct task_struct *task)
9500 + if (task->state & (__TASK_STOPPED | __TASK_TRACED))
9502 +#ifdef CONFIG_PREEMPT_RT_FULL
9503 + if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
9509 +static inline bool task_is_stopped_or_traced(struct task_struct *task)
9511 + bool traced_stopped;
9513 +#ifdef CONFIG_PREEMPT_RT_FULL
9514 + unsigned long flags;
9516 + raw_spin_lock_irqsave(&task->pi_lock, flags);
9517 + traced_stopped = __task_is_stopped_or_traced(task);
9518 + raw_spin_unlock_irqrestore(&task->pi_lock, flags);
9520 + traced_stopped = __task_is_stopped_or_traced(task);
9522 + return traced_stopped;
9525 +static inline bool task_is_traced(struct task_struct *task)
9527 + bool traced = false;
9529 + if (task->state & __TASK_TRACED)
9531 +#ifdef CONFIG_PREEMPT_RT_FULL
9532 + /* in case the task is sleeping on tasklist_lock */
9533 + raw_spin_lock_irq(&task->pi_lock);
9534 + if (task->state & __TASK_TRACED)
9536 + else if (task->saved_state & __TASK_TRACED)
9538 + raw_spin_unlock_irq(&task->pi_lock);
9544 * cond_resched() and cond_resched_lock(): latency reduction via
9545 * explicit rescheduling in places that are safe. The return
9546 @@ -3333,12 +3486,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
9547 __cond_resched_lock(lock); \
9550 +#ifndef CONFIG_PREEMPT_RT_FULL
9551 extern int __cond_resched_softirq(void);
9553 #define cond_resched_softirq() ({ \
9554 ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
9555 __cond_resched_softirq(); \
9558 +# define cond_resched_softirq() cond_resched()
9561 static inline void cond_resched_rcu(void)
9563 @@ -3513,6 +3670,31 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
9565 #endif /* CONFIG_SMP */
9567 +static inline int __migrate_disabled(struct task_struct *p)
9569 +#ifdef CONFIG_PREEMPT_RT_FULL
9570 + return p->migrate_disable;
9576 +/* Future-safe accessor for struct task_struct's cpus_allowed. */
9577 +static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
9579 + if (__migrate_disabled(p))
9580 + return cpumask_of(task_cpu(p));
9582 + return &p->cpus_allowed;
9585 +static inline int tsk_nr_cpus_allowed(struct task_struct *p)
9587 + if (__migrate_disabled(p))
9589 + return p->nr_cpus_allowed;
9592 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
9593 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
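Illustrative sketch (not part of the patch; the function is hypothetical) of what the reworked tsk_cpus_allowed()/tsk_nr_cpus_allowed() report while a task runs with migration disabled on PREEMPT_RT_FULL.

#include <linux/sched.h>
#include <linux/preempt.h>

static void example_migrate_disabled_section(void)
{
	migrate_disable();	/* increments current->migrate_disable on RT */

	/*
	 * Until migrate_enable(), __migrate_disabled(current) is non-zero,
	 * so tsk_cpus_allowed(current) returns cpumask_of(task_cpu(current))
	 * and tsk_nr_cpus_allowed(current) returns 1: the scheduler treats
	 * the task as pinned to this CPU.
	 */

	migrate_enable();
}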
9595 diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
9596 index a30b172df6e1..db3e91f2bc03 100644
9597 --- a/include/linux/sched/rt.h
9598 +++ b/include/linux/sched/rt.h
9599 @@ -16,27 +16,20 @@ static inline int rt_task(struct task_struct *p)
9602 #ifdef CONFIG_RT_MUTEXES
9603 -extern int rt_mutex_getprio(struct task_struct *p);
9604 -extern void rt_mutex_setprio(struct task_struct *p, int prio);
9605 -extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
9606 -extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
9608 + * Must hold either p->pi_lock or task_rq(p)->lock.
9610 +static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
9612 + return p->pi_top_task;
9614 +extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
9615 extern void rt_mutex_adjust_pi(struct task_struct *p);
9616 static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
9618 return tsk->pi_blocked_on != NULL;
9621 -static inline int rt_mutex_getprio(struct task_struct *p)
9623 - return p->normal_prio;
9626 -static inline int rt_mutex_get_effective_prio(struct task_struct *task,
9632 static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
9635 diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
9636 index ead97654c4e9..3d7223ffdd3b 100644
9637 --- a/include/linux/seqlock.h
9638 +++ b/include/linux/seqlock.h
9639 @@ -220,20 +220,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
9640 return __read_seqcount_retry(s, start);
9645 -static inline void raw_write_seqcount_begin(seqcount_t *s)
9646 +static inline void __raw_write_seqcount_begin(seqcount_t *s)
9652 -static inline void raw_write_seqcount_end(seqcount_t *s)
9653 +static inline void raw_write_seqcount_begin(seqcount_t *s)
9655 + preempt_disable_rt();
9656 + __raw_write_seqcount_begin(s);
9659 +static inline void __raw_write_seqcount_end(seqcount_t *s)
9665 +static inline void raw_write_seqcount_end(seqcount_t *s)
9667 + __raw_write_seqcount_end(s);
9668 + preempt_enable_rt();
9672 * raw_write_seqcount_barrier - do a seq write barrier
9673 * @s: pointer to seqcount_t
9674 @@ -428,10 +438,32 @@ typedef struct {
9676 * Read side functions for starting and finalizing a read side section.
9678 +#ifndef CONFIG_PREEMPT_RT_FULL
9679 static inline unsigned read_seqbegin(const seqlock_t *sl)
9681 return read_seqcount_begin(&sl->seqcount);
9685 + * Starvation safe read side for RT
9687 +static inline unsigned read_seqbegin(seqlock_t *sl)
9692 + ret = ACCESS_ONCE(sl->seqcount.sequence);
9693 + if (unlikely(ret & 1)) {
9695 + * Take the lock and let the writer proceed (i.e. possibly
9696 + * boost it), otherwise we could loop here forever.
9698 + spin_unlock_wait(&sl->lock);
9705 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
9707 @@ -446,36 +478,45 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
9708 static inline void write_seqlock(seqlock_t *sl)
9710 spin_lock(&sl->lock);
9711 - write_seqcount_begin(&sl->seqcount);
9712 + __raw_write_seqcount_begin(&sl->seqcount);
9715 +static inline int try_write_seqlock(seqlock_t *sl)
9717 + if (spin_trylock(&sl->lock)) {
9718 + __raw_write_seqcount_begin(&sl->seqcount);
9724 static inline void write_sequnlock(seqlock_t *sl)
9726 - write_seqcount_end(&sl->seqcount);
9727 + __raw_write_seqcount_end(&sl->seqcount);
9728 spin_unlock(&sl->lock);
9731 static inline void write_seqlock_bh(seqlock_t *sl)
9733 spin_lock_bh(&sl->lock);
9734 - write_seqcount_begin(&sl->seqcount);
9735 + __raw_write_seqcount_begin(&sl->seqcount);
9738 static inline void write_sequnlock_bh(seqlock_t *sl)
9740 - write_seqcount_end(&sl->seqcount);
9741 + __raw_write_seqcount_end(&sl->seqcount);
9742 spin_unlock_bh(&sl->lock);
9745 static inline void write_seqlock_irq(seqlock_t *sl)
9747 spin_lock_irq(&sl->lock);
9748 - write_seqcount_begin(&sl->seqcount);
9749 + __raw_write_seqcount_begin(&sl->seqcount);
9752 static inline void write_sequnlock_irq(seqlock_t *sl)
9754 - write_seqcount_end(&sl->seqcount);
9755 + __raw_write_seqcount_end(&sl->seqcount);
9756 spin_unlock_irq(&sl->lock);
9759 @@ -484,7 +525,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
9760 unsigned long flags;
9762 spin_lock_irqsave(&sl->lock, flags);
9763 - write_seqcount_begin(&sl->seqcount);
9764 + __raw_write_seqcount_begin(&sl->seqcount);
9768 @@ -494,7 +535,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
9770 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
9772 - write_seqcount_end(&sl->seqcount);
9773 + __raw_write_seqcount_end(&sl->seqcount);
9774 spin_unlock_irqrestore(&sl->lock, flags);
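Illustrative sketch (not part of the patch; names are hypothetical): a plain seqlock writer/reader pair. The code is identical with or without RT; the difference is that the RT read_seqbegin() above waits on the writer's lock (so the writer can be priority boosted) instead of spinning on an odd sequence count.

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(example_seqlock);
static u64 example_value;

static void example_update(u64 v)
{
	write_seqlock(&example_seqlock);	/* spin_lock() + __raw_write_seqcount_begin() */
	example_value = v;
	write_sequnlock(&example_seqlock);
}

static u64 example_read(void)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqbegin(&example_seqlock);
		v = example_value;
	} while (read_seqretry(&example_seqlock, seq));

	return v;
}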
9777 diff --git a/include/linux/signal.h b/include/linux/signal.h
9778 index b63f63eaa39c..295540fdfc72 100644
9779 --- a/include/linux/signal.h
9780 +++ b/include/linux/signal.h
9781 @@ -233,6 +233,7 @@ static inline void init_sigpending(struct sigpending *sig)
9784 extern void flush_sigqueue(struct sigpending *queue);
9785 +extern void flush_task_sigqueue(struct task_struct *tsk);
9787 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
9788 static inline int valid_signal(unsigned long sig)
9789 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
9790 index 32810f279f8e..0db6e31161f6 100644
9791 --- a/include/linux/skbuff.h
9792 +++ b/include/linux/skbuff.h
9793 @@ -284,6 +284,7 @@ struct sk_buff_head {
9797 + raw_spinlock_t raw_lock;
9801 @@ -1573,6 +1574,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
9802 __skb_queue_head_init(list);
9805 +static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
9807 + raw_spin_lock_init(&list->raw_lock);
9808 + __skb_queue_head_init(list);
9811 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
9812 struct lock_class_key *class)
9814 diff --git a/include/linux/smp.h b/include/linux/smp.h
9815 index 8e0cb7a0f836..891c533724f5 100644
9816 --- a/include/linux/smp.h
9817 +++ b/include/linux/smp.h
9818 @@ -120,6 +120,13 @@ extern unsigned int setup_max_cpus;
9819 extern void __init setup_nr_cpu_ids(void);
9820 extern void __init smp_init(void);
9822 +extern int __boot_cpu_id;
9824 +static inline int get_boot_cpu_id(void)
9826 + return __boot_cpu_id;
9831 static inline void smp_send_stop(void) { }
9832 @@ -158,6 +165,11 @@ static inline void smp_init(void) { up_late_init(); }
9833 static inline void smp_init(void) { }
9836 +static inline int get_boot_cpu_id(void)
9844 @@ -185,6 +197,9 @@ static inline void smp_init(void) { }
9845 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
9846 #define put_cpu() preempt_enable()
9848 +#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
9849 +#define put_cpu_light() migrate_enable()
9852 * Callback to arch code if there's nosmp or maxcpus=0 on the
9853 * boot command line:
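Illustrative sketch (not part of the patch; the function is hypothetical): get_cpu_light()/put_cpu_light() pin the task to its current CPU via migrate_disable() rather than disabling preemption, so the section stays preemptible on RT.

#include <linux/smp.h>

static void example_cpu_local_section(void)
{
	int cpu = get_cpu_light();	/* migrate_disable() + smp_processor_id() */

	/*
	 * The task cannot migrate away from 'cpu' here, but unlike get_cpu()
	 * it may still be preempted by a higher-priority task.
	 */
	(void)cpu;

	put_cpu_light();		/* migrate_enable() */
}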
9854 diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
9855 index 47dd0cebd204..b241cc044bd3 100644
9856 --- a/include/linux/spinlock.h
9857 +++ b/include/linux/spinlock.h
9858 @@ -271,7 +271,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
9859 #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
9861 /* Include rwlock functions */
9862 -#include <linux/rwlock.h>
9863 +#ifdef CONFIG_PREEMPT_RT_FULL
9864 +# include <linux/rwlock_rt.h>
9866 +# include <linux/rwlock.h>
9870 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
9871 @@ -282,6 +286,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
9872 # include <linux/spinlock_api_up.h>
9875 +#ifdef CONFIG_PREEMPT_RT_FULL
9876 +# include <linux/spinlock_rt.h>
9877 +#else /* PREEMPT_RT_FULL */
9880 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
9882 @@ -416,4 +424,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
9883 #define atomic_dec_and_lock(atomic, lock) \
9884 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
9886 +#endif /* !PREEMPT_RT_FULL */
9888 #endif /* __LINUX_SPINLOCK_H */
9889 diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
9890 index 5344268e6e62..043263f30e81 100644
9891 --- a/include/linux/spinlock_api_smp.h
9892 +++ b/include/linux/spinlock_api_smp.h
9893 @@ -189,6 +189,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
9897 -#include <linux/rwlock_api_smp.h>
9898 +#ifndef CONFIG_PREEMPT_RT_FULL
9899 +# include <linux/rwlock_api_smp.h>
9902 #endif /* __LINUX_SPINLOCK_API_SMP_H */
9903 diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
9904 new file mode 100644
9905 index 000000000000..43ca841b913a
9907 +++ b/include/linux/spinlock_rt.h
9909 +#ifndef __LINUX_SPINLOCK_RT_H
9910 +#define __LINUX_SPINLOCK_RT_H
9912 +#ifndef __LINUX_SPINLOCK_H
9913 +#error Do not include directly. Use spinlock.h
9916 +#include <linux/bug.h>
9919 +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
9921 +#define spin_lock_init(slock) \
9923 + static struct lock_class_key __key; \
9925 + rt_mutex_init(&(slock)->lock); \
9926 + __rt_spin_lock_init(slock, #slock, &__key); \
9929 +void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
9930 +void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
9931 +int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);
9933 +extern void __lockfunc rt_spin_lock(spinlock_t *lock);
9934 +extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
9935 +extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
9936 +extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
9937 +extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
9938 +extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
9939 +extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
9940 +extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
9941 +extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
9944 + * lockdep-less calls, for derived types like rwlock:
9945 + * (for trylock they can use rt_mutex_trylock() directly.)
9947 +extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
9948 +extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
9949 +extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
9951 +#define spin_lock(lock) rt_spin_lock(lock)
9953 +#define spin_lock_bh(lock) \
9955 + local_bh_disable(); \
9956 + rt_spin_lock(lock); \
9959 +#define spin_lock_irq(lock) spin_lock(lock)
9961 +#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
9963 +#define spin_trylock(lock) \
9966 + __locked = spin_do_trylock(lock); \
9970 +#ifdef CONFIG_LOCKDEP
9971 +# define spin_lock_nested(lock, subclass) \
9973 + rt_spin_lock_nested(lock, subclass); \
9976 +#define spin_lock_bh_nested(lock, subclass) \
9978 + local_bh_disable(); \
9979 + rt_spin_lock_nested(lock, subclass); \
9982 +# define spin_lock_irqsave_nested(lock, flags, subclass) \
9984 + typecheck(unsigned long, flags); \
9986 + rt_spin_lock_nested(lock, subclass); \
9989 +# define spin_lock_nested(lock, subclass) spin_lock(lock)
9990 +# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock)
9992 +# define spin_lock_irqsave_nested(lock, flags, subclass) \
9994 + typecheck(unsigned long, flags); \
9996 + spin_lock(lock); \
10000 +#define spin_lock_irqsave(lock, flags) \
10002 + typecheck(unsigned long, flags); \
10004 + spin_lock(lock); \
10007 +static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
10009 + unsigned long flags = 0;
10010 +#ifdef CONFIG_TRACE_IRQFLAGS
10011 + flags = rt_spin_lock_trace_flags(lock);
10013 + spin_lock(lock); /* lock_local */
10018 +/* FIXME: we need rt_spin_lock_nest_lock */
10019 +#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
10021 +#define spin_unlock(lock) rt_spin_unlock(lock)
10023 +#define spin_unlock_bh(lock) \
10025 + rt_spin_unlock(lock); \
10026 + local_bh_enable(); \
10029 +#define spin_unlock_irq(lock) spin_unlock(lock)
10031 +#define spin_unlock_irqrestore(lock, flags) \
10033 + typecheck(unsigned long, flags); \
10035 + spin_unlock(lock); \
10038 +#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
10039 +#define spin_trylock_irq(lock) spin_trylock(lock)
10041 +#define spin_trylock_irqsave(lock, flags) \
10042 + rt_spin_trylock_irqsave(lock, &(flags))
10044 +#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
10046 +#ifdef CONFIG_GENERIC_LOCKBREAK
10047 +# define spin_is_contended(lock) ((lock)->break_lock)
10049 +# define spin_is_contended(lock) (((void)(lock), 0))
10052 +static inline int spin_can_lock(spinlock_t *lock)
10054 + return !rt_mutex_is_locked(&lock->lock);
10057 +static inline int spin_is_locked(spinlock_t *lock)
10059 + return rt_mutex_is_locked(&lock->lock);
10062 +static inline void assert_spin_locked(spinlock_t *lock)
10064 + BUG_ON(!spin_is_locked(lock));
10067 +#define atomic_dec_and_lock(atomic, lock) \
10068 + atomic_dec_and_spin_lock(atomic, lock)
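Illustrative sketch (not part of the patch; the lock is hypothetical): with the mappings above the usual spinlock API compiles unchanged, but the _irq/_irqsave variants no longer disable interrupts and the lock itself becomes a sleeping rt_mutex.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/*
	 * On RT this maps to rt_spin_lock(): the section is preemptible,
	 * hard interrupts stay enabled and 'flags' carries no IRQ state.
	 */
	spin_unlock_irqrestore(&example_lock, flags);
}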
10071 diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
10072 index 73548eb13a5d..10bac715ea96 100644
10073 --- a/include/linux/spinlock_types.h
10074 +++ b/include/linux/spinlock_types.h
10076 * Released under the General Public License (GPL).
10079 -#if defined(CONFIG_SMP)
10080 -# include <asm/spinlock_types.h>
10081 +#include <linux/spinlock_types_raw.h>
10083 +#ifndef CONFIG_PREEMPT_RT_FULL
10084 +# include <linux/spinlock_types_nort.h>
10085 +# include <linux/rwlock_types.h>
10087 -# include <linux/spinlock_types_up.h>
10088 +# include <linux/rtmutex.h>
10089 +# include <linux/spinlock_types_rt.h>
10090 +# include <linux/rwlock_types_rt.h>
10093 -#include <linux/lockdep.h>
10095 -typedef struct raw_spinlock {
10096 - arch_spinlock_t raw_lock;
10097 -#ifdef CONFIG_GENERIC_LOCKBREAK
10098 - unsigned int break_lock;
10100 -#ifdef CONFIG_DEBUG_SPINLOCK
10101 - unsigned int magic, owner_cpu;
10104 -#ifdef CONFIG_DEBUG_LOCK_ALLOC
10105 - struct lockdep_map dep_map;
10109 -#define SPINLOCK_MAGIC 0xdead4ead
10111 -#define SPINLOCK_OWNER_INIT ((void *)-1L)
10113 -#ifdef CONFIG_DEBUG_LOCK_ALLOC
10114 -# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
10116 -# define SPIN_DEP_MAP_INIT(lockname)
10119 -#ifdef CONFIG_DEBUG_SPINLOCK
10120 -# define SPIN_DEBUG_INIT(lockname) \
10121 - .magic = SPINLOCK_MAGIC, \
10122 - .owner_cpu = -1, \
10123 - .owner = SPINLOCK_OWNER_INIT,
10125 -# define SPIN_DEBUG_INIT(lockname)
10128 -#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
10130 - .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
10131 - SPIN_DEBUG_INIT(lockname) \
10132 - SPIN_DEP_MAP_INIT(lockname) }
10134 -#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
10135 - (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
10137 -#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
10139 -typedef struct spinlock {
10141 - struct raw_spinlock rlock;
10143 -#ifdef CONFIG_DEBUG_LOCK_ALLOC
10144 -# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
10146 - u8 __padding[LOCK_PADSIZE];
10147 - struct lockdep_map dep_map;
10153 -#define __SPIN_LOCK_INITIALIZER(lockname) \
10154 - { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
10156 -#define __SPIN_LOCK_UNLOCKED(lockname) \
10157 - (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
10159 -#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
10161 -#include <linux/rwlock_types.h>
10163 #endif /* __LINUX_SPINLOCK_TYPES_H */
10164 diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
10165 new file mode 100644
10166 index 000000000000..f1dac1fb1d6a
10168 +++ b/include/linux/spinlock_types_nort.h
10170 +#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
10171 +#define __LINUX_SPINLOCK_TYPES_NORT_H
10173 +#ifndef __LINUX_SPINLOCK_TYPES_H
10174 +#error "Do not include directly. Include spinlock_types.h instead"
10178 + * The non RT version maps spinlocks to raw_spinlocks
10180 +typedef struct spinlock {
10182 + struct raw_spinlock rlock;
10184 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
10185 +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
10187 + u8 __padding[LOCK_PADSIZE];
10188 + struct lockdep_map dep_map;
10194 +#define __SPIN_LOCK_INITIALIZER(lockname) \
10195 + { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
10197 +#define __SPIN_LOCK_UNLOCKED(lockname) \
10198 + (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
10200 +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
10203 diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
10204 new file mode 100644
10205 index 000000000000..edffc4d53fc9
10207 +++ b/include/linux/spinlock_types_raw.h
10209 +#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
10210 +#define __LINUX_SPINLOCK_TYPES_RAW_H
10212 +#if defined(CONFIG_SMP)
10213 +# include <asm/spinlock_types.h>
10215 +# include <linux/spinlock_types_up.h>
10218 +#include <linux/lockdep.h>
10220 +typedef struct raw_spinlock {
10221 + arch_spinlock_t raw_lock;
10222 +#ifdef CONFIG_GENERIC_LOCKBREAK
10223 + unsigned int break_lock;
10225 +#ifdef CONFIG_DEBUG_SPINLOCK
10226 + unsigned int magic, owner_cpu;
10229 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
10230 + struct lockdep_map dep_map;
10234 +#define SPINLOCK_MAGIC 0xdead4ead
10236 +#define SPINLOCK_OWNER_INIT ((void *)-1L)
10238 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
10239 +# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
10241 +# define SPIN_DEP_MAP_INIT(lockname)
10244 +#ifdef CONFIG_DEBUG_SPINLOCK
10245 +# define SPIN_DEBUG_INIT(lockname) \
10246 + .magic = SPINLOCK_MAGIC, \
10247 + .owner_cpu = -1, \
10248 + .owner = SPINLOCK_OWNER_INIT,
10250 +# define SPIN_DEBUG_INIT(lockname)
10253 +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
10255 + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
10256 + SPIN_DEBUG_INIT(lockname) \
10257 + SPIN_DEP_MAP_INIT(lockname) }
10259 +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
10260 + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
10262 +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
10265 diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
10266 new file mode 100644
10267 index 000000000000..3e3d8c5f7a9a
10269 +++ b/include/linux/spinlock_types_rt.h
10271 +#ifndef __LINUX_SPINLOCK_TYPES_RT_H
10272 +#define __LINUX_SPINLOCK_TYPES_RT_H
10274 +#ifndef __LINUX_SPINLOCK_TYPES_H
10275 +#error "Do not include directly. Include spinlock_types.h instead"
10278 +#include <linux/cache.h>
10281 + * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
10283 +typedef struct spinlock {
10284 + struct rt_mutex lock;
10285 + unsigned int break_lock;
10286 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
10287 + struct lockdep_map dep_map;
10291 +#ifdef CONFIG_DEBUG_RT_MUTEXES
10292 +# define __RT_SPIN_INITIALIZER(name) \
10294 + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
10295 + .save_state = 1, \
10296 + .file = __FILE__, \
10297 + .line = __LINE__ , \
10300 +# define __RT_SPIN_INITIALIZER(name) \
10302 + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
10303 + .save_state = 1, \
10308 +.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
10311 +#define __SPIN_LOCK_UNLOCKED(name) \
10312 + { .lock = __RT_SPIN_INITIALIZER(name.lock), \
10313 + SPIN_DEP_MAP_INIT(name) }
10315 +#define DEFINE_SPINLOCK(name) \
10316 + spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
10319 diff --git a/include/linux/srcu.h b/include/linux/srcu.h
10320 index dc8eb63c6568..e793d3a257da 100644
10321 --- a/include/linux/srcu.h
10322 +++ b/include/linux/srcu.h
10323 @@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct *sp);
10325 void process_srcu(struct work_struct *work);
10327 -#define __SRCU_STRUCT_INIT(name) \
10328 +#define __SRCU_STRUCT_INIT(name, pcpu_name) \
10330 .completed = -300, \
10331 - .per_cpu_ref = &name##_srcu_array, \
10332 + .per_cpu_ref = &pcpu_name, \
10333 .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
10334 .running = false, \
10335 .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
10336 @@ -119,7 +119,7 @@ void process_srcu(struct work_struct *work);
10338 #define __DEFINE_SRCU(name, is_static) \
10339 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
10340 - is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
10341 + is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array)
10342 #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
10343 #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
10345 diff --git a/include/linux/suspend.h b/include/linux/suspend.h
10346 index d9718378a8be..e81e6dc7dcb1 100644
10347 --- a/include/linux/suspend.h
10348 +++ b/include/linux/suspend.h
10349 @@ -193,6 +193,12 @@ struct platform_freeze_ops {
10353 +#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
10354 +extern bool pm_in_action;
10356 +# define pm_in_action false
10359 #ifdef CONFIG_SUSPEND
10361 * suspend_set_ops - set platform dependent suspend operations
10362 diff --git a/include/linux/swait.h b/include/linux/swait.h
10363 index c1f9c62a8a50..83f004a72320 100644
10364 --- a/include/linux/swait.h
10365 +++ b/include/linux/swait.h
10366 @@ -87,6 +87,7 @@ static inline int swait_active(struct swait_queue_head *q)
10367 extern void swake_up(struct swait_queue_head *q);
10368 extern void swake_up_all(struct swait_queue_head *q);
10369 extern void swake_up_locked(struct swait_queue_head *q);
10370 +extern void swake_up_all_locked(struct swait_queue_head *q);
10372 extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
10373 extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
10374 diff --git a/include/linux/swap.h b/include/linux/swap.h
10375 index 55ff5593c193..52bf5477dc92 100644
10376 --- a/include/linux/swap.h
10377 +++ b/include/linux/swap.h
10379 #include <linux/fs.h>
10380 #include <linux/atomic.h>
10381 #include <linux/page-flags.h>
10382 +#include <linux/locallock.h>
10383 #include <asm/page.h>
10385 struct notifier_block;
10386 @@ -247,7 +248,8 @@ struct swap_info_struct {
10387 void *workingset_eviction(struct address_space *mapping, struct page *page);
10388 bool workingset_refault(void *shadow);
10389 void workingset_activation(struct page *page);
10390 -extern struct list_lru workingset_shadow_nodes;
10391 +extern struct list_lru __workingset_shadow_nodes;
10392 +DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
10394 static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
10396 @@ -292,6 +294,7 @@ extern unsigned long nr_free_pagecache_pages(void);
10399 /* linux/mm/swap.c */
10400 +DECLARE_LOCAL_IRQ_LOCK(swapvec_lock);
10401 extern void lru_cache_add(struct page *);
10402 extern void lru_cache_add_anon(struct page *page);
10403 extern void lru_cache_add_file(struct page *page);
10404 diff --git a/include/linux/swork.h b/include/linux/swork.h
10405 new file mode 100644
10406 index 000000000000..f175fa9a6016
10408 +++ b/include/linux/swork.h
10410 +#ifndef _LINUX_SWORK_H
10411 +#define _LINUX_SWORK_H
10413 +#include <linux/list.h>
10415 +struct swork_event {
10416 + struct list_head item;
10417 + unsigned long flags;
10418 + void (*func)(struct swork_event *);
10421 +static inline void INIT_SWORK(struct swork_event *event,
10422 + void (*func)(struct swork_event *))
10424 + event->flags = 0;
10425 + event->func = func;
10428 +bool swork_queue(struct swork_event *sev);
10430 +int swork_get(void);
10431 +void swork_put(void);
10433 +#endif /* _LINUX_SWORK_H */
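Illustrative sketch (not part of the patch; names are hypothetical) of the simple-work API declared above: a user takes a reference on the worker thread with swork_get(), queues events with swork_queue() and drops the reference with swork_put().

#include <linux/swork.h>

static void example_swork_fn(struct swork_event *event)
{
	/* runs later in the swork worker thread */
}

static struct swork_event example_event;

static int example_init(void)
{
	int ret;

	ret = swork_get();			/* make sure the worker exists */
	if (ret)
		return ret;

	INIT_SWORK(&example_event, example_swork_fn);
	return 0;
}

static void example_raise(void)
{
	swork_queue(&example_event);		/* hand the event to the worker */
}

static void example_exit(void)
{
	swork_put();
}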
10434 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
10435 index 2873baf5372a..eb1a108f17ca 100644
10436 --- a/include/linux/thread_info.h
10437 +++ b/include/linux/thread_info.h
10438 @@ -107,7 +107,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
10439 #define test_thread_flag(flag) \
10440 test_ti_thread_flag(current_thread_info(), flag)
10442 -#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
10443 +#ifdef CONFIG_PREEMPT_LAZY
10444 +#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \
10445 + test_thread_flag(TIF_NEED_RESCHED_LAZY))
10446 +#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED))
10447 +#define tif_need_resched_lazy() (test_thread_flag(TIF_NEED_RESCHED_LAZY))
10450 +#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
10451 +#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED)
10452 +#define tif_need_resched_lazy() 0
10455 #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
10456 static inline int arch_within_stack_frames(const void * const stack,
10457 diff --git a/include/linux/timer.h b/include/linux/timer.h
10458 index 51d601f192d4..83cea629efe1 100644
10459 --- a/include/linux/timer.h
10460 +++ b/include/linux/timer.h
10461 @@ -241,7 +241,7 @@ extern void add_timer(struct timer_list *timer);
10463 extern int try_to_del_timer_sync(struct timer_list *timer);
10466 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
10467 extern int del_timer_sync(struct timer_list *timer);
10469 # define del_timer_sync(t) del_timer(t)
10470 diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
10471 index be007610ceb0..15154b13a53b 100644
10472 --- a/include/linux/trace_events.h
10473 +++ b/include/linux/trace_events.h
10474 @@ -56,6 +56,9 @@ struct trace_entry {
10475 unsigned char flags;
10476 unsigned char preempt_count;
10478 + unsigned short migrate_disable;
10479 + unsigned short padding;
10480 + unsigned char preempt_lazy_count;
10483 #define TRACE_EVENT_TYPE_MAX \
10484 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
10485 index f30c187ed785..83bf0f798426 100644
10486 --- a/include/linux/uaccess.h
10487 +++ b/include/linux/uaccess.h
10488 @@ -24,6 +24,7 @@ static __always_inline void pagefault_disabled_dec(void)
10490 static inline void pagefault_disable(void)
10492 + migrate_disable();
10493 pagefault_disabled_inc();
10495 * make sure to have issued the store before a pagefault
10496 @@ -40,6 +41,7 @@ static inline void pagefault_enable(void)
10499 pagefault_disabled_dec();
10500 + migrate_enable();
10504 diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
10505 index 4a29c75b146e..0a294e950df8 100644
10506 --- a/include/linux/uprobes.h
10507 +++ b/include/linux/uprobes.h
10509 #include <linux/errno.h>
10510 #include <linux/rbtree.h>
10511 #include <linux/types.h>
10512 +#include <linux/wait.h>
10514 struct vm_area_struct;
10516 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
10517 index 613771909b6e..e28c5a43229d 100644
10518 --- a/include/linux/vmstat.h
10519 +++ b/include/linux/vmstat.h
10520 @@ -33,7 +33,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
10522 static inline void __count_vm_event(enum vm_event_item item)
10524 + preempt_disable_rt();
10525 raw_cpu_inc(vm_event_states.event[item]);
10526 + preempt_enable_rt();
10529 static inline void count_vm_event(enum vm_event_item item)
10530 @@ -43,7 +45,9 @@ static inline void count_vm_event(enum vm_event_item item)
10532 static inline void __count_vm_events(enum vm_event_item item, long delta)
10534 + preempt_disable_rt();
10535 raw_cpu_add(vm_event_states.event[item], delta);
10536 + preempt_enable_rt();
10539 static inline void count_vm_events(enum vm_event_item item, long delta)
10540 diff --git a/include/linux/wait.h b/include/linux/wait.h
10541 index 2408e8d5c05c..db50d6609195 100644
10542 --- a/include/linux/wait.h
10543 +++ b/include/linux/wait.h
10545 #include <linux/spinlock.h>
10546 #include <asm/current.h>
10547 #include <uapi/linux/wait.h>
10548 +#include <linux/atomic.h>
10550 typedef struct __wait_queue wait_queue_t;
10551 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
10552 diff --git a/include/net/dst.h b/include/net/dst.h
10553 index 6835d224d47b..55a5a9698f14 100644
10554 --- a/include/net/dst.h
10555 +++ b/include/net/dst.h
10556 @@ -446,7 +446,7 @@ static inline void dst_confirm(struct dst_entry *dst)
10557 static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
10558 struct sk_buff *skb)
10560 - const struct hh_cache *hh;
10561 + struct hh_cache *hh;
10563 if (dst->pending_confirm) {
10564 unsigned long now = jiffies;
10565 diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
10566 index 231e121cc7d9..d125222b979d 100644
10567 --- a/include/net/gen_stats.h
10568 +++ b/include/net/gen_stats.h
10570 #include <linux/socket.h>
10571 #include <linux/rtnetlink.h>
10572 #include <linux/pkt_sched.h>
10573 +#include <net/net_seq_lock.h>
10575 struct gnet_stats_basic_cpu {
10576 struct gnet_stats_basic_packed bstats;
10577 @@ -33,11 +34,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
10578 spinlock_t *lock, struct gnet_dump *d,
10581 -int gnet_stats_copy_basic(const seqcount_t *running,
10582 +int gnet_stats_copy_basic(net_seqlock_t *running,
10583 struct gnet_dump *d,
10584 struct gnet_stats_basic_cpu __percpu *cpu,
10585 struct gnet_stats_basic_packed *b);
10586 -void __gnet_stats_copy_basic(const seqcount_t *running,
10587 +void __gnet_stats_copy_basic(net_seqlock_t *running,
10588 struct gnet_stats_basic_packed *bstats,
10589 struct gnet_stats_basic_cpu __percpu *cpu,
10590 struct gnet_stats_basic_packed *b);
10591 @@ -55,14 +56,14 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
10592 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
10593 struct gnet_stats_rate_est64 *rate_est,
10594 spinlock_t *stats_lock,
10595 - seqcount_t *running, struct nlattr *opt);
10596 + net_seqlock_t *running, struct nlattr *opt);
10597 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
10598 struct gnet_stats_rate_est64 *rate_est);
10599 int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
10600 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
10601 struct gnet_stats_rate_est64 *rate_est,
10602 spinlock_t *stats_lock,
10603 - seqcount_t *running, struct nlattr *opt);
10604 + net_seqlock_t *running, struct nlattr *opt);
10605 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
10606 const struct gnet_stats_rate_est64 *rate_est);
10608 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
10609 index 8b683841e574..bf656008f6e7 100644
10610 --- a/include/net/neighbour.h
10611 +++ b/include/net/neighbour.h
10612 @@ -446,7 +446,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
10616 -static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
10617 +static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
10621 @@ -501,7 +501,7 @@ struct neighbour_cb {
10623 #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
10625 -static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
10626 +static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
10627 const struct net_device *dev)
10630 diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h
10631 new file mode 100644
10632 index 000000000000..a7034298a82a
10634 +++ b/include/net/net_seq_lock.h
10636 +#ifndef __NET_NET_SEQ_LOCK_H__
10637 +#define __NET_NET_SEQ_LOCK_H__
10639 +#ifdef CONFIG_PREEMPT_RT_BASE
10640 +# define net_seqlock_t seqlock_t
10641 +# define net_seq_begin(__r) read_seqbegin(__r)
10642 +# define net_seq_retry(__r, __s) read_seqretry(__r, __s)
10645 +# define net_seqlock_t seqcount_t
10646 +# define net_seq_begin(__r) read_seqcount_begin(__r)
10647 +# define net_seq_retry(__r, __s) read_seqcount_retry(__r, __s)
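Illustrative sketch (not part of the patch; the helper is hypothetical): the net_seq_*() wrappers let statistics readers compile against either backing type, which is how the gen_stats and sch_generic changes in this patch stay config-independent.

#include <linux/seqlock.h>
#include <net/net_seq_lock.h>

static u64 example_read_stat(net_seqlock_t *running, const u64 *counter)
{
	unsigned int seq;
	u64 val;

	do {
		seq = net_seq_begin(running);	/* seqlock_t or seqcount_t, per config */
		val = *counter;
	} while (net_seq_retry(running, seq));

	return val;
}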
10651 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
10652 index 7adf4386ac8f..d3fd5c357268 100644
10653 --- a/include/net/netns/ipv4.h
10654 +++ b/include/net/netns/ipv4.h
10655 @@ -69,6 +69,7 @@ struct netns_ipv4 {
10657 int sysctl_icmp_echo_ignore_all;
10658 int sysctl_icmp_echo_ignore_broadcasts;
10659 + int sysctl_icmp_echo_sysrq;
10660 int sysctl_icmp_ignore_bogus_error_responses;
10661 int sysctl_icmp_ratelimit;
10662 int sysctl_icmp_ratemask;
10663 diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
10664 index e6aa0a249672..b57736f2a8a3 100644
10665 --- a/include/net/sch_generic.h
10666 +++ b/include/net/sch_generic.h
10668 #include <linux/dynamic_queue_limits.h>
10669 #include <net/gen_stats.h>
10670 #include <net/rtnetlink.h>
10671 +#include <net/net_seq_lock.h>
10674 struct qdisc_walker;
10675 @@ -86,7 +87,7 @@ struct Qdisc {
10676 struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
10677 struct qdisc_skb_head q;
10678 struct gnet_stats_basic_packed bstats;
10679 - seqcount_t running;
10680 + net_seqlock_t running;
10681 struct gnet_stats_queue qstats;
10682 unsigned long state;
10683 struct Qdisc *next_sched;
10684 @@ -98,13 +99,22 @@ struct Qdisc {
10685 spinlock_t busylock ____cacheline_aligned_in_smp;
10688 -static inline bool qdisc_is_running(const struct Qdisc *qdisc)
10689 +static inline bool qdisc_is_running(struct Qdisc *qdisc)
10691 +#ifdef CONFIG_PREEMPT_RT_BASE
10692 + return spin_is_locked(&qdisc->running.lock) ? true : false;
10694 return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
10698 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
10700 +#ifdef CONFIG_PREEMPT_RT_BASE
10701 + if (try_write_seqlock(&qdisc->running))
10705 if (qdisc_is_running(qdisc))
10707 /* Variant of write_seqcount_begin() telling lockdep a trylock
10708 @@ -113,11 +123,16 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
10709 raw_write_seqcount_begin(&qdisc->running);
10710 seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
10715 static inline void qdisc_run_end(struct Qdisc *qdisc)
10717 +#ifdef CONFIG_PREEMPT_RT_BASE
10718 + write_sequnlock(&qdisc->running);
10720 write_seqcount_end(&qdisc->running);
10724 static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
10725 @@ -308,7 +323,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
10726 return qdisc_lock(root);
10729 -static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
10730 +static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
10732 struct Qdisc *root = qdisc_root_sleeping(qdisc);
10734 diff --git a/include/trace/events/hist.h b/include/trace/events/hist.h
10735 new file mode 100644
10736 index 000000000000..f7710de1b1f3
10738 +++ b/include/trace/events/hist.h
10740 +#undef TRACE_SYSTEM
10741 +#define TRACE_SYSTEM hist
10743 +#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
10744 +#define _TRACE_HIST_H
10746 +#include "latency_hist.h"
10747 +#include <linux/tracepoint.h>
10749 +#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
10750 +#define trace_preemptirqsoff_hist(a, b)
10751 +#define trace_preemptirqsoff_hist_rcuidle(a, b)
10753 +TRACE_EVENT(preemptirqsoff_hist,
10755 + TP_PROTO(int reason, int starthist),
10757 + TP_ARGS(reason, starthist),
10759 + TP_STRUCT__entry(
10760 + __field(int, reason)
10761 + __field(int, starthist)
10765 + __entry->reason = reason;
10766 + __entry->starthist = starthist;
10769 + TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
10770 + __entry->starthist ? "start" : "stop")
10774 +#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
10775 +#define trace_hrtimer_interrupt(a, b, c, d)
10777 +TRACE_EVENT(hrtimer_interrupt,
10779 + TP_PROTO(int cpu, long long offset, struct task_struct *curr,
10780 + struct task_struct *task),
10782 + TP_ARGS(cpu, offset, curr, task),
10784 + TP_STRUCT__entry(
10785 + __field(int, cpu)
10786 + __field(long long, offset)
10787 + __array(char, ccomm, TASK_COMM_LEN)
10788 + __field(int, cprio)
10789 + __array(char, tcomm, TASK_COMM_LEN)
10790 + __field(int, tprio)
10794 + __entry->cpu = cpu;
10795 + __entry->offset = offset;
10796 + memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
10797 + __entry->cprio = curr->prio;
10798 + memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>",
10799 + task != NULL ? TASK_COMM_LEN : 7);
10800 + __entry->tprio = task != NULL ? task->prio : -1;
10803 + TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
10804 + __entry->cpu, __entry->offset, __entry->ccomm,
10805 + __entry->cprio, __entry->tcomm, __entry->tprio)
10809 +#endif /* _TRACE_HIST_H */
10811 +/* This part must be outside protection */
10812 +#include <trace/define_trace.h>
10813 diff --git a/include/trace/events/latency_hist.h b/include/trace/events/latency_hist.h
10814 new file mode 100644
10815 index 000000000000..d3f2fbd560b1
10817 +++ b/include/trace/events/latency_hist.h
10819 +#ifndef _LATENCY_HIST_H
10820 +#define _LATENCY_HIST_H
10822 +enum hist_action {
10831 +static char *actions[] = {
10840 +static inline char *getaction(int action)
10842 + if (action >= 0 && action < sizeof(actions)/sizeof(actions[0]))
10843 + return actions[action];
10844 + return "unknown";
10847 +#endif /* _LATENCY_HIST_H */
10848 diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
10849 index 9b90c57517a9..516ae88cddf4 100644
10850 --- a/include/trace/events/sched.h
10851 +++ b/include/trace/events/sched.h
10852 @@ -70,7 +70,7 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
10854 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
10855 __entry->pid = p->pid;
10856 - __entry->prio = p->prio;
10857 + __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
10858 __entry->success = 1; /* rudiment, kill when possible */
10859 __entry->target_cpu = task_cpu(p);
10861 @@ -147,6 +147,7 @@ TRACE_EVENT(sched_switch,
10862 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
10863 __entry->next_pid = next->pid;
10864 __entry->next_prio = next->prio;
10865 + /* XXX SCHED_DEADLINE */
10868 TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
10869 @@ -181,7 +182,7 @@ TRACE_EVENT(sched_migrate_task,
10871 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
10872 __entry->pid = p->pid;
10873 - __entry->prio = p->prio;
10874 + __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
10875 __entry->orig_cpu = task_cpu(p);
10876 __entry->dest_cpu = dest_cpu;
10878 @@ -206,7 +207,7 @@ DECLARE_EVENT_CLASS(sched_process_template,
10880 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
10881 __entry->pid = p->pid;
10882 - __entry->prio = p->prio;
10883 + __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
10886 TP_printk("comm=%s pid=%d prio=%d",
10887 @@ -253,7 +254,7 @@ TRACE_EVENT(sched_process_wait,
10889 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
10890 __entry->pid = pid_nr(pid);
10891 - __entry->prio = current->prio;
10892 + __entry->prio = current->prio; /* XXX SCHED_DEADLINE */
10895 TP_printk("comm=%s pid=%d prio=%d",
10896 @@ -413,9 +414,9 @@ DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
10898 TRACE_EVENT(sched_pi_setprio,
10900 - TP_PROTO(struct task_struct *tsk, int newprio),
10901 + TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),
10903 - TP_ARGS(tsk, newprio),
10904 + TP_ARGS(tsk, pi_task),
10907 __array( char, comm, TASK_COMM_LEN )
10908 @@ -428,7 +429,8 @@ TRACE_EVENT(sched_pi_setprio,
10909 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
10910 __entry->pid = tsk->pid;
10911 __entry->oldprio = tsk->prio;
10912 - __entry->newprio = newprio;
10913 + __entry->newprio = pi_task ? pi_task->prio : tsk->prio;
10914 + /* XXX SCHED_DEADLINE bits missing */
10917 TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
10918 diff --git a/init/Kconfig b/init/Kconfig
10919 index 34407f15e6d3..2ce33a32e65d 100644
10922 @@ -506,7 +506,7 @@ config TINY_RCU
10925 bool "Make expert-level adjustments to RCU configuration"
10927 + default y if PREEMPT_RT_FULL
10929 This option needs to be enabled if you wish to make
10930 expert-level adjustments to RCU configuration. By default,
10931 @@ -623,7 +623,7 @@ config RCU_FANOUT_LEAF
10933 config RCU_FAST_NO_HZ
10934 bool "Accelerate last non-dyntick-idle CPU's grace periods"
10935 - depends on NO_HZ_COMMON && SMP && RCU_EXPERT
10936 + depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL
10939 This option permits CPUs to enter dynticks-idle state even if
10940 @@ -650,7 +650,7 @@ config TREE_RCU_TRACE
10942 bool "Enable RCU priority boosting"
10943 depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
10945 + default y if PREEMPT_RT_FULL
10947 This option boosts the priority of preempted RCU readers that
10948 block the current preemptible RCU grace period for too long.
10949 @@ -781,19 +781,6 @@ config RCU_NOCB_CPU_ALL
10953 -config RCU_EXPEDITE_BOOT
10957 - This option enables expedited grace periods at boot time,
10958 - as if rcu_expedite_gp() had been invoked early in boot.
10959 - The corresponding rcu_unexpedite_gp() is invoked from
10960 - rcu_end_inkernel_boot(), which is intended to be invoked
10961 - at the end of the kernel-only boot sequence, just before
10964 - Accept the default if unsure.
10966 endmenu # "RCU Subsystem"
10969 @@ -1064,6 +1051,7 @@ config CFS_BANDWIDTH
10970 config RT_GROUP_SCHED
10971 bool "Group scheduling for SCHED_RR/FIFO"
10972 depends on CGROUP_SCHED
10973 + depends on !PREEMPT_RT_FULL
10976 This feature lets you explicitly allocate real CPU bandwidth
10977 @@ -1772,6 +1760,7 @@ choice
10981 + depends on !PREEMPT_RT_FULL
10982 select HAVE_HARDENED_USERCOPY_ALLOCATOR
10984 The regular slab allocator that is established and known to work
10985 @@ -1792,6 +1781,7 @@ config SLUB
10988 bool "SLOB (Simple Allocator)"
10989 + depends on !PREEMPT_RT_FULL
10991 SLOB replaces the stock allocator with a drastically simpler
10992 allocator. SLOB is generally more space efficient but
10993 @@ -1810,7 +1800,7 @@ config SLAB_FREELIST_RANDOM
10995 config SLUB_CPU_PARTIAL
10997 - depends on SLUB && SMP
10998 + depends on SLUB && SMP && !PREEMPT_RT_FULL
10999 bool "SLUB per cpu partial cache"
11001 Per cpu partial caches accellerate objects allocation and freeing
11002 diff --git a/init/Makefile b/init/Makefile
11003 index c4fb45525d08..821190dfaa75 100644
11004 --- a/init/Makefile
11005 +++ b/init/Makefile
11006 @@ -35,4 +35,4 @@ $(obj)/version.o: include/generated/compile.h
11007 include/generated/compile.h: FORCE
11008 @$($(quiet)chk_compile.h)
11009 $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
11010 - "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
11011 + "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
11012 diff --git a/init/main.c b/init/main.c
11013 index ae3996ae9bac..6470deef01c9 100644
11016 @@ -507,6 +507,7 @@ asmlinkage __visible void __init start_kernel(void)
11017 setup_command_line(command_line);
11018 setup_nr_cpu_ids();
11019 setup_per_cpu_areas();
11020 + softirq_early_init();
11021 boot_cpu_state_init();
11022 smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
11024 diff --git a/ipc/sem.c b/ipc/sem.c
11025 index 10b94bc59d4a..b8360eaacc7a 100644
11028 @@ -712,6 +712,13 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
11029 static void wake_up_sem_queue_prepare(struct list_head *pt,
11030 struct sem_queue *q, int error)
11032 +#ifdef CONFIG_PREEMPT_RT_BASE
11033 + struct task_struct *p = q->sleeper;
11034 + get_task_struct(p);
11035 + q->status = error;
11036 + wake_up_process(p);
11037 + put_task_struct(p);
11039 if (list_empty(pt)) {
11041 * Hold preempt off so that we don't get preempted and have the
11042 @@ -723,6 +730,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
11045 list_add_tail(&q->list, pt);
11050 @@ -736,6 +744,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
11052 static void wake_up_sem_queue_do(struct list_head *pt)
11054 +#ifndef CONFIG_PREEMPT_RT_BASE
11055 struct sem_queue *q, *t;
11058 @@ -748,6 +757,7 @@ static void wake_up_sem_queue_do(struct list_head *pt)
11065 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
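On RT the hunk above wakes the semaphore sleeper immediately instead of deferring it to wake_up_sem_queue_do(), and it brackets the wakeup with get_task_struct()/put_task_struct() so the sleeper's task_struct cannot vanish between writing q->status and the wake_up_process() call. A rough user-space sketch of that take-a-reference-before-waking pattern, built only on POSIX threads and C11 atomics (every name below is illustrative; none of this code is from the patch):

    /*
     * Illustrative sketch only: a waiter object is refcounted so the
     * waking thread can safely touch it even if the waiting thread
     * wakes up and drops its own reference first.
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct waiter {
        atomic_int refcount;        /* analogous to task_struct::usage */
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int status;                 /* analogous to sem_queue::status */
    };

    static struct waiter *waiter_get(struct waiter *w)
    {
        atomic_fetch_add(&w->refcount, 1);      /* "get_task_struct()" */
        return w;
    }

    static void waiter_put(struct waiter *w)
    {
        if (atomic_fetch_sub(&w->refcount, 1) == 1) {   /* "put_task_struct()" */
            pthread_mutex_destroy(&w->lock);
            pthread_cond_destroy(&w->cond);
            free(w);
        }
    }

    static void *waker(void *arg)
    {
        struct waiter *w = waiter_get(arg);     /* pin before waking */

        pthread_mutex_lock(&w->lock);
        w->status = 0;                          /* hand over the result */
        pthread_cond_signal(&w->cond);          /* "wake_up_process()" */
        pthread_mutex_unlock(&w->lock);

        waiter_put(w);                          /* may free if waiter is gone */
        return NULL;
    }

    int main(void)
    {
        struct waiter *w = calloc(1, sizeof(*w));
        pthread_t t;

        atomic_init(&w->refcount, 1);           /* the waiter's own reference */
        pthread_mutex_init(&w->lock, NULL);
        pthread_cond_init(&w->cond, NULL);
        w->status = -1;

        pthread_create(&t, NULL, waker, w);

        pthread_mutex_lock(&w->lock);
        while (w->status != 0)
            pthread_cond_wait(&w->cond, &w->lock);
        pthread_mutex_unlock(&w->lock);

        printf("woken with status %d\n", w->status);
        waiter_put(w);                          /* drop our reference */
        pthread_join(t, NULL);
        return 0;
    }

The point of the extra reference is the same in both worlds: the waker may still touch the waiter object after the woken side has already dropped its own reference.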
11066 diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
11067 index ebdb0043203a..b9e6aa7e5aa6 100644
11068 --- a/kernel/Kconfig.locks
11069 +++ b/kernel/Kconfig.locks
11070 @@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW
11072 config MUTEX_SPIN_ON_OWNER
11074 - depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
11075 + depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
11077 config RWSEM_SPIN_ON_OWNER
11079 - depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
11080 + depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
11082 config LOCK_SPIN_ON_OWNER
11084 diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
11085 index 3f9c97419f02..11dbe26a8279 100644
11086 --- a/kernel/Kconfig.preempt
11087 +++ b/kernel/Kconfig.preempt
11091 + select PREEMPT_COUNT
11093 +config PREEMPT_RT_BASE
11097 +config HAVE_PREEMPT_LAZY
11100 +config PREEMPT_LAZY
11101 + def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
11104 prompt "Preemption Model"
11105 @@ -33,9 +46,9 @@ config PREEMPT_VOLUNTARY
11107 Select this if you are building a kernel for a desktop system.
11110 +config PREEMPT__LL
11111 bool "Preemptible Kernel (Low-Latency Desktop)"
11112 - select PREEMPT_COUNT
11114 select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
11116 This option reduces the latency of the kernel by making
11117 @@ -52,6 +65,22 @@ config PREEMPT
11118 embedded system with latency requirements in the milliseconds
11121 +config PREEMPT_RTB
11122 + bool "Preemptible Kernel (Basic RT)"
11123 + select PREEMPT_RT_BASE
11125 + This option is basically the same as (Low-Latency Desktop) but
11126 + enables changes which are preliminary for the full preemptible
11129 +config PREEMPT_RT_FULL
11130 + bool "Fully Preemptible Kernel (RT)"
11131 + depends on IRQ_FORCED_THREADING
11132 + select PREEMPT_RT_BASE
11133 + select PREEMPT_RCU
11135 + All and everything
11139 config PREEMPT_COUNT
11140 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
11141 index a3d2aad2443f..bb6b252648ff 100644
11142 --- a/kernel/cgroup.c
11143 +++ b/kernel/cgroup.c
11144 @@ -5041,10 +5041,10 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
11145 queue_work(cgroup_destroy_wq, &css->destroy_work);
11148 -static void css_release_work_fn(struct work_struct *work)
11149 +static void css_release_work_fn(struct swork_event *sev)
11151 struct cgroup_subsys_state *css =
11152 - container_of(work, struct cgroup_subsys_state, destroy_work);
11153 + container_of(sev, struct cgroup_subsys_state, destroy_swork);
11154 struct cgroup_subsys *ss = css->ss;
11155 struct cgroup *cgrp = css->cgroup;
11157 @@ -5087,8 +5087,8 @@ static void css_release(struct percpu_ref *ref)
11158 struct cgroup_subsys_state *css =
11159 container_of(ref, struct cgroup_subsys_state, refcnt);
11161 - INIT_WORK(&css->destroy_work, css_release_work_fn);
11162 - queue_work(cgroup_destroy_wq, &css->destroy_work);
11163 + INIT_SWORK(&css->destroy_swork, css_release_work_fn);
11164 + swork_queue(&css->destroy_swork);
11167 static void init_and_link_css(struct cgroup_subsys_state *css,
11168 @@ -5740,6 +5740,7 @@ static int __init cgroup_wq_init(void)
11170 cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
11171 BUG_ON(!cgroup_destroy_wq);
11172 + BUG_ON(swork_get());
11175 * Used to destroy pidlists and separate to serve as flush domain.
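The css release path above is moved off the regular workqueue onto a "simple work" (swork) item that one dedicated kernel thread processes; swork_get() in cgroup_wq_init() makes sure that worker exists. As a rough user-space model of that hand-off-to-a-single-worker idea, assuming nothing beyond POSIX threads (the struct and function names merely echo the patch; the code below is not part of it):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct swork_event {                        /* name borrowed from the patch */
        struct swork_event *next;
        void (*func)(struct swork_event *);
    };

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  q_cond = PTHREAD_COND_INITIALIZER;
    static struct swork_event *q_head;

    static void swork_queue(struct swork_event *ev)     /* producer side */
    {
        pthread_mutex_lock(&q_lock);
        ev->next = q_head;
        q_head = ev;
        pthread_cond_signal(&q_cond);
        pthread_mutex_unlock(&q_lock);
    }

    static void *swork_thread(void *arg)                /* the single worker */
    {
        (void)arg;
        for (;;) {
            struct swork_event *ev;

            pthread_mutex_lock(&q_lock);
            while (!q_head)
                pthread_cond_wait(&q_cond, &q_lock);
            ev = q_head;
            q_head = ev->next;
            pthread_mutex_unlock(&q_lock);

            ev->func(ev);               /* run the deferred release work */
        }
        return NULL;
    }

    static void release_fn(struct swork_event *ev)
    {
        printf("released %p in worker context\n", (void *)ev);
        free(ev);
    }

    int main(void)
    {
        pthread_t worker;
        struct swork_event *ev = malloc(sizeof(*ev));

        pthread_create(&worker, NULL, swork_thread, NULL);
        ev->func = release_fn;
        swork_queue(ev);
        sleep(1);               /* give the worker time to run, then exit */
        return 0;
    }

A real implementation would also provide a way to flush and tear the worker down; this sketch simply terminates when main() returns.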
11176 diff --git a/kernel/cpu.c b/kernel/cpu.c
11177 index 99c6c568bc55..f1c64e563970 100644
11180 @@ -239,6 +239,289 @@ static struct {
11181 #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
11182 #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
11185 + * hotplug_pcp - per cpu hotplug descriptor
11186 + * @unplug: set when pin_current_cpu() needs to sync tasks
11187 + * @sync_tsk: the task that waits for tasks to finish pinned sections
11188 + * @refcount: counter of tasks in pinned sections
11189 + * @grab_lock: set when the tasks entering pinned sections should wait
11190 + * @synced: notifier for @sync_tsk to tell cpu_down it's finished
11191 + * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
11192 + * @mutex_init: zero if the mutex hasn't been initialized yet.
11194 + * Although @unplug and @sync_tsk may point to the same task, the @unplug
11195 + * is used as a flag and still exists after @sync_tsk has exited and
11196 + * @sync_tsk set to NULL.
11198 +struct hotplug_pcp {
11199 + struct task_struct *unplug;
11200 + struct task_struct *sync_tsk;
11203 + struct completion synced;
11204 + struct completion unplug_wait;
11205 +#ifdef CONFIG_PREEMPT_RT_FULL
11207 + * Note, on PREEMPT_RT, the hotplug lock must save the state of
11208 + * the task, otherwise the mutex will cause the task to fail
11209 + * to sleep when required. (Because it's called from migrate_disable())
11211 + * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
11216 + struct mutex mutex;
11221 +#ifdef CONFIG_PREEMPT_RT_FULL
11222 +# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock)
11223 +# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock)
11225 +# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
11226 +# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
11229 +static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
11232 + * pin_current_cpu - Prevent the current cpu from being unplugged
11234 + * Lightweight version of get_online_cpus() to prevent cpu from being
11235 + * unplugged when code runs in a migration disabled region.
11237 + * Must be called with preemption disabled (preempt_count = 1)!
11239 +void pin_current_cpu(void)
11241 + struct hotplug_pcp *hp;
11245 + hp = this_cpu_ptr(&hotplug_pcp);
11247 + if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
11248 + hp->unplug == current) {
11252 + if (hp->grab_lock) {
11253 + preempt_enable();
11254 + hotplug_lock(hp);
11255 + hotplug_unlock(hp);
11257 + preempt_enable();
11259 + * Try to push this task off of this CPU.
11261 + if (!migrate_me()) {
11262 + preempt_disable();
11263 + hp = this_cpu_ptr(&hotplug_pcp);
11264 + if (!hp->grab_lock) {
11266 + * Just let it continue; it's already pinned

11267 + * or about to sleep.
11272 + preempt_enable();
11275 + preempt_disable();
11280 + * unpin_current_cpu - Allow unplug of current cpu
11282 + * Must be called with preemption or interrupts disabled!
11284 +void unpin_current_cpu(void)
11286 + struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);
11288 + WARN_ON(hp->refcount <= 0);
11290 + /* This is safe. sync_unplug_thread is pinned to this cpu */
11291 + if (!--hp->refcount && hp->unplug && hp->unplug != current)
11292 + wake_up_process(hp->unplug);
11295 +static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
11297 + set_current_state(TASK_UNINTERRUPTIBLE);
11298 + while (hp->refcount) {
11299 + schedule_preempt_disabled();
11300 + set_current_state(TASK_UNINTERRUPTIBLE);
11304 +static int sync_unplug_thread(void *data)
11306 + struct hotplug_pcp *hp = data;
11308 + wait_for_completion(&hp->unplug_wait);
11309 + preempt_disable();
11310 + hp->unplug = current;
11311 + wait_for_pinned_cpus(hp);
11314 + * This thread will synchronize the cpu_down() with threads
11315 + * that have pinned the CPU. When the pinned CPU count reaches
11316 + * zero, we inform the cpu_down code to continue to the next step.
11318 + set_current_state(TASK_UNINTERRUPTIBLE);
11319 + preempt_enable();
11320 + complete(&hp->synced);
11323 + * If all succeeds, the next step will need tasks to wait till
11324 + * the CPU is offline before continuing. To do this, the grab_lock
11325 + * is set and tasks going into pin_current_cpu() will block on the
11326 + * mutex. But we still need to wait for those that are already in
11327 + * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
11328 + * will kick this thread out.
11330 + while (!hp->grab_lock && !kthread_should_stop()) {
11332 + set_current_state(TASK_UNINTERRUPTIBLE);
11335 + /* Make sure grab_lock is seen before we see a stale completion */
11339 + * Now just before cpu_down() enters stop machine, we need to make
11340 + * sure all tasks that are in pinned CPU sections are out, and new
11341 + * tasks will now grab the lock, keeping them from entering pinned
11344 + if (!kthread_should_stop()) {
11345 + preempt_disable();
11346 + wait_for_pinned_cpus(hp);
11347 + preempt_enable();
11348 + complete(&hp->synced);
11351 + set_current_state(TASK_UNINTERRUPTIBLE);
11352 + while (!kthread_should_stop()) {
11354 + set_current_state(TASK_UNINTERRUPTIBLE);
11356 + set_current_state(TASK_RUNNING);
11359 + * Force this thread off this CPU as it's going down and
11360 + * we don't want any more work on this CPU.
11362 + current->flags &= ~PF_NO_SETAFFINITY;
11363 + set_cpus_allowed_ptr(current, cpu_present_mask);
11368 +static void __cpu_unplug_sync(struct hotplug_pcp *hp)
11370 + wake_up_process(hp->sync_tsk);
11371 + wait_for_completion(&hp->synced);
11374 +static void __cpu_unplug_wait(unsigned int cpu)
11376 + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
11378 + complete(&hp->unplug_wait);
11379 + wait_for_completion(&hp->synced);
11383 + * Start the sync_unplug_thread on the target cpu and wait for it to
11386 +static int cpu_unplug_begin(unsigned int cpu)
11388 + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
11391 + /* Protected by cpu_hotplug.lock */
11392 + if (!hp->mutex_init) {
11393 +#ifdef CONFIG_PREEMPT_RT_FULL
11394 + spin_lock_init(&hp->lock);
11396 + mutex_init(&hp->mutex);
11398 + hp->mutex_init = 1;
11401 + /* Inform the scheduler to migrate tasks off this CPU */
11402 + tell_sched_cpu_down_begin(cpu);
11404 + init_completion(&hp->synced);
11405 + init_completion(&hp->unplug_wait);
11407 + hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
11408 + if (IS_ERR(hp->sync_tsk)) {
11409 + err = PTR_ERR(hp->sync_tsk);
11410 + hp->sync_tsk = NULL;
11413 + kthread_bind(hp->sync_tsk, cpu);
11416 + * Wait for tasks to get out of the pinned sections,
11417 + * it's still OK if new tasks enter. Some CPU notifiers will
11418 + * wait for tasks that are going to enter these sections and
11419 + * we must not have them block.
11421 + wake_up_process(hp->sync_tsk);
11425 +static void cpu_unplug_sync(unsigned int cpu)
11427 + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
11429 + init_completion(&hp->synced);
11430 + /* The completion needs to be initialized before setting grab_lock */
11433 + /* Grab the mutex before setting grab_lock */
11434 + hotplug_lock(hp);
11435 + hp->grab_lock = 1;
11438 + * The CPU notifiers have been completed.
11439 + * Wait for tasks to get out of pinned CPU sections and have new
11440 + * tasks block until the CPU is completely down.
11442 + __cpu_unplug_sync(hp);
11444 + /* All done with the sync thread */
11445 + kthread_stop(hp->sync_tsk);
11446 + hp->sync_tsk = NULL;
11449 +static void cpu_unplug_done(unsigned int cpu)
11451 + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
11453 + hp->unplug = NULL;
11454 + /* Let all tasks know cpu unplug is finished before cleaning up */
11457 + if (hp->sync_tsk)
11458 + kthread_stop(hp->sync_tsk);
11460 + if (hp->grab_lock) {
11461 + hotplug_unlock(hp);
11462 + /* protected by cpu_hotplug.lock */
11463 + hp->grab_lock = 0;
11465 + tell_sched_cpu_down_done(cpu);
11468 void get_online_cpus(void)
11470 @@ -789,10 +1072,14 @@ static int takedown_cpu(unsigned int cpu)
11471 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
11474 + __cpu_unplug_wait(cpu);
11475 /* Park the smpboot threads */
11476 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
11477 smpboot_park_threads(cpu);
11479 + /* Notifiers are done. Don't let any more tasks pin this CPU. */
11480 + cpu_unplug_sync(cpu);
11483 * Prevent irq alloc/free while the dying cpu reorganizes the
11484 * interrupt affinities.
11485 @@ -877,6 +1164,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
11486 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
11487 int prev_state, ret = 0;
11488 bool hasdied = false;
11490 + cpumask_var_t cpumask;
11491 + cpumask_var_t cpumask_org;
11493 if (num_online_cpus() == 1)
11495 @@ -884,7 +1174,34 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
11496 if (!cpu_present(cpu))
11499 + /* Move the downtaker off the unplug cpu */
11500 + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
11502 + if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
11503 + free_cpumask_var(cpumask);
11507 + cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
11508 + cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
11509 + set_cpus_allowed_ptr(current, cpumask);
11510 + free_cpumask_var(cpumask);
11511 + migrate_disable();
11512 + mycpu = smp_processor_id();
11513 + if (mycpu == cpu) {
11514 + printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
11515 + migrate_enable();
11517 + goto restore_cpus;
11520 + migrate_enable();
11521 cpu_hotplug_begin();
11522 + ret = cpu_unplug_begin(cpu);
11524 + printk("cpu_unplug_begin(%d) failed\n", cpu);
11528 cpuhp_tasks_frozen = tasks_frozen;
11530 @@ -923,10 +1240,15 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
11532 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
11534 + cpu_unplug_done(cpu);
11536 cpu_hotplug_done();
11537 /* This post dead nonsense must die */
11538 if (!ret && hasdied)
11539 cpu_notify_nofail(CPU_POST_DEAD, cpu);
11541 + set_cpus_allowed_ptr(current, cpumask_org);
11542 + free_cpumask_var(cpumask_org);
11546 @@ -1240,6 +1562,8 @@ core_initcall(cpu_hotplug_pm_sync_init);
11548 #endif /* CONFIG_PM_SLEEP_SMP */
11550 +int __boot_cpu_id;
11552 #endif /* CONFIG_SMP */
11554 /* Boot processor state steps */
11555 @@ -1924,6 +2248,10 @@ void __init boot_cpu_init(void)
11556 set_cpu_active(cpu, true);
11557 set_cpu_present(cpu, true);
11558 set_cpu_possible(cpu, true);
11561 + __boot_cpu_id = cpu;
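The hotplug_pcp machinery added above gives RT a cheap way to pin the current CPU: pinners bump @refcount, sync_unplug_thread() drains it, and once @grab_lock is set any new pinner blocks on the hotplug lock until the CPU is down. A condensed user-space analogy of that drain-then-block sequence, using one mutex and one condition variable and ignoring the per-CPU, preempt-disable and migrate_me() details (the names are loose analogies, not the patch's API):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t hp_lock = PTHREAD_MUTEX_INITIALIZER;  /* "hotplug_lock" */
    static pthread_cond_t  hp_drained = PTHREAD_COND_INITIALIZER;
    static int refcount;        /* tasks currently in a pinned section */
    static int grab_lock;       /* set once the drain has started */

    static void pin(void)       /* ~ pin_current_cpu() */
    {
        pthread_mutex_lock(&hp_lock);
        while (grab_lock)       /* new pinners wait while unplug is in progress */
            pthread_cond_wait(&hp_drained, &hp_lock);
        refcount++;
        pthread_mutex_unlock(&hp_lock);
    }

    static void unpin(void)     /* ~ unpin_current_cpu() */
    {
        pthread_mutex_lock(&hp_lock);
        if (--refcount == 0)
            pthread_cond_broadcast(&hp_drained);   /* wake the unplugger */
        pthread_mutex_unlock(&hp_lock);
    }

    static void unplug(void)    /* ~ cpu_unplug_sync() + cpu_unplug_done() */
    {
        pthread_mutex_lock(&hp_lock);
        grab_lock = 1;                          /* block new pinners */
        while (refcount)                        /* ~ wait_for_pinned_cpus() */
            pthread_cond_wait(&hp_drained, &hp_lock);
        /* ... the "CPU" would go down here ... */
        grab_lock = 0;
        pthread_cond_broadcast(&hp_drained);    /* let pinners back in */
        pthread_mutex_unlock(&hp_lock);
    }

    static void *pinner(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 1000; i++) {
            pin();
            usleep(10);         /* work done while pinned */
            unpin();
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t p1, p2;

        pthread_create(&p1, NULL, pinner, NULL);
        pthread_create(&p2, NULL, pinner, NULL);
        usleep(1000);
        unplug();
        puts("unplug completed with no pinned sections left");
        pthread_join(p1, NULL);
        pthread_join(p2, NULL);
        return 0;
    }

The kernel version additionally has to work from preemption-disabled context and tries to migrate the pinned task away first, which is why pin_current_cpu() is considerably more involved than this sketch.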
11566 diff --git a/kernel/cpuset.c b/kernel/cpuset.c
11567 index 29f815d2ef7e..341b17f24f95 100644
11568 --- a/kernel/cpuset.c
11569 +++ b/kernel/cpuset.c
11570 @@ -284,7 +284,7 @@ static struct cpuset top_cpuset = {
11573 static DEFINE_MUTEX(cpuset_mutex);
11574 -static DEFINE_SPINLOCK(callback_lock);
11575 +static DEFINE_RAW_SPINLOCK(callback_lock);
11577 static struct workqueue_struct *cpuset_migrate_mm_wq;
11579 @@ -907,9 +907,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
11583 - spin_lock_irq(&callback_lock);
11584 + raw_spin_lock_irq(&callback_lock);
11585 cpumask_copy(cp->effective_cpus, new_cpus);
11586 - spin_unlock_irq(&callback_lock);
11587 + raw_spin_unlock_irq(&callback_lock);
11589 WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
11590 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
11591 @@ -974,9 +974,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
11595 - spin_lock_irq(&callback_lock);
11596 + raw_spin_lock_irq(&callback_lock);
11597 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
11598 - spin_unlock_irq(&callback_lock);
11599 + raw_spin_unlock_irq(&callback_lock);
11601 /* use trialcs->cpus_allowed as a temp variable */
11602 update_cpumasks_hier(cs, trialcs->cpus_allowed);
11603 @@ -1176,9 +1176,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
11607 - spin_lock_irq(&callback_lock);
11608 + raw_spin_lock_irq(&callback_lock);
11609 cp->effective_mems = *new_mems;
11610 - spin_unlock_irq(&callback_lock);
11611 + raw_spin_unlock_irq(&callback_lock);
11613 WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
11614 !nodes_equal(cp->mems_allowed, cp->effective_mems));
11615 @@ -1246,9 +1246,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
11619 - spin_lock_irq(&callback_lock);
11620 + raw_spin_lock_irq(&callback_lock);
11621 cs->mems_allowed = trialcs->mems_allowed;
11622 - spin_unlock_irq(&callback_lock);
11623 + raw_spin_unlock_irq(&callback_lock);
11625 /* use trialcs->mems_allowed as a temp variable */
11626 update_nodemasks_hier(cs, &trialcs->mems_allowed);
11627 @@ -1339,9 +1339,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
11628 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
11629 || (is_spread_page(cs) != is_spread_page(trialcs)));
11631 - spin_lock_irq(&callback_lock);
11632 + raw_spin_lock_irq(&callback_lock);
11633 cs->flags = trialcs->flags;
11634 - spin_unlock_irq(&callback_lock);
11635 + raw_spin_unlock_irq(&callback_lock);
11637 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
11638 rebuild_sched_domains_locked();
11639 @@ -1756,7 +1756,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
11640 cpuset_filetype_t type = seq_cft(sf)->private;
11643 - spin_lock_irq(&callback_lock);
11644 + raw_spin_lock_irq(&callback_lock);
11648 @@ -1775,7 +1775,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
11652 - spin_unlock_irq(&callback_lock);
11653 + raw_spin_unlock_irq(&callback_lock);
11657 @@ -1989,12 +1989,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
11661 - spin_lock_irq(&callback_lock);
11662 + raw_spin_lock_irq(&callback_lock);
11663 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
11664 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
11665 cs->effective_mems = parent->effective_mems;
11667 - spin_unlock_irq(&callback_lock);
11668 + raw_spin_unlock_irq(&callback_lock);
11670 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
11672 @@ -2021,12 +2021,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
11676 - spin_lock_irq(&callback_lock);
11677 + raw_spin_lock_irq(&callback_lock);
11678 cs->mems_allowed = parent->mems_allowed;
11679 cs->effective_mems = parent->mems_allowed;
11680 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
11681 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
11682 - spin_unlock_irq(&callback_lock);
11683 + raw_spin_unlock_irq(&callback_lock);
11685 mutex_unlock(&cpuset_mutex);
11687 @@ -2065,7 +2065,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
11688 static void cpuset_bind(struct cgroup_subsys_state *root_css)
11690 mutex_lock(&cpuset_mutex);
11691 - spin_lock_irq(&callback_lock);
11692 + raw_spin_lock_irq(&callback_lock);
11694 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
11695 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
11696 @@ -2076,7 +2076,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
11697 top_cpuset.mems_allowed = top_cpuset.effective_mems;
11700 - spin_unlock_irq(&callback_lock);
11701 + raw_spin_unlock_irq(&callback_lock);
11702 mutex_unlock(&cpuset_mutex);
11705 @@ -2177,12 +2177,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
11709 - spin_lock_irq(&callback_lock);
11710 + raw_spin_lock_irq(&callback_lock);
11711 cpumask_copy(cs->cpus_allowed, new_cpus);
11712 cpumask_copy(cs->effective_cpus, new_cpus);
11713 cs->mems_allowed = *new_mems;
11714 cs->effective_mems = *new_mems;
11715 - spin_unlock_irq(&callback_lock);
11716 + raw_spin_unlock_irq(&callback_lock);
11719 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
11720 @@ -2219,10 +2219,10 @@ hotplug_update_tasks(struct cpuset *cs,
11721 if (nodes_empty(*new_mems))
11722 *new_mems = parent_cs(cs)->effective_mems;
11724 - spin_lock_irq(&callback_lock);
11725 + raw_spin_lock_irq(&callback_lock);
11726 cpumask_copy(cs->effective_cpus, new_cpus);
11727 cs->effective_mems = *new_mems;
11728 - spin_unlock_irq(&callback_lock);
11729 + raw_spin_unlock_irq(&callback_lock);
11732 update_tasks_cpumask(cs);
11733 @@ -2308,21 +2308,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
11735 /* synchronize cpus_allowed to cpu_active_mask */
11736 if (cpus_updated) {
11737 - spin_lock_irq(&callback_lock);
11738 + raw_spin_lock_irq(&callback_lock);
11740 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
11741 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
11742 - spin_unlock_irq(&callback_lock);
11743 + raw_spin_unlock_irq(&callback_lock);
11744 /* we don't mess with cpumasks of tasks in top_cpuset */
11747 /* synchronize mems_allowed to N_MEMORY */
11748 if (mems_updated) {
11749 - spin_lock_irq(&callback_lock);
11750 + raw_spin_lock_irq(&callback_lock);
11752 top_cpuset.mems_allowed = new_mems;
11753 top_cpuset.effective_mems = new_mems;
11754 - spin_unlock_irq(&callback_lock);
11755 + raw_spin_unlock_irq(&callback_lock);
11756 update_tasks_nodemask(&top_cpuset);
11759 @@ -2420,11 +2420,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
11761 unsigned long flags;
11763 - spin_lock_irqsave(&callback_lock, flags);
11764 + raw_spin_lock_irqsave(&callback_lock, flags);
11766 guarantee_online_cpus(task_cs(tsk), pmask);
11768 - spin_unlock_irqrestore(&callback_lock, flags);
11769 + raw_spin_unlock_irqrestore(&callback_lock, flags);
11772 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
11773 @@ -2472,11 +2472,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
11775 unsigned long flags;
11777 - spin_lock_irqsave(&callback_lock, flags);
11778 + raw_spin_lock_irqsave(&callback_lock, flags);
11780 guarantee_online_mems(task_cs(tsk), &mask);
11782 - spin_unlock_irqrestore(&callback_lock, flags);
11783 + raw_spin_unlock_irqrestore(&callback_lock, flags);
11787 @@ -2568,14 +2568,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
11790 /* Not hardwall and node outside mems_allowed: scan up cpusets */
11791 - spin_lock_irqsave(&callback_lock, flags);
11792 + raw_spin_lock_irqsave(&callback_lock, flags);
11795 cs = nearest_hardwall_ancestor(task_cs(current));
11796 allowed = node_isset(node, cs->mems_allowed);
11799 - spin_unlock_irqrestore(&callback_lock, flags);
11800 + raw_spin_unlock_irqrestore(&callback_lock, flags);
11804 diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
11805 index fc1ef736253c..83c666537a7a 100644
11806 --- a/kernel/debug/kdb/kdb_io.c
11807 +++ b/kernel/debug/kdb/kdb_io.c
11808 @@ -554,7 +554,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
11811 int logging, saved_loglevel = 0;
11812 - int saved_trap_printk;
11813 int got_printf_lock = 0;
11816 @@ -565,8 +564,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
11817 unsigned long uninitialized_var(flags);
11820 - saved_trap_printk = kdb_trap_printk;
11821 - kdb_trap_printk = 0;
11823 /* Serialize kdb_printf if multiple cpus try to write at once.
11824 * But if any cpu goes recursive in kdb, just print the output,
11825 @@ -855,7 +852,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
11827 __release(kdb_printf_lock);
11829 - kdb_trap_printk = saved_trap_printk;
11833 @@ -865,9 +861,11 @@ int kdb_printf(const char *fmt, ...)
11837 + kdb_trap_printk++;
11839 r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
11841 + kdb_trap_printk--;
11845 diff --git a/kernel/events/core.c b/kernel/events/core.c
11846 index 07c0dc806dfc..baf1a2867d74 100644
11847 --- a/kernel/events/core.c
11848 +++ b/kernel/events/core.c
11849 @@ -1050,6 +1050,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
11850 raw_spin_lock_init(&cpuctx->hrtimer_lock);
11851 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
11852 timer->function = perf_mux_hrtimer_handler;
11853 + timer->irqsafe = 1;
11856 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
11857 @@ -8363,6 +8364,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
11859 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
11860 hwc->hrtimer.function = perf_swevent_hrtimer;
11861 + hwc->hrtimer.irqsafe = 1;
11864 * Since hrtimers have a fixed rate, we can do a static freq->period
11865 diff --git a/kernel/exit.c b/kernel/exit.c
11866 index 3076f3089919..fb2ebcf3ca7c 100644
11867 --- a/kernel/exit.c
11868 +++ b/kernel/exit.c
11869 @@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk)
11870 * Do this under ->siglock, we can race with another thread
11871 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
11873 - flush_sigqueue(&tsk->pending);
11874 + flush_task_sigqueue(tsk);
11875 tsk->sighand = NULL;
11876 spin_unlock(&sighand->siglock);
11878 diff --git a/kernel/fork.c b/kernel/fork.c
11879 index 59faac4de181..0edb0f3c1db8 100644
11880 --- a/kernel/fork.c
11881 +++ b/kernel/fork.c
11883 #include <linux/compiler.h>
11884 #include <linux/sysctl.h>
11885 #include <linux/kcov.h>
11886 +#include <linux/kprobes.h>
11888 #include <asm/pgtable.h>
11889 #include <asm/pgalloc.h>
11890 @@ -376,13 +377,24 @@ static inline void put_signal_struct(struct signal_struct *sig)
11891 if (atomic_dec_and_test(&sig->sigcnt))
11892 free_signal_struct(sig);
11895 +#ifdef CONFIG_PREEMPT_RT_BASE
11898 void __put_task_struct(struct task_struct *tsk)
11900 WARN_ON(!tsk->exit_state);
11901 WARN_ON(atomic_read(&tsk->usage));
11902 WARN_ON(tsk == current);
11905 + * Remove function-return probe instances associated with this
11906 + * task and put them back on the free list.
11908 + kprobe_flush_task(tsk);
11910 + /* Task is done with its stack. */
11911 + put_task_stack(tsk);
11914 task_numa_free(tsk);
11915 security_task_free(tsk);
11916 @@ -393,7 +405,18 @@ void __put_task_struct(struct task_struct *tsk)
11917 if (!profile_handoff_task(tsk))
11920 +#ifndef CONFIG_PREEMPT_RT_BASE
11921 EXPORT_SYMBOL_GPL(__put_task_struct);
11923 +void __put_task_struct_cb(struct rcu_head *rhp)
11925 + struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
11927 + __put_task_struct(tsk);
11930 +EXPORT_SYMBOL_GPL(__put_task_struct_cb);
11933 void __init __weak arch_task_cache_init(void) { }
11935 @@ -852,6 +875,19 @@ void __mmdrop(struct mm_struct *mm)
11937 EXPORT_SYMBOL_GPL(__mmdrop);
11939 +#ifdef CONFIG_PREEMPT_RT_BASE
11941 + * RCU callback for delayed mm drop. Not strictly rcu, but we don't
11942 + * want another facility to make this work.
11944 +void __mmdrop_delayed(struct rcu_head *rhp)
11946 + struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
11952 static inline void __mmput(struct mm_struct *mm)
11954 VM_BUG_ON(atomic_read(&mm->mm_users));
11955 @@ -1417,6 +1453,7 @@ static void rt_mutex_init_task(struct task_struct *p)
11956 #ifdef CONFIG_RT_MUTEXES
11957 p->pi_waiters = RB_ROOT;
11958 p->pi_waiters_leftmost = NULL;
11959 + p->pi_top_task = NULL;
11960 p->pi_blocked_on = NULL;
11963 @@ -1426,6 +1463,9 @@ static void rt_mutex_init_task(struct task_struct *p)
11965 static void posix_cpu_timers_init(struct task_struct *tsk)
11967 +#ifdef CONFIG_PREEMPT_RT_BASE
11968 + tsk->posix_timer_list = NULL;
11970 tsk->cputime_expires.prof_exp = 0;
11971 tsk->cputime_expires.virt_exp = 0;
11972 tsk->cputime_expires.sched_exp = 0;
11973 @@ -1552,6 +1592,7 @@ static __latent_entropy struct task_struct *copy_process(
11974 spin_lock_init(&p->alloc_lock);
11976 init_sigpending(&p->pending);
11977 + p->sigqueue_cache = NULL;
11979 p->utime = p->stime = p->gtime = 0;
11980 p->utimescaled = p->stimescaled = 0;
11981 diff --git a/kernel/futex.c b/kernel/futex.c
11982 index 4c6b6e697b73..d9bab63efccb 100644
11983 --- a/kernel/futex.c
11984 +++ b/kernel/futex.c
11985 @@ -800,7 +800,7 @@ static int refill_pi_state_cache(void)
11989 -static struct futex_pi_state * alloc_pi_state(void)
11990 +static struct futex_pi_state *alloc_pi_state(void)
11992 struct futex_pi_state *pi_state = current->pi_state_cache;
11994 @@ -810,6 +810,11 @@ static struct futex_pi_state * alloc_pi_state(void)
11998 +static void get_pi_state(struct futex_pi_state *pi_state)
12000 + WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
12004 * Drops a reference to the pi_state object and frees or caches it
12005 * when the last reference is gone.
12006 @@ -854,7 +859,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
12007 * Look up the task based on what TID userspace gave us.
12008 * We dont trust it.
12010 -static struct task_struct * futex_find_get_task(pid_t pid)
12011 +static struct task_struct *futex_find_get_task(pid_t pid)
12013 struct task_struct *p;
12015 @@ -904,7 +909,9 @@ void exit_pi_state_list(struct task_struct *curr)
12016 * task still owns the PI-state:
12018 if (head->next != next) {
12019 + raw_spin_unlock_irq(&curr->pi_lock);
12020 spin_unlock(&hb->lock);
12021 + raw_spin_lock_irq(&curr->pi_lock);
12025 @@ -914,10 +921,12 @@ void exit_pi_state_list(struct task_struct *curr)
12026 pi_state->owner = NULL;
12027 raw_spin_unlock_irq(&curr->pi_lock);
12029 - rt_mutex_unlock(&pi_state->pi_mutex);
12031 + get_pi_state(pi_state);
12032 spin_unlock(&hb->lock);
12034 + rt_mutex_futex_unlock(&pi_state->pi_mutex);
12035 + put_pi_state(pi_state);
12037 raw_spin_lock_irq(&curr->pi_lock);
12039 raw_spin_unlock_irq(&curr->pi_lock);
12040 @@ -971,6 +980,39 @@ void exit_pi_state_list(struct task_struct *curr)
12042 * [10] There is no transient state which leaves owner and user space
12046 + * Serialization and lifetime rules:
12050 + * hb -> futex_q, relation
12051 + * futex_q -> pi_state, relation
12053 + * (cannot be raw because hb can contain arbitrary amount
12056 + * pi_mutex->wait_lock:
12058 + * {uval, pi_state}
12060 + * (and pi_mutex 'obviously')
12064 + * p->pi_state_list -> pi_state->list, relation
12066 + * pi_state->refcount:
12068 + * pi_state lifetime
12074 + * pi_mutex->wait_lock
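The rules above tie together the user-space futex word (owner TID plus FUTEX_WAITERS), hb->lock, the pi_state and pi_mutex->wait_lock. For orientation, this is the user-space half of the protocol they serialize: lock by cmpxchg'ing 0 -> TID and fall back to FUTEX_LOCK_PI, unlock by cmpxchg'ing TID -> 0 and fall back to FUTEX_UNLOCK_PI once FUTEX_WAITERS is set. A minimal, lightly error-checked sketch of standard futex(2) usage (not code from this patch):

    #define _GNU_SOURCE
    #include <linux/futex.h>
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static atomic_uint futex_word;  /* 0 = unlocked, else owner TID (| FUTEX_WAITERS) */

    static unsigned int self_tid(void)
    {
        return (unsigned int)syscall(SYS_gettid);
    }

    static void pi_lock(void)
    {
        unsigned int expected = 0;

        if (atomic_compare_exchange_strong(&futex_word, &expected, self_tid()))
            return;         /* uncontended: we are now the owner */
        /* Contended: let the kernel queue us and apply priority inheritance. */
        while (syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0) != 0)
            ;               /* retry on EINTR/EAGAIN; real code checks errno */
    }

    static void pi_unlock(void)
    {
        unsigned int expected = self_tid();

        if (atomic_compare_exchange_strong(&futex_word, &expected, 0))
            return;         /* no waiters were queued */
        /* FUTEX_WAITERS is set: the kernel hands the lock to the top waiter. */
        syscall(SYS_futex, &futex_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
    }

    static long counter;

    static void *worker(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 100000; i++) {
            pi_lock();
            counter++;
            pi_unlock();
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t[4];

        for (int i = 0; i < 4; i++)
            pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 4; i++)
            pthread_join(t[i], NULL);
        printf("counter = %ld (expected %d)\n", counter, 4 * 100000);
        return 0;
    }

glibc performs the same dance internally for pthread mutexes created with pthread_mutexattr_setprotocol(PTHREAD_PRIO_INHERIT), which is what usually exercises this kernel code.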
12080 @@ -978,10 +1020,13 @@ void exit_pi_state_list(struct task_struct *curr)
12081 * the pi_state against the user space value. If correct, attach to
12084 -static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
12085 +static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
12086 + struct futex_pi_state *pi_state,
12087 struct futex_pi_state **ps)
12089 pid_t pid = uval & FUTEX_TID_MASK;
12094 * Userspace might have messed up non-PI and PI futexes [3]
12095 @@ -989,9 +1034,39 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
12096 if (unlikely(!pi_state))
12100 + * We get here with hb->lock held, and having found a
12101 + * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
12102 + * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
12103 + * which in turn means that futex_lock_pi() still has a reference on
12106 + * The waiter holding a reference on @pi_state also protects against
12107 + * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
12108 + * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
12109 + * free pi_state before we can take a reference ourselves.
12111 WARN_ON(!atomic_read(&pi_state->refcount));
12114 + * Now that we have a pi_state, we can acquire wait_lock
12115 + * and do the state validation.
12117 + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
12120 + * Since {uval, pi_state} is serialized by wait_lock, and our current
12121 + * uval was read without holding it, it can have changed. Verify it
12122 + * still is what we expect it to be, otherwise retry the entire
12125 + if (get_futex_value_locked(&uval2, uaddr))
12128 + if (uval != uval2)
12132 * Handle the owner died case:
12134 if (uval & FUTEX_OWNER_DIED) {
12135 @@ -1006,11 +1081,11 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
12136 * is not 0. Inconsistent state. [5]
12142 * Take a ref on the state and return success. [4]
12149 @@ -1022,14 +1097,14 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
12150 * Take a ref on the state and return success. [6]
12157 * If the owner died bit is not set, then the pi_state
12158 * must have an owner. [7]
12160 if (!pi_state->owner)
12166 @@ -1038,11 +1113,29 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
12167 * user space TID. [9/10]
12169 if (pid != task_pid_vnr(pi_state->owner))
12172 - atomic_inc(&pi_state->refcount);
12176 + get_pi_state(pi_state);
12177 + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
12194 + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
12199 @@ -1093,6 +1186,9 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
12202 * No existing pi state. First waiter. [2]
12204 + * This creates pi_state, we have hb->lock held, this means nothing can
12205 + * observe this state, wait_lock is irrelevant.
12207 pi_state = alloc_pi_state();
12209 @@ -1117,17 +1213,18 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
12213 -static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
12214 +static int lookup_pi_state(u32 __user *uaddr, u32 uval,
12215 + struct futex_hash_bucket *hb,
12216 union futex_key *key, struct futex_pi_state **ps)
12218 - struct futex_q *match = futex_top_waiter(hb, key);
12219 + struct futex_q *top_waiter = futex_top_waiter(hb, key);
12222 * If there is a waiter on that futex, validate it and
12223 * attach to the pi_state when the validation succeeds.
12226 - return attach_to_pi_state(uval, match->pi_state, ps);
12228 + return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
12231 * We are the first waiter - try to look up the owner based on
12232 @@ -1146,7 +1243,7 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
12233 if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
12236 - /*If user space value changed, let the caller retry */
12237 + /* If user space value changed, let the caller retry */
12238 return curval != uval ? -EAGAIN : 0;
12241 @@ -1174,7 +1271,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
12242 struct task_struct *task, int set_waiters)
12244 u32 uval, newval, vpid = task_pid_vnr(task);
12245 - struct futex_q *match;
12246 + struct futex_q *top_waiter;
12250 @@ -1200,9 +1297,9 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
12251 * Lookup existing state first. If it exists, try to attach to
12254 - match = futex_top_waiter(hb, key);
12256 - return attach_to_pi_state(uval, match->pi_state, ps);
12257 + top_waiter = futex_top_waiter(hb, key);
12259 + return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
12262 * No waiter and user TID is 0. We are here because the
12263 @@ -1283,50 +1380,45 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
12264 wake_q_add(wake_q, p);
12265 __unqueue_futex(q);
12267 - * The waiting task can free the futex_q as soon as
12268 - * q->lock_ptr = NULL is written, without taking any locks. A
12269 - * memory barrier is required here to prevent the following
12270 - * store to lock_ptr from getting ahead of the plist_del.
12271 + * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
12272 + * is written, without taking any locks. This is possible in the event
12273 + * of a spurious wakeup, for example. A memory barrier is required here
12274 + * to prevent the following store to lock_ptr from getting ahead of the
12275 + * plist_del in __unqueue_futex().
12278 - q->lock_ptr = NULL;
12279 + smp_store_release(&q->lock_ptr, NULL);
12282 -static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
12283 - struct futex_hash_bucket *hb)
12285 + * Caller must hold a reference on @pi_state.
12287 +static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
12289 - struct task_struct *new_owner;
12290 - struct futex_pi_state *pi_state = this->pi_state;
12291 u32 uninitialized_var(curval), newval;
12292 + struct task_struct *new_owner;
12293 + bool postunlock = false;
12296 + WAKE_Q(wake_sleeper_q);
12303 - * If current does not own the pi_state then the futex is
12304 - * inconsistent and user space fiddled with the futex value.
12306 - if (pi_state->owner != current)
12309 - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
12310 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
12311 + if (WARN_ON_ONCE(!new_owner)) {
12313 + * As per the comment in futex_unlock_pi() this should not happen.
12315 + * When this happens, give up our locks and try again, giving
12316 + * the futex_lock_pi() instance time to complete, either by
12317 + * waiting on the rtmutex or removing itself from the futex
12325 - * It is possible that the next waiter (the one that brought
12326 - * this owner to the kernel) timed out and is no longer
12327 - * waiting on the lock.
12330 - new_owner = this->task;
12333 - * We pass it to the next owner. The WAITERS bit is always
12334 - * kept enabled while there is PI state around. We cleanup the
12335 - * owner died bit, because we are the owner.
12336 + * We pass it to the next owner. The WAITERS bit is always kept
12337 + * enabled while there is PI state around. We cleanup the owner
12338 + * died bit, because we are the owner.
12340 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
12342 @@ -1335,6 +1427,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
12344 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
12347 } else if (curval != uval) {
12349 * If a unconditional UNLOCK_PI operation (user space did not
12350 @@ -1347,10 +1440,14 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
12355 - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
12363 + * This is a point of no return; once we modify the uval there is no
12364 + * going back and subsequent operations must not fail.
12367 raw_spin_lock(&pi_state->owner->pi_lock);
12368 WARN_ON(list_empty(&pi_state->list));
12369 @@ -1363,22 +1460,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
12370 pi_state->owner = new_owner;
12371 raw_spin_unlock(&new_owner->pi_lock);
12373 + postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
12374 + &wake_sleeper_q);
12376 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
12378 - deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
12380 + rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
12383 - * First unlock HB so the waiter does not spin on it once he got woken
12384 - * up. Second wake up the waiter before the priority is adjusted. If we
12385 - * deboost first (and lose our higher priority), then the task might get
12386 - * scheduled away before the wake up can take place.
12388 - spin_unlock(&hb->lock);
12389 - wake_up_q(&wake_q);
12391 - rt_mutex_adjust_prio(current);
12398 @@ -1824,7 +1914,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
12399 * If that call succeeds then we have pi_state and an
12400 * initial refcount on it.
12402 - ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
12403 + ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
12407 @@ -1907,7 +1997,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
12408 * refcount on the pi_state and store the pointer in
12409 * the futex_q object of the waiter.
12411 - atomic_inc(&pi_state->refcount);
12412 + get_pi_state(pi_state);
12413 this->pi_state = pi_state;
12414 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
12416 @@ -1924,6 +2014,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
12417 requeue_pi_wake_futex(this, &key2, hb2);
12420 + } else if (ret == -EAGAIN) {
12422 + * Waiter was woken by timeout or
12423 + * signal and has set pi_blocked_on to
12424 + * PI_WAKEUP_INPROGRESS before we
12425 + * tried to enqueue it on the rtmutex.
12427 + this->pi_state = NULL;
12428 + put_pi_state(pi_state);
12432 * rt_mutex_start_proxy_lock() detected a
12433 @@ -2007,20 +2107,7 @@ queue_unlock(struct futex_hash_bucket *hb)
12434 hb_waiters_dec(hb);
12438 - * queue_me() - Enqueue the futex_q on the futex_hash_bucket
12439 - * @q: The futex_q to enqueue
12440 - * @hb: The destination hash bucket
12442 - * The hb->lock must be held by the caller, and is released here. A call to
12443 - * queue_me() is typically paired with exactly one call to unqueue_me(). The
12444 - * exceptions involve the PI related operations, which may use unqueue_me_pi()
12445 - * or nothing if the unqueue is done as part of the wake process and the unqueue
12446 - * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
12449 -static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
12450 - __releases(&hb->lock)
12451 +static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
12455 @@ -2037,6 +2124,24 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
12456 plist_node_init(&q->list, prio);
12457 plist_add(&q->list, &hb->chain);
12462 + * queue_me() - Enqueue the futex_q on the futex_hash_bucket
12463 + * @q: The futex_q to enqueue
12464 + * @hb: The destination hash bucket
12466 + * The hb->lock must be held by the caller, and is released here. A call to
12467 + * queue_me() is typically paired with exactly one call to unqueue_me(). The
12468 + * exceptions involve the PI related operations, which may use unqueue_me_pi()
12469 + * or nothing if the unqueue is done as part of the wake process and the unqueue
12470 + * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
12473 +static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
12474 + __releases(&hb->lock)
12476 + __queue_me(q, hb);
12477 spin_unlock(&hb->lock);
12480 @@ -2123,10 +2228,13 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
12482 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
12483 struct futex_pi_state *pi_state = q->pi_state;
12484 - struct task_struct *oldowner = pi_state->owner;
12485 u32 uval, uninitialized_var(curval), newval;
12486 + struct task_struct *oldowner;
12489 + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
12491 + oldowner = pi_state->owner;
12493 if (!pi_state->owner)
12494 newtid |= FUTEX_OWNER_DIED;
12495 @@ -2134,7 +2242,8 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
12497 * We are here either because we stole the rtmutex from the
12498 * previous highest priority waiter or we are the highest priority
12499 - * waiter but failed to get the rtmutex the first time.
12500 + * waiter but have failed to get the rtmutex the first time.
12502 * We have to replace the newowner TID in the user space variable.
12503 * This must be atomic as we have to preserve the owner died bit here.
12505 @@ -2142,17 +2251,16 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
12506 * because we can fault here. Imagine swapped out pages or a fork
12507 * that marked all the anonymous memory readonly for cow.
12509 - * Modifying pi_state _before_ the user space value would
12510 - * leave the pi_state in an inconsistent state when we fault
12511 - * here, because we need to drop the hash bucket lock to
12512 - * handle the fault. This might be observed in the PID check
12513 - * in lookup_pi_state.
12514 + * Modifying pi_state _before_ the user space value would leave the
12515 + * pi_state in an inconsistent state when we fault here, because we
12516 + * need to drop the locks to handle the fault. This might be observed
12517 + * in the PID check in lookup_pi_state.
12520 if (get_futex_value_locked(&uval, uaddr))
12525 newval = (uval & FUTEX_OWNER_DIED) | newtid;
12527 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
12528 @@ -2167,47 +2275,60 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
12531 if (pi_state->owner != NULL) {
12532 - raw_spin_lock_irq(&pi_state->owner->pi_lock);
12533 + raw_spin_lock(&pi_state->owner->pi_lock);
12534 WARN_ON(list_empty(&pi_state->list));
12535 list_del_init(&pi_state->list);
12536 - raw_spin_unlock_irq(&pi_state->owner->pi_lock);
12537 + raw_spin_unlock(&pi_state->owner->pi_lock);
12540 pi_state->owner = newowner;
12542 - raw_spin_lock_irq(&newowner->pi_lock);
12543 + raw_spin_lock(&newowner->pi_lock);
12544 WARN_ON(!list_empty(&pi_state->list));
12545 list_add(&pi_state->list, &newowner->pi_state_list);
12546 - raw_spin_unlock_irq(&newowner->pi_lock);
12547 + raw_spin_unlock(&newowner->pi_lock);
12548 + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
12553 - * To handle the page fault we need to drop the hash bucket
12554 - * lock here. That gives the other task (either the highest priority
12555 - * waiter itself or the task which stole the rtmutex) the
12556 - * chance to try the fixup of the pi_state. So once we are
12557 - * back from handling the fault we need to check the pi_state
12558 - * after reacquiring the hash bucket lock and before trying to
12559 - * do another fixup. When the fixup has been done already we
12561 + * To handle the page fault we need to drop the locks here. That gives
12562 + * the other task (either the highest priority waiter itself or the
12563 + * task which stole the rtmutex) the chance to try the fixup of the
12564 + * pi_state. So once we are back from handling the fault we need to
12565 + * check the pi_state after reacquiring the locks and before trying to
12566 + * do another fixup. When the fixup has been done already we simply
12569 + * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
12570 + * drop hb->lock since the caller owns the hb -> futex_q relation.
12571 + * Dropping the pi_mutex->wait_lock requires the state revalidate.
12574 + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
12575 spin_unlock(q->lock_ptr);
12577 ret = fault_in_user_writeable(uaddr);
12579 spin_lock(q->lock_ptr);
12580 + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
12583 * Check if someone else fixed it for us:
12585 - if (pi_state->owner != oldowner)
12587 + if (pi_state->owner != oldowner) {
12599 + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
12603 static long futex_wait_restart(struct restart_block *restart);
12604 @@ -2229,13 +2350,16 @@ static long futex_wait_restart(struct restart_block *restart);
12606 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
12608 - struct task_struct *owner;
12613 * Got the lock. We might not be the anticipated owner if we
12614 * did a lock-steal - fix up the PI-state in that case:
12616 + * We can safely read pi_state->owner without holding wait_lock
12617 + * because we now own the rt_mutex, only the owner will attempt
12620 if (q->pi_state->owner != current)
12621 ret = fixup_pi_state_owner(uaddr, q, current);
12622 @@ -2243,43 +2367,15 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
12626 - * Catch the rare case, where the lock was released when we were on the
12627 - * way back before we locked the hash bucket.
12629 - if (q->pi_state->owner == current) {
12631 - * Try to get the rt_mutex now. This might fail as some other
12632 - * task acquired the rt_mutex after we removed ourself from the
12633 - * rt_mutex waiters list.
12635 - if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
12641 - * pi_state is incorrect, some other task did a lock steal and
12642 - * we returned due to timeout or signal without taking the
12643 - * rt_mutex. Too late.
12645 - raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
12646 - owner = rt_mutex_owner(&q->pi_state->pi_mutex);
12648 - owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
12649 - raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
12650 - ret = fixup_pi_state_owner(uaddr, q, owner);
12655 * Paranoia check. If we did not take the lock, then we should not be
12656 * the owner of the rt_mutex.
12658 - if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
12659 + if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
12660 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
12661 "pi-state %p\n", ret,
12662 q->pi_state->pi_mutex.owner,
12663 q->pi_state->owner);
12667 return ret ? ret : locked;
12668 @@ -2503,6 +2599,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
12669 ktime_t *time, int trylock)
12671 struct hrtimer_sleeper timeout, *to = NULL;
12672 + struct futex_pi_state *pi_state = NULL;
12673 + struct rt_mutex_waiter rt_waiter;
12674 struct futex_hash_bucket *hb;
12675 struct futex_q q = futex_q_init;
12677 @@ -2555,25 +2653,77 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
12681 + WARN_ON(!q.pi_state);
12684 * Only actually queue now that the atomic ops are done:
12686 - queue_me(&q, hb);
12687 + __queue_me(&q, hb);
12689 - WARN_ON(!q.pi_state);
12691 - * Block on the PI mutex:
12694 - ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
12696 - ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
12698 + ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
12699 /* Fixup the trylock return value: */
12700 ret = ret ? 0 : -EWOULDBLOCK;
12704 + rt_mutex_init_waiter(&rt_waiter, false);
12707 + * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
12708 + * hold it while doing rt_mutex_start_proxy(), because then it will
12709 + * include hb->lock in the blocking chain, even though we'll not in
12710 + * fact hold it while blocking. This will lead it to report -EDEADLK
12711 + * and BUG when futex_unlock_pi() interleaves with this.
12713 + * Therefore acquire wait_lock while holding hb->lock, but drop the
12714 + * latter before calling rt_mutex_start_proxy_lock(). This still fully
12715 + * serializes against futex_unlock_pi() as that does the exact same
12716 + * lock handoff sequence.
12718 + raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
12720 + * the migrate_disable() here disables migration in the in_atomic() fast
12721 + * path which is enabled again in the following spin_unlock(). We have
12722 + * one migrate_disable() pending in the slow-path which is reversed
12723 + * after the raw_spin_unlock_irq() where we leave the atomic context.
12725 + migrate_disable();
12727 + spin_unlock(q.lock_ptr);
12728 + ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
12729 + raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
12730 + migrate_enable();
12736 + spin_lock(q.lock_ptr);
12741 + if (unlikely(to))
12742 + hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
12744 + ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
12746 spin_lock(q.lock_ptr);
12748 + * If we failed to acquire the lock (signal/timeout), we must
12749 + * first acquire the hb->lock before removing the lock from the
12750 + * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
12751 + * wait lists consistent.
12753 + * In particular; it is important that futex_unlock_pi() can not
12754 + * observe this inconsistency.
12756 + if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
12761 * Fixup the pi_state owner and possibly acquire the lock if we
12764 @@ -2589,12 +2739,19 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
12765 * If fixup_owner() faulted and was unable to handle the fault, unlock
12766 * it and return the fault to userspace.
12768 - if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
12769 - rt_mutex_unlock(&q.pi_state->pi_mutex);
12770 + if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
12771 + pi_state = q.pi_state;
12772 + get_pi_state(pi_state);
12775 /* Unqueue and drop the lock */
12779 + rt_mutex_futex_unlock(&pi_state->pi_mutex);
12780 + put_pi_state(pi_state);
12785 out_unlock_put_key:
12786 @@ -2603,8 +2760,10 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
12788 put_futex_key(&q.key);
12792 + hrtimer_cancel(&to->timer);
12793 destroy_hrtimer_on_stack(&to->timer);
12795 return ret != -EINTR ? ret : -ERESTARTNOINTR;
12798 @@ -2631,7 +2790,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
12799 u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
12800 union futex_key key = FUTEX_KEY_INIT;
12801 struct futex_hash_bucket *hb;
12802 - struct futex_q *match;
12803 + struct futex_q *top_waiter;
12807 @@ -2655,12 +2814,48 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
12808 * all and we at least want to know if user space fiddled
12809 * with the futex value instead of blindly unlocking.
12811 - match = futex_top_waiter(hb, &key);
12813 - ret = wake_futex_pi(uaddr, uval, match, hb);
12814 + top_waiter = futex_top_waiter(hb, &key);
12815 + if (top_waiter) {
12816 + struct futex_pi_state *pi_state = top_waiter->pi_state;
12823 - * In case of success wake_futex_pi dropped the hash
12825 + * If current does not own the pi_state then the futex is
12826 + * inconsistent and user space fiddled with the futex value.
12828 + if (pi_state->owner != current)
12831 + get_pi_state(pi_state);
12833 + * By taking wait_lock while still holding hb->lock, we ensure
12834 + * there is no point where we hold neither; and therefore
12835 + * wake_futex_pi() must observe a state consistent with what we
12838 + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
12840 + * Magic trickery for now to make the RT migrate disable
12841 + * logic happy. The following spin_unlock() happens with
12842 + * interrupts disabled so the internal migrate_enable()
12843 + * won't undo the migrate_disable() which was issued when
12844 + * locking hb->lock.
12846 + migrate_disable();
12847 + spin_unlock(&hb->lock);
12849 + /* Drops pi_state->pi_mutex.wait_lock */
12850 + ret = wake_futex_pi(uaddr, uval, pi_state);
12852 + migrate_enable();
12854 + put_pi_state(pi_state);
12857 + * Success, we're done! No tricky corner cases.
12861 @@ -2675,7 +2870,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
12862 * setting the FUTEX_WAITERS bit. Try again.
12864 if (ret == -EAGAIN) {
12865 - spin_unlock(&hb->lock);
12866 put_futex_key(&key);
12869 @@ -2683,7 +2877,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
12870 * wake_futex_pi has detected invalid state. Tell user
12878 @@ -2693,8 +2887,10 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
12879 * preserve the WAITERS bit not the OWNER_DIED one. We are the
12882 - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
12883 + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
12884 + spin_unlock(&hb->lock);
12889 * If uval has changed, let user space handle it.
12890 @@ -2708,7 +2904,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
12894 - spin_unlock(&hb->lock);
12895 put_futex_key(&key);
12897 ret = fault_in_user_writeable(uaddr);
12898 @@ -2812,8 +3007,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
12899 u32 __user *uaddr2)
12901 struct hrtimer_sleeper timeout, *to = NULL;
12902 + struct futex_pi_state *pi_state = NULL;
12903 struct rt_mutex_waiter rt_waiter;
12904 - struct futex_hash_bucket *hb;
12905 + struct futex_hash_bucket *hb, *hb2;
12906 union futex_key key2 = FUTEX_KEY_INIT;
12907 struct futex_q q = futex_q_init;
12909 @@ -2838,10 +3034,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
12910 * The waiter is allocated on our stack, manipulated by the requeue
12911 * code while we sleep on uaddr.
12913 - debug_rt_mutex_init_waiter(&rt_waiter);
12914 - RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
12915 - RB_CLEAR_NODE(&rt_waiter.tree_entry);
12916 - rt_waiter.task = NULL;
12917 + rt_mutex_init_waiter(&rt_waiter, false);
12919 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
12920 if (unlikely(ret != 0))
12921 @@ -2872,20 +3065,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
12922 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
12923 futex_wait_queue_me(hb, &q, to);
12925 - spin_lock(&hb->lock);
12926 - ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
12927 - spin_unlock(&hb->lock);
12929 - goto out_put_keys;
12931 + * On RT we must avoid races with requeue and trying to block
12932 + * on two mutexes (hb->lock and uaddr2's rtmutex) by
12933 + * serializing access to pi_blocked_on with pi_lock.
12935 + raw_spin_lock_irq(&current->pi_lock);
12936 + if (current->pi_blocked_on) {
12938 + * We have been requeued or are in the process of
12939 + * being requeued.
12941 + raw_spin_unlock_irq(&current->pi_lock);
12944 + * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
12945 + * prevents a concurrent requeue from moving us to the
12946 + * uaddr2 rtmutex. After that we can safely acquire
12947 + * (and possibly block on) hb->lock.
12949 + current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
12950 + raw_spin_unlock_irq(&current->pi_lock);
12952 + spin_lock(&hb->lock);
12955 + * Clean up pi_blocked_on. We might leak it otherwise
12956 + * when we succeeded with the hb->lock in the fast
12959 + raw_spin_lock_irq(&current->pi_lock);
12960 + current->pi_blocked_on = NULL;
12961 + raw_spin_unlock_irq(&current->pi_lock);
12963 + ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
12964 + spin_unlock(&hb->lock);
12966 + goto out_put_keys;
12970 - * In order for us to be here, we know our q.key == key2, and since
12971 - * we took the hb->lock above, we also know that futex_requeue() has
12972 - * completed and we no longer have to concern ourselves with a wakeup
12973 - * race with the atomic proxy lock acquisition by the requeue code. The
12974 - * futex_requeue dropped our key1 reference and incremented our key2
12975 - * reference count.
12976 + * In order to be here, we have either been requeued, are in
12977 + * the process of being requeued, or requeue successfully
12978 + * acquired uaddr2 on our behalf. If pi_blocked_on was
12979 + * non-null above, we may be racing with a requeue. Do not
12980 + * rely on q->lock_ptr to be hb2->lock until after blocking on
12981 + * hb->lock or hb2->lock. The futex_requeue dropped our key1
12982 + * reference and incremented our key2 reference count.
12984 + hb2 = hash_futex(&key2);
12986 /* Check if the requeue code acquired the second futex for us. */
12987 if (!q.rt_waiter) {
12988 @@ -2894,16 +3122,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
12989 * did a lock-steal - fix up the PI-state in that case.
12991 if (q.pi_state && (q.pi_state->owner != current)) {
12992 - spin_lock(q.lock_ptr);
12993 + spin_lock(&hb2->lock);
12994 + BUG_ON(&hb2->lock != q.lock_ptr);
12995 ret = fixup_pi_state_owner(uaddr2, &q, current);
12996 - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
12997 - rt_mutex_unlock(&q.pi_state->pi_mutex);
12998 + if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
12999 + pi_state = q.pi_state;
13000 + get_pi_state(pi_state);
13003 * Drop the reference to the pi state which
13004 * the requeue_pi() code acquired for us.
13006 put_pi_state(q.pi_state);
13007 - spin_unlock(q.lock_ptr);
13008 + spin_unlock(&hb2->lock);
13011 struct rt_mutex *pi_mutex;
13012 @@ -2915,10 +3146,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
13014 WARN_ON(!q.pi_state);
13015 pi_mutex = &q.pi_state->pi_mutex;
13016 - ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
13017 - debug_rt_mutex_free_waiter(&rt_waiter);
13018 + ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
13020 - spin_lock(q.lock_ptr);
13021 + spin_lock(&hb2->lock);
13022 + BUG_ON(&hb2->lock != q.lock_ptr);
13023 + if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
13026 + debug_rt_mutex_free_waiter(&rt_waiter);
13028 * Fixup the pi_state owner and possibly acquire the lock if we
13030 @@ -2936,13 +3171,20 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
13031 * the fault, unlock the rt_mutex and return the fault to
13034 - if (ret && rt_mutex_owner(pi_mutex) == current)
13035 - rt_mutex_unlock(pi_mutex);
13036 + if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
13037 + pi_state = q.pi_state;
13038 + get_pi_state(pi_state);
13041 /* Unqueue and drop the lock. */
13046 + rt_mutex_futex_unlock(&pi_state->pi_mutex);
13047 + put_pi_state(pi_state);
13050 if (ret == -EINTR) {
13052 * We've already been requeued, but cannot restart by calling
13053 diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
13054 index d3f24905852c..f87aa8fdcc51 100644
13055 --- a/kernel/irq/handle.c
13056 +++ b/kernel/irq/handle.c
13057 @@ -181,10 +181,16 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
13059 irqreturn_t retval;
13060 unsigned int flags = 0;
13061 + struct pt_regs *regs = get_irq_regs();
13062 + u64 ip = regs ? instruction_pointer(regs) : 0;
13064 retval = __handle_irq_event_percpu(desc, &flags);
13066 - add_interrupt_randomness(desc->irq_data.irq, flags);
13067 +#ifdef CONFIG_PREEMPT_RT_FULL
13068 + desc->random_ip = ip;
13070 + add_interrupt_randomness(desc->irq_data.irq, flags, ip);
13074 note_interrupt(desc, retval);
13075 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
13076 index 6b669593e7eb..e357bf6c59d5 100644
13077 --- a/kernel/irq/manage.c
13078 +++ b/kernel/irq/manage.c
13080 #include "internals.h"
13082 #ifdef CONFIG_IRQ_FORCED_THREADING
13083 +# ifndef CONFIG_PREEMPT_RT_BASE
13084 __read_mostly bool force_irqthreads;
13086 static int __init setup_forced_irqthreads(char *arg)
13087 @@ -30,6 +31,7 @@ static int __init setup_forced_irqthreads(char *arg)
13090 early_param("threadirqs", setup_forced_irqthreads);
13094 static void __synchronize_hardirq(struct irq_desc *desc)
13095 @@ -233,7 +235,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
13097 if (desc->affinity_notify) {
13098 kref_get(&desc->affinity_notify->kref);
13100 +#ifdef CONFIG_PREEMPT_RT_BASE
13101 + swork_queue(&desc->affinity_notify->swork);
13103 schedule_work(&desc->affinity_notify->work);
13106 irqd_set(data, IRQD_AFFINITY_SET);
13108 @@ -271,10 +278,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
13110 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
13112 -static void irq_affinity_notify(struct work_struct *work)
13113 +static void _irq_affinity_notify(struct irq_affinity_notify *notify)
13115 - struct irq_affinity_notify *notify =
13116 - container_of(work, struct irq_affinity_notify, work);
13117 struct irq_desc *desc = irq_to_desc(notify->irq);
13118 cpumask_var_t cpumask;
13119 unsigned long flags;
13120 @@ -296,6 +301,35 @@ static void irq_affinity_notify(struct work_struct *work)
13121 kref_put(&notify->kref, notify->release);
13124 +#ifdef CONFIG_PREEMPT_RT_BASE
13125 +static void init_helper_thread(void)
13127 + static int init_sworker_once;
13129 + if (init_sworker_once)
13131 + if (WARN_ON(swork_get()))
13133 + init_sworker_once = 1;
13136 +static void irq_affinity_notify(struct swork_event *swork)
13138 + struct irq_affinity_notify *notify =
13139 + container_of(swork, struct irq_affinity_notify, swork);
13140 + _irq_affinity_notify(notify);
13145 +static void irq_affinity_notify(struct work_struct *work)
13147 + struct irq_affinity_notify *notify =
13148 + container_of(work, struct irq_affinity_notify, work);
13149 + _irq_affinity_notify(notify);
13154 * irq_set_affinity_notifier - control notification of IRQ affinity changes
13155 * @irq: Interrupt for which to enable/disable notification
13156 @@ -324,7 +358,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
13159 kref_init(&notify->kref);
13160 +#ifdef CONFIG_PREEMPT_RT_BASE
13161 + INIT_SWORK(&notify->swork, irq_affinity_notify);
13162 + init_helper_thread();
13164 INIT_WORK(&notify->work, irq_affinity_notify);
13168 raw_spin_lock_irqsave(&desc->lock, flags);
13169 @@ -879,7 +918,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
13170 local_bh_disable();
13171 ret = action->thread_fn(action->irq, action->dev_id);
13172 irq_finalize_oneshot(desc, action);
13173 - local_bh_enable();
13175 + * Interrupts which have real time requirements can be set up
13176 + * to avoid softirq processing in the thread handler. This is
13177 + * safe as these interrupts do not raise soft interrupts.
13179 + if (irq_settings_no_softirq_call(desc))
13180 + _local_bh_enable();
13182 + local_bh_enable();
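A hypothetical use of the IRQF_NO_SOFTIRQ_CALL flag wired up by this patch
(names are made up, sketch only): on RT the primary handler below is
force-threaded, and the flag makes the thread wrapper use _local_bh_enable()
so no softirq processing runs in the handler thread.

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	/* time-critical work; must not raise softirqs itself */
	return IRQ_HANDLED;
}

static int demo_setup(unsigned int irq, void *dev)
{
	return request_irq(irq, demo_handler, IRQF_NO_SOFTIRQ_CALL,
			   "demo", dev);
}
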
13186 @@ -976,6 +1023,12 @@ static int irq_thread(void *data)
13187 if (action_ret == IRQ_WAKE_THREAD)
13188 irq_wake_secondary(desc, action);
13190 +#ifdef CONFIG_PREEMPT_RT_FULL
13191 + migrate_disable();
13192 + add_interrupt_randomness(action->irq, 0,
13193 + desc->random_ip ^ (unsigned long) action);
13194 + migrate_enable();
13196 wake_threads_waitq(desc);
13199 @@ -1336,6 +1389,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
13200 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
13203 + if (new->flags & IRQF_NO_SOFTIRQ_CALL)
13204 + irq_settings_set_no_softirq_call(desc);
13206 /* Set default affinity mask once everything is setup */
13207 setup_affinity(desc, mask);
13209 @@ -2061,7 +2117,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
13210 * This call sets the internal irqchip state of an interrupt,
13211 * depending on the value of @which.
13213 - * This function should be called with preemption disabled if the
13214 + * This function should be called with migration disabled if the
13215 * interrupt controller has per-cpu registers.
13217 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
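For reference, a hypothetical consumer of the affinity-notify path reworked
above (sketch only); the registration API is unchanged, only the deferral
mechanism differs between RT (swork helper thread) and non-RT (workqueue):

static void demo_notify(struct irq_affinity_notify *notify,
			const cpumask_t *mask)
{
	/* react to the new affinity mask */
}

static void demo_release(struct kref *ref)
{
	/* notifier is statically allocated here, nothing to free */
}

static struct irq_affinity_notify demo_affinity = {
	.notify		= demo_notify,
	.release	= demo_release,
};

static int demo_register(unsigned int irq)
{
	return irq_set_affinity_notifier(irq, &demo_affinity);
}
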
13218 diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
13219 index 320579d89091..2df2d4445b1e 100644
13220 --- a/kernel/irq/settings.h
13221 +++ b/kernel/irq/settings.h
13222 @@ -16,6 +16,7 @@ enum {
13223 _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
13224 _IRQ_IS_POLLED = IRQ_IS_POLLED,
13225 _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
13226 + _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
13227 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
13230 @@ -30,6 +31,7 @@ enum {
13231 #define IRQ_PER_CPU_DEVID GOT_YOU_MORON
13232 #define IRQ_IS_POLLED GOT_YOU_MORON
13233 #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
13234 +#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
13235 #undef IRQF_MODIFY_MASK
13236 #define IRQF_MODIFY_MASK GOT_YOU_MORON
13238 @@ -40,6 +42,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
13239 desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
13242 +static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
13244 + return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
13247 +static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
13249 + desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
13252 static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
13254 return desc->status_use_accessors & _IRQ_PER_CPU;
13255 diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
13256 index 5707f97a3e6a..73f38dc7a7fb 100644
13257 --- a/kernel/irq/spurious.c
13258 +++ b/kernel/irq/spurious.c
13259 @@ -442,6 +442,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
13261 static int __init irqfixup_setup(char *str)
13263 +#ifdef CONFIG_PREEMPT_RT_BASE
13264 + pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
13268 printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
13269 printk(KERN_WARNING "This may impact system performance.\n");
13270 @@ -454,6 +458,10 @@ module_param(irqfixup, int, 0644);
13272 static int __init irqpoll_setup(char *str)
13274 +#ifdef CONFIG_PREEMPT_RT_BASE
13275 + pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
13279 printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
13281 diff --git a/kernel/irq_work.c b/kernel/irq_work.c
13282 index bcf107ce0854..2899ba0d23d1 100644
13283 --- a/kernel/irq_work.c
13284 +++ b/kernel/irq_work.c
13286 #include <linux/cpu.h>
13287 #include <linux/notifier.h>
13288 #include <linux/smp.h>
13289 +#include <linux/interrupt.h>
13290 #include <asm/processor.h>
13293 @@ -65,6 +66,8 @@ void __weak arch_irq_work_raise(void)
13295 bool irq_work_queue_on(struct irq_work *work, int cpu)
13297 + struct llist_head *list;
13299 /* All work should have been flushed before going offline */
13300 WARN_ON_ONCE(cpu_is_offline(cpu));
13302 @@ -75,7 +78,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
13303 if (!irq_work_claim(work))
13306 - if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
13307 + if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
13308 + list = &per_cpu(lazy_list, cpu);
13310 + list = &per_cpu(raised_list, cpu);
13312 + if (llist_add(&work->llnode, list))
13313 arch_send_call_function_single_ipi(cpu);
13316 @@ -86,6 +94,9 @@ EXPORT_SYMBOL_GPL(irq_work_queue_on);
13317 /* Enqueue the irq work @work on the current CPU */
13318 bool irq_work_queue(struct irq_work *work)
13320 + struct llist_head *list;
13321 + bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
13323 /* Only queue if not already pending */
13324 if (!irq_work_claim(work))
13326 @@ -93,13 +104,15 @@ bool irq_work_queue(struct irq_work *work)
13327 /* Queue the entry and raise the IPI if needed. */
13330 - /* If the work is "lazy", handle it from next tick if any */
13331 - if (work->flags & IRQ_WORK_LAZY) {
13332 - if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
13333 - tick_nohz_tick_stopped())
13334 - arch_irq_work_raise();
13336 - if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
13337 + lazy_work = work->flags & IRQ_WORK_LAZY;
13339 + if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
13340 + list = this_cpu_ptr(&lazy_list);
13342 + list = this_cpu_ptr(&raised_list);
13344 + if (llist_add(&work->llnode, list)) {
13345 + if (!lazy_work || tick_nohz_tick_stopped())
13346 arch_irq_work_raise();
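To illustrate the routing above (hypothetical names, sketch only): on
PREEMPT_RT_FULL an ordinary irq_work is deferred to the per-CPU lazy list and
runs from the timer softirq, while work flagged IRQ_WORK_HARD_IRQ stays on the
raised list and still runs from hard interrupt context.

static void demo_fn(struct irq_work *work)
{
	/* short, non-sleeping callback */
}

/* deferred to lazy_list on RT, runs from the timer softirq */
static struct irq_work demo_soft = { .func = demo_fn };

/* stays on raised_list, runs from hard interrupt context even on RT */
static struct irq_work demo_hard = {
	.flags	= IRQ_WORK_HARD_IRQ,
	.func	= demo_fn,
};

static void demo_queue(void)
{
	irq_work_queue(&demo_soft);
	irq_work_queue(&demo_hard);
}
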
13349 @@ -116,9 +129,8 @@ bool irq_work_needs_cpu(void)
13350 raised = this_cpu_ptr(&raised_list);
13351 lazy = this_cpu_ptr(&lazy_list);
13353 - if (llist_empty(raised) || arch_irq_work_has_interrupt())
13354 - if (llist_empty(lazy))
13356 + if (llist_empty(raised) && llist_empty(lazy))
13359 /* All work should have been flushed before going offline */
13360 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
13361 @@ -132,7 +144,7 @@ static void irq_work_run_list(struct llist_head *list)
13362 struct irq_work *work;
13363 struct llist_node *llnode;
13365 - BUG_ON(!irqs_disabled());
13366 + BUG_ON_NONRT(!irqs_disabled());
13368 if (llist_empty(list))
13370 @@ -169,7 +181,16 @@ static void irq_work_run_list(struct llist_head *list)
13371 void irq_work_run(void)
13373 irq_work_run_list(this_cpu_ptr(&raised_list));
13374 - irq_work_run_list(this_cpu_ptr(&lazy_list));
13375 + if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
13377 + * NOTE: we raise softirq via IPI for safety,
13378 + * and execute in irq_work_tick() to move the
13379 + * overhead from hard to soft irq context.
13381 + if (!llist_empty(this_cpu_ptr(&lazy_list)))
13382 + raise_softirq(TIMER_SOFTIRQ);
13384 + irq_work_run_list(this_cpu_ptr(&lazy_list));
13386 EXPORT_SYMBOL_GPL(irq_work_run);
13388 @@ -179,8 +200,17 @@ void irq_work_tick(void)
13390 if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
13391 irq_work_run_list(raised);
13393 + if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
13394 + irq_work_run_list(this_cpu_ptr(&lazy_list));
13397 +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
13398 +void irq_work_tick_soft(void)
13400 irq_work_run_list(this_cpu_ptr(&lazy_list));
13405 * Synchronize against the irq_work @entry, ensures the entry is not
13406 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
13407 index ee1bc1bb8feb..ddef07958840 100644
13408 --- a/kernel/ksysfs.c
13409 +++ b/kernel/ksysfs.c
13410 @@ -136,6 +136,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
13412 #endif /* CONFIG_KEXEC_CORE */
13414 +#if defined(CONFIG_PREEMPT_RT_FULL)
13415 +static ssize_t realtime_show(struct kobject *kobj,
13416 + struct kobj_attribute *attr, char *buf)
13418 + return sprintf(buf, "%d\n", 1);
13420 +KERNEL_ATTR_RO(realtime);
13423 /* whether file capabilities are enabled */
13424 static ssize_t fscaps_show(struct kobject *kobj,
13425 struct kobj_attribute *attr, char *buf)
13426 @@ -225,6 +234,9 @@ static struct attribute * kernel_attrs[] = {
13427 &rcu_expedited_attr.attr,
13428 &rcu_normal_attr.attr,
13430 +#ifdef CONFIG_PREEMPT_RT_FULL
13431 + &realtime_attr.attr,
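The new attribute is readable from userspace; a small sketch (illustrative
only) that checks whether the running kernel advertises PREEMPT_RT_FULL:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/realtime", "r");
	int rt = 0;

	if (f) {
		if (fscanf(f, "%d", &rt) != 1)
			rt = 0;
		fclose(f);
	}
	printf("PREEMPT_RT_FULL kernel: %s\n", rt ? "yes" : "no");
	return 0;
}
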
13436 diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
13437 index 6f88e352cd4f..6ff9e8011dd0 100644
13438 --- a/kernel/locking/Makefile
13439 +++ b/kernel/locking/Makefile
13441 # and is generally not a function of system call inputs.
13442 KCOV_INSTRUMENT := n
13444 -obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
13445 +obj-y += semaphore.o percpu-rwsem.o
13447 ifdef CONFIG_FUNCTION_TRACER
13448 CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
13449 @@ -11,7 +11,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
13450 CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
13453 +ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
13455 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
13458 obj-$(CONFIG_LOCKDEP) += lockdep.o
13459 ifeq ($(CONFIG_PROC_FS),y)
13460 obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
13461 @@ -24,7 +28,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
13462 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
13463 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
13464 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
13465 +ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
13466 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
13467 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
13469 +obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o rwsem-rt.o
13470 obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
13471 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
13472 diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
13473 index 4d7ffc0a0d00..3d157b3128eb 100644
13474 --- a/kernel/locking/lockdep.c
13475 +++ b/kernel/locking/lockdep.c
13476 @@ -658,6 +658,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
13477 struct lockdep_subclass_key *key;
13478 struct hlist_head *hash_head;
13479 struct lock_class *class;
13480 + bool is_static = false;
13482 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
13484 @@ -671,10 +672,23 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
13487 * Static locks do not have their class-keys yet - for them the key
13488 - * is the lock object itself:
13489 + * is the lock object itself. If the lock is in the per cpu area,
13490 + * the canonical address of the lock (per cpu offset removed) is
13493 - if (unlikely(!lock->key))
13494 - lock->key = (void *)lock;
13495 + if (unlikely(!lock->key)) {
13496 + unsigned long can_addr, addr = (unsigned long)lock;
13498 + if (__is_kernel_percpu_address(addr, &can_addr))
13499 + lock->key = (void *)can_addr;
13500 + else if (__is_module_percpu_address(addr, &can_addr))
13501 + lock->key = (void *)can_addr;
13502 + else if (static_obj(lock))
13503 + lock->key = (void *)lock;
13505 + return ERR_PTR(-EINVAL);
13506 + is_static = true;
13510 * NOTE: the class-key must be unique. For dynamic locks, a static
13511 @@ -706,7 +720,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
13516 + return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
13520 @@ -724,19 +738,18 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
13521 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
13523 class = look_up_lock_class(lock, subclass);
13524 - if (likely(class))
13525 + if (likely(!IS_ERR_OR_NULL(class)))
13526 goto out_set_class_cache;
13529 * Debug-check: all keys must be persistent!
13531 - if (!static_obj(lock->key)) {
13533 + if (IS_ERR(class)) {
13535 printk("INFO: trying to register non-static key.\n");
13536 printk("the code is fine but needs lockdep annotation.\n");
13537 printk("turning off the locking correctness validator.\n");
13543 @@ -3410,7 +3423,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
13544 * Clearly if the lock hasn't been acquired _ever_, we're not
13545 * holding it either, so report failure.
13548 + if (IS_ERR_OR_NULL(class))
13552 @@ -3689,6 +3702,7 @@ static void check_flags(unsigned long flags)
13556 +#ifndef CONFIG_PREEMPT_RT_FULL
13558 * We dont accurately track softirq state in e.g.
13559 * hardirq contexts (such as on 4KSTACKS), so only
13560 @@ -3703,6 +3717,7 @@ static void check_flags(unsigned long flags)
13561 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
13567 print_irqtrace_events(current);
13568 @@ -4159,7 +4174,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
13569 * If the class exists we look it up and zap it:
13571 class = look_up_lock_class(lock, j);
13573 + if (!IS_ERR_OR_NULL(class))
13577 diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
13578 index f8c5af52a131..788068773e61 100644
13579 --- a/kernel/locking/locktorture.c
13580 +++ b/kernel/locking/locktorture.c
13582 #include <linux/kthread.h>
13583 #include <linux/sched/rt.h>
13584 #include <linux/spinlock.h>
13585 -#include <linux/rwlock.h>
13586 #include <linux/mutex.h>
13587 #include <linux/rwsem.h>
13588 #include <linux/smp.h>
13589 diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
13590 index ce182599cf2e..2ad3a1e8344c 100644
13591 --- a/kernel/locking/percpu-rwsem.c
13592 +++ b/kernel/locking/percpu-rwsem.c
13593 @@ -18,7 +18,7 @@ int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
13594 /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
13595 rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
13596 __init_rwsem(&sem->rw_sem, name, rwsem_key);
13597 - init_waitqueue_head(&sem->writer);
13598 + init_swait_queue_head(&sem->writer);
13599 sem->readers_block = 0;
13602 @@ -103,7 +103,7 @@ void __percpu_up_read(struct percpu_rw_semaphore *sem)
13603 __this_cpu_dec(*sem->read_count);
13605 /* Prod writer to recheck readers_active */
13606 - wake_up(&sem->writer);
13607 + swake_up(&sem->writer);
13609 EXPORT_SYMBOL_GPL(__percpu_up_read);
13611 @@ -160,7 +160,7 @@ void percpu_down_write(struct percpu_rw_semaphore *sem)
13614 /* Wait for all now active readers to complete. */
13615 - wait_event(sem->writer, readers_active_check(sem));
13616 + swait_event(sem->writer, readers_active_check(sem));
13618 EXPORT_SYMBOL_GPL(percpu_down_write);
13620 diff --git a/kernel/locking/rt.c b/kernel/locking/rt.c
13621 new file mode 100644
13622 index 000000000000..6284e3b15091
13624 +++ b/kernel/locking/rt.c
13629 + * Real-Time Preemption Support
13631 + * started by Ingo Molnar:
13633 + * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
13634 + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
13636 + * historic credit for proving that Linux spinlocks can be implemented via
13637 + * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
13638 + * and others) who prototyped it on 2.4 and did lots of comparative
13639 + * research and analysis; TimeSys, for proving that you can implement a
13640 + * fully preemptible kernel via the use of IRQ threading and mutexes;
13641 + * Bill Huey for persuasively arguing on lkml that the mutex model is the
13642 + * right one; and to MontaVista, who ported pmutexes to 2.6.
13644 + * This code is a from-scratch implementation and is not based on pmutexes,
13645 + * but the idea of converting spinlocks to mutexes is used here too.
13647 + * lock debugging, locking tree, deadlock detection:
13649 + * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
13650 + * Released under the General Public License (GPL).
13652 + * Includes portions of the generic R/W semaphore implementation from:
13654 + * Copyright (c) 2001 David Howells (dhowells@redhat.com).
13655 + * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
13656 + * - Derived also from comments by Linus
13658 + * Pending ownership of locks and ownership stealing:
13660 + * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
13662 + * (also by Steven Rostedt)
13663 + * - Converted single pi_lock to individual task locks.
13665 + * By Esben Nielsen:
13666 + * Doing priority inheritance with help of the scheduler.
13668 + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
13669 + * - major rework based on Esben Nielsens initial patch
13670 + * - replaced thread_info references by task_struct refs
13671 + * - removed task->pending_owner dependency
13672 + * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
13673 + * in the scheduler return path as discussed with Steven Rostedt
13675 + * Copyright (C) 2006, Kihon Technologies Inc.
13676 + * Steven Rostedt <rostedt@goodmis.org>
13677 + * - debugged and patched Thomas Gleixner's rework.
13678 + * - added back the cmpxchg to the rework.
13679 + * - turned atomic require back on for SMP.
13682 +#include <linux/spinlock.h>
13683 +#include <linux/rtmutex.h>
13684 +#include <linux/sched.h>
13685 +#include <linux/delay.h>
13686 +#include <linux/module.h>
13687 +#include <linux/kallsyms.h>
13688 +#include <linux/syscalls.h>
13689 +#include <linux/interrupt.h>
13690 +#include <linux/plist.h>
13691 +#include <linux/fs.h>
13692 +#include <linux/futex.h>
13693 +#include <linux/hrtimer.h>
13695 +#include "rtmutex_common.h"
13698 + * struct mutex functions
13700 +void __mutex_do_init(struct mutex *mutex, const char *name,
13701 + struct lock_class_key *key)
13703 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
13705 + * Make sure we are not reinitializing a held lock:
13707 + debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
13708 + lockdep_init_map(&mutex->dep_map, name, key, 0);
13710 + mutex->lock.save_state = 0;
13712 +EXPORT_SYMBOL(__mutex_do_init);
13714 +void __lockfunc _mutex_lock(struct mutex *lock)
13716 + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
13717 + rt_mutex_lock(&lock->lock);
13719 +EXPORT_SYMBOL(_mutex_lock);
13721 +int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
13725 + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
13726 + ret = rt_mutex_lock_interruptible(&lock->lock);
13728 + mutex_release(&lock->dep_map, 1, _RET_IP_);
13731 +EXPORT_SYMBOL(_mutex_lock_interruptible);
13733 +int __lockfunc _mutex_lock_killable(struct mutex *lock)
13737 + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
13738 + ret = rt_mutex_lock_killable(&lock->lock);
13740 + mutex_release(&lock->dep_map, 1, _RET_IP_);
13743 +EXPORT_SYMBOL(_mutex_lock_killable);
13745 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
13746 +void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
13748 + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
13749 + rt_mutex_lock(&lock->lock);
13751 +EXPORT_SYMBOL(_mutex_lock_nested);
13753 +void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
13755 + mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
13756 + rt_mutex_lock(&lock->lock);
13758 +EXPORT_SYMBOL(_mutex_lock_nest_lock);
13760 +int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
13764 + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
13765 + ret = rt_mutex_lock_interruptible(&lock->lock);
13767 + mutex_release(&lock->dep_map, 1, _RET_IP_);
13770 +EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
13772 +int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
13776 + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
13777 + ret = rt_mutex_lock_killable(&lock->lock);
13779 + mutex_release(&lock->dep_map, 1, _RET_IP_);
13782 +EXPORT_SYMBOL(_mutex_lock_killable_nested);
13785 +int __lockfunc _mutex_trylock(struct mutex *lock)
13787 + int ret = rt_mutex_trylock(&lock->lock);
13790 + mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
13794 +EXPORT_SYMBOL(_mutex_trylock);
13796 +void __lockfunc _mutex_unlock(struct mutex *lock)
13798 + mutex_release(&lock->dep_map, 1, _RET_IP_);
13799 + rt_mutex_unlock(&lock->lock);
13801 +EXPORT_SYMBOL(_mutex_unlock);
13804 + * rwlock_t functions
13806 +int __lockfunc rt_write_trylock(rwlock_t *rwlock)
13810 + migrate_disable();
13811 + ret = rt_mutex_trylock(&rwlock->lock);
13813 + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
13815 + migrate_enable();
13819 +EXPORT_SYMBOL(rt_write_trylock);
13821 +int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
13826 + ret = rt_write_trylock(rwlock);
13829 +EXPORT_SYMBOL(rt_write_trylock_irqsave);
13831 +int __lockfunc rt_read_trylock(rwlock_t *rwlock)
13833 + struct rt_mutex *lock = &rwlock->lock;
13837 + * recursive read locks succeed when current owns the lock,
13838 + * but not when read_depth == 0 which means that the lock is
13841 + if (rt_mutex_owner(lock) != current) {
13842 + migrate_disable();
13843 + ret = rt_mutex_trylock(lock);
13845 + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
13847 + migrate_enable();
13849 + } else if (!rwlock->read_depth) {
13854 + rwlock->read_depth++;
13858 +EXPORT_SYMBOL(rt_read_trylock);
13860 +void __lockfunc rt_write_lock(rwlock_t *rwlock)
13862 + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
13863 + __rt_spin_lock(&rwlock->lock);
13865 +EXPORT_SYMBOL(rt_write_lock);
13867 +void __lockfunc rt_read_lock(rwlock_t *rwlock)
13869 + struct rt_mutex *lock = &rwlock->lock;
13873 + * recursive read locks succeed when current owns the lock
13875 + if (rt_mutex_owner(lock) != current) {
13876 + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
13877 + __rt_spin_lock(lock);
13879 + rwlock->read_depth++;
13882 +EXPORT_SYMBOL(rt_read_lock);
13884 +void __lockfunc rt_write_unlock(rwlock_t *rwlock)
13886 + /* NOTE: we always pass in '1' for nested, for simplicity */
13887 + rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
13888 + __rt_spin_unlock(&rwlock->lock);
13889 + migrate_enable();
13891 +EXPORT_SYMBOL(rt_write_unlock);
13893 +void __lockfunc rt_read_unlock(rwlock_t *rwlock)
13895 + /* Release the lock only when read_depth is down to 0 */
13896 + if (--rwlock->read_depth == 0) {
13897 + rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
13898 + __rt_spin_unlock(&rwlock->lock);
13899 + migrate_enable();
13902 +EXPORT_SYMBOL(rt_read_unlock);
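To illustrate the read_depth bookkeeping above (hypothetical kernel-side
sketch): the owning task may re-acquire the read side recursively, and only
the final unlock releases the underlying rtmutex.

static DEFINE_RWLOCK(demo_rwlock);

static void demo_recursive_read(void)
{
	read_lock(&demo_rwlock);	/* owner = current, read_depth = 1 */
	read_lock(&demo_rwlock);	/* same owner: read_depth = 2, no blocking */
	read_unlock(&demo_rwlock);	/* read_depth = 1, rtmutex still held */
	read_unlock(&demo_rwlock);	/* read_depth = 0, rtmutex released */
}
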
13904 +unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
13906 + rt_write_lock(rwlock);
13910 +EXPORT_SYMBOL(rt_write_lock_irqsave);
13912 +unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
13914 + rt_read_lock(rwlock);
13918 +EXPORT_SYMBOL(rt_read_lock_irqsave);
13920 +void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
13922 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
13924 + * Make sure we are not reinitializing a held lock:
13926 + debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
13927 + lockdep_init_map(&rwlock->dep_map, name, key, 0);
13929 + rwlock->lock.save_state = 1;
13930 + rwlock->read_depth = 0;
13932 +EXPORT_SYMBOL(__rt_rwlock_init);
13935 + * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
13936 + * @cnt: the atomic which we are to dec
13937 + * @lock: the mutex to return holding if we dec to 0
13939 + * return true and hold lock if we dec to 0, return false otherwise
13941 +int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
13943 + /* dec if we can't possibly hit 0 */
13944 + if (atomic_add_unless(cnt, -1, 1))
13946 + /* we might hit 0, so take the lock */
13947 + mutex_lock(lock);
13948 + if (!atomic_dec_and_test(cnt)) {
13949 + /* when we actually did the dec, we didn't hit 0 */
13950 + mutex_unlock(lock);
13953 + /* we hit 0, and we hold the lock */
13956 +EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
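A typical "free on last put" use of atomic_dec_and_mutex_lock() (hypothetical
object and names, sketch only):

struct demo_obj {
	atomic_t		refcnt;
	struct list_head	node;
};

static DEFINE_MUTEX(demo_list_lock);

static void demo_put(struct demo_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcnt, &demo_list_lock))
		return;				/* not the last reference */
	list_del(&obj->node);			/* last put: unlink under the lock */
	mutex_unlock(&demo_list_lock);
	kfree(obj);
}
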
13957 diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
13958 index 62b6cee8ea7f..0613c4b1d059 100644
13959 --- a/kernel/locking/rtmutex-debug.c
13960 +++ b/kernel/locking/rtmutex-debug.c
13961 @@ -173,12 +173,3 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
13966 -rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
13970 -void rt_mutex_deadlock_account_unlock(struct task_struct *task)
13974 diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
13975 index d0519c3432b6..b585af9a1b50 100644
13976 --- a/kernel/locking/rtmutex-debug.h
13977 +++ b/kernel/locking/rtmutex-debug.h
13979 * This file contains macros used solely by rtmutex.c. Debug version.
13983 -rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
13984 -extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
13985 extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
13986 extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
13987 extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
13988 diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
13989 index 2c49d76f96c3..218f1d26afe7 100644
13990 --- a/kernel/locking/rtmutex.c
13991 +++ b/kernel/locking/rtmutex.c
13993 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
13994 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
13995 * Copyright (C) 2006 Esben Nielsen
13996 + * Adaptive Spinlocks:
13997 + * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
13998 + * and Peter Morreale,
13999 + * Adaptive Spinlocks simplification:
14000 + * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
14002 * See Documentation/locking/rt-mutex-design.txt for details.
14005 #include <linux/sched/rt.h>
14006 #include <linux/sched/deadline.h>
14007 #include <linux/timer.h>
14008 +#include <linux/ww_mutex.h>
14010 #include "rtmutex_common.h"
14012 @@ -133,6 +139,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
14013 WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
14016 +static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
14018 + return waiter && waiter != PI_WAKEUP_INPROGRESS &&
14019 + waiter != PI_REQUEUE_INPROGRESS;
14023 * We can speed up the acquire/release, if there's no debugging state to be
14025 @@ -222,12 +234,25 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
14029 +#define STEAL_NORMAL 0
14030 +#define STEAL_LATERAL 1
14032 + * Only use with rt_mutex_waiter_{less,equal}()
14034 +#define task_to_waiter(p) \
14035 + &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
14038 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
14039 - struct rt_mutex_waiter *right)
14040 + struct rt_mutex_waiter *right, int mode)
14042 - if (left->prio < right->prio)
14044 + if (mode == STEAL_NORMAL) {
14045 + if (left->prio < right->prio)
14048 + if (left->prio <= right->prio)
14053 * If both waiters have dl_prio(), we check the deadlines of the
14054 @@ -236,12 +261,30 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
14055 * then right waiter has a dl_prio() too.
14057 if (dl_prio(left->prio))
14058 - return dl_time_before(left->task->dl.deadline,
14059 - right->task->dl.deadline);
14060 + return dl_time_before(left->deadline, right->deadline);
14066 +rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
14067 + struct rt_mutex_waiter *right)
14069 + if (left->prio != right->prio)
14073 + * If both waiters have dl_prio(), we check the deadlines of the
14074 + * associated tasks.
14075 + * If left waiter has a dl_prio(), and we didn't return 0 above,
14076 + * then right waiter has a dl_prio() too.
14078 + if (dl_prio(left->prio))
14079 + return left->deadline == right->deadline;
14085 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
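A worked example of the two steal modes (values only, illustrative):

/*
 *   left->prio == right->prio == 20 (two SCHED_FIFO waiters)
 *
 *   rt_mutex_waiter_less(left, right, STEAL_NORMAL)  -> 0, equal is not "less"
 *   rt_mutex_waiter_less(left, right, STEAL_LATERAL) -> 1, equal prio may steal
 *
 * __try_to_take_rt_mutex() below uses STEAL_LATERAL only from the spin_lock
 * slow path and forces STEAL_NORMAL for RT tasks, so an equal-priority RT
 * task never steals the lock laterally.
 */
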
14087 @@ -253,7 +296,7 @@ rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
14090 entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
14091 - if (rt_mutex_waiter_less(waiter, entry)) {
14092 + if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
14093 link = &parent->rb_left;
14095 link = &parent->rb_right;
14096 @@ -292,7 +335,7 @@ rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
14099 entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
14100 - if (rt_mutex_waiter_less(waiter, entry)) {
14101 + if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
14102 link = &parent->rb_left;
14104 link = &parent->rb_right;
14105 @@ -320,72 +363,16 @@ rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
14106 RB_CLEAR_NODE(&waiter->pi_tree_entry);
14110 - * Calculate task priority from the waiter tree priority
14112 - * Return task->normal_prio when the waiter tree is empty or when
14113 - * the waiter is not allowed to do priority boosting
14115 -int rt_mutex_getprio(struct task_struct *task)
14116 +static void rt_mutex_adjust_prio(struct task_struct *p)
14118 - if (likely(!task_has_pi_waiters(task)))
14119 - return task->normal_prio;
14120 + struct task_struct *pi_task = NULL;
14122 - return min(task_top_pi_waiter(task)->prio,
14123 - task->normal_prio);
14125 + lockdep_assert_held(&p->pi_lock);
14127 -struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
14129 - if (likely(!task_has_pi_waiters(task)))
14131 + if (task_has_pi_waiters(p))
14132 + pi_task = task_top_pi_waiter(p)->task;
14134 - return task_top_pi_waiter(task)->task;
14138 - * Called by sched_setscheduler() to get the priority which will be
14139 - * effective after the change.
14141 -int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
14143 - if (!task_has_pi_waiters(task))
14146 - if (task_top_pi_waiter(task)->task->prio <= newprio)
14147 - return task_top_pi_waiter(task)->task->prio;
14152 - * Adjust the priority of a task, after its pi_waiters got modified.
14154 - * This can be both boosting and unboosting. task->pi_lock must be held.
14156 -static void __rt_mutex_adjust_prio(struct task_struct *task)
14158 - int prio = rt_mutex_getprio(task);
14160 - if (task->prio != prio || dl_prio(prio))
14161 - rt_mutex_setprio(task, prio);
14165 - * Adjust task priority (undo boosting). Called from the exit path of
14166 - * rt_mutex_slowunlock() and rt_mutex_slowlock().
14168 - * (Note: We do this outside of the protection of lock->wait_lock to
14169 - * allow the lock to be taken while or before we readjust the priority
14170 - * of task. We do not use the spin_xx_mutex() variants here as we are
14171 - * outside of the debug path.)
14173 -void rt_mutex_adjust_prio(struct task_struct *task)
14175 - unsigned long flags;
14177 - raw_spin_lock_irqsave(&task->pi_lock, flags);
14178 - __rt_mutex_adjust_prio(task);
14179 - raw_spin_unlock_irqrestore(&task->pi_lock, flags);
14180 + rt_mutex_setprio(p, pi_task);
14184 @@ -414,6 +401,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
14185 return debug_rt_mutex_detect_deadlock(waiter, chwalk);
14188 +static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
14190 + if (waiter->savestate)
14191 + wake_up_lock_sleeper(waiter->task);
14193 + wake_up_process(waiter->task);
14197 * Max number of times we'll walk the boosting chain:
14199 @@ -421,7 +416,8 @@ int max_lock_depth = 1024;
14201 static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
14203 - return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
14204 + return rt_mutex_real_waiter(p->pi_blocked_on) ?
14205 + p->pi_blocked_on->lock : NULL;
14209 @@ -557,7 +553,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
14210 * reached or the state of the chain has changed while we
14211 * dropped the locks.
14214 + if (!rt_mutex_real_waiter(waiter))
14215 goto out_unlock_pi;
14218 @@ -608,7 +604,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
14219 * enabled we continue, but stop the requeueing in the chain
14222 - if (waiter->prio == task->prio) {
14223 + if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
14224 if (!detect_deadlock)
14225 goto out_unlock_pi;
14227 @@ -704,7 +700,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
14229 /* [7] Requeue the waiter in the lock waiter tree. */
14230 rt_mutex_dequeue(lock, waiter);
14233 + * Update the waiter prio fields now that we're dequeued.
14235 + * These values can have changed through either:
14237 + * sys_sched_set_scheduler() / sys_sched_setattr()
14241 + * DL CBS enforcement advancing the effective deadline.
14243 + * Even though pi_waiters also uses these fields, and that tree is only
14244 + * updated in [11], we can do this here, since we hold [L], which
14245 + * serializes all pi_waiters access and rb_erase() does not care about
14246 + * the values of the node being removed.
14248 waiter->prio = task->prio;
14249 + waiter->deadline = task->dl.deadline;
14251 rt_mutex_enqueue(lock, waiter);
14253 /* [8] Release the task */
14254 @@ -719,13 +734,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
14255 * follow here. This is the end of the chain we are walking.
14257 if (!rt_mutex_owner(lock)) {
14258 + struct rt_mutex_waiter *lock_top_waiter;
14261 * If the requeue [7] above changed the top waiter,
14262 * then we need to wake the new top waiter up to try
14265 - if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
14266 - wake_up_process(rt_mutex_top_waiter(lock)->task);
14267 + lock_top_waiter = rt_mutex_top_waiter(lock);
14268 + if (prerequeue_top_waiter != lock_top_waiter)
14269 + rt_mutex_wake_waiter(lock_top_waiter);
14270 raw_spin_unlock_irq(&lock->wait_lock);
14273 @@ -745,7 +763,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
14275 rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
14276 rt_mutex_enqueue_pi(task, waiter);
14277 - __rt_mutex_adjust_prio(task);
14278 + rt_mutex_adjust_prio(task);
14280 } else if (prerequeue_top_waiter == waiter) {
14282 @@ -761,7 +779,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
14283 rt_mutex_dequeue_pi(task, waiter);
14284 waiter = rt_mutex_top_waiter(lock);
14285 rt_mutex_enqueue_pi(task, waiter);
14286 - __rt_mutex_adjust_prio(task);
14287 + rt_mutex_adjust_prio(task);
14290 * Nothing changed. No need to do any priority
14291 @@ -818,6 +836,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
14297 * Try to take an rt-mutex
14299 @@ -828,9 +847,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
14300 * @waiter: The waiter that is queued to the lock's wait tree if the
14301 * callsite called task_blocked_on_lock(), otherwise NULL
14303 -static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
14304 - struct rt_mutex_waiter *waiter)
14305 +static int __try_to_take_rt_mutex(struct rt_mutex *lock,
14306 + struct task_struct *task,
14307 + struct rt_mutex_waiter *waiter, int mode)
14309 + lockdep_assert_held(&lock->wait_lock);
14312 * Before testing whether we can acquire @lock, we set the
14313 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
14314 @@ -866,8 +888,10 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
14315 * If waiter is not the highest priority waiter of
14318 - if (waiter != rt_mutex_top_waiter(lock))
14319 + if (waiter != rt_mutex_top_waiter(lock)) {
14320 + /* XXX rt_mutex_waiter_less() ? */
14325 * We can acquire the lock. Remove the waiter from the
14326 @@ -885,14 +909,26 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
14327 * not need to be dequeued.
14329 if (rt_mutex_has_waiters(lock)) {
14330 + struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
14332 + if (task != pown)
14336 + * Note that RT tasks are excluded from lateral-steals
14337 + * to prevent the introduction of an unbounded latency.
14339 + if (rt_task(task))
14340 + mode = STEAL_NORMAL;
14342 * If @task->prio is greater than or equal to
14343 * the top waiter priority (kernel view),
14346 - if (task->prio >= rt_mutex_top_waiter(lock)->prio)
14347 + if (!rt_mutex_waiter_less(task_to_waiter(task),
14348 + rt_mutex_top_waiter(lock),
14353 * The current top waiter stays enqueued. We
14354 * don't have to change anything in the lock
14355 @@ -936,11 +972,384 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
14357 rt_mutex_set_owner(lock, task);
14359 - rt_mutex_deadlock_account_lock(lock, task);
14364 +#ifdef CONFIG_PREEMPT_RT_FULL
14366 + * preemptible spin_lock functions:
14368 +static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
14369 + void (*slowfn)(struct rt_mutex *lock,
14373 + might_sleep_no_state_check();
14376 + migrate_disable();
14378 + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
14381 + slowfn(lock, do_mig_dis);
14384 +static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
14385 + void (*slowfn)(struct rt_mutex *lock))
14387 + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
14394 + * Note that owner is a speculative pointer and dereferencing relies
14395 + * on rcu_read_lock() and the check against the lock owner.
14397 +static int adaptive_wait(struct rt_mutex *lock,
14398 + struct task_struct *owner)
14404 + if (owner != rt_mutex_owner(lock))
14407 + * Ensure that owner->on_cpu is dereferenced _after_
14408 + * checking the above to be valid.
14411 + if (!owner->on_cpu) {
14417 + rcu_read_unlock();
14421 +static int adaptive_wait(struct rt_mutex *lock,
14422 + struct task_struct *orig_owner)
14428 +static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
14429 + struct rt_mutex_waiter *waiter,
14430 + struct task_struct *task,
14431 + enum rtmutex_chainwalk chwalk);
14433 + * Slow path lock function spin_lock style: this variant is very
14434 + * careful not to miss any non-lock wakeups.
14436 + * We store the current state under p->pi_lock in p->saved_state and
14437 + * the try_to_wake_up() code handles this accordingly.
14439 +static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock,
14442 + struct task_struct *lock_owner, *self = current;
14443 + struct rt_mutex_waiter waiter, *top_waiter;
14444 + unsigned long flags;
14447 + rt_mutex_init_waiter(&waiter, true);
14449 + raw_spin_lock_irqsave(&lock->wait_lock, flags);
14451 + if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
14452 + raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
14456 + BUG_ON(rt_mutex_owner(lock) == self);
14459 + * We save whatever state the task is in and we'll restore it
14460 + * after acquiring the lock taking real wakeups into account
14461 + * as well. We are serialized via pi_lock against wakeups. See
14462 + * try_to_wake_up().
14464 + raw_spin_lock(&self->pi_lock);
14465 + self->saved_state = self->state;
14466 + __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
14467 + raw_spin_unlock(&self->pi_lock);
14469 + ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
14473 + /* Try to acquire the lock again. */
14474 + if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
14477 + top_waiter = rt_mutex_top_waiter(lock);
14478 + lock_owner = rt_mutex_owner(lock);
14480 + raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
14482 + debug_rt_mutex_print_deadlock(&waiter);
14484 + if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) {
14486 + migrate_enable();
14489 + migrate_disable();
14492 + raw_spin_lock_irqsave(&lock->wait_lock, flags);
14494 + raw_spin_lock(&self->pi_lock);
14495 + __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
14496 + raw_spin_unlock(&self->pi_lock);
14500 + * Restore the task state to current->saved_state. We set it
14501 + * to the original state above and the try_to_wake_up() code
14502 + * has possibly updated it when a real (non-rtmutex) wakeup
14503 + * happened while we were blocked. Clear saved_state so
14504 + * try_to_wakeup() does not get confused.
14506 + raw_spin_lock(&self->pi_lock);
14507 + __set_current_state_no_track(self->saved_state);
14508 + self->saved_state = TASK_RUNNING;
14509 + raw_spin_unlock(&self->pi_lock);
14512 + * try_to_take_rt_mutex() sets the waiter bit
14513 + * unconditionally. We might have to fix that up:
14515 + fixup_rt_mutex_waiters(lock);
14517 + BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
14518 + BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
14520 + raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
14522 + debug_rt_mutex_free_waiter(&waiter);
14525 +static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
14526 + struct wake_q_head *wake_q,
14527 + struct wake_q_head *wq_sleeper);
14529 + * Slow path to release a rt_mutex spin_lock style
14531 +static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
14533 + unsigned long flags;
14535 + WAKE_Q(wake_sleeper_q);
14538 + raw_spin_lock_irqsave(&lock->wait_lock, flags);
14539 + postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q);
14540 + raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
14543 + rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
14546 +void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
14548 + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false);
14549 + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
14551 +EXPORT_SYMBOL(rt_spin_lock__no_mg);
14553 +void __lockfunc rt_spin_lock(spinlock_t *lock)
14555 + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
14556 + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
14558 +EXPORT_SYMBOL(rt_spin_lock);
14560 +void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
14562 + rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true);
14564 +EXPORT_SYMBOL(__rt_spin_lock);
14566 +void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
14568 + rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false);
14570 +EXPORT_SYMBOL(__rt_spin_lock__no_mg);
14572 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
14573 +void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
14575 + spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
14576 + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
14578 +EXPORT_SYMBOL(rt_spin_lock_nested);
14581 +void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
14583 + /* NOTE: we always pass in '1' for nested, for simplicity */
14584 + spin_release(&lock->dep_map, 1, _RET_IP_);
14585 + rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
14587 +EXPORT_SYMBOL(rt_spin_unlock__no_mg);
14589 +void __lockfunc rt_spin_unlock(spinlock_t *lock)
14591 + /* NOTE: we always pass in '1' for nested, for simplicity */
14592 + spin_release(&lock->dep_map, 1, _RET_IP_);
14593 + rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
14594 + migrate_enable();
14596 +EXPORT_SYMBOL(rt_spin_unlock);
14598 +void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
14600 + rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
14602 +EXPORT_SYMBOL(__rt_spin_unlock);
14605 + * Wait for the lock to get unlocked: instead of polling for an unlock
14606 + * (like raw spinlocks do), we lock and unlock, to force the kernel to
14607 + * schedule if there's contention:
14609 +void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
14612 + spin_unlock(lock);
14614 +EXPORT_SYMBOL(rt_spin_unlock_wait);
14616 +int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
14620 + ret = rt_mutex_trylock(&lock->lock);
14622 + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
14625 +EXPORT_SYMBOL(rt_spin_trylock__no_mg);
14627 +int __lockfunc rt_spin_trylock(spinlock_t *lock)
14631 + migrate_disable();
14632 + ret = rt_mutex_trylock(&lock->lock);
14634 + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
14636 + migrate_enable();
14639 +EXPORT_SYMBOL(rt_spin_trylock);
14641 +int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
14645 + local_bh_disable();
14646 + ret = rt_mutex_trylock(&lock->lock);
14648 + migrate_disable();
14649 + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
14651 + local_bh_enable();
14654 +EXPORT_SYMBOL(rt_spin_trylock_bh);
14656 +int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
14661 + ret = rt_mutex_trylock(&lock->lock);
14663 + migrate_disable();
14664 + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
14668 +EXPORT_SYMBOL(rt_spin_trylock_irqsave);
14670 +int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
14672 + /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
14673 + if (atomic_add_unless(atomic, -1, 1))
14675 + rt_spin_lock(lock);
14676 + if (atomic_dec_and_test(atomic))
14678 + rt_spin_unlock(lock);
14681 +EXPORT_SYMBOL(atomic_dec_and_spin_lock);
14684 +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
14686 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
14688 + * Make sure we are not reinitializing a held lock:
14690 + debug_check_no_locks_freed((void *)lock, sizeof(*lock));
14691 + lockdep_init_map(&lock->dep_map, name, key, 0);
14694 +EXPORT_SYMBOL(__rt_spin_lock_init);
14696 +#endif /* PREEMPT_RT_FULL */
14698 +#ifdef CONFIG_PREEMPT_RT_FULL
14699 + static inline int __sched
14700 +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
14702 + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
14703 + struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
14708 + if (unlikely(ctx == hold_ctx))
14709 + return -EALREADY;
14711 + if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
14712 + (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
14713 +#ifdef CONFIG_DEBUG_MUTEXES
14714 + DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
14715 + ctx->contending_lock = ww;
14723 + static inline int __sched
14724 +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
14733 +try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
14734 + struct rt_mutex_waiter *waiter)
14736 + return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
14740 * Task blocks on lock.
14742 @@ -958,6 +1367,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
14743 struct rt_mutex *next_lock;
14744 int chain_walk = 0, res;
14746 + lockdep_assert_held(&lock->wait_lock);
14749 * Early deadlock detection. We really don't want the task to
14750 * enqueue on itself just to untangle the mess later. It's not
14751 @@ -971,10 +1382,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
14754 raw_spin_lock(&task->pi_lock);
14755 - __rt_mutex_adjust_prio(task);
14758 + * In the case of futex requeue PI, this will be a proxy
14759 + * lock. The task will wake unaware that it is enqueueed on
14760 + * this lock. Avoid blocking on two locks and corrupting
14761 + * pi_blocked_on via the PI_WAKEUP_INPROGRESS
14762 + * flag. futex_wait_requeue_pi() sets this when it wakes up
14763 + * before requeue (due to a signal or timeout). Do not enqueue
14764 + * the task if PI_WAKEUP_INPROGRESS is set.
14766 + if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
14767 + raw_spin_unlock(&task->pi_lock);
14771 + BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
14773 + rt_mutex_adjust_prio(task);
14774 waiter->task = task;
14775 waiter->lock = lock;
14776 waiter->prio = task->prio;
14777 + waiter->deadline = task->dl.deadline;
14779 /* Get the top priority waiter on the lock */
14780 if (rt_mutex_has_waiters(lock))
14781 @@ -993,8 +1422,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
14782 rt_mutex_dequeue_pi(owner, top_waiter);
14783 rt_mutex_enqueue_pi(owner, waiter);
14785 - __rt_mutex_adjust_prio(owner);
14786 - if (owner->pi_blocked_on)
14787 + rt_mutex_adjust_prio(owner);
14788 + if (rt_mutex_real_waiter(owner->pi_blocked_on))
14790 } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
14792 @@ -1036,6 +1465,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
14793 * Called with lock->wait_lock held and interrupts disabled.
14795 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
14796 + struct wake_q_head *wake_sleeper_q,
14797 struct rt_mutex *lock)
14799 struct rt_mutex_waiter *waiter;
14800 @@ -1045,12 +1475,14 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
14801 waiter = rt_mutex_top_waiter(lock);
14804 - * Remove it from current->pi_waiters. We do not adjust a
14805 - * possible priority boost right now. We execute wakeup in the
14806 - * boosted mode and go back to normal after releasing
14807 - * lock->wait_lock.
14808 + * Remove it from current->pi_waiters and deboost.
14810 + * We must in fact deboost here in order to ensure we call
14811 + * rt_mutex_setprio() to update p->pi_top_task before the
14814 rt_mutex_dequeue_pi(current, waiter);
14815 + rt_mutex_adjust_prio(current);
14818 * As we are waking up the top waiter, and the waiter stays
14819 @@ -1062,9 +1494,22 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
14821 lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
14824 + * We deboosted before waking the top waiter task such that we don't
14825 + * run two tasks with the 'same' priority (and ensure the
14826 + * p->pi_top_task pointer points to a blocked task). This however can
14827 + * lead to priority inversion if we would get preempted after the
14828 + * deboost but before waking our donor task, hence the preempt_disable()
14831 + * Pairs with preempt_enable() in rt_mutex_postunlock();
14833 + preempt_disable();
14834 + if (waiter->savestate)
14835 + wake_q_add(wake_sleeper_q, waiter->task);
14837 + wake_q_add(wake_q, waiter->task);
14838 raw_spin_unlock(¤t->pi_lock);
14840 - wake_q_add(wake_q, waiter->task);
14844 @@ -1078,7 +1523,9 @@ static void remove_waiter(struct rt_mutex *lock,
14846 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
14847 struct task_struct *owner = rt_mutex_owner(lock);
14848 - struct rt_mutex *next_lock;
14849 + struct rt_mutex *next_lock = NULL;
14851 + lockdep_assert_held(&lock->wait_lock);
14853 raw_spin_lock(¤t->pi_lock);
14854 rt_mutex_dequeue(lock, waiter);
14855 @@ -1099,10 +1546,11 @@ static void remove_waiter(struct rt_mutex *lock,
14856 if (rt_mutex_has_waiters(lock))
14857 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
14859 - __rt_mutex_adjust_prio(owner);
14860 + rt_mutex_adjust_prio(owner);
14862 /* Store the lock on which owner is blocked or NULL */
14863 - next_lock = task_blocked_on_lock(owner);
14864 + if (rt_mutex_real_waiter(owner->pi_blocked_on))
14865 + next_lock = task_blocked_on_lock(owner);
14867 raw_spin_unlock(&owner->pi_lock);
14869 @@ -1138,21 +1586,30 @@ void rt_mutex_adjust_pi(struct task_struct *task)
14870 raw_spin_lock_irqsave(&task->pi_lock, flags);
14872 waiter = task->pi_blocked_on;
14873 - if (!waiter || (waiter->prio == task->prio &&
14874 - !dl_prio(task->prio))) {
14875 + if (!rt_mutex_real_waiter(waiter) ||
14876 + rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
14877 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
14880 next_lock = waiter->lock;
14881 - raw_spin_unlock_irqrestore(&task->pi_lock, flags);
14883 /* gets dropped in rt_mutex_adjust_prio_chain()! */
14884 get_task_struct(task);
14886 + raw_spin_unlock_irqrestore(&task->pi_lock, flags);
14887 rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
14888 next_lock, NULL, task);
14891 +void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
14893 + debug_rt_mutex_init_waiter(waiter);
14894 + RB_CLEAR_NODE(&waiter->pi_tree_entry);
14895 + RB_CLEAR_NODE(&waiter->tree_entry);
14896 + waiter->task = NULL;
14897 + waiter->savestate = savestate;
14901 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
14902 * @lock: the rt_mutex to take
14903 @@ -1166,7 +1623,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
14905 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
14906 struct hrtimer_sleeper *timeout,
14907 - struct rt_mutex_waiter *waiter)
14908 + struct rt_mutex_waiter *waiter,
14909 + struct ww_acquire_ctx *ww_ctx)
14913 @@ -1175,16 +1633,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
14914 if (try_to_take_rt_mutex(lock, current, waiter))
14918 - * TASK_INTERRUPTIBLE checks for signals and
14919 - * timeout. Ignored otherwise.
14921 - if (unlikely(state == TASK_INTERRUPTIBLE)) {
14922 - /* Signal pending? */
14923 - if (signal_pending(current))
14925 - if (timeout && !timeout->task)
14926 - ret = -ETIMEDOUT;
14927 + if (timeout && !timeout->task) {
14928 + ret = -ETIMEDOUT;
14931 + if (signal_pending_state(state, current)) {
14936 + if (ww_ctx && ww_ctx->acquired > 0) {
14937 + ret = __mutex_lock_check_stamp(lock, ww_ctx);
14941 @@ -1223,21 +1682,148 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
14945 +static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
14946 + struct ww_acquire_ctx *ww_ctx)
14948 +#ifdef CONFIG_DEBUG_MUTEXES
14950 + * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
14951 + * but released with a normal mutex_unlock in this call.
14953 + * This should never happen, always use ww_mutex_unlock.
14955 + DEBUG_LOCKS_WARN_ON(ww->ctx);
14958 + * Not quite done after calling ww_acquire_done()?
14960 + DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
14962 + if (ww_ctx->contending_lock) {
14964 + * After -EDEADLK you tried to
14965 + * acquire a different ww_mutex? Bad!
14967 + DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
14970 + * You called ww_mutex_lock after receiving -EDEADLK,
14971 + * but 'forgot' to unlock everything else first?
14973 + DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
14974 + ww_ctx->contending_lock = NULL;
14978 + * Naughty, using a different class will lead to undefined behavior!
14980 + DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
14982 + ww_ctx->acquired++;
14985 +#ifdef CONFIG_PREEMPT_RT_FULL
14986 +static void ww_mutex_account_lock(struct rt_mutex *lock,
14987 + struct ww_acquire_ctx *ww_ctx)
14989 + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
14990 + struct rt_mutex_waiter *waiter, *n;
14993 + * This branch gets optimized out for the common case,
14994 + * and is only important for ww_mutex_lock.
14996 + ww_mutex_lock_acquired(ww, ww_ctx);
14997 + ww->ctx = ww_ctx;
15000 + * Give any possible sleeping processes the chance to wake up,
15001 + * so they can recheck if they have to back off.
15003 + rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
15005 + /* XXX debug rt mutex waiter wakeup */
15007 + BUG_ON(waiter->lock != lock);
15008 + rt_mutex_wake_waiter(waiter);
15014 +static void ww_mutex_account_lock(struct rt_mutex *lock,
15015 + struct ww_acquire_ctx *ww_ctx)
15021 +int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
15022 + struct hrtimer_sleeper *timeout,
15023 + enum rtmutex_chainwalk chwalk,
15024 + struct ww_acquire_ctx *ww_ctx,
15025 + struct rt_mutex_waiter *waiter)
15029 + /* Try to acquire the lock again: */
15030 + if (try_to_take_rt_mutex(lock, current, NULL)) {
15032 + ww_mutex_account_lock(lock, ww_ctx);
15036 + set_current_state(state);
15038 + /* Setup the timer, when timeout != NULL */
15039 + if (unlikely(timeout))
15040 + hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
15042 + ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
15044 + if (likely(!ret)) {
15045 + /* sleep on the mutex */
15046 + ret = __rt_mutex_slowlock(lock, state, timeout, waiter,
15048 + } else if (ww_ctx) {
15049 + /* ww_mutex received EDEADLK, let it become EALREADY */
15050 + ret = __mutex_lock_check_stamp(lock, ww_ctx);
15054 + if (unlikely(ret)) {
15055 + __set_current_state(TASK_RUNNING);
15056 + if (rt_mutex_has_waiters(lock))
15057 + remove_waiter(lock, waiter);
15058 + /* ww_mutex want to report EDEADLK/EALREADY, let them */
15060 + rt_mutex_handle_deadlock(ret, chwalk, waiter);
15061 + } else if (ww_ctx) {
15062 + ww_mutex_account_lock(lock, ww_ctx);
15066 + * try_to_take_rt_mutex() sets the waiter bit
15067 + * unconditionally. We might have to fix that up.
15069 + fixup_rt_mutex_waiters(lock);
15074 * Slow path lock function:
15077 rt_mutex_slowlock(struct rt_mutex *lock, int state,
15078 struct hrtimer_sleeper *timeout,
15079 - enum rtmutex_chainwalk chwalk)
15080 + enum rtmutex_chainwalk chwalk,
15081 + struct ww_acquire_ctx *ww_ctx)
15083 struct rt_mutex_waiter waiter;
15084 unsigned long flags;
15087 - debug_rt_mutex_init_waiter(&waiter);
15088 - RB_CLEAR_NODE(&waiter.pi_tree_entry);
15089 - RB_CLEAR_NODE(&waiter.tree_entry);
15090 + rt_mutex_init_waiter(&waiter, false);
15093 * Technically we could use raw_spin_[un]lock_irq() here, but this can
15094 @@ -1249,36 +1835,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
15096 raw_spin_lock_irqsave(&lock->wait_lock, flags);
15098 - /* Try to acquire the lock again: */
15099 - if (try_to_take_rt_mutex(lock, current, NULL)) {
15100 - raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
15104 - set_current_state(state);
15106 - /* Setup the timer, when timeout != NULL */
15107 - if (unlikely(timeout))
15108 - hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
15110 - ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
15112 - if (likely(!ret))
15113 - /* sleep on the mutex */
15114 - ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
15116 - if (unlikely(ret)) {
15117 - __set_current_state(TASK_RUNNING);
15118 - if (rt_mutex_has_waiters(lock))
15119 - remove_waiter(lock, &waiter);
15120 - rt_mutex_handle_deadlock(ret, chwalk, &waiter);
15124 - * try_to_take_rt_mutex() sets the waiter bit
15125 - * unconditionally. We might have to fix that up.
15127 - fixup_rt_mutex_waiters(lock);
15128 + ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx,
15131 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
15133 @@ -1328,10 +1886,12 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
15136 * Slow path to release a rt-mutex.
15137 - * Return whether the current task needs to undo a potential priority boosting.
15139 + * Return whether the current task needs to call rt_mutex_postunlock().
15141 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
15142 - struct wake_q_head *wake_q)
15143 + struct wake_q_head *wake_q,
15144 + struct wake_q_head *wake_sleeper_q)
15146 unsigned long flags;
15148 @@ -1340,8 +1900,6 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
15150 debug_rt_mutex_unlock(lock);
15152 - rt_mutex_deadlock_account_unlock(current);
15155 * We must be careful here if the fast path is enabled. If we
15156 * have no waiters queued we cannot set owner to NULL here
15157 @@ -1387,12 +1945,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
15159 * Queue the next waiter for wakeup once we release the wait_lock.
15161 - mark_wakeup_next_waiter(wake_q, lock);
15163 + mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
15164 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
15166 - /* check PI boosting */
15168 + return true; /* call rt_mutex_postunlock() */
15172 @@ -1403,63 +1959,85 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
15175 rt_mutex_fastlock(struct rt_mutex *lock, int state,
15176 + struct ww_acquire_ctx *ww_ctx,
15177 int (*slowfn)(struct rt_mutex *lock, int state,
15178 struct hrtimer_sleeper *timeout,
15179 - enum rtmutex_chainwalk chwalk))
15180 + enum rtmutex_chainwalk chwalk,
15181 + struct ww_acquire_ctx *ww_ctx))
15183 - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
15184 - rt_mutex_deadlock_account_lock(lock, current);
15185 + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
15188 - return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
15190 + return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
15194 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
15195 struct hrtimer_sleeper *timeout,
15196 enum rtmutex_chainwalk chwalk,
15197 + struct ww_acquire_ctx *ww_ctx,
15198 int (*slowfn)(struct rt_mutex *lock, int state,
15199 struct hrtimer_sleeper *timeout,
15200 - enum rtmutex_chainwalk chwalk))
15201 + enum rtmutex_chainwalk chwalk,
15202 + struct ww_acquire_ctx *ww_ctx))
15204 if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
15205 - likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
15206 - rt_mutex_deadlock_account_lock(lock, current);
15207 + likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
15210 - return slowfn(lock, state, timeout, chwalk);
15212 + return slowfn(lock, state, timeout, chwalk, ww_ctx);
15216 rt_mutex_fasttrylock(struct rt_mutex *lock,
15217 int (*slowfn)(struct rt_mutex *lock))
15219 - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
15220 - rt_mutex_deadlock_account_lock(lock, current);
15221 + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
15225 return slowfn(lock);
15229 + * Performs the wakeup of the top-waiter and re-enables preemption.
15231 +void rt_mutex_postunlock(struct wake_q_head *wake_q,
15232 + struct wake_q_head *wq_sleeper)
15234 + wake_up_q(wake_q);
15235 + wake_up_q_sleeper(wq_sleeper);
15237 + /* Pairs with preempt_disable() in rt_mutex_slowunlock() */
15238 + preempt_enable();
15242 rt_mutex_fastunlock(struct rt_mutex *lock,
15243 bool (*slowfn)(struct rt_mutex *lock,
15244 - struct wake_q_head *wqh))
15245 + struct wake_q_head *wqh,
15246 + struct wake_q_head *wq_sleeper))
15249 + WAKE_Q(wake_sleeper_q);
15251 - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
15252 - rt_mutex_deadlock_account_unlock(current);
15253 + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
15257 - bool deboost = slowfn(lock, &wake_q);
15258 + if (slowfn(lock, &wake_q, &wake_sleeper_q))
15259 + rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
15262 - wake_up_q(&wake_q);
15264 + * rt_mutex_lock_state - lock a rt_mutex with a given state
15266 + * @lock: The rt_mutex to be locked
15267 + * @state: The state to set when blocking on the rt_mutex
15269 +int __sched rt_mutex_lock_state(struct rt_mutex *lock, int state)
15273 - /* Undo pi boosting if necessary: */
15275 - rt_mutex_adjust_prio(current);
15277 + return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);
15281 @@ -1469,15 +2047,13 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
15283 void __sched rt_mutex_lock(struct rt_mutex *lock)
15287 - rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
15288 + rt_mutex_lock_state(lock, TASK_UNINTERRUPTIBLE);
15290 EXPORT_SYMBOL_GPL(rt_mutex_lock);
15293 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
15296 * @lock: the rt_mutex to be locked
15299 @@ -1486,23 +2062,32 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
15301 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
15305 - return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
15306 + return rt_mutex_lock_state(lock, TASK_INTERRUPTIBLE);
15308 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
15311 - * Futex variant with full deadlock detection.
15313 + * rt_mutex_lock_killable - lock a rt_mutex killable
15315 + * @lock: the rt_mutex to be locked
15316 + * @detect_deadlock: deadlock detection on/off
15320 + * -EINTR when interrupted by a signal
15322 -int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
15323 - struct hrtimer_sleeper *timeout)
15324 +int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
15327 + return rt_mutex_lock_state(lock, TASK_KILLABLE);
15329 +EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
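A minimal usage sketch of the killable variant added above; my_lock and do_work_killable() are illustrative names, not part of the patch.

#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(my_lock);

static int do_work_killable(void)
{
	int ret = rt_mutex_lock_killable(&my_lock);

	/* -EINTR: a fatal signal arrived while sleeping on the lock. */
	if (ret)
		return ret;

	/* ... critical section ... */

	rt_mutex_unlock(&my_lock);
	return 0;
}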
15331 - return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
15332 - RT_MUTEX_FULL_CHAINWALK,
15333 - rt_mutex_slowlock);
15335 + * Futex variant, must not use fastpath.
15337 +int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
15339 + return rt_mutex_slowtrylock(lock);
15343 @@ -1525,6 +2110,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
15345 return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
15346 RT_MUTEX_MIN_CHAINWALK,
15348 rt_mutex_slowlock);
15350 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
15351 @@ -1542,7 +2128,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
15353 int __sched rt_mutex_trylock(struct rt_mutex *lock)
15355 +#ifdef CONFIG_PREEMPT_RT_FULL
15356 + if (WARN_ON_ONCE(in_irq() || in_nmi()))
15358 if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
15362 return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
15363 @@ -1560,21 +2150,53 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
15365 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
15368 - * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
15369 - * @lock: the rt_mutex to be unlocked
15371 - * Returns: true/false indicating whether priority adjustment is
15372 - * required or not.
15374 -bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
15375 - struct wake_q_head *wqh)
15376 +static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
15377 + struct wake_q_head *wake_q,
15378 + struct wake_q_head *wq_sleeper)
15380 - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
15381 - rt_mutex_deadlock_account_unlock(current);
15383 + lockdep_assert_held(&lock->wait_lock);
15385 + debug_rt_mutex_unlock(lock);
15387 + if (!rt_mutex_has_waiters(lock)) {
15388 + lock->owner = NULL;
15389 + return false; /* done */
15391 - return rt_mutex_slowunlock(lock, wqh);
15394 + * We've already deboosted, mark_wakeup_next_waiter() will
15395 + * retain preempt_disabled when we drop the wait_lock, to
15396 + * avoid inversion prior to the wakeup. preempt_disable()
15397 + * therein pairs with rt_mutex_postunlock().
15399 + mark_wakeup_next_waiter(wake_q, wq_sleeper, lock);
15401 + return true; /* call postunlock() */
15405 + * Futex variant; since futex variants do not use the fast path, this
15406 + * can be simple and will not need to retry.
15408 +bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
15409 + struct wake_q_head *wake_q,
15410 + struct wake_q_head *wq_sleeper)
15412 + return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper);
15415 +void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
15418 + WAKE_Q(wake_sleeper_q);
15421 + raw_spin_lock_irq(&lock->wait_lock);
15422 + postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q);
15423 + raw_spin_unlock_irq(&lock->wait_lock);
15426 + rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
15430 @@ -1607,13 +2229,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
15431 void __rt_mutex_init(struct rt_mutex *lock, const char *name)
15433 lock->owner = NULL;
15434 - raw_spin_lock_init(&lock->wait_lock);
15435 lock->waiters = RB_ROOT;
15436 lock->waiters_leftmost = NULL;
15438 debug_rt_mutex_init(lock, name);
15440 -EXPORT_SYMBOL_GPL(__rt_mutex_init);
15441 +EXPORT_SYMBOL(__rt_mutex_init);
15444 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
15445 @@ -1628,10 +2249,9 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
15446 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
15447 struct task_struct *proxy_owner)
15449 - __rt_mutex_init(lock, NULL);
15450 + rt_mutex_init(lock);
15451 debug_rt_mutex_proxy_lock(lock, proxy_owner);
15452 rt_mutex_set_owner(lock, proxy_owner);
15453 - rt_mutex_deadlock_account_lock(lock, proxy_owner);
15457 @@ -1647,7 +2267,66 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
15459 debug_rt_mutex_proxy_unlock(lock);
15460 rt_mutex_set_owner(lock, NULL);
15461 - rt_mutex_deadlock_account_unlock(proxy_owner);
15464 +int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
15465 + struct rt_mutex_waiter *waiter,
15466 + struct task_struct *task)
15470 + if (try_to_take_rt_mutex(lock, task, NULL))
15473 +#ifdef CONFIG_PREEMPT_RT_FULL
15475 + * In PREEMPT_RT there's an added race.
15476 + * If the task that we are about to requeue times out,
15477 + * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
15478 + * to skip this task. But right after the task sets
15479 + * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
15480 + * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
15481 + * This will replace the PI_WAKEUP_INPROGRESS with the actual
15482 + * lock that it blocks on. We *must not* place this task
15483 + * on this proxy lock in that case.
15485 + * To prevent this race, we first take the task's pi_lock
15486 + * and check if it has updated its pi_blocked_on. If it has,
15487 + * we assume that it woke up and we return -EAGAIN.
15488 + * Otherwise, we set the task's pi_blocked_on to
15489 + * PI_REQUEUE_INPROGRESS, so that if the task is waking up
15490 + * it will know that we are in the process of requeuing it.
15492 + raw_spin_lock(&task->pi_lock);
15493 + if (task->pi_blocked_on) {
15494 + raw_spin_unlock(&task->pi_lock);
15495 + raw_spin_unlock_irq(&lock->wait_lock);
15498 + task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
15499 + raw_spin_unlock(&task->pi_lock);
15502 + /* We enforce deadlock detection for futexes */
15503 + ret = task_blocks_on_rt_mutex(lock, waiter, task,
15504 + RT_MUTEX_FULL_CHAINWALK);
15506 + if (ret && !rt_mutex_owner(lock)) {
15508 + * Reset the return value. We might have
15509 + * returned with -EDEADLK and the owner
15510 + * released the lock while we were walking the
15511 + * pi chain. Let the waiter sort it out.
15516 + if (ret && rt_mutex_has_waiters(lock))
15517 + remove_waiter(lock, waiter);
15519 + debug_rt_mutex_print_deadlock(waiter);
15525 @@ -1670,33 +2349,9 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
15528 raw_spin_lock_irq(&lock->wait_lock);
15530 - if (try_to_take_rt_mutex(lock, task, NULL)) {
15531 - raw_spin_unlock_irq(&lock->wait_lock);
15535 - /* We enforce deadlock detection for futexes */
15536 - ret = task_blocks_on_rt_mutex(lock, waiter, task,
15537 - RT_MUTEX_FULL_CHAINWALK);
15539 - if (ret && !rt_mutex_owner(lock)) {
15541 - * Reset the return value. We might have
15542 - * returned with -EDEADLK and the owner
15543 - * released the lock while we were walking the
15544 - * pi chain. Let the waiter sort it out.
15549 - if (unlikely(ret))
15550 - remove_waiter(lock, waiter);
15552 + ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
15553 raw_spin_unlock_irq(&lock->wait_lock);
15555 - debug_rt_mutex_print_deadlock(waiter);
15560 @@ -1721,36 +2376,106 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
15564 - * rt_mutex_finish_proxy_lock() - Complete lock acquisition
15565 + * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
15566 * @lock: the rt_mutex we were woken on
15567 * @to: the timeout, null if none. hrtimer should already have
15569 * @waiter: the pre-initialized rt_mutex_waiter
15571 - * Complete the lock acquisition started our behalf by another thread.
15572 + * Wait for the lock acquisition started on our behalf by
15573 + * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
15574 + * rt_mutex_cleanup_proxy_lock().
15578 * <0 - error, one of -EINTR, -ETIMEDOUT
15580 - * Special API call for PI-futex requeue support
15581 + * Special API call for PI-futex support
15583 -int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
15584 +int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
15585 struct hrtimer_sleeper *to,
15586 struct rt_mutex_waiter *waiter)
15588 + struct task_struct *tsk = current;
15591 raw_spin_lock_irq(&lock->wait_lock);
15593 - set_current_state(TASK_INTERRUPTIBLE);
15595 /* sleep on the mutex */
15596 - ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
15597 + set_current_state(TASK_INTERRUPTIBLE);
15598 + ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
15600 + * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
15601 + * have to fix that up.
15603 + fixup_rt_mutex_waiters(lock);
15605 - if (unlikely(ret))
15607 + * RT has a problem here when the wait got interrupted by a timeout
15608 + * or a signal. task->pi_blocked_on is still set. The task must
15609 + * acquire the hash bucket lock when returning from this function.
15611 + * If the hash bucket lock is contended then the
15612 + * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
15613 + * task_blocks_on_rt_mutex() will trigger. This can be avoided by
15614 + * clearing task->pi_blocked_on which removes the task from the
15615 + * boosting chain of the rtmutex. That's correct because the task
15616 + * is no longer blocked on it.
15619 + raw_spin_lock(&tsk->pi_lock);
15620 + tsk->pi_blocked_on = NULL;
15621 + raw_spin_unlock(&tsk->pi_lock);
15623 + raw_spin_unlock_irq(&lock->wait_lock);
15629 + * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
15630 + * @lock: the rt_mutex we were woken on
15631 + * @waiter: the pre-initialized rt_mutex_waiter
15633 + * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
15635 + * Unless we acquired the lock, we're still enqueued on the wait-list and can
15636 + * in fact still be granted ownership until we're removed. Therefore we can
15637 + * find we are in fact the owner and must disregard the
15638 + * rt_mutex_wait_proxy_lock() failure.
15641 + * true - did the cleanup, we are done.
15642 + * false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
15643 + * caller should disregard its return value.
15645 + * Special API call for PI-futex support
15647 +bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
15648 + struct rt_mutex_waiter *waiter)
15650 + bool cleanup = false;
15652 + raw_spin_lock_irq(&lock->wait_lock);
15654 + * Do an unconditional try-lock, this deals with the lock stealing
15655 + * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
15656 + * sets a NULL owner.
15658 + * We're not interested in the return value, because the subsequent
15659 + * test on rt_mutex_owner() will infer that. If the trylock succeeded,
15660 + * we will own the lock and it will have removed the waiter. If we
15661 + * failed the trylock, we're still not owner and we need to remove
15664 + try_to_take_rt_mutex(lock, current, waiter);
15666 + * Unless we're the owner, we're still enqueued on the wait_list.
15667 + * So check if we became owner, if not, take us off the wait_list.
15669 + if (rt_mutex_owner(lock) != current) {
15670 remove_waiter(lock, waiter);
15675 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
15676 * have to fix that up.
15677 @@ -1759,5 +2484,91 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
15679 raw_spin_unlock_irq(&lock->wait_lock);
15685 +ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
15687 +#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
15690 + if (ctx->deadlock_inject_countdown-- == 0) {
15691 + tmp = ctx->deadlock_inject_interval;
15692 + if (tmp > UINT_MAX/4)
15695 + tmp = tmp*2 + tmp + tmp/2;
15697 + ctx->deadlock_inject_interval = tmp;
15698 + ctx->deadlock_inject_countdown = tmp;
15699 + ctx->contending_lock = lock;
15701 + ww_mutex_unlock(lock);
15710 +#ifdef CONFIG_PREEMPT_RT_FULL
15712 +__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
15718 + mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
15719 + ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
15721 + mutex_release(&lock->base.dep_map, 1, _RET_IP_);
15722 + else if (!ret && ww_ctx->acquired > 1)
15723 + return ww_mutex_deadlock_injection(lock, ww_ctx);
15727 +EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
15730 +__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
15736 + mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
15737 + ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
15739 + mutex_release(&lock->base.dep_map, 1, _RET_IP_);
15740 + else if (!ret && ww_ctx->acquired > 1)
15741 + return ww_mutex_deadlock_injection(lock, ww_ctx);
15745 +EXPORT_SYMBOL_GPL(__ww_mutex_lock);
15747 +void __sched ww_mutex_unlock(struct ww_mutex *lock)
15749 + int nest = !!lock->ctx;
15752 + * The unlocking fastpath is the 0->1 transition from 'locked'
15753 + * into 'unlocked' state:
15756 +#ifdef CONFIG_DEBUG_MUTEXES
15757 + DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
15759 + if (lock->ctx->acquired > 0)
15760 + lock->ctx->acquired--;
15761 + lock->ctx = NULL;
15764 + mutex_release(&lock->base.dep_map, nest, _RET_IP_);
15765 + rt_mutex_unlock(&lock->base.lock);
15767 +EXPORT_SYMBOL(ww_mutex_unlock);
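The RT ww_mutex paths above are reached through the generic wound/wait API; a rough sketch of the usual -EDEADLK back-off loop for two locks is shown below. The demo_ww_class, lock_pair() and the swap-based retry are illustrative only, not part of the patch.

#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

static void lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &demo_ww_class);

	/* First lock of the context: nothing is held yet, so it cannot deadlock. */
	WARN_ON(ww_mutex_lock(a, &ctx));

	while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		/*
		 * Lost the stamp race: drop what we hold, sleep on the
		 * contended lock, then retry the remaining one.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);
	}
	ww_acquire_done(&ctx);

	/* ... both objects locked, do the work ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}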
15769 diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
15770 index c4060584c407..6607802efa8b 100644
15771 --- a/kernel/locking/rtmutex.h
15772 +++ b/kernel/locking/rtmutex.h
15776 #define rt_mutex_deadlock_check(l) (0)
15777 -#define rt_mutex_deadlock_account_lock(m, t) do { } while (0)
15778 -#define rt_mutex_deadlock_account_unlock(l) do { } while (0)
15779 #define debug_rt_mutex_init_waiter(w) do { } while (0)
15780 #define debug_rt_mutex_free_waiter(w) do { } while (0)
15781 #define debug_rt_mutex_lock(l) do { } while (0)
15782 diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
15783 index e317e1cbb3eb..64d89d780059 100644
15784 --- a/kernel/locking/rtmutex_common.h
15785 +++ b/kernel/locking/rtmutex_common.h
15786 @@ -27,12 +27,14 @@ struct rt_mutex_waiter {
15787 struct rb_node pi_tree_entry;
15788 struct task_struct *task;
15789 struct rt_mutex *lock;
15791 #ifdef CONFIG_DEBUG_RT_MUTEXES
15793 struct pid *deadlock_task_pid;
15794 struct rt_mutex *deadlock_lock;
15801 @@ -98,21 +100,45 @@ enum rtmutex_chainwalk {
15803 * PI-futex support (proxy locking functions, etc.):
15805 +#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
15806 +#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
15808 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
15809 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
15810 struct task_struct *proxy_owner);
15811 extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
15812 struct task_struct *proxy_owner);
15813 +extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate);
15814 +extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
15815 + struct rt_mutex_waiter *waiter,
15816 + struct task_struct *task);
15817 extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
15818 struct rt_mutex_waiter *waiter,
15819 struct task_struct *task);
15820 -extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
15821 - struct hrtimer_sleeper *to,
15822 - struct rt_mutex_waiter *waiter);
15823 -extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
15824 -extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
15825 - struct wake_q_head *wqh);
15826 -extern void rt_mutex_adjust_prio(struct task_struct *task);
15827 +extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
15828 + struct hrtimer_sleeper *to,
15829 + struct rt_mutex_waiter *waiter);
15830 +extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
15831 + struct rt_mutex_waiter *waiter);
15833 +extern int rt_mutex_futex_trylock(struct rt_mutex *l);
15835 +extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
15836 +extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
15837 + struct wake_q_head *wqh,
15838 + struct wake_q_head *wq_sleeper);
15840 +extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
15841 + struct wake_q_head *wq_sleeper);
15843 +/* RW semaphore special interface */
15844 +struct ww_acquire_ctx;
15846 +int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
15847 + struct hrtimer_sleeper *timeout,
15848 + enum rtmutex_chainwalk chwalk,
15849 + struct ww_acquire_ctx *ww_ctx,
15850 + struct rt_mutex_waiter *waiter);
15852 #ifdef CONFIG_DEBUG_RT_MUTEXES
15853 # include "rtmutex-debug.h"
15854 diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c
15855 new file mode 100644
15856 index 000000000000..4a708ffcded6
15858 +++ b/kernel/locking/rwsem-rt.c
15862 +#include <linux/rwsem.h>
15863 +#include <linux/sched.h>
15864 +#include <linux/export.h>
15866 +#include "rtmutex_common.h"
15869 + * RT-specific reader/writer semaphores
15872 + * 1) Lock sem->rtmutex
15873 + * 2) Remove the reader BIAS to force readers into the slow path
15874 + * 3) Wait until all readers have left the critical region
15875 + * 4) Mark it write locked
15878 + * 1) Remove the write locked marker
15879 + * 2) Set the reader BIAS so readers can use the fast path again
15880 + * 3) Unlock sem->rtmutex to release blocked readers
15883 + * 1) Try fast path acquisition (reader BIAS is set)
15884 + * 2) Take sem->rtmutex.wait_lock which protects the writelocked flag
15885 + * 3) If !writelocked, acquire it for read
15886 + * 4) If writelocked, block on sem->rtmutex
15887 + * 5) unlock sem->rtmutex, goto 1)
15890 + * 1) Try fast path release (reader count != 1)
15891 + * 2) Wake the writer waiting in down_write()#3
15893 + * down_read()#3 has the consequence that rw semaphores on RT are not writer
15894 + * fair, but writers, which should be avoided in RT tasks (think mmap_sem),
15895 + * are subject to the rtmutex priority/DL inheritance mechanism.
15897 + * It's possible to make the rw semaphores writer fair by keeping a list of
15898 + * active readers. A blocked writer would force all newly incoming readers to
15899 + * block on the rtmutex, but the rtmutex would have to be proxy locked for one
15900 + * reader after the other. We can't use multi-reader inheritance because there
15901 + * is no way to support that with SCHED_DEADLINE. Implementing the one by one
15902 + * reader boosting/handover mechanism is a major surgery for a very dubious
15905 + * The risk of writer starvation is there, but the pathological use cases
15906 + * which trigger it are not necessarily the typical RT workloads.
15909 +void __rwsem_init(struct rw_semaphore *sem, const char *name,
15910 + struct lock_class_key *key)
15912 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
15914 + * Make sure we are not reinitializing a held semaphore:
15916 + debug_check_no_locks_freed((void *)sem, sizeof(*sem));
15917 + lockdep_init_map(&sem->dep_map, name, key, 0);
15919 + atomic_set(&sem->readers, READER_BIAS);
15921 +EXPORT_SYMBOL(__rwsem_init);
15923 +int __down_read_trylock(struct rw_semaphore *sem)
15928 + * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is
15931 + for (r = atomic_read(&sem->readers); r < 0;) {
15932 + old = atomic_cmpxchg(&sem->readers, r, r + 1);
15933 + if (likely(old == r))
15940 +void __sched __down_read(struct rw_semaphore *sem)
15942 + struct rt_mutex *m = &sem->rtmutex;
15943 + struct rt_mutex_waiter waiter;
15945 + if (__down_read_trylock(sem))
15949 + raw_spin_lock_irq(&m->wait_lock);
15951 + * Allow readers as long as the writer has not completely
15952 + * acquired the semaphore for write.
15954 + if (atomic_read(&sem->readers) != WRITER_BIAS) {
15955 + atomic_inc(&sem->readers);
15956 + raw_spin_unlock_irq(&m->wait_lock);
15961 + * Call into the slow lock path with the rtmutex->wait_lock
15962 + * held, so this can't result in the following race:
15964 + * Reader1 Reader2 Writer
15967 + * rtmutex_lock(m)
15970 + * unlock(m->wait_lock)
15973 + * lock(m->wait_lock)
15974 + * sem->writelocked=true
15975 + * unlock(m->wait_lock)
15978 + * sem->writelocked=false
15979 + * rtmutex_unlock(m)
15982 + * rtmutex_lock(m)
15984 + * rtmutex_lock(m)
15986 + * That would put Reader1 behind the writer waiting on
15987 + * Reader2 to call up_read() which might be unbound.
15989 + rt_mutex_init_waiter(&waiter, false);
15990 + rt_mutex_slowlock_locked(m, TASK_UNINTERRUPTIBLE, NULL,
15991 + RT_MUTEX_MIN_CHAINWALK, NULL,
15994 + * The slowlock() above is guaranteed to return with the rtmutex
15995 + * now held, so there can't be a writer active. Increment the reader
15996 + * count and immediately drop the rtmutex again.
15998 + atomic_inc(&sem->readers);
15999 + raw_spin_unlock_irq(&m->wait_lock);
16000 + rt_mutex_unlock(m);
16002 + debug_rt_mutex_free_waiter(&waiter);
16005 +void __up_read(struct rw_semaphore *sem)
16007 + struct rt_mutex *m = &sem->rtmutex;
16008 + struct task_struct *tsk;
16011 + * sem->readers can only hit 0 when a writer is waiting for the
16012 + * active readers to leave the critical region.
16014 + if (!atomic_dec_and_test(&sem->readers))
16018 + raw_spin_lock_irq(&m->wait_lock);
16020 + * Wake the writer, i.e. the rtmutex owner. It might release the
16021 + * rtmutex concurrently in the fast path (due to a signal), but to
16022 + * clean up the rwsem it needs to acquire m->wait_lock. The worst
16023 + * case which can happen is a spurious wakeup.
16025 + tsk = rt_mutex_owner(m);
16027 + wake_up_process(tsk);
16029 + raw_spin_unlock_irq(&m->wait_lock);
16032 +static void __up_write_unlock(struct rw_semaphore *sem, int bias,
16033 + unsigned long flags)
16035 + struct rt_mutex *m = &sem->rtmutex;
16037 + atomic_add(READER_BIAS - bias, &sem->readers);
16038 + raw_spin_unlock_irqrestore(&m->wait_lock, flags);
16039 + rt_mutex_unlock(m);
16042 +static int __sched __down_write_common(struct rw_semaphore *sem, int state)
16044 + struct rt_mutex *m = &sem->rtmutex;
16045 + unsigned long flags;
16047 + /* Take the rtmutex as a first step */
16048 + if (rt_mutex_lock_state(m, state))
16051 + /* Force readers into slow path */
16052 + atomic_sub(READER_BIAS, &sem->readers);
16055 + set_current_state(state);
16057 + raw_spin_lock_irqsave(&m->wait_lock, flags);
16058 + /* Have all readers left the critical region? */
16059 + if (!atomic_read(&sem->readers)) {
16060 + atomic_set(&sem->readers, WRITER_BIAS);
16061 + __set_current_state(TASK_RUNNING);
16062 + raw_spin_unlock_irqrestore(&m->wait_lock, flags);
16066 + if (signal_pending_state(state, current)) {
16067 + __set_current_state(TASK_RUNNING);
16068 + __up_write_unlock(sem, 0, flags);
16071 + raw_spin_unlock_irqrestore(&m->wait_lock, flags);
16073 + if (atomic_read(&sem->readers) != 0) {
16075 + set_current_state(state);
16080 +void __sched __down_write(struct rw_semaphore *sem)
16082 + __down_write_common(sem, TASK_UNINTERRUPTIBLE);
16085 +int __sched __down_write_killable(struct rw_semaphore *sem)
16087 + return __down_write_common(sem, TASK_KILLABLE);
16090 +int __down_write_trylock(struct rw_semaphore *sem)
16092 + struct rt_mutex *m = &sem->rtmutex;
16093 + unsigned long flags;
16095 + if (!rt_mutex_trylock(m))
16098 + atomic_sub(READER_BIAS, &sem->readers);
16100 + raw_spin_lock_irqsave(&m->wait_lock, flags);
16101 + if (!atomic_read(&sem->readers)) {
16102 + atomic_set(&sem->readers, WRITER_BIAS);
16103 + raw_spin_unlock_irqrestore(&m->wait_lock, flags);
16106 + __up_write_unlock(sem, 0, flags);
16110 +void __up_write(struct rw_semaphore *sem)
16112 + struct rt_mutex *m = &sem->rtmutex;
16113 + unsigned long flags;
16115 + raw_spin_lock_irqsave(&m->wait_lock, flags);
16116 + __up_write_unlock(sem, WRITER_BIAS, flags);
16119 +void __downgrade_write(struct rw_semaphore *sem)
16121 + struct rt_mutex *m = &sem->rtmutex;
16122 + unsigned long flags;
16124 + raw_spin_lock_irqsave(&m->wait_lock, flags);
16125 + /* Release it and account current as reader */
16126 + __up_write_unlock(sem, WRITER_BIAS - 1, flags);
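The write and downgrade paths above sit behind the regular rwsem API; a small illustrative caller (cache_sem and cache_refresh() are made-up names) could look like this:

#include <linux/rwsem.h>

static DECLARE_RWSEM(cache_sem);

static void cache_refresh(void)
{
	down_write(&cache_sem);		/* __down_write() above on RT */

	/* ... rebuild the shared data with all readers excluded ... */

	downgrade_write(&cache_sem);	/* __downgrade_write(): stay accounted as one reader */

	/* ... other readers may now enter while we keep using the data ... */

	up_read(&cache_sem);		/* __up_read() */
}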
16128 diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
16129 index db3ccb1dd614..909779647bd1 100644
16130 --- a/kernel/locking/spinlock.c
16131 +++ b/kernel/locking/spinlock.c
16132 @@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
16133 * __[spin|read|write]_lock_bh()
16135 BUILD_LOCK_OPS(spin, raw_spinlock);
16137 +#ifndef CONFIG_PREEMPT_RT_FULL
16138 BUILD_LOCK_OPS(read, rwlock);
16139 BUILD_LOCK_OPS(write, rwlock);
16144 @@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
16145 EXPORT_SYMBOL(_raw_spin_unlock_bh);
16148 +#ifndef CONFIG_PREEMPT_RT_FULL
16150 #ifndef CONFIG_INLINE_READ_TRYLOCK
16151 int __lockfunc _raw_read_trylock(rwlock_t *lock)
16153 @@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
16154 EXPORT_SYMBOL(_raw_write_unlock_bh);
16157 +#endif /* !PREEMPT_RT_FULL */
16159 #ifdef CONFIG_DEBUG_LOCK_ALLOC
16161 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
16162 diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
16163 index 0374a596cffa..94970338d518 100644
16164 --- a/kernel/locking/spinlock_debug.c
16165 +++ b/kernel/locking/spinlock_debug.c
16166 @@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
16168 EXPORT_SYMBOL(__raw_spin_lock_init);
16170 +#ifndef CONFIG_PREEMPT_RT_FULL
16171 void __rwlock_init(rwlock_t *lock, const char *name,
16172 struct lock_class_key *key)
16174 @@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
16177 EXPORT_SYMBOL(__rwlock_init);
16180 static void spin_dump(raw_spinlock_t *lock, const char *msg)
16182 @@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
16183 arch_spin_unlock(&lock->raw_lock);
16186 +#ifndef CONFIG_PREEMPT_RT_FULL
16187 static void rwlock_bug(rwlock_t *lock, const char *msg)
16189 if (!debug_locks_off())
16190 @@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock)
16191 debug_write_unlock(lock);
16192 arch_write_unlock(&lock->raw_lock);
16196 diff --git a/kernel/module.c b/kernel/module.c
16197 index 0e54d5bf0097..f27764fbfa24 100644
16198 --- a/kernel/module.c
16199 +++ b/kernel/module.c
16200 @@ -660,16 +660,7 @@ static void percpu_modcopy(struct module *mod,
16201 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
16205 - * is_module_percpu_address - test whether address is from module static percpu
16206 - * @addr: address to test
16208 - * Test whether @addr belongs to module static percpu area.
16211 - * %true if @addr is from module static percpu area
16213 -bool is_module_percpu_address(unsigned long addr)
16214 +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
16216 struct module *mod;
16218 @@ -683,9 +674,15 @@ bool is_module_percpu_address(unsigned long addr)
16220 for_each_possible_cpu(cpu) {
16221 void *start = per_cpu_ptr(mod->percpu, cpu);
16222 + void *va = (void *)addr;
16224 - if ((void *)addr >= start &&
16225 - (void *)addr < start + mod->percpu_size) {
16226 + if (va >= start && va < start + mod->percpu_size) {
16228 + *can_addr = (unsigned long) (va - start);
16229 + *can_addr += (unsigned long)
16230 + per_cpu_ptr(mod->percpu,
16231 + get_boot_cpu_id());
16236 @@ -696,6 +693,20 @@ bool is_module_percpu_address(unsigned long addr)
16241 + * is_module_percpu_address - test whether address is from module static percpu
16242 + * @addr: address to test
16244 + * Test whether @addr belongs to module static percpu area.
16247 + * %true if @addr is from module static percpu area
16249 +bool is_module_percpu_address(unsigned long addr)
16251 + return __is_module_percpu_address(addr, NULL);
16254 #else /* ... !CONFIG_SMP */
16256 static inline void __percpu *mod_percpu(struct module *mod)
16257 @@ -727,6 +738,11 @@ bool is_module_percpu_address(unsigned long addr)
16261 +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
16266 #endif /* CONFIG_SMP */
16268 #define MODINFO_ATTR(field) \
16269 diff --git a/kernel/panic.c b/kernel/panic.c
16270 index e6480e20379e..7e9c1918a94e 100644
16271 --- a/kernel/panic.c
16272 +++ b/kernel/panic.c
16273 @@ -482,9 +482,11 @@ static u64 oops_id;
16275 static int init_oops_id(void)
16277 +#ifndef CONFIG_PREEMPT_RT_FULL
16279 get_random_bytes(&oops_id, sizeof(oops_id));
16285 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
16286 index b26dbc48c75b..968255f27a33 100644
16287 --- a/kernel/power/hibernate.c
16288 +++ b/kernel/power/hibernate.c
16289 @@ -286,6 +286,8 @@ static int create_image(int platform_mode)
16291 local_irq_disable();
16293 + system_state = SYSTEM_SUSPEND;
16295 error = syscore_suspend();
16297 printk(KERN_ERR "PM: Some system devices failed to power down, "
16298 @@ -317,6 +319,7 @@ static int create_image(int platform_mode)
16302 + system_state = SYSTEM_RUNNING;
16303 local_irq_enable();
16306 @@ -446,6 +449,7 @@ static int resume_target_kernel(bool platform_mode)
16309 local_irq_disable();
16310 + system_state = SYSTEM_SUSPEND;
16312 error = syscore_suspend();
16314 @@ -479,6 +483,7 @@ static int resume_target_kernel(bool platform_mode)
16318 + system_state = SYSTEM_RUNNING;
16319 local_irq_enable();
16322 @@ -564,6 +569,7 @@ int hibernation_platform_enter(void)
16325 local_irq_disable();
16326 + system_state = SYSTEM_SUSPEND;
16328 if (pm_wakeup_pending()) {
16330 @@ -576,6 +582,7 @@ int hibernation_platform_enter(void)
16334 + system_state = SYSTEM_RUNNING;
16335 local_irq_enable();
16338 @@ -676,6 +683,10 @@ static int load_image_and_restore(void)
16342 +#ifndef CONFIG_SUSPEND
16343 +bool pm_in_action;
16347 * hibernate - Carry out system hibernation, including saving the image.
16349 @@ -689,6 +700,8 @@ int hibernate(void)
16353 + pm_in_action = true;
16355 lock_system_sleep();
16356 /* The snapshot device should not be opened while we're running */
16357 if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
16358 @@ -766,6 +779,7 @@ int hibernate(void)
16359 atomic_inc(&snapshot_device_available);
16361 unlock_system_sleep();
16362 + pm_in_action = false;
16366 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
16367 index 6ccb08f57fcb..c8cbb5ed2fe3 100644
16368 --- a/kernel/power/suspend.c
16369 +++ b/kernel/power/suspend.c
16370 @@ -369,6 +369,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
16371 arch_suspend_disable_irqs();
16372 BUG_ON(!irqs_disabled());
16374 + system_state = SYSTEM_SUSPEND;
16376 error = syscore_suspend();
16378 *wakeup = pm_wakeup_pending();
16379 @@ -385,6 +387,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
16383 + system_state = SYSTEM_RUNNING;
16385 arch_suspend_enable_irqs();
16386 BUG_ON(irqs_disabled());
16388 @@ -527,6 +531,8 @@ static int enter_state(suspend_state_t state)
16392 +bool pm_in_action;
16395 * pm_suspend - Externally visible function for suspending the system.
16396 * @state: System sleep state to enter.
16397 @@ -541,6 +547,8 @@ int pm_suspend(suspend_state_t state)
16398 if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
16401 + pm_in_action = true;
16403 error = enter_state(state);
16405 suspend_stats.fail++;
16406 @@ -548,6 +556,7 @@ int pm_suspend(suspend_state_t state)
16408 suspend_stats.success++;
16410 + pm_in_action = false;
16413 EXPORT_SYMBOL(pm_suspend);
16414 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
16415 index 9c5b231684d0..cf15bdb6855b 100644
16416 --- a/kernel/printk/printk.c
16417 +++ b/kernel/printk/printk.c
16418 @@ -351,6 +351,65 @@ __packed __aligned(4)
16420 DEFINE_RAW_SPINLOCK(logbuf_lock);
16422 +#ifdef CONFIG_EARLY_PRINTK
16423 +struct console *early_console;
16425 +static void early_vprintk(const char *fmt, va_list ap)
16427 + if (early_console) {
16429 + int n = vscnprintf(buf, sizeof(buf), fmt, ap);
16431 + early_console->write(early_console, buf, n);
16435 +asmlinkage void early_printk(const char *fmt, ...)
16439 + va_start(ap, fmt);
16440 + early_vprintk(fmt, ap);
16445 + * This is independent of any log levels - a global
16446 + * kill switch that turns off all of printk.
16448 + * Used by the NMI watchdog if early-printk is enabled.
16450 +static bool __read_mostly printk_killswitch;
16452 +static int __init force_early_printk_setup(char *str)
16454 + printk_killswitch = true;
16457 +early_param("force_early_printk", force_early_printk_setup);
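Assuming an early serial console has been set up, a kernel command line along the following lines would flip the switch above at boot and route every printk() through the early console (the earlyprintk= syntax is the usual one, not something this patch introduces):

	earlyprintk=serial,ttyS0,115200 force_early_printk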
16459 +void printk_kill(void)
16461 + printk_killswitch = true;
16464 +#ifdef CONFIG_PRINTK
16465 +static int forced_early_printk(const char *fmt, va_list ap)
16467 + if (!printk_killswitch)
16469 + early_vprintk(fmt, ap);
16475 +static inline int forced_early_printk(const char *fmt, va_list ap)
16481 #ifdef CONFIG_PRINTK
16482 DECLARE_WAIT_QUEUE_HEAD(log_wait);
16483 /* the next printk record to read by syslog(READ) or /proc/kmsg */
16484 @@ -1337,6 +1396,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
16488 + int attempts = 0;
16490 text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
16492 @@ -1348,6 +1408,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
16495 enum log_flags prev;
16499 + if (attempts > 10) {
16506 * Find first record that fits, including all following records,
16507 @@ -1363,6 +1431,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
16509 idx = log_next(idx);
16512 + if (num_msg > 5) {
16514 + raw_spin_unlock_irq(&logbuf_lock);
16515 + raw_spin_lock_irq(&logbuf_lock);
16516 + if (clear_seq < log_first_seq)
16521 /* move first record forward until length fits into the buffer */
16522 @@ -1376,6 +1452,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
16524 idx = log_next(idx);
16527 + if (num_msg > 5) {
16529 + raw_spin_unlock_irq(&logbuf_lock);
16530 + raw_spin_lock_irq(&logbuf_lock);
16531 + if (clear_seq < log_first_seq)
16536 /* last message fitting into this dump */
16537 @@ -1416,6 +1500,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
16538 clear_seq = log_next_seq;
16539 clear_idx = log_next_idx;
16542 raw_spin_unlock_irq(&logbuf_lock);
16545 @@ -1569,6 +1654,12 @@ static void call_console_drivers(int level,
16546 if (!console_drivers)
16549 + if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
16550 + if (in_irq() || in_nmi())
16554 + migrate_disable();
16555 for_each_console(con) {
16556 if (exclusive_console && con != exclusive_console)
16558 @@ -1584,6 +1675,7 @@ static void call_console_drivers(int level,
16560 con->write(con, text, len);
16562 + migrate_enable();
16566 @@ -1781,6 +1873,13 @@ asmlinkage int vprintk_emit(int facility, int level,
16567 /* cpu currently holding logbuf_lock in this function */
16568 static unsigned int logbuf_cpu = UINT_MAX;
16571 + * Fall back to early_printk if a debugging subsystem has
16572 + * killed printk output
16574 + if (unlikely(forced_early_printk(fmt, args)))
16577 if (level == LOGLEVEL_SCHED) {
16578 level = LOGLEVEL_DEFAULT;
16580 @@ -1885,13 +1984,23 @@ asmlinkage int vprintk_emit(int facility, int level,
16582 /* If called from the scheduler, we can not call up(). */
16584 + int may_trylock = 1;
16587 +#ifdef CONFIG_PREEMPT_RT_FULL
16589 + * we can't take a sleeping lock with IRQs or preemption disabled
16590 + * so we can't print in these contexts
16592 + if (!(preempt_count() == 0 && !irqs_disabled()))
16596 * Try to acquire and then immediately release the console
16597 * semaphore. The release will print out buffers and wake up
16598 * /dev/kmsg and syslog() users.
16600 - if (console_trylock())
16601 + if (may_trylock && console_trylock())
16605 @@ -2014,26 +2123,6 @@ DEFINE_PER_CPU(printk_func_t, printk_func);
16607 #endif /* CONFIG_PRINTK */
16609 -#ifdef CONFIG_EARLY_PRINTK
16610 -struct console *early_console;
16612 -asmlinkage __visible void early_printk(const char *fmt, ...)
16618 - if (!early_console)
16621 - va_start(ap, fmt);
16622 - n = vscnprintf(buf, sizeof(buf), fmt, ap);
16625 - early_console->write(early_console, buf, n);
16629 static int __add_preferred_console(char *name, int idx, char *options,
16632 @@ -2303,11 +2392,16 @@ static void console_cont_flush(char *text, size_t size)
16635 len = cont_print_text(text, size);
16636 +#ifdef CONFIG_PREEMPT_RT_FULL
16637 + raw_spin_unlock_irqrestore(&logbuf_lock, flags);
16638 + call_console_drivers(cont.level, NULL, 0, text, len);
16640 raw_spin_unlock(&logbuf_lock);
16641 stop_critical_timings();
16642 call_console_drivers(cont.level, NULL, 0, text, len);
16643 start_critical_timings();
16644 local_irq_restore(flags);
16648 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
16649 @@ -2431,13 +2525,17 @@ void console_unlock(void)
16650 console_idx = log_next(console_idx);
16652 console_prev = msg->flags;
16653 +#ifdef CONFIG_PREEMPT_RT_FULL
16654 + raw_spin_unlock_irqrestore(&logbuf_lock, flags);
16655 + call_console_drivers(level, ext_text, ext_len, text, len);
16657 raw_spin_unlock(&logbuf_lock);
16659 stop_critical_timings(); /* don't trace print latency */
16660 call_console_drivers(level, ext_text, ext_len, text, len);
16661 start_critical_timings();
16662 local_irq_restore(flags);
16665 if (do_cond_resched)
16668 @@ -2489,6 +2587,11 @@ void console_unblank(void)
16672 + if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
16673 + if (in_irq() || in_nmi())
16678 * console_unblank can no longer be called in interrupt context unless
16679 * oops_in_progress is set to 1..
16680 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
16681 index a5caecef88be..61e7c5e2183c 100644
16682 --- a/kernel/ptrace.c
16683 +++ b/kernel/ptrace.c
16684 @@ -166,7 +166,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
16686 spin_lock_irq(&task->sighand->siglock);
16687 if (task_is_traced(task) && !__fatal_signal_pending(task)) {
16688 - task->state = __TASK_TRACED;
16689 + unsigned long flags;
16691 + raw_spin_lock_irqsave(&task->pi_lock, flags);
16692 + if (task->state & __TASK_TRACED)
16693 + task->state = __TASK_TRACED;
16695 + task->saved_state = __TASK_TRACED;
16696 + raw_spin_unlock_irqrestore(&task->pi_lock, flags);
16699 spin_unlock_irq(&task->sighand->siglock);
16700 diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
16701 index bf08fee53dc7..eeb8ce4ad7b6 100644
16702 --- a/kernel/rcu/rcutorture.c
16703 +++ b/kernel/rcu/rcutorture.c
16704 @@ -404,6 +404,7 @@ static struct rcu_torture_ops rcu_ops = {
16708 +#ifndef CONFIG_PREEMPT_RT_FULL
16710 * Definitions for rcu_bh torture testing.
16712 @@ -443,6 +444,12 @@ static struct rcu_torture_ops rcu_bh_ops = {
16717 +static struct rcu_torture_ops rcu_bh_ops = {
16718 + .ttype = INVALID_RCU_FLAVOR,
16723 * Don't even think about trying any of these in real life!!!
16724 * The names includes "busted", and they really means it!
16725 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
16726 index 10f62c6f48e7..dbee19478f09 100644
16727 --- a/kernel/rcu/tree.c
16728 +++ b/kernel/rcu/tree.c
16730 #include <linux/random.h>
16731 #include <linux/trace_events.h>
16732 #include <linux/suspend.h>
16733 +#include <linux/delay.h>
16734 +#include <linux/gfp.h>
16735 +#include <linux/oom.h>
16736 +#include <linux/smpboot.h>
16737 +#include "../time/tick-internal.h"
16741 @@ -260,6 +265,19 @@ void rcu_sched_qs(void)
16742 this_cpu_ptr(&rcu_sched_data), true);
16745 +#ifdef CONFIG_PREEMPT_RT_FULL
16746 +static void rcu_preempt_qs(void);
16748 +void rcu_bh_qs(void)
16750 + unsigned long flags;
16752 + /* Callers to this function, rcu_preempt_qs(), must disable irqs. */
16753 + local_irq_save(flags);
16754 + rcu_preempt_qs();
16755 + local_irq_restore(flags);
16758 void rcu_bh_qs(void)
16760 if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
16761 @@ -269,6 +287,7 @@ void rcu_bh_qs(void)
16762 __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
16767 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
16769 @@ -449,11 +468,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
16771 * Return the number of RCU BH batches started thus far for debug & stats.
16773 +#ifndef CONFIG_PREEMPT_RT_FULL
16774 unsigned long rcu_batches_started_bh(void)
16776 return rcu_bh_state.gpnum;
16778 EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
16782 * Return the number of RCU batches completed thus far for debug & stats.
16783 @@ -473,6 +494,7 @@ unsigned long rcu_batches_completed_sched(void)
16785 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
16787 +#ifndef CONFIG_PREEMPT_RT_FULL
16789 * Return the number of RCU BH batches completed thus far for debug & stats.
16791 @@ -481,6 +503,7 @@ unsigned long rcu_batches_completed_bh(void)
16792 return rcu_bh_state.completed;
16794 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
16798 * Return the number of RCU expedited batches completed thus far for
16799 @@ -504,6 +527,7 @@ unsigned long rcu_exp_batches_completed_sched(void)
16801 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
16803 +#ifndef CONFIG_PREEMPT_RT_FULL
16805 * Force a quiescent state.
16807 @@ -522,6 +546,13 @@ void rcu_bh_force_quiescent_state(void)
16809 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
16812 +void rcu_force_quiescent_state(void)
16815 +EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
16819 * Force a quiescent state for RCU-sched.
16821 @@ -572,9 +603,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
16825 +#ifndef CONFIG_PREEMPT_RT_FULL
16826 case RCU_BH_FLAVOR:
16827 rsp = &rcu_bh_state;
16830 case RCU_SCHED_FLAVOR:
16831 rsp = &rcu_sched_state;
16833 @@ -3016,18 +3049,17 @@ __rcu_process_callbacks(struct rcu_state *rsp)
16835 * Do RCU core processing for the current CPU.
16837 -static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
16838 +static __latent_entropy void rcu_process_callbacks(void)
16840 struct rcu_state *rsp;
16842 if (cpu_is_offline(smp_processor_id()))
16844 - trace_rcu_utilization(TPS("Start RCU core"));
16845 for_each_rcu_flavor(rsp)
16846 __rcu_process_callbacks(rsp);
16847 - trace_rcu_utilization(TPS("End RCU core"));
16850 +static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
16852 * Schedule RCU callback invocation. If the specified type of RCU
16853 * does not support RCU priority boosting, just do a direct call,
16854 @@ -3039,19 +3071,106 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
16856 if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
16858 - if (likely(!rsp->boost)) {
16859 - rcu_do_batch(rsp, rdp);
16862 - invoke_rcu_callbacks_kthread();
16863 + rcu_do_batch(rsp, rdp);
16866 +static void rcu_wake_cond(struct task_struct *t, int status)
16869 + * If the thread is yielding, only wake it when this
16870 + * is invoked from idle
16872 + if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
16873 + wake_up_process(t);
16877 + * Wake up this CPU's rcuc kthread to do RCU core processing.
16879 static void invoke_rcu_core(void)
16881 - if (cpu_online(smp_processor_id()))
16882 - raise_softirq(RCU_SOFTIRQ);
16883 + unsigned long flags;
16884 + struct task_struct *t;
16886 + if (!cpu_online(smp_processor_id()))
16888 + local_irq_save(flags);
16889 + __this_cpu_write(rcu_cpu_has_work, 1);
16890 + t = __this_cpu_read(rcu_cpu_kthread_task);
16891 + if (t != NULL && current != t)
16892 + rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
16893 + local_irq_restore(flags);
16896 +static void rcu_cpu_kthread_park(unsigned int cpu)
16898 + per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
16901 +static int rcu_cpu_kthread_should_run(unsigned int cpu)
16903 + return __this_cpu_read(rcu_cpu_has_work);
16907 + * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
16908 + * RCU softirq used in flavors and configurations of RCU that do not
16909 + * support RCU priority boosting.
16911 +static void rcu_cpu_kthread(unsigned int cpu)
16913 + unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
16914 + char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
16917 + for (spincnt = 0; spincnt < 10; spincnt++) {
16918 + trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
16919 + local_bh_disable();
16920 + *statusp = RCU_KTHREAD_RUNNING;
16921 + this_cpu_inc(rcu_cpu_kthread_loops);
16922 + local_irq_disable();
16925 + local_irq_enable();
16927 + rcu_process_callbacks();
16928 + local_bh_enable();
16929 + if (*workp == 0) {
16930 + trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
16931 + *statusp = RCU_KTHREAD_WAITING;
16935 + *statusp = RCU_KTHREAD_YIELDING;
16936 + trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
16937 + schedule_timeout_interruptible(2);
16938 + trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
16939 + *statusp = RCU_KTHREAD_WAITING;
16942 +static struct smp_hotplug_thread rcu_cpu_thread_spec = {
16943 + .store = &rcu_cpu_kthread_task,
16944 + .thread_should_run = rcu_cpu_kthread_should_run,
16945 + .thread_fn = rcu_cpu_kthread,
16946 + .thread_comm = "rcuc/%u",
16947 + .setup = rcu_cpu_kthread_setup,
16948 + .park = rcu_cpu_kthread_park,
16952 + * Spawn per-CPU RCU core processing kthreads.
16954 +static int __init rcu_spawn_core_kthreads(void)
16958 + for_each_possible_cpu(cpu)
16959 + per_cpu(rcu_cpu_has_work, cpu) = 0;
16960 + BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
16963 +early_initcall(rcu_spawn_core_kthreads);
16966 * Handle any core-RCU processing required by a call_rcu() invocation.
16968 @@ -3195,6 +3314,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
16970 EXPORT_SYMBOL_GPL(call_rcu_sched);
16972 +#ifndef CONFIG_PREEMPT_RT_FULL
16974 * Queue an RCU callback for invocation after a quicker grace period.
16976 @@ -3203,6 +3323,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
16977 __call_rcu(head, func, &rcu_bh_state, -1, 0);
16979 EXPORT_SYMBOL_GPL(call_rcu_bh);
16983 * Queue an RCU callback for lazy invocation after a grace period.
16984 @@ -3294,6 +3415,7 @@ void synchronize_sched(void)
16986 EXPORT_SYMBOL_GPL(synchronize_sched);
16988 +#ifndef CONFIG_PREEMPT_RT_FULL
16990 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
16992 @@ -3320,6 +3442,7 @@ void synchronize_rcu_bh(void)
16993 wait_rcu_gp(call_rcu_bh);
16995 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
16999 * get_state_synchronize_rcu - Snapshot current RCU state
17000 @@ -3698,6 +3821,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
17001 mutex_unlock(&rsp->barrier_mutex);
17004 +#ifndef CONFIG_PREEMPT_RT_FULL
17006 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
17008 @@ -3706,6 +3830,7 @@ void rcu_barrier_bh(void)
17009 _rcu_barrier(&rcu_bh_state);
17011 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
17015 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
17016 @@ -4227,12 +4352,13 @@ void __init rcu_init(void)
17018 rcu_bootup_announce();
17019 rcu_init_geometry();
17020 +#ifndef CONFIG_PREEMPT_RT_FULL
17021 rcu_init_one(&rcu_bh_state);
17023 rcu_init_one(&rcu_sched_state);
17025 rcu_dump_rcu_node_tree(&rcu_sched_state);
17026 __rcu_init_preempt();
17027 - open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
17030 * We don't need protection against CPU-hotplug here because
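
The hunks above compile out the BH flavour of RCU and move callback invocation from RCU_SOFTIRQ into the per-CPU "rcuc/%u" kthreads. Callers of the generic callback API are not affected by this; the following minimal sketch (not part of the patch; "struct foo" and its helpers are illustrative names) shows the unchanged caller side:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;
};

/* On RT this now runs from the rcuc/%u kthread instead of RCU_SOFTIRQ. */
static void foo_release(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	kfree(f);
}

static void foo_retire(struct foo *f)
{
	/* Existing readers under rcu_read_lock() may still see *f. */
	call_rcu(&f->rcu, foo_release);
}
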
17031 diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
17032 index e99a5234d9ed..958ac107062c 100644
17033 --- a/kernel/rcu/tree.h
17034 +++ b/kernel/rcu/tree.h
17035 @@ -588,18 +588,18 @@ extern struct list_head rcu_struct_flavors;
17037 extern struct rcu_state rcu_sched_state;
17039 +#ifndef CONFIG_PREEMPT_RT_FULL
17040 extern struct rcu_state rcu_bh_state;
17043 #ifdef CONFIG_PREEMPT_RCU
17044 extern struct rcu_state rcu_preempt_state;
17045 #endif /* #ifdef CONFIG_PREEMPT_RCU */
17047 -#ifdef CONFIG_RCU_BOOST
17048 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
17049 DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
17050 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
17051 DECLARE_PER_CPU(char, rcu_cpu_has_work);
17052 -#endif /* #ifdef CONFIG_RCU_BOOST */
17054 #ifndef RCU_TREE_NONCORE
17056 @@ -619,10 +619,9 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
17057 static void __init __rcu_init_preempt(void);
17058 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
17059 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
17060 -static void invoke_rcu_callbacks_kthread(void);
17061 static bool rcu_is_callbacks_kthread(void);
17062 +static void rcu_cpu_kthread_setup(unsigned int cpu);
17063 #ifdef CONFIG_RCU_BOOST
17064 -static void rcu_preempt_do_callbacks(void);
17065 static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
17066 struct rcu_node *rnp);
17067 #endif /* #ifdef CONFIG_RCU_BOOST */
17068 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
17069 index 56583e764ebf..7c656f8e192f 100644
17070 --- a/kernel/rcu/tree_plugin.h
17071 +++ b/kernel/rcu/tree_plugin.h
17072 @@ -24,25 +24,10 @@
17073 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
17076 -#include <linux/delay.h>
17077 -#include <linux/gfp.h>
17078 -#include <linux/oom.h>
17079 -#include <linux/smpboot.h>
17080 -#include "../time/tick-internal.h"
17082 #ifdef CONFIG_RCU_BOOST
17084 #include "../locking/rtmutex_common.h"
17087 - * Control variables for per-CPU and per-rcu_node kthreads. These
17088 - * handle all flavors of RCU.
17090 -static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
17091 -DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
17092 -DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
17093 -DEFINE_PER_CPU(char, rcu_cpu_has_work);
17095 #else /* #ifdef CONFIG_RCU_BOOST */
17098 @@ -55,6 +40,14 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
17100 #endif /* #else #ifdef CONFIG_RCU_BOOST */
17103 + * Control variables for per-CPU and per-rcu_node kthreads. These
17104 + * handle all flavors of RCU.
17106 +DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
17107 +DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
17108 +DEFINE_PER_CPU(char, rcu_cpu_has_work);
17110 #ifdef CONFIG_RCU_NOCB_CPU
17111 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
17112 static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
17113 @@ -426,7 +419,7 @@ void rcu_read_unlock_special(struct task_struct *t)
17116 /* Hardware IRQ handlers cannot block, complain if they get here. */
17117 - if (in_irq() || in_serving_softirq()) {
17118 + if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
17119 lockdep_rcu_suspicious(__FILE__, __LINE__,
17120 "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
17121 pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
17122 @@ -632,15 +625,6 @@ static void rcu_preempt_check_callbacks(void)
17123 t->rcu_read_unlock_special.b.need_qs = true;
17126 -#ifdef CONFIG_RCU_BOOST
17128 -static void rcu_preempt_do_callbacks(void)
17130 - rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
17133 -#endif /* #ifdef CONFIG_RCU_BOOST */
17136 * Queue a preemptible-RCU callback for invocation after a grace period.
17138 @@ -829,6 +813,19 @@ void exit_rcu(void)
17140 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
17143 + * If boosting, set rcuc kthreads to realtime priority.
17145 +static void rcu_cpu_kthread_setup(unsigned int cpu)
17147 +#ifdef CONFIG_RCU_BOOST
17148 + struct sched_param sp;
17150 + sp.sched_priority = kthread_prio;
17151 + sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
17152 +#endif /* #ifdef CONFIG_RCU_BOOST */
17155 #ifdef CONFIG_RCU_BOOST
17157 #include "../locking/rtmutex_common.h"
17158 @@ -860,16 +857,6 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
17160 #endif /* #else #ifdef CONFIG_RCU_TRACE */
17162 -static void rcu_wake_cond(struct task_struct *t, int status)
17165 - * If the thread is yielding, only wake it when this
17166 - * is invoked from idle
17168 - if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
17169 - wake_up_process(t);
17173 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
17174 * or ->boost_tasks, advancing the pointer to the next task in the
17175 @@ -1013,23 +1000,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
17179 - * Wake up the per-CPU kthread to invoke RCU callbacks.
17181 -static void invoke_rcu_callbacks_kthread(void)
17183 - unsigned long flags;
17185 - local_irq_save(flags);
17186 - __this_cpu_write(rcu_cpu_has_work, 1);
17187 - if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
17188 - current != __this_cpu_read(rcu_cpu_kthread_task)) {
17189 - rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
17190 - __this_cpu_read(rcu_cpu_kthread_status));
17192 - local_irq_restore(flags);
17196 * Is the current CPU running the RCU-callbacks kthread?
17197 * Caller must have preemption disabled.
17199 @@ -1083,67 +1053,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
17203 -static void rcu_kthread_do_work(void)
17205 - rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
17206 - rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
17207 - rcu_preempt_do_callbacks();
17210 -static void rcu_cpu_kthread_setup(unsigned int cpu)
17212 - struct sched_param sp;
17214 - sp.sched_priority = kthread_prio;
17215 - sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
17218 -static void rcu_cpu_kthread_park(unsigned int cpu)
17220 - per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
17223 -static int rcu_cpu_kthread_should_run(unsigned int cpu)
17225 - return __this_cpu_read(rcu_cpu_has_work);
17229 - * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
17230 - * RCU softirq used in flavors and configurations of RCU that do not
17231 - * support RCU priority boosting.
17233 -static void rcu_cpu_kthread(unsigned int cpu)
17235 - unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
17236 - char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
17239 - for (spincnt = 0; spincnt < 10; spincnt++) {
17240 - trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
17241 - local_bh_disable();
17242 - *statusp = RCU_KTHREAD_RUNNING;
17243 - this_cpu_inc(rcu_cpu_kthread_loops);
17244 - local_irq_disable();
17247 - local_irq_enable();
17249 - rcu_kthread_do_work();
17250 - local_bh_enable();
17251 - if (*workp == 0) {
17252 - trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
17253 - *statusp = RCU_KTHREAD_WAITING;
17257 - *statusp = RCU_KTHREAD_YIELDING;
17258 - trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
17259 - schedule_timeout_interruptible(2);
17260 - trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
17261 - *statusp = RCU_KTHREAD_WAITING;
17265 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
17266 * served by the rcu_node in question. The CPU hotplug lock is still
17267 @@ -1174,26 +1083,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
17268 free_cpumask_var(cm);
17271 -static struct smp_hotplug_thread rcu_cpu_thread_spec = {
17272 - .store = &rcu_cpu_kthread_task,
17273 - .thread_should_run = rcu_cpu_kthread_should_run,
17274 - .thread_fn = rcu_cpu_kthread,
17275 - .thread_comm = "rcuc/%u",
17276 - .setup = rcu_cpu_kthread_setup,
17277 - .park = rcu_cpu_kthread_park,
17281 * Spawn boost kthreads -- called as soon as the scheduler is running.
17283 static void __init rcu_spawn_boost_kthreads(void)
17285 struct rcu_node *rnp;
17288 - for_each_possible_cpu(cpu)
17289 - per_cpu(rcu_cpu_has_work, cpu) = 0;
17290 - BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
17291 rcu_for_each_leaf_node(rcu_state_p, rnp)
17292 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
17294 @@ -1216,11 +1111,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
17295 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
17298 -static void invoke_rcu_callbacks_kthread(void)
17303 static bool rcu_is_callbacks_kthread(void)
17306 @@ -1244,7 +1134,7 @@ static void rcu_prepare_kthreads(int cpu)
17308 #endif /* #else #ifdef CONFIG_RCU_BOOST */
17310 -#if !defined(CONFIG_RCU_FAST_NO_HZ)
17311 +#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
17314 * Check to see if any future RCU-related work will need to be done
17315 @@ -1261,7 +1151,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
17316 return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
17317 ? 0 : rcu_cpu_has_callbacks(NULL);
17319 +#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
17321 +#if !defined(CONFIG_RCU_FAST_NO_HZ)
17323 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
17325 @@ -1357,6 +1249,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
17329 +#ifndef CONFIG_PREEMPT_RT_FULL
17332 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
17333 * to invoke. If the CPU has callbacks, try to advance them. Tell the
17334 @@ -1402,6 +1296,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
17335 *nextevt = basemono + dj * TICK_NSEC;
17338 +#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
17341 * Prepare a CPU for idle from an RCU perspective. The first major task
17342 diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
17343 index 4f6db7e6a117..ee02e1e1b3e5 100644
17344 --- a/kernel/rcu/update.c
17345 +++ b/kernel/rcu/update.c
17347 #ifndef CONFIG_TINY_RCU
17348 module_param(rcu_expedited, int, 0);
17349 module_param(rcu_normal, int, 0);
17350 -static int rcu_normal_after_boot;
17351 +static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
17352 module_param(rcu_normal_after_boot, int, 0);
17353 #endif /* #ifndef CONFIG_TINY_RCU */
17355 @@ -132,8 +132,7 @@ bool rcu_gp_is_normal(void)
17357 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
17359 -static atomic_t rcu_expedited_nesting =
17360 - ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
17361 +static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
17364 * Should normal grace-period primitives be expedited? Intended for
17365 @@ -182,8 +181,7 @@ EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
17367 void rcu_end_inkernel_boot(void)
17369 - if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
17370 - rcu_unexpedite_gp();
17371 + rcu_unexpedite_gp();
17372 if (rcu_normal_after_boot)
17373 WRITE_ONCE(rcu_normal, 1);
17375 @@ -298,6 +296,7 @@ int rcu_read_lock_held(void)
17377 EXPORT_SYMBOL_GPL(rcu_read_lock_held);
17379 +#ifndef CONFIG_PREEMPT_RT_FULL
17381 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
17383 @@ -324,6 +323,7 @@ int rcu_read_lock_bh_held(void)
17384 return in_softirq() || irqs_disabled();
17386 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
17389 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
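
With rcu_expedited_nesting starting at 1 and rcu_normal_after_boot defaulting to 1 on PREEMPT_RT_FULL, grace periods are expedited only until rcu_end_inkernel_boot() and are forced back to normal afterwards; the knob above is exposed on the command line as rcupdate.rcu_normal_after_boot. A small illustrative sketch (not part of the patch) of the nesting interface the changed initializer feeds into, using the existing rcu_expedite_gp()/rcu_unexpedite_gp() helpers:

#include <linux/rcupdate.h>

static void example_fast_path(void)
{
	rcu_expedite_gp();	/* temporarily prefer expedited grace periods */
	synchronize_rcu();	/* may use the expedited implementation */
	rcu_unexpedite_gp();
}
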
17391 diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
17392 index 5e59b832ae2b..7337a7f60e3f 100644
17393 --- a/kernel/sched/Makefile
17394 +++ b/kernel/sched/Makefile
17395 @@ -17,7 +17,7 @@ endif
17397 obj-y += core.o loadavg.o clock.o cputime.o
17398 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
17399 -obj-y += wait.o swait.o completion.o idle.o
17400 +obj-y += wait.o swait.o swork.o completion.o idle.o
17401 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
17402 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
17403 obj-$(CONFIG_SCHEDSTATS) += stats.o
17404 diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
17405 index 8d0f35debf35..b62cf6400fe0 100644
17406 --- a/kernel/sched/completion.c
17407 +++ b/kernel/sched/completion.c
17408 @@ -30,10 +30,10 @@ void complete(struct completion *x)
17410 unsigned long flags;
17412 - spin_lock_irqsave(&x->wait.lock, flags);
17413 + raw_spin_lock_irqsave(&x->wait.lock, flags);
17415 - __wake_up_locked(&x->wait, TASK_NORMAL, 1);
17416 - spin_unlock_irqrestore(&x->wait.lock, flags);
17417 + swake_up_locked(&x->wait);
17418 + raw_spin_unlock_irqrestore(&x->wait.lock, flags);
17420 EXPORT_SYMBOL(complete);
17422 @@ -50,10 +50,10 @@ void complete_all(struct completion *x)
17424 unsigned long flags;
17426 - spin_lock_irqsave(&x->wait.lock, flags);
17427 + raw_spin_lock_irqsave(&x->wait.lock, flags);
17428 x->done += UINT_MAX/2;
17429 - __wake_up_locked(&x->wait, TASK_NORMAL, 0);
17430 - spin_unlock_irqrestore(&x->wait.lock, flags);
17431 + swake_up_all_locked(&x->wait);
17432 + raw_spin_unlock_irqrestore(&x->wait.lock, flags);
17434 EXPORT_SYMBOL(complete_all);
17436 @@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x,
17437 long (*action)(long), long timeout, int state)
17440 - DECLARE_WAITQUEUE(wait, current);
17441 + DECLARE_SWAITQUEUE(wait);
17443 - __add_wait_queue_tail_exclusive(&x->wait, &wait);
17444 + __prepare_to_swait(&x->wait, &wait);
17446 if (signal_pending_state(state, current)) {
17447 timeout = -ERESTARTSYS;
17450 __set_current_state(state);
17451 - spin_unlock_irq(&x->wait.lock);
17452 + raw_spin_unlock_irq(&x->wait.lock);
17453 timeout = action(timeout);
17454 - spin_lock_irq(&x->wait.lock);
17455 + raw_spin_lock_irq(&x->wait.lock);
17456 } while (!x->done && timeout);
17457 - __remove_wait_queue(&x->wait, &wait);
17458 + __finish_swait(&x->wait, &wait);
17462 @@ -89,9 +89,9 @@ __wait_for_common(struct completion *x,
17466 - spin_lock_irq(&x->wait.lock);
17467 + raw_spin_lock_irq(&x->wait.lock);
17468 timeout = do_wait_for_common(x, action, timeout, state);
17469 - spin_unlock_irq(&x->wait.lock);
17470 + raw_spin_unlock_irq(&x->wait.lock);
17474 @@ -277,12 +277,12 @@ bool try_wait_for_completion(struct completion *x)
17475 if (!READ_ONCE(x->done))
17478 - spin_lock_irqsave(&x->wait.lock, flags);
17479 + raw_spin_lock_irqsave(&x->wait.lock, flags);
17484 - spin_unlock_irqrestore(&x->wait.lock, flags);
17485 + raw_spin_unlock_irqrestore(&x->wait.lock, flags);
17488 EXPORT_SYMBOL(try_wait_for_completion);
17489 @@ -311,7 +311,7 @@ bool completion_done(struct completion *x)
17490 * after it's acquired the lock.
17493 - spin_unlock_wait(&x->wait.lock);
17494 + raw_spin_unlock_wait(&x->wait.lock);
17497 EXPORT_SYMBOL(completion_done);
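
The completion rework above replaces the wait_queue_head_t with a simple waitqueue protected by a raw spinlock, so complete() stays usable from hard interrupt context on PREEMPT_RT_FULL. The caller-facing API is unchanged; a minimal usage sketch (illustrative names, not part of the patch):

#include <linux/completion.h>
#include <linux/interrupt.h>

struct my_dev {
	struct completion done;
};

/* complete() now takes the raw lock and uses swake_up_locked(). */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	complete(&dev->done);
	return IRQ_HANDLED;
}

static int my_dev_run(struct my_dev *dev)
{
	init_completion(&dev->done);
	/* ... kick off the hardware operation here ... */
	return wait_for_completion_interruptible(&dev->done);
}
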
17498 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
17499 index 154fd689fe02..10e832da70b6 100644
17500 --- a/kernel/sched/core.c
17501 +++ b/kernel/sched/core.c
17502 @@ -129,7 +129,11 @@ const_debug unsigned int sysctl_sched_features =
17503 * Number of tasks to iterate in a single balance run.
17504 * Limited because this is done with IRQs disabled.
17506 +#ifndef CONFIG_PREEMPT_RT_FULL
17507 const_debug unsigned int sysctl_sched_nr_migrate = 32;
17509 +const_debug unsigned int sysctl_sched_nr_migrate = 8;
17513 * period over which we average the RT time consumption, measured
17514 @@ -345,6 +349,7 @@ static void init_rq_hrtick(struct rq *rq)
17516 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
17517 rq->hrtick_timer.function = hrtick;
17518 + rq->hrtick_timer.irqsafe = 1;
17520 #else /* CONFIG_SCHED_HRTICK */
17521 static inline void hrtick_clear(struct rq *rq)
17522 @@ -449,7 +454,7 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
17523 head->lastp = &node->next;
17526 -void wake_up_q(struct wake_q_head *head)
17527 +void __wake_up_q(struct wake_q_head *head, bool sleeper)
17529 struct wake_q_node *node = head->first;
17531 @@ -466,7 +471,10 @@ void wake_up_q(struct wake_q_head *head)
17532 * wake_up_process() implies a wmb() to pair with the queueing
17533 * in wake_q_add() so as not to miss wakeups.
17535 - wake_up_process(task);
17537 + wake_up_lock_sleeper(task);
17539 + wake_up_process(task);
17540 put_task_struct(task);
17543 @@ -502,6 +510,38 @@ void resched_curr(struct rq *rq)
17544 trace_sched_wake_idle_without_ipi(cpu);
17547 +#ifdef CONFIG_PREEMPT_LAZY
17548 +void resched_curr_lazy(struct rq *rq)
17550 + struct task_struct *curr = rq->curr;
17553 + if (!sched_feat(PREEMPT_LAZY)) {
17554 + resched_curr(rq);
17558 + lockdep_assert_held(&rq->lock);
17560 + if (test_tsk_need_resched(curr))
17563 + if (test_tsk_need_resched_lazy(curr))
17566 + set_tsk_need_resched_lazy(curr);
17568 + cpu = cpu_of(rq);
17569 + if (cpu == smp_processor_id())
17572 + /* NEED_RESCHED_LAZY must be visible before we test polling */
17574 + if (!tsk_is_polling(curr))
17575 + smp_send_reschedule(cpu);
17579 void resched_cpu(int cpu)
17581 struct rq *rq = cpu_rq(cpu);
17582 @@ -525,11 +565,14 @@ void resched_cpu(int cpu)
17584 int get_nohz_timer_target(void)
17586 - int i, cpu = smp_processor_id();
17588 struct sched_domain *sd;
17590 + preempt_disable_rt();
17591 + cpu = smp_processor_id();
17593 if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
17595 + goto preempt_en_rt;
17598 for_each_domain(cpu, sd) {
17599 @@ -548,6 +591,8 @@ int get_nohz_timer_target(void)
17600 cpu = housekeeping_any_cpu();
17604 + preempt_enable_rt();
17608 @@ -1100,6 +1145,11 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
17610 lockdep_assert_held(&p->pi_lock);
17612 + if (__migrate_disabled(p)) {
17613 + cpumask_copy(&p->cpus_allowed, new_mask);
17617 queued = task_on_rq_queued(p);
17618 running = task_current(rq, p);
17620 @@ -1122,6 +1172,84 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
17621 set_curr_task(rq, p);
17624 +static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
17625 +static DEFINE_MUTEX(sched_down_mutex);
17626 +static cpumask_t sched_down_cpumask;
17628 +void tell_sched_cpu_down_begin(int cpu)
17630 + mutex_lock(&sched_down_mutex);
17631 + cpumask_set_cpu(cpu, &sched_down_cpumask);
17632 + mutex_unlock(&sched_down_mutex);
17635 +void tell_sched_cpu_down_done(int cpu)
17637 + mutex_lock(&sched_down_mutex);
17638 + cpumask_clear_cpu(cpu, &sched_down_cpumask);
17639 + mutex_unlock(&sched_down_mutex);
17643 + * migrate_me - try to move the current task off this cpu
17645 + * Used by the pin_current_cpu() code to try to get tasks
17646 + * to move off the current CPU as it is going down.
17647 + * It will only move the task if the task isn't pinned to
17648 + * the CPU (with migrate_disable, affinity or NO_SETAFFINITY)
17649 + * and the task is in a RUNNING state. Otherwise the
17650 + * movement of the task will wake it up (change its state
17651 + * to running) when the task did not expect it.
17653 + * Returns 1 if it succeeded in moving the current task
17656 +int migrate_me(void)
17658 + struct task_struct *p = current;
17659 + struct migration_arg arg;
17660 + struct cpumask *cpumask;
17661 + struct cpumask *mask;
17662 + unsigned int dest_cpu;
17663 + struct rq_flags rf;
17667 + * We cannot migrate tasks bound to a CPU or tasks that are not
17668 + * running. The movement of the task will wake it up.
17670 + if (p->flags & PF_NO_SETAFFINITY || p->state)
17673 + mutex_lock(&sched_down_mutex);
17674 + rq = task_rq_lock(p, &rf);
17676 + cpumask = this_cpu_ptr(&sched_cpumasks);
17677 + mask = &p->cpus_allowed;
17679 + cpumask_andnot(cpumask, mask, &sched_down_cpumask);
17681 + if (!cpumask_weight(cpumask)) {
17682 + /* It's only on this CPU? */
17683 + task_rq_unlock(rq, p, &rf);
17684 + mutex_unlock(&sched_down_mutex);
17688 + dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
17691 + arg.dest_cpu = dest_cpu;
17693 + task_rq_unlock(rq, p, &rf);
17695 + stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
17696 + tlb_migrate_finish(p->mm);
17697 + mutex_unlock(&sched_down_mutex);
17703 * Change a given task's CPU affinity. Migrate the thread to a
17704 * proper CPU and schedule it away if the CPU it's executing on
17705 @@ -1179,7 +1307,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
17708 /* Can the task run on the task's current CPU? If so, we're done */
17709 - if (cpumask_test_cpu(task_cpu(p), new_mask))
17710 + if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
17713 dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
17714 @@ -1366,6 +1494,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
17718 +static bool check_task_state(struct task_struct *p, long match_state)
17720 + bool match = false;
17722 + raw_spin_lock_irq(&p->pi_lock);
17723 + if (p->state == match_state || p->saved_state == match_state)
17725 + raw_spin_unlock_irq(&p->pi_lock);
17731 * wait_task_inactive - wait for a thread to unschedule.
17733 @@ -1410,7 +1550,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
17734 * is actually now running somewhere else!
17736 while (task_running(rq, p)) {
17737 - if (match_state && unlikely(p->state != match_state))
17738 + if (match_state && !check_task_state(p, match_state))
17742 @@ -1425,7 +1565,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
17743 running = task_running(rq, p);
17744 queued = task_on_rq_queued(p);
17746 - if (!match_state || p->state == match_state)
17747 + if (!match_state || p->state == match_state ||
17748 + p->saved_state == match_state)
17749 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
17750 task_rq_unlock(rq, p, &rf);
17752 @@ -1680,10 +1821,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
17754 activate_task(rq, p, en_flags);
17755 p->on_rq = TASK_ON_RQ_QUEUED;
17757 - /* if a worker is waking up, notify workqueue */
17758 - if (p->flags & PF_WQ_WORKER)
17759 - wq_worker_waking_up(p, cpu_of(rq));
17763 @@ -2018,8 +2155,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
17765 smp_mb__before_spinlock();
17766 raw_spin_lock_irqsave(&p->pi_lock, flags);
17767 - if (!(p->state & state))
17768 + if (!(p->state & state)) {
17770 + * The task might be running due to a spinlock sleeper
17771 + * wakeup. Check the saved state and set it to running
17772 + * if the wakeup condition is true.
17774 + if (!(wake_flags & WF_LOCK_SLEEPER)) {
17775 + if (p->saved_state & state) {
17776 + p->saved_state = TASK_RUNNING;
17784 + * If this is a regular wakeup, then we can unconditionally
17785 + * clear the saved state of a "lock sleeper".
17787 + if (!(wake_flags & WF_LOCK_SLEEPER))
17788 + p->saved_state = TASK_RUNNING;
17790 trace_sched_waking(p);
17792 @@ -2102,53 +2258,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
17796 - * try_to_wake_up_local - try to wake up a local task with rq lock held
17797 - * @p: the thread to be awakened
17798 - * @cookie: context's cookie for pinning
17800 - * Put @p on the run-queue if it's not already there. The caller must
17801 - * ensure that this_rq() is locked, @p is bound to this_rq() and not
17802 - * the current task.
17804 -static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
17806 - struct rq *rq = task_rq(p);
17808 - if (WARN_ON_ONCE(rq != this_rq()) ||
17809 - WARN_ON_ONCE(p == current))
17812 - lockdep_assert_held(&rq->lock);
17814 - if (!raw_spin_trylock(&p->pi_lock)) {
17816 - * This is OK, because current is on_cpu, which avoids it being
17817 - * picked for load-balance and preemption/IRQs are still
17818 - * disabled avoiding further scheduler activity on it and we've
17819 - * not yet picked a replacement task.
17821 - lockdep_unpin_lock(&rq->lock, cookie);
17822 - raw_spin_unlock(&rq->lock);
17823 - raw_spin_lock(&p->pi_lock);
17824 - raw_spin_lock(&rq->lock);
17825 - lockdep_repin_lock(&rq->lock, cookie);
17828 - if (!(p->state & TASK_NORMAL))
17831 - trace_sched_waking(p);
17833 - if (!task_on_rq_queued(p))
17834 - ttwu_activate(rq, p, ENQUEUE_WAKEUP);
17836 - ttwu_do_wakeup(rq, p, 0, cookie);
17837 - ttwu_stat(p, smp_processor_id(), 0);
17839 - raw_spin_unlock(&p->pi_lock);
17843 * wake_up_process - Wake up a specific process
17844 * @p: The process to be woken up.
17846 @@ -2166,6 +2275,18 @@ int wake_up_process(struct task_struct *p)
17848 EXPORT_SYMBOL(wake_up_process);
17851 + * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
17852 + * @p: The process to be woken up.
17854 + * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
17855 + * the nature of the wakeup.
17857 +int wake_up_lock_sleeper(struct task_struct *p)
17859 + return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER);
17862 int wake_up_state(struct task_struct *p, unsigned int state)
17864 return try_to_wake_up(p, state, 0);
17865 @@ -2442,6 +2563,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
17868 init_task_preempt_count(p);
17869 +#ifdef CONFIG_HAVE_PREEMPT_LAZY
17870 + task_thread_info(p)->preempt_lazy_count = 0;
17873 plist_node_init(&p->pushable_tasks, MAX_PRIO);
17874 RB_CLEAR_NODE(&p->pushable_dl_tasks);
17875 @@ -2770,21 +2894,16 @@ static struct rq *finish_task_switch(struct task_struct *prev)
17876 finish_arch_post_lock_switch();
17878 fire_sched_in_preempt_notifiers(current);
17880 + * We use mmdrop_delayed() here so we don't have to do the
17881 + * full __mmdrop() when we are the last user.
17885 + mmdrop_delayed(mm);
17886 if (unlikely(prev_state == TASK_DEAD)) {
17887 if (prev->sched_class->task_dead)
17888 prev->sched_class->task_dead(prev);
17891 - * Remove function-return probe instances associated with this
17892 - * task and put them back on the free list.
17894 - kprobe_flush_task(prev);
17896 - /* Task is done with its stack. */
17897 - put_task_stack(prev);
17899 put_task_struct(prev);
17902 @@ -3252,6 +3371,77 @@ static inline void schedule_debug(struct task_struct *prev)
17903 schedstat_inc(this_rq()->sched_count);
17906 +#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
17908 +void migrate_disable(void)
17910 + struct task_struct *p = current;
17912 + if (in_atomic() || irqs_disabled()) {
17913 +#ifdef CONFIG_SCHED_DEBUG
17914 + p->migrate_disable_atomic++;
17919 +#ifdef CONFIG_SCHED_DEBUG
17920 + if (unlikely(p->migrate_disable_atomic)) {
17926 + if (p->migrate_disable) {
17927 + p->migrate_disable++;
17931 + preempt_disable();
17932 + preempt_lazy_disable();
17933 + pin_current_cpu();
17934 + p->migrate_disable = 1;
17935 + preempt_enable();
17937 +EXPORT_SYMBOL(migrate_disable);
17939 +void migrate_enable(void)
17941 + struct task_struct *p = current;
17943 + if (in_atomic() || irqs_disabled()) {
17944 +#ifdef CONFIG_SCHED_DEBUG
17945 + p->migrate_disable_atomic--;
17950 +#ifdef CONFIG_SCHED_DEBUG
17951 + if (unlikely(p->migrate_disable_atomic)) {
17956 + WARN_ON_ONCE(p->migrate_disable <= 0);
17958 + if (p->migrate_disable > 1) {
17959 + p->migrate_disable--;
17963 + preempt_disable();
17965 + * Clearing migrate_disable causes tsk_cpus_allowed to
17966 + * show the task's original cpu affinity.
17968 + p->migrate_disable = 0;
17970 + unpin_current_cpu();
17971 + preempt_enable();
17972 + preempt_lazy_enable();
17974 +EXPORT_SYMBOL(migrate_enable);
17978 * Pick up the highest-prio task:
17980 @@ -3368,19 +3558,6 @@ static void __sched notrace __schedule(bool preempt)
17982 deactivate_task(rq, prev, DEQUEUE_SLEEP);
17986 - * If a worker went to sleep, notify and ask workqueue
17987 - * whether it wants to wake up a task to maintain
17990 - if (prev->flags & PF_WQ_WORKER) {
17991 - struct task_struct *to_wakeup;
17993 - to_wakeup = wq_worker_sleeping(prev);
17995 - try_to_wake_up_local(to_wakeup, cookie);
17998 switch_count = &prev->nvcsw;
18000 @@ -3390,6 +3567,7 @@ static void __sched notrace __schedule(bool preempt)
18002 next = pick_next_task(rq, prev, cookie);
18003 clear_tsk_need_resched(prev);
18004 + clear_tsk_need_resched_lazy(prev);
18005 clear_preempt_need_resched();
18006 rq->clock_skip_update = 0;
18008 @@ -3437,9 +3615,20 @@ void __noreturn do_task_dead(void)
18010 static inline void sched_submit_work(struct task_struct *tsk)
18012 - if (!tsk->state || tsk_is_pi_blocked(tsk))
18016 + * If a worker went to sleep, notify and ask workqueue whether
18017 + * it wants to wake up a task to maintain concurrency.
18019 + if (tsk->flags & PF_WQ_WORKER)
18020 + wq_worker_sleeping(tsk);
18023 + if (tsk_is_pi_blocked(tsk))
18027 * If we are going to sleep and we have plugged IO queued,
18028 * make sure to submit it to avoid deadlocks.
18030 @@ -3447,6 +3636,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
18031 blk_schedule_flush_plug(tsk);
18034 +static void sched_update_worker(struct task_struct *tsk)
18036 + if (tsk->flags & PF_WQ_WORKER)
18037 + wq_worker_running(tsk);
18040 asmlinkage __visible void __sched schedule(void)
18042 struct task_struct *tsk = current;
18043 @@ -3457,6 +3652,7 @@ asmlinkage __visible void __sched schedule(void)
18045 sched_preempt_enable_no_resched();
18046 } while (need_resched());
18047 + sched_update_worker(tsk);
18049 EXPORT_SYMBOL(schedule);
18051 @@ -3520,6 +3716,30 @@ static void __sched notrace preempt_schedule_common(void)
18052 } while (need_resched());
18055 +#ifdef CONFIG_PREEMPT_LAZY
18057 + * If TIF_NEED_RESCHED is set, we allow being scheduled away, since it is
18058 + * set by an RT task. Otherwise we try to avoid being scheduled out as long
18059 + * as the preempt_lazy_count counter is > 0.
18061 +static __always_inline int preemptible_lazy(void)
18063 + if (test_thread_flag(TIF_NEED_RESCHED))
18065 + if (current_thread_info()->preempt_lazy_count)
18072 +static inline int preemptible_lazy(void)
18079 #ifdef CONFIG_PREEMPT
18081 * this is the entry point to schedule() from in-kernel preemption
18082 @@ -3534,7 +3754,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
18084 if (likely(!preemptible()))
18087 + if (!preemptible_lazy())
18089 preempt_schedule_common();
18091 NOKPROBE_SYMBOL(preempt_schedule);
18092 @@ -3561,6 +3782,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
18093 if (likely(!preemptible()))
18096 + if (!preemptible_lazy())
18101 * Because the function tracer can trace preempt_count_sub()
18102 @@ -3583,7 +3807,16 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
18103 * an infinite recursion.
18105 prev_ctx = exception_enter();
18107 + * The add/subtract must not be traced by the function
18108 + * tracer. But we still want to account for the
18109 + * preempt off latency tracer. Since the _notrace versions
18110 + * of add/subtract skip the accounting for latency tracer
18111 + * we must force it manually.
18113 + start_critical_timings();
18115 + stop_critical_timings();
18116 exception_exit(prev_ctx);
18118 preempt_latency_stop(1);
18119 @@ -3629,10 +3862,25 @@ EXPORT_SYMBOL(default_wake_function);
18121 #ifdef CONFIG_RT_MUTEXES
18123 +static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
18126 + prio = min(prio, pi_task->prio);
18131 +static inline int rt_effective_prio(struct task_struct *p, int prio)
18133 + struct task_struct *pi_task = rt_mutex_get_top_task(p);
18135 + return __rt_effective_prio(pi_task, prio);
18139 * rt_mutex_setprio - set the current priority of a task
18141 - * @prio: prio value (kernel-internal form)
18142 + * @p: task to boost
18143 + * @pi_task: donor task
18145 * This function changes the 'effective' priority of a task. It does
18146 * not touch ->normal_prio like __setscheduler().
18147 @@ -3640,16 +3888,40 @@ EXPORT_SYMBOL(default_wake_function);
18148 * Used by the rt_mutex code to implement priority inheritance
18149 * logic. Call site only calls if the priority of the task changed.
18151 -void rt_mutex_setprio(struct task_struct *p, int prio)
18152 +void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
18154 - int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
18155 + int prio, oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
18156 const struct sched_class *prev_class;
18157 struct rq_flags rf;
18160 - BUG_ON(prio > MAX_PRIO);
18161 + /* XXX used to be waiter->prio, not waiter->task->prio */
18162 + prio = __rt_effective_prio(pi_task, p->normal_prio);
18165 + * If nothing changed; bail early.
18167 + if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
18170 rq = __task_rq_lock(p, &rf);
18172 + * Set under pi_lock && rq->lock, such that the value can be used under
18175 + * Note that there is loads of trickiness in making this pointer cache work
18176 + * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
18177 + * ensure a task is de-boosted (pi_task is set to NULL) before the
18178 + * task is allowed to run again (and can exit). This ensures the pointer
18179 + * points to a blocked task -- which guarantees the task is present.
18181 + p->pi_top_task = pi_task;
18184 + * For FIFO/RR we only need to set prio, if that matches we're done.
18186 + if (prio == p->prio && !dl_prio(prio))
18190 * Idle task boosting is a nono in general. There is one
18191 @@ -3669,7 +3941,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
18195 - trace_sched_pi_setprio(p, prio);
18196 + trace_sched_pi_setprio(p, pi_task);
18199 if (oldprio == prio)
18200 @@ -3693,7 +3965,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
18203 if (dl_prio(prio)) {
18204 - struct task_struct *pi_task = rt_mutex_get_top_task(p);
18205 if (!dl_prio(p->normal_prio) ||
18206 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
18207 p->dl.dl_boosted = 1;
18208 @@ -3730,6 +4001,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
18209 balance_callback(rq);
18213 +static inline int rt_effective_prio(struct task_struct *p, int prio)
18219 void set_user_nice(struct task_struct *p, long nice)
18220 @@ -3974,10 +4250,9 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
18221 * Keep a potential priority boosting if called from
18222 * sched_setscheduler().
18224 + p->prio = normal_prio(p);
18226 - p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
18228 - p->prio = normal_prio(p);
18229 + p->prio = rt_effective_prio(p, p->prio);
18231 if (dl_prio(p->prio))
18232 p->sched_class = &dl_sched_class;
18233 @@ -4264,7 +4539,7 @@ static int __sched_setscheduler(struct task_struct *p,
18234 * the runqueue. This will be done when the task deboost
18237 - new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
18238 + new_effective_prio = rt_effective_prio(p, newprio);
18239 if (new_effective_prio == oldprio)
18240 queue_flags &= ~DEQUEUE_MOVE;
18242 @@ -4939,6 +5214,7 @@ int __cond_resched_lock(spinlock_t *lock)
18244 EXPORT_SYMBOL(__cond_resched_lock);
18246 +#ifndef CONFIG_PREEMPT_RT_FULL
18247 int __sched __cond_resched_softirq(void)
18249 BUG_ON(!in_softirq());
18250 @@ -4952,6 +5228,7 @@ int __sched __cond_resched_softirq(void)
18253 EXPORT_SYMBOL(__cond_resched_softirq);
18257 * yield - yield the current processor to other threads.
18258 @@ -5315,7 +5592,9 @@ void init_idle(struct task_struct *idle, int cpu)
18260 /* Set the preempt count _outside_ the spinlocks! */
18261 init_idle_preempt_count(idle, cpu);
18263 +#ifdef CONFIG_HAVE_PREEMPT_LAZY
18264 + task_thread_info(idle)->preempt_lazy_count = 0;
18267 * The idle tasks have their own, simple scheduling class:
18269 @@ -5458,6 +5737,8 @@ void sched_setnuma(struct task_struct *p, int nid)
18270 #endif /* CONFIG_NUMA_BALANCING */
18272 #ifdef CONFIG_HOTPLUG_CPU
18273 +static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
18276 * Ensures that the idle task is using init_mm right before its cpu goes
18278 @@ -5472,7 +5753,12 @@ void idle_task_exit(void)
18279 switch_mm_irqs_off(mm, &init_mm, current);
18280 finish_arch_post_lock_switch();
18284 + * Defer the cleanup to a live CPU. On RT we can neither
18285 + * call mmdrop() nor mmdrop_delayed() from here.
18287 + per_cpu(idle_last_mm, smp_processor_id()) = mm;
18292 @@ -7418,6 +7704,10 @@ int sched_cpu_dying(unsigned int cpu)
18293 update_max_interval();
18294 nohz_balance_exit_idle(cpu);
18296 + if (per_cpu(idle_last_mm, cpu)) {
18297 + mmdrop_delayed(per_cpu(idle_last_mm, cpu));
18298 + per_cpu(idle_last_mm, cpu) = NULL;
18303 @@ -7698,7 +7988,7 @@ void __init sched_init(void)
18304 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
18305 static inline int preempt_count_equals(int preempt_offset)
18307 - int nested = preempt_count() + rcu_preempt_depth();
18308 + int nested = preempt_count() + sched_rcu_preempt_depth();
18310 return (nested == preempt_offset);
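
The core.c changes add migrate_disable()/migrate_enable() for PREEMPT_RT_FULL on SMP: they pin the current task to its CPU via pin_current_cpu() while leaving it preemptible, which is what the sleeping spinlocks in the rest of the series rely on. A minimal usage sketch (illustrative names; the declarations are assumed to come from include/linux/preempt.h elsewhere in this series):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_update(void)
{
	migrate_disable();		/* stay on this CPU, remain preemptible */
	this_cpu_inc(example_counter);
	/* ... further per-CPU work; may take sleeping locks on RT ... */
	migrate_enable();
}
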
18312 diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
18313 index c95c5122b105..e00accf92a4b 100644
18314 --- a/kernel/sched/deadline.c
18315 +++ b/kernel/sched/deadline.c
18316 @@ -687,6 +687,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
18318 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
18319 timer->function = dl_task_timer;
18320 + timer->irqsafe = 1;
18324 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
18325 index fa178b62ea79..935224123441 100644
18326 --- a/kernel/sched/debug.c
18327 +++ b/kernel/sched/debug.c
18328 @@ -558,6 +558,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
18333 + P(rt_nr_migratory);
18338 @@ -953,6 +956,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
18342 +#ifdef CONFIG_PREEMPT_RT_FULL
18343 + P(migrate_disable);
18345 + P(nr_cpus_allowed);
18346 #undef PN_SCHEDSTAT
18349 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
18350 index c242944f5cbd..4aeb2e2e41bc 100644
18351 --- a/kernel/sched/fair.c
18352 +++ b/kernel/sched/fair.c
18353 @@ -3518,7 +3518,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
18354 ideal_runtime = sched_slice(cfs_rq, curr);
18355 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
18356 if (delta_exec > ideal_runtime) {
18357 - resched_curr(rq_of(cfs_rq));
18358 + resched_curr_lazy(rq_of(cfs_rq));
18360 * The current task ran long enough, ensure it doesn't get
18361 * re-elected due to buddy favours.
18362 @@ -3542,7 +3542,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
18365 if (delta > ideal_runtime)
18366 - resched_curr(rq_of(cfs_rq));
18367 + resched_curr_lazy(rq_of(cfs_rq));
18371 @@ -3684,7 +3684,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
18372 * validating it and just reschedule.
18375 - resched_curr(rq_of(cfs_rq));
18376 + resched_curr_lazy(rq_of(cfs_rq));
18380 @@ -3866,7 +3866,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
18381 * hierarchy can be throttled
18383 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
18384 - resched_curr(rq_of(cfs_rq));
18385 + resched_curr_lazy(rq_of(cfs_rq));
18388 static __always_inline
18389 @@ -4494,7 +4494,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
18393 - resched_curr(rq);
18394 + resched_curr_lazy(rq);
18397 hrtick_start(rq, delta);
18398 @@ -5905,7 +5905,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
18402 - resched_curr(rq);
18403 + resched_curr_lazy(rq);
18405 * Only set the backward buddy when the current task is still
18406 * on the rq. This can happen when a wakeup gets interleaved
18407 @@ -8631,7 +8631,7 @@ static void task_fork_fair(struct task_struct *p)
18408 * 'current' within the tree based on its new key value.
18410 swap(curr->vruntime, se->vruntime);
18411 - resched_curr(rq);
18412 + resched_curr_lazy(rq);
18415 se->vruntime -= cfs_rq->min_vruntime;
18416 @@ -8655,7 +8655,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
18418 if (rq->curr == p) {
18419 if (p->prio > oldprio)
18420 - resched_curr(rq);
18421 + resched_curr_lazy(rq);
18423 check_preempt_curr(rq, p, 0);
18425 diff --git a/kernel/sched/features.h b/kernel/sched/features.h
18426 index 69631fa46c2f..6d28fcd08872 100644
18427 --- a/kernel/sched/features.h
18428 +++ b/kernel/sched/features.h
18429 @@ -45,11 +45,19 @@ SCHED_FEAT(LB_BIAS, true)
18431 SCHED_FEAT(NONTASK_CAPACITY, true)
18433 +#ifdef CONFIG_PREEMPT_RT_FULL
18434 +SCHED_FEAT(TTWU_QUEUE, false)
18435 +# ifdef CONFIG_PREEMPT_LAZY
18436 +SCHED_FEAT(PREEMPT_LAZY, true)
18441 * Queue remote wakeups on the target CPU and process them
18442 * using the scheduler IPI. Reduces rq->lock contention/bounces.
18444 SCHED_FEAT(TTWU_QUEUE, true)
18447 #ifdef HAVE_RT_PUSH_IPI
18449 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
18450 index f139f22ce30d..b0691f4e7d49 100644
18451 --- a/kernel/sched/rt.c
18452 +++ b/kernel/sched/rt.c
18453 @@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
18455 hrtimer_init(&rt_b->rt_period_timer,
18456 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
18457 + rt_b->rt_period_timer.irqsafe = 1;
18458 rt_b->rt_period_timer.function = sched_rt_period_timer;
18461 @@ -101,6 +102,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
18462 rt_rq->push_cpu = nr_cpu_ids;
18463 raw_spin_lock_init(&rt_rq->push_lock);
18464 init_irq_work(&rt_rq->push_work, push_irq_work_func);
18465 + rt_rq->push_work.flags |= IRQ_WORK_HARD_IRQ;
18467 #endif /* CONFIG_SMP */
18468 /* We start in dequeued state, because no RT tasks are queued */
18469 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
18470 index 055f935d4421..19324ac27026 100644
18471 --- a/kernel/sched/sched.h
18472 +++ b/kernel/sched/sched.h
18473 @@ -1163,6 +1163,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
18474 #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
18475 #define WF_FORK 0x02 /* child wakeup after fork */
18476 #define WF_MIGRATED 0x4 /* internal use, task got migrated */
18477 +#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
18480 * To aid in avoiding the subversion of "niceness" due to uneven distribution
18481 @@ -1346,6 +1347,15 @@ extern void init_sched_fair_class(void);
18482 extern void resched_curr(struct rq *rq);
18483 extern void resched_cpu(int cpu);
18485 +#ifdef CONFIG_PREEMPT_LAZY
18486 +extern void resched_curr_lazy(struct rq *rq);
18488 +static inline void resched_curr_lazy(struct rq *rq)
18490 + resched_curr(rq);
18494 extern struct rt_bandwidth def_rt_bandwidth;
18495 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
18497 diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
18498 index 82f0dff90030..ef027ff3250a 100644
18499 --- a/kernel/sched/swait.c
18500 +++ b/kernel/sched/swait.c
18502 #include <linux/sched.h>
18503 #include <linux/swait.h>
18504 +#include <linux/suspend.h>
18506 void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
18507 struct lock_class_key *key)
18508 @@ -29,6 +30,25 @@ void swake_up_locked(struct swait_queue_head *q)
18510 EXPORT_SYMBOL(swake_up_locked);
18512 +void swake_up_all_locked(struct swait_queue_head *q)
18514 + struct swait_queue *curr;
18517 + while (!list_empty(&q->task_list)) {
18519 + curr = list_first_entry(&q->task_list, typeof(*curr),
18521 + wake_up_process(curr->task);
18522 + list_del_init(&curr->task_list);
18525 + if (pm_in_action)
18527 + WARN(wakes > 2, "complete_all() with %d waiters\n", wakes);
18529 +EXPORT_SYMBOL(swake_up_all_locked);
18531 void swake_up(struct swait_queue_head *q)
18533 unsigned long flags;
18534 @@ -54,6 +74,7 @@ void swake_up_all(struct swait_queue_head *q)
18535 if (!swait_active(q))
18538 + WARN_ON(irqs_disabled());
18539 raw_spin_lock_irq(&q->lock);
18540 list_splice_init(&q->task_list, &tmp);
18541 while (!list_empty(&tmp)) {
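
swake_up_all_locked() above is the raw-lock variant that the reworked complete_all() uses. The general simple-waitqueue API that the next hunk (swork.c) builds on is small; a minimal sketch with illustrative names, using the existing swait_event_interruptible()/swake_up() helpers:

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(example_wq);
static bool example_cond;

static void example_producer(void)
{
	example_cond = true;
	swake_up(&example_wq);		/* wake one waiter */
}

static int example_consumer(void)
{
	/* Returns 0 once example_cond is true, -ERESTARTSYS on a signal. */
	return swait_event_interruptible(example_wq, example_cond);
}
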
18542 diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c
18543 new file mode 100644
18544 index 000000000000..1950f40ca725
18546 +++ b/kernel/sched/swork.c
18549 + * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
18551 + * Provides a PREEMPT_RT_FULL-safe framework for enqueuing callbacks from
18552 + * irq context. The callbacks are executed in kthread context.
18555 +#include <linux/swait.h>
18556 +#include <linux/swork.h>
18557 +#include <linux/kthread.h>
18558 +#include <linux/slab.h>
18559 +#include <linux/spinlock.h>
18560 +#include <linux/export.h>
18562 +#define SWORK_EVENT_PENDING (1 << 0)
18564 +static DEFINE_MUTEX(worker_mutex);
18565 +static struct sworker *glob_worker;
18568 + struct list_head events;
18569 + struct swait_queue_head wq;
18571 + raw_spinlock_t lock;
18573 + struct task_struct *task;
18577 +static bool swork_readable(struct sworker *worker)
18581 + if (kthread_should_stop())
18584 + raw_spin_lock_irq(&worker->lock);
18585 + r = !list_empty(&worker->events);
18586 + raw_spin_unlock_irq(&worker->lock);
18591 +static int swork_kthread(void *arg)
18593 + struct sworker *worker = arg;
18596 + swait_event_interruptible(worker->wq,
18597 + swork_readable(worker));
18598 + if (kthread_should_stop())
18601 + raw_spin_lock_irq(&worker->lock);
18602 + while (!list_empty(&worker->events)) {
18603 + struct swork_event *sev;
18605 + sev = list_first_entry(&worker->events,
18606 + struct swork_event, item);
18607 + list_del(&sev->item);
18608 + raw_spin_unlock_irq(&worker->lock);
18610 + WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
18613 + raw_spin_lock_irq(&worker->lock);
18615 + raw_spin_unlock_irq(&worker->lock);
18620 +static struct sworker *swork_create(void)
18622 + struct sworker *worker;
18624 + worker = kzalloc(sizeof(*worker), GFP_KERNEL);
18626 + return ERR_PTR(-ENOMEM);
18628 + INIT_LIST_HEAD(&worker->events);
18629 + raw_spin_lock_init(&worker->lock);
18630 + init_swait_queue_head(&worker->wq);
18632 + worker->task = kthread_run(swork_kthread, worker, "kswork");
18633 + if (IS_ERR(worker->task)) {
18635 + return ERR_PTR(-ENOMEM);
18641 +static void swork_destroy(struct sworker *worker)
18643 + kthread_stop(worker->task);
18645 + WARN_ON(!list_empty(&worker->events));
18650 + * swork_queue - queue swork
18652 + * Returns %false if @sev was already on a queue, %true otherwise.
18654 + * The work is queued and processed on a random CPU
18656 +bool swork_queue(struct swork_event *sev)
18658 + unsigned long flags;
18660 + if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
18663 + raw_spin_lock_irqsave(&glob_worker->lock, flags);
18664 + list_add_tail(&sev->item, &glob_worker->events);
18665 + raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
18667 + swake_up(&glob_worker->wq);
18670 +EXPORT_SYMBOL_GPL(swork_queue);
18673 + * swork_get - get an instance of the sworker
18675 + * Returns a negative error code if the initialization of the worker did not
18676 + * work, %0 otherwise.
18679 +int swork_get(void)
18681 + struct sworker *worker;
18683 + mutex_lock(&worker_mutex);
18684 + if (!glob_worker) {
18685 + worker = swork_create();
18686 + if (IS_ERR(worker)) {
18687 + mutex_unlock(&worker_mutex);
18691 + glob_worker = worker;
18694 + glob_worker->refs++;
18695 + mutex_unlock(&worker_mutex);
18699 +EXPORT_SYMBOL_GPL(swork_get);
18702 + * swork_put - puts an instance of the sworker
18704 + * Will destroy the sworker thread. This function must not be called until all
18705 + * queued events have been completed.
18707 +void swork_put(void)
18709 + mutex_lock(&worker_mutex);
18711 + glob_worker->refs--;
18712 + if (glob_worker->refs > 0)
18715 + swork_destroy(glob_worker);
18716 + glob_worker = NULL;
18718 + mutex_unlock(&worker_mutex);
18720 +EXPORT_SYMBOL_GPL(swork_put);
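
A usage sketch for the swork interface added above (not part of the patch): swork_get() creates the "kswork" thread on first use, swork_queue() may be called from hard interrupt context, and the callback runs in kthread context where it may sleep. INIT_SWORK() and the callback member are assumed from the accompanying include/linux/swork.h, which is not shown in this hunk.

#include <linux/swork.h>

/* Runs in kswork kthread context; sleeping and rtmutexes are fine here. */
static void example_event_fn(struct swork_event *sev)
{
}

static struct swork_event example_event;

static int example_init(void)
{
	int ret = swork_get();		/* create kswork on first use */

	if (ret)
		return ret;
	INIT_SWORK(&example_event, example_event_fn);	/* assumed from swork.h */
	return 0;
}

static void example_irq_path(void)
{
	swork_queue(&example_event);	/* safe from hard irq context */
}

static void example_exit(void)
{
	swork_put();			/* drop the reference, maybe stop kswork */
}
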
18721 diff --git a/kernel/signal.c b/kernel/signal.c
18722 index 0b1415720a15..c884647951f7 100644
18723 --- a/kernel/signal.c
18724 +++ b/kernel/signal.c
18726 #include <linux/export.h>
18727 #include <linux/init.h>
18728 #include <linux/sched.h>
18729 +#include <linux/sched/rt.h>
18730 #include <linux/fs.h>
18731 #include <linux/tty.h>
18732 #include <linux/binfmts.h>
18733 @@ -352,13 +353,30 @@ static bool task_participate_group_stop(struct task_struct *task)
18737 +static inline struct sigqueue *get_task_cache(struct task_struct *t)
18739 + struct sigqueue *q = t->sigqueue_cache;
18741 + if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
18746 +static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
18748 + if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
18754 * allocate a new signal queue record
18755 * - this may be called without locks if and only if t == current, otherwise an
18756 * appropriate lock must be held to stop the target task from exiting
18758 static struct sigqueue *
18759 -__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
18760 +__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
18761 + int override_rlimit, int fromslab)
18763 struct sigqueue *q = NULL;
18764 struct user_struct *user;
18765 @@ -375,7 +393,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
18766 if (override_rlimit ||
18767 atomic_read(&user->sigpending) <=
18768 task_rlimit(t, RLIMIT_SIGPENDING)) {
18769 - q = kmem_cache_alloc(sigqueue_cachep, flags);
18771 + q = get_task_cache(t);
18773 + q = kmem_cache_alloc(sigqueue_cachep, flags);
18775 print_dropped_signal(sig);
18777 @@ -392,6 +413,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
18781 +static struct sigqueue *
18782 +__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
18783 + int override_rlimit)
18785 + return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
18788 static void __sigqueue_free(struct sigqueue *q)
18790 if (q->flags & SIGQUEUE_PREALLOC)
18791 @@ -401,6 +429,21 @@ static void __sigqueue_free(struct sigqueue *q)
18792 kmem_cache_free(sigqueue_cachep, q);
18795 +static void sigqueue_free_current(struct sigqueue *q)
18797 + struct user_struct *up;
18799 + if (q->flags & SIGQUEUE_PREALLOC)
18803 + if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
18804 + atomic_dec(&up->sigpending);
18807 + __sigqueue_free(q);
18810 void flush_sigqueue(struct sigpending *queue)
18812 struct sigqueue *q;
18813 @@ -414,6 +457,21 @@ void flush_sigqueue(struct sigpending *queue)
18817 + * Called from __exit_signal. Flush tsk->pending and
18818 + * tsk->sigqueue_cache
18820 +void flush_task_sigqueue(struct task_struct *tsk)
18822 + struct sigqueue *q;
18824 + flush_sigqueue(&tsk->pending);
18826 + q = get_task_cache(tsk);
18828 + kmem_cache_free(sigqueue_cachep, q);
18832 * Flush all pending signals for this kthread.
18834 void flush_signals(struct task_struct *t)
18835 @@ -525,7 +583,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
18837 list_del_init(&first->list);
18838 copy_siginfo(info, &first->info);
18839 - __sigqueue_free(first);
18840 + sigqueue_free_current(first);
18843 * Ok, it wasn't in the queue. This must be
18844 @@ -560,6 +618,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
18848 + WARN_ON_ONCE(tsk != current);
18850 /* We only dequeue private signals from ourselves, we don't let
18851 * signalfd steal them
18853 @@ -1156,8 +1216,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
18854 * We don't want to have recursive SIGSEGV's etc, for example,
18855 * that is why we also clear SIGNAL_UNKILLABLE.
18858 -force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
18860 +do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
18862 unsigned long int flags;
18863 int ret, blocked, ignored;
18864 @@ -1182,6 +1242,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
18868 +int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
18871 + * On some archs, PREEMPT_RT has to delay sending a signal from a trap
18872 + * since it cannot enable preemption, and the signal code's spin_locks
18873 + * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
18874 + * send the signal on exit of the trap.
18876 +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
18877 + if (in_atomic()) {
18878 + if (WARN_ON_ONCE(t != current))
18880 + if (WARN_ON_ONCE(t->forced_info.si_signo))
18883 + if (is_si_special(info)) {
18884 + WARN_ON_ONCE(info != SEND_SIG_PRIV);
18885 + t->forced_info.si_signo = sig;
18886 + t->forced_info.si_errno = 0;
18887 + t->forced_info.si_code = SI_KERNEL;
18888 + t->forced_info.si_pid = 0;
18889 + t->forced_info.si_uid = 0;
18891 + t->forced_info = *info;
18894 + set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
18898 + return do_force_sig_info(sig, info, t);
18902 * Nuke all other threads in the group.
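The hunk above is the RT-specific deferral of force_sig_info(): when ARCH_RT_DELAYS_SIGNAL_SEND is defined and the caller is in atomic context, the siginfo is parked in task_struct::forced_info and TIF_NOTIFY_RESUME makes the exit-to-user path deliver it once sleeping locks may be taken. A minimal user-space sketch of the same record-now/deliver-at-a-safe-point pattern (all identifiers below are illustrative stand-ins, not kernel API):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-ins, not kernel structures. */
    struct fake_siginfo { int signo; int code; };
    struct fake_task {
            int notify_resume;               /* plays the role of TIF_NOTIFY_RESUME */
            struct fake_siginfo forced_info; /* parked signal, delivered later */
    };

    /* Called from a context where delivery must not block (the "trap"). */
    static void force_sig_from_atomic(struct fake_task *t, int signo)
    {
            t->forced_info.signo = signo;
            t->forced_info.code = 0x80;      /* SI_KERNEL-like marker */
            t->notify_resume = 1;            /* ask the exit path to finish the job */
    }

    /* Called at a safe point (return to user space) where blocking is fine. */
    static void exit_to_user(struct fake_task *t)
    {
            if (!t->notify_resume)
                    return;
            t->notify_resume = 0;
            printf("delivering signal %d at a preemptible point\n",
                   t->forced_info.signo);
            memset(&t->forced_info, 0, sizeof(t->forced_info));
    }

    int main(void)
    {
            struct fake_task task = { 0 };

            force_sig_from_atomic(&task, 11);        /* "trap" records the signal */
            exit_to_user(&task);                     /* delivery happens here */
            return 0;
    }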
18904 @@ -1216,12 +1309,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
18905 * Disable interrupts early to avoid deadlocks.
18906 * See rcu_read_unlock() comment header for details.
18908 - local_irq_save(*flags);
18909 + local_irq_save_nort(*flags);
18911 sighand = rcu_dereference(tsk->sighand);
18912 if (unlikely(sighand == NULL)) {
18914 - local_irq_restore(*flags);
18915 + local_irq_restore_nort(*flags);
18919 @@ -1242,7 +1335,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
18921 spin_unlock(&sighand->siglock);
18923 - local_irq_restore(*flags);
18924 + local_irq_restore_nort(*flags);
18928 @@ -1485,7 +1578,8 @@ EXPORT_SYMBOL(kill_pid);
18930 struct sigqueue *sigqueue_alloc(void)
18932 - struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
18933 + /* Preallocated sigqueue objects always come from the slab cache! */
18934 + struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
18937 q->flags |= SIGQUEUE_PREALLOC;
18938 @@ -1846,15 +1940,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
18939 if (gstop_done && ptrace_reparented(current))
18940 do_notify_parent_cldstop(current, false, why);
18943 - * Don't want to allow preemption here, because
18944 - * sys_ptrace() needs this task to be inactive.
18946 - * XXX: implement read_unlock_no_resched().
18948 - preempt_disable();
18949 read_unlock(&tasklist_lock);
18950 - preempt_enable_no_resched();
18951 freezable_schedule();
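Taken together, get_task_cache()/put_task_cache() and sigqueue_free_current() give each task a one-element sigqueue cache: when an RT-priority task dequeues a signal, the record is parked in task_struct::sigqueue_cache instead of going back to the slab, so the next allocation in a latency-critical path can skip kmem_cache_alloc(). A user-space sketch of such a single-slot compare-and-swap cache (GCC __sync builtins stand in for the kernel's cmpxchg(); names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct sigq { int sig; };

    /* One cached object per "task"; NULL means the slot is empty. */
    static struct sigq *sigqueue_cache;

    static struct sigq *cache_get(void)
    {
            struct sigq *q = sigqueue_cache;

            /* Claim the cached object only if nobody raced us for it. */
            if (!q || !__sync_bool_compare_and_swap(&sigqueue_cache, q, NULL))
                    return NULL;
            return q;
    }

    static int cache_put(struct sigq *q)
    {
            /* Park the object only if the slot is still empty. */
            return __sync_bool_compare_and_swap(&sigqueue_cache, NULL, q) ? 0 : -1;
    }

    static struct sigq *sigq_alloc(void)
    {
            struct sigq *q = cache_get();

            return q ? q : malloc(sizeof(*q));       /* slab allocation stand-in */
    }

    static void sigq_free(struct sigq *q)
    {
            if (cache_put(q))
                    free(q);                         /* cache already full */
    }

    int main(void)
    {
            struct sigq *a = sigq_alloc();

            sigq_free(a);                            /* lands in the cache */
            struct sigq *b = sigq_alloc();           /* reuses the cached object */
            printf("reused cached object: %s\n", a == b ? "yes" : "no");
            sigq_free(b);
            free(cache_get());                       /* drain the cache on exit */
            return 0;
    }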
18954 diff --git a/kernel/softirq.c b/kernel/softirq.c
18955 index 744fa611cae0..819bd7cf5ad0 100644
18956 --- a/kernel/softirq.c
18957 +++ b/kernel/softirq.c
18958 @@ -21,10 +21,12 @@
18959 #include <linux/freezer.h>
18960 #include <linux/kthread.h>
18961 #include <linux/rcupdate.h>
18962 +#include <linux/delay.h>
18963 #include <linux/ftrace.h>
18964 #include <linux/smp.h>
18965 #include <linux/smpboot.h>
18966 #include <linux/tick.h>
18967 +#include <linux/locallock.h>
18968 #include <linux/irq.h>
18970 #define CREATE_TRACE_POINTS
18971 @@ -56,12 +58,108 @@ EXPORT_SYMBOL(irq_stat);
18972 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
18974 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
18975 +#ifdef CONFIG_PREEMPT_RT_FULL
18976 +#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
18977 +DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd);
18980 const char * const softirq_to_name[NR_SOFTIRQS] = {
18981 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
18982 "TASKLET", "SCHED", "HRTIMER", "RCU"
18985 +#ifdef CONFIG_NO_HZ_COMMON
18986 +# ifdef CONFIG_PREEMPT_RT_FULL
18988 +struct softirq_runner {
18989 + struct task_struct *runner[NR_SOFTIRQS];
18992 +static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
18994 +static inline void softirq_set_runner(unsigned int sirq)
18996 + struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
18998 + sr->runner[sirq] = current;
19001 +static inline void softirq_clr_runner(unsigned int sirq)
19003 + struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
19005 + sr->runner[sirq] = NULL;
19009 + * On preempt-rt a softirq running context might be blocked on a
19010 + * lock. There might be no other runnable task on this CPU because the
19011 + * lock owner runs on some other CPU. So we have to go into idle with
19012 + * the pending bit set. Therefore we need to check this, otherwise we
19013 + * warn about false positives which confuses users and defeats the
19014 + * whole purpose of this test.
19016 + * This code is called with interrupts disabled.
19018 +void softirq_check_pending_idle(void)
19020 + static int rate_limit;
19021 + struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
19025 + if (rate_limit >= 10)
19028 + warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
19029 + for (i = 0; i < NR_SOFTIRQS; i++) {
19030 + struct task_struct *tsk = sr->runner[i];
19033 + * The wakeup code in rtmutex.c wakes up the task
19034 + * _before_ it sets pi_blocked_on to NULL under
19035 + * tsk->pi_lock. So we need to check for both: state
19036 + * and pi_blocked_on.
19039 + raw_spin_lock(&tsk->pi_lock);
19040 + if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
19041 + /* Clear all bits pending in that task */
19042 + warnpending &= ~(tsk->softirqs_raised);
19043 + warnpending &= ~(1 << i);
19045 + raw_spin_unlock(&tsk->pi_lock);
19049 + if (warnpending) {
19050 + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
19057 + * On !PREEMPT_RT we just printk rate limited:
19059 +void softirq_check_pending_idle(void)
19061 + static int rate_limit;
19063 + if (rate_limit < 10 &&
19064 + (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
19065 + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
19066 + local_softirq_pending());
19072 +#else /* !CONFIG_NO_HZ_COMMON */
19073 +static inline void softirq_set_runner(unsigned int sirq) { }
19074 +static inline void softirq_clr_runner(unsigned int sirq) { }
19078 * we cannot loop indefinitely here to avoid userspace starvation,
19079 * but we also don't want to introduce a worst case 1/HZ latency
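softirq_check_pending_idle() above addresses an RT-only false positive: a CPU may legitimately go idle with softirqs still pending because the thread that owns those vectors is blocked on a lock held on another CPU, so warning about "lost" softirqs would be wrong. The runner[] bookkeeping records which task owns each vector so the idle check can mask out bits that a blocked (or already runnable) owner will still handle. A toy sketch of that mask filtering (simplified task state, illustrative names):

    #include <stdio.h>

    #define NR_SOFTIRQS 10

    struct fake_task {
            int blocked_on_lock;            /* stands in for pi_blocked_on / TASK_RUNNING */
            unsigned int softirqs_raised;
    };

    /* Which softirq vector is owned by which (possibly blocked) runner. */
    static struct fake_task *runner[NR_SOFTIRQS];

    static unsigned int filter_pending(unsigned int pending)
    {
            unsigned int warnpending = pending;
            int i;

            for (i = 0; i < NR_SOFTIRQS; i++) {
                    struct fake_task *tsk = runner[i];

                    if (!tsk)
                            continue;
                    if (tsk->blocked_on_lock) {
                            /* The runner will finish these once it gets the
                             * lock: nothing is lost, so don't warn about it. */
                            warnpending &= ~tsk->softirqs_raised;
                            warnpending &= ~(1U << i);
                    }
            }
            return warnpending;
    }

    int main(void)
    {
            struct fake_task net_rx = { .blocked_on_lock = 1,
                                        .softirqs_raised = 1U << 3 };
            unsigned int pending = (1U << 3) | (1U << 6);

            runner[3] = &net_rx;            /* vector 3 owned by a blocked thread */
            printf("warn about pending mask %#x\n", filter_pending(pending));
            return 0;
    }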
19080 @@ -77,6 +175,38 @@ static void wakeup_softirqd(void)
19081 wake_up_process(tsk);
19084 +#ifdef CONFIG_PREEMPT_RT_FULL
19085 +static void wakeup_timer_softirqd(void)
19087 + /* Interrupts are disabled: no need to stop preemption */
19088 + struct task_struct *tsk = __this_cpu_read(ktimer_softirqd);
19090 + if (tsk && tsk->state != TASK_RUNNING)
19091 + wake_up_process(tsk);
19095 +static void handle_softirq(unsigned int vec_nr)
19097 + struct softirq_action *h = softirq_vec + vec_nr;
19100 + prev_count = preempt_count();
19102 + kstat_incr_softirqs_this_cpu(vec_nr);
19104 + trace_softirq_entry(vec_nr);
19106 + trace_softirq_exit(vec_nr);
19107 + if (unlikely(prev_count != preempt_count())) {
19108 + pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
19109 + vec_nr, softirq_to_name[vec_nr], h->action,
19110 + prev_count, preempt_count());
19111 + preempt_count_set(prev_count);
19115 +#ifndef CONFIG_PREEMPT_RT_FULL
19117 * If ksoftirqd is scheduled, we do not want to process pending softirqs
19118 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
19119 @@ -88,6 +218,47 @@ static bool ksoftirqd_running(void)
19120 return tsk && (tsk->state == TASK_RUNNING);
19123 +static inline int ksoftirqd_softirq_pending(void)
19125 + return local_softirq_pending();
19128 +static void handle_pending_softirqs(u32 pending)
19130 + struct softirq_action *h = softirq_vec;
19133 + local_irq_enable();
19137 + while ((softirq_bit = ffs(pending))) {
19138 + unsigned int vec_nr;
19140 + h += softirq_bit - 1;
19141 + vec_nr = h - softirq_vec;
19142 + handle_softirq(vec_nr);
19145 + pending >>= softirq_bit;
19149 + local_irq_disable();
19152 +static void run_ksoftirqd(unsigned int cpu)
19154 + local_irq_disable();
19155 + if (ksoftirqd_softirq_pending()) {
19157 + local_irq_enable();
19158 + cond_resched_rcu_qs();
19161 + local_irq_enable();
19165 * preempt_count and SOFTIRQ_OFFSET usage:
19166 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
19167 @@ -243,10 +414,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
19168 unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
19169 unsigned long old_flags = current->flags;
19170 int max_restart = MAX_SOFTIRQ_RESTART;
19171 - struct softirq_action *h;
19177 * Mask out PF_MEMALLOC as the current task context is borrowed for the
19178 @@ -265,36 +434,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
19179 /* Reset the pending bitmask before enabling irqs */
19180 set_softirq_pending(0);
19182 - local_irq_enable();
19186 - while ((softirq_bit = ffs(pending))) {
19187 - unsigned int vec_nr;
19190 - h += softirq_bit - 1;
19192 - vec_nr = h - softirq_vec;
19193 - prev_count = preempt_count();
19195 - kstat_incr_softirqs_this_cpu(vec_nr);
19197 - trace_softirq_entry(vec_nr);
19199 - trace_softirq_exit(vec_nr);
19200 - if (unlikely(prev_count != preempt_count())) {
19201 - pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
19202 - vec_nr, softirq_to_name[vec_nr], h->action,
19203 - prev_count, preempt_count());
19204 - preempt_count_set(prev_count);
19207 - pending >>= softirq_bit;
19211 - local_irq_disable();
19212 + handle_pending_softirqs(pending);
19214 pending = local_softirq_pending();
19216 @@ -331,6 +471,309 @@ asmlinkage __visible void do_softirq(void)
19220 + * This function must run with irqs disabled!
19222 +void raise_softirq_irqoff(unsigned int nr)
19224 + __raise_softirq_irqoff(nr);
19227 + * If we're in an interrupt or softirq, we're done
19228 + * (this also catches softirq-disabled code). We will
19229 + * actually run the softirq once we return from
19230 + * the irq or softirq.
19232 + * Otherwise we wake up ksoftirqd to make sure we
19233 + * schedule the softirq soon.
19235 + if (!in_interrupt())
19236 + wakeup_softirqd();
19239 +void __raise_softirq_irqoff(unsigned int nr)
19241 + trace_softirq_raise(nr);
19242 + or_softirq_pending(1UL << nr);
19245 +static inline void local_bh_disable_nort(void) { local_bh_disable(); }
19246 +static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
19247 +static void ksoftirqd_set_sched_params(unsigned int cpu) { }
19249 +#else /* !PREEMPT_RT_FULL */
19252 + * On RT we serialize softirq execution with a cpu local lock per softirq
19254 +static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
19256 +void __init softirq_early_init(void)
19260 + for (i = 0; i < NR_SOFTIRQS; i++)
19261 + local_irq_lock_init(local_softirq_locks[i]);
19264 +static void lock_softirq(int which)
19266 + local_lock(local_softirq_locks[which]);
19269 +static void unlock_softirq(int which)
19271 + local_unlock(local_softirq_locks[which]);
19274 +static void do_single_softirq(int which)
19276 + unsigned long old_flags = current->flags;
19278 + current->flags &= ~PF_MEMALLOC;
19279 + vtime_account_irq_enter(current);
19280 + current->flags |= PF_IN_SOFTIRQ;
19281 + lockdep_softirq_enter();
19282 + local_irq_enable();
19283 + handle_softirq(which);
19284 + local_irq_disable();
19285 + lockdep_softirq_exit();
19286 + current->flags &= ~PF_IN_SOFTIRQ;
19287 + vtime_account_irq_enter(current);
19288 + tsk_restore_flags(current, old_flags, PF_MEMALLOC);
19292 + * Called with interrupts disabled. Process softirqs which were raised
19293 + * in current context (or on behalf of ksoftirqd).
19295 +static void do_current_softirqs(void)
19297 + while (current->softirqs_raised) {
19298 + int i = __ffs(current->softirqs_raised);
19299 + unsigned int pending, mask = (1U << i);
19301 + current->softirqs_raised &= ~mask;
19302 + local_irq_enable();
19305 + * If the lock is contended, we boost the owner to
19306 + * process the softirq or leave the critical section
19310 + local_irq_disable();
19311 + softirq_set_runner(i);
19313 + * Check with the local_softirq_pending() bits,
19314 + * whether we need to process this still or if someone
19315 + * else took care of it.
19317 + pending = local_softirq_pending();
19318 + if (pending & mask) {
19319 + set_softirq_pending(pending & ~mask);
19320 + do_single_softirq(i);
19322 + softirq_clr_runner(i);
19323 + WARN_ON(current->softirq_nestcnt != 1);
19324 + local_irq_enable();
19325 + unlock_softirq(i);
19326 + local_irq_disable();
19330 +void __local_bh_disable(void)
19332 + if (++current->softirq_nestcnt == 1)
19333 + migrate_disable();
19335 +EXPORT_SYMBOL(__local_bh_disable);
19337 +void __local_bh_enable(void)
19339 + if (WARN_ON(current->softirq_nestcnt == 0))
19342 + local_irq_disable();
19343 + if (current->softirq_nestcnt == 1 && current->softirqs_raised)
19344 + do_current_softirqs();
19345 + local_irq_enable();
19347 + if (--current->softirq_nestcnt == 0)
19348 + migrate_enable();
19350 +EXPORT_SYMBOL(__local_bh_enable);
19352 +void _local_bh_enable(void)
19354 + if (WARN_ON(current->softirq_nestcnt == 0))
19356 + if (--current->softirq_nestcnt == 0)
19357 + migrate_enable();
19359 +EXPORT_SYMBOL(_local_bh_enable);
19361 +int in_serving_softirq(void)
19363 + return current->flags & PF_IN_SOFTIRQ;
19365 +EXPORT_SYMBOL(in_serving_softirq);
19367 +/* Called with preemption disabled */
19368 +static void run_ksoftirqd(unsigned int cpu)
19370 + local_irq_disable();
19371 + current->softirq_nestcnt++;
19373 + do_current_softirqs();
19374 + current->softirq_nestcnt--;
19375 + local_irq_enable();
19376 + cond_resched_rcu_qs();
19380 + * Called from netif_rx_ni(). Preemption enabled, but migration
19381 + * disabled. So the cpu can't go away under us.
19383 +void thread_do_softirq(void)
19385 + if (!in_serving_softirq() && current->softirqs_raised) {
19386 + current->softirq_nestcnt++;
19387 + do_current_softirqs();
19388 + current->softirq_nestcnt--;
19392 +static void do_raise_softirq_irqoff(unsigned int nr)
19394 + unsigned int mask;
19396 + mask = 1UL << nr;
19398 + trace_softirq_raise(nr);
19399 + or_softirq_pending(mask);
19402 + * If we are not in a hard interrupt and inside a bh disabled
19403 + * region, we simply raise the flag on current. local_bh_enable()
19404 + * will make sure that the softirq is executed. Otherwise we
19405 + * delegate it to ksoftirqd.
19407 + if (!in_irq() && current->softirq_nestcnt)
19408 + current->softirqs_raised |= mask;
19409 + else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd))
19412 + if (mask & TIMER_SOFTIRQS)
19413 + __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
19415 + __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
19418 +static void wakeup_proper_softirq(unsigned int nr)
19420 + if ((1UL << nr) & TIMER_SOFTIRQS)
19421 + wakeup_timer_softirqd();
19423 + wakeup_softirqd();
19426 +void __raise_softirq_irqoff(unsigned int nr)
19428 + do_raise_softirq_irqoff(nr);
19429 + if (!in_irq() && !current->softirq_nestcnt)
19430 + wakeup_proper_softirq(nr);
19434 + * Same as __raise_softirq_irqoff() but will process them in ksoftirqd
19436 +void __raise_softirq_irqoff_ksoft(unsigned int nr)
19438 + unsigned int mask;
19440 + if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) ||
19441 + !__this_cpu_read(ktimer_softirqd)))
19443 + mask = 1UL << nr;
19445 + trace_softirq_raise(nr);
19446 + or_softirq_pending(mask);
19447 + if (mask & TIMER_SOFTIRQS)
19448 + __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
19450 + __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
19451 + wakeup_proper_softirq(nr);
19455 + * This function must run with irqs disabled!
19457 +void raise_softirq_irqoff(unsigned int nr)
19459 + do_raise_softirq_irqoff(nr);
19462 + * If we're in a hard interrupt we let irq return code deal
19463 + * with the wakeup of ksoftirqd.
19468 + * If we are in thread context but outside of a bh disabled
19469 + * region, we need to wake ksoftirqd as well.
19471 + * CHECKME: Some of the places which do that could be wrapped
19472 + * into local_bh_disable/enable pairs. Though it's unclear
19473 + * whether this is worth the effort. To find those places just
19474 + * raise a WARN() if the condition is met.
19476 + if (!current->softirq_nestcnt)
19477 + wakeup_proper_softirq(nr);
19480 +static inline int ksoftirqd_softirq_pending(void)
19482 + return current->softirqs_raised;
19485 +static inline void local_bh_disable_nort(void) { }
19486 +static inline void _local_bh_enable_nort(void) { }
19488 +static inline void ksoftirqd_set_sched_params(unsigned int cpu)
19490 + /* Take over all but timer pending softirqs when starting */
19491 + local_irq_disable();
19492 + current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS;
19493 + local_irq_enable();
19496 +static inline void ktimer_softirqd_set_sched_params(unsigned int cpu)
19498 + struct sched_param param = { .sched_priority = 1 };
19500 + sched_setscheduler(current, SCHED_FIFO, ¶m);
19502 + /* Take over timer pending softirqs when starting */
19503 + local_irq_disable();
19504 + current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS;
19505 + local_irq_enable();
19508 +static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu,
19511 + struct sched_param param = { .sched_priority = 0 };
19513 + sched_setscheduler(current, SCHED_NORMAL, ¶m);
19516 +static int ktimer_softirqd_should_run(unsigned int cpu)
19518 + return current->softirqs_raised;
19521 +#endif /* PREEMPT_RT_FULL */
19523 * Enter an interrupt context.
19525 void irq_enter(void)
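The large block that ends above is the heart of the RT softirq rework: raised vectors are recorded in current->softirqs_raised and processed from task context by do_current_softirqs(), with one cpu-local lock per vector so different softirqs can run (and be preempted) independently while each individual vector stays serialized. A rough user-space sketch of that per-vector serialization (pthread mutexes stand in for the local_lock machinery; the pending-bit handling is glossed over):

    #include <pthread.h>
    #include <stdio.h>

    #define NR_SOFTIRQS 10

    /* One lock per softirq vector, mirroring local_softirq_locks[] above:
     * different vectors may run concurrently, each vector is serialized. */
    static pthread_mutex_t vec_lock[NR_SOFTIRQS];

    static void handle_vec(int nr)
    {
            printf("running softirq vector %d in task context\n", nr);
    }

    static void do_current_softirqs(unsigned int *raised)
    {
            while (*raised) {
                    int i = __builtin_ffs(*raised) - 1;

                    *raised &= ~(1U << i);
                    pthread_mutex_lock(&vec_lock[i]);       /* lock_softirq(i) */
                    handle_vec(i);                          /* do_single_softirq(i) */
                    pthread_mutex_unlock(&vec_lock[i]);     /* unlock_softirq(i) */
            }
    }

    int main(void)
    {
            unsigned int raised = (1U << 1) | (1U << 3);    /* e.g. TIMER and NET_RX */
            int i;

            for (i = 0; i < NR_SOFTIRQS; i++)
                    pthread_mutex_init(&vec_lock[i], NULL);
            do_current_softirqs(&raised);
            return 0;
    }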
19526 @@ -341,9 +784,9 @@ void irq_enter(void)
19527 * Prevent raise_softirq from needlessly waking up ksoftirqd
19528 * here, as softirq will be serviced on return from interrupt.
19530 - local_bh_disable();
19531 + local_bh_disable_nort();
19533 - _local_bh_enable();
19534 + _local_bh_enable_nort();
19538 @@ -351,6 +794,7 @@ void irq_enter(void)
19540 static inline void invoke_softirq(void)
19542 +#ifndef CONFIG_PREEMPT_RT_FULL
19543 if (ksoftirqd_running())
19546 @@ -373,6 +817,18 @@ static inline void invoke_softirq(void)
19550 +#else /* PREEMPT_RT_FULL */
19551 + unsigned long flags;
19553 + local_irq_save(flags);
19554 + if (__this_cpu_read(ksoftirqd) &&
19555 + __this_cpu_read(ksoftirqd)->softirqs_raised)
19556 + wakeup_softirqd();
19557 + if (__this_cpu_read(ktimer_softirqd) &&
19558 + __this_cpu_read(ktimer_softirqd)->softirqs_raised)
19559 + wakeup_timer_softirqd();
19560 + local_irq_restore(flags);
19564 static inline void tick_irq_exit(void)
19565 @@ -409,26 +865,6 @@ void irq_exit(void)
19566 trace_hardirq_exit(); /* must be last! */
19570 - * This function must run with irqs disabled!
19572 -inline void raise_softirq_irqoff(unsigned int nr)
19574 - __raise_softirq_irqoff(nr);
19577 - * If we're in an interrupt or softirq, we're done
19578 - * (this also catches softirq-disabled code). We will
19579 - * actually run the softirq once we return from
19580 - * the irq or softirq.
19582 - * Otherwise we wake up ksoftirqd to make sure we
19583 - * schedule the softirq soon.
19585 - if (!in_interrupt())
19586 - wakeup_softirqd();
19589 void raise_softirq(unsigned int nr)
19591 unsigned long flags;
19592 @@ -438,12 +874,6 @@ void raise_softirq(unsigned int nr)
19593 local_irq_restore(flags);
19596 -void __raise_softirq_irqoff(unsigned int nr)
19598 - trace_softirq_raise(nr);
19599 - or_softirq_pending(1UL << nr);
19602 void open_softirq(int nr, void (*action)(struct softirq_action *))
19604 softirq_vec[nr].action = action;
19605 @@ -460,15 +890,45 @@ struct tasklet_head {
19606 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
19607 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
19609 +static void inline
19610 +__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
19612 + if (tasklet_trylock(t)) {
19614 + /* We may have been preempted before tasklet_trylock
19615 + * and __tasklet_action may have already run.
19616 + * So double check the sched bit while the tasklet
19617 + * is locked before adding it to the list.
19619 + if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
19622 + head->tail = &(t->next);
19623 + raise_softirq_irqoff(nr);
19624 + tasklet_unlock(t);
19626 + /* This is subtle. If we hit the corner case above,
19627 + * it is possible that we get preempted right here,
19628 + * and another task has successfully called
19629 + * tasklet_schedule(), then this function, and
19630 + * failed on the trylock. Thus we must be sure
19631 + * before releasing the tasklet lock, that the
19632 + * SCHED_BIT is clear. Otherwise the tasklet
19633 + * may get its SCHED_BIT set, but not added to the
19636 + if (!tasklet_tryunlock(t))
19642 void __tasklet_schedule(struct tasklet_struct *t)
19644 unsigned long flags;
19646 local_irq_save(flags);
19648 - *__this_cpu_read(tasklet_vec.tail) = t;
19649 - __this_cpu_write(tasklet_vec.tail, &(t->next));
19650 - raise_softirq_irqoff(TASKLET_SOFTIRQ);
19651 + __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
19652 local_irq_restore(flags);
19654 EXPORT_SYMBOL(__tasklet_schedule);
19655 @@ -478,10 +938,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
19656 unsigned long flags;
19658 local_irq_save(flags);
19660 - *__this_cpu_read(tasklet_hi_vec.tail) = t;
19661 - __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
19662 - raise_softirq_irqoff(HI_SOFTIRQ);
19663 + __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
19664 local_irq_restore(flags);
19666 EXPORT_SYMBOL(__tasklet_hi_schedule);
19667 @@ -490,82 +947,122 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
19669 BUG_ON(!irqs_disabled());
19671 - t->next = __this_cpu_read(tasklet_hi_vec.head);
19672 - __this_cpu_write(tasklet_hi_vec.head, t);
19673 - __raise_softirq_irqoff(HI_SOFTIRQ);
19674 + __tasklet_hi_schedule(t);
19676 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
19678 -static __latent_entropy void tasklet_action(struct softirq_action *a)
19679 +void tasklet_enable(struct tasklet_struct *t)
19681 - struct tasklet_struct *list;
19682 + if (!atomic_dec_and_test(&t->count))
19684 + if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
19685 + tasklet_schedule(t);
19687 +EXPORT_SYMBOL(tasklet_enable);
19689 - local_irq_disable();
19690 - list = __this_cpu_read(tasklet_vec.head);
19691 - __this_cpu_write(tasklet_vec.head, NULL);
19692 - __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
19693 - local_irq_enable();
19694 +static void __tasklet_action(struct softirq_action *a,
19695 + struct tasklet_struct *list)
19697 + int loops = 1000000;
19700 struct tasklet_struct *t = list;
19704 - if (tasklet_trylock(t)) {
19705 - if (!atomic_read(&t->count)) {
19706 - if (!test_and_clear_bit(TASKLET_STATE_SCHED,
19709 - t->func(t->data);
19710 - tasklet_unlock(t);
19713 - tasklet_unlock(t);
19715 + * Should always succeed - after a tasklet got on the
19716 + * list (after getting the SCHED bit set from 0 to 1),
19717 + * nothing but the tasklet softirq it got queued to can
19720 + if (!tasklet_trylock(t)) {
19725 - local_irq_disable();
19727 - *__this_cpu_read(tasklet_vec.tail) = t;
19728 - __this_cpu_write(tasklet_vec.tail, &(t->next));
19729 - __raise_softirq_irqoff(TASKLET_SOFTIRQ);
19730 - local_irq_enable();
19733 + * If we cannot handle the tasklet because it's disabled,
19734 + * mark it as pending. tasklet_enable() will later
19735 + * re-schedule the tasklet.
19737 + if (unlikely(atomic_read(&t->count))) {
19739 + /* implicit unlock: */
19741 + t->state = TASKLET_STATEF_PENDING;
19746 + * From this point on, the tasklet might be rescheduled
19747 + * on another CPU, but it can only be added to another
19748 + * CPU's tasklet list if we unlock the tasklet (which we
19751 + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
19755 + t->func(t->data);
19758 + * Try to unlock the tasklet. We must use cmpxchg, because
19759 + * another CPU might have scheduled or disabled the tasklet.
19760 + * We only allow the STATE_RUN -> 0 transition here.
19762 + while (!tasklet_tryunlock(t)) {
19764 + * If it got disabled meanwhile, bail out:
19766 + if (atomic_read(&t->count))
19767 + goto out_disabled;
19769 + * If it got scheduled meanwhile, re-execute
19770 + * the tasklet function:
19772 + if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
19775 + printk("hm, tasklet state: %08lx\n", t->state);
19777 + tasklet_unlock(t);
19784 +static void tasklet_action(struct softirq_action *a)
19786 + struct tasklet_struct *list;
19788 + local_irq_disable();
19790 + list = __this_cpu_read(tasklet_vec.head);
19791 + __this_cpu_write(tasklet_vec.head, NULL);
19792 + __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
19794 + local_irq_enable();
19796 + __tasklet_action(a, list);
19799 static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
19801 struct tasklet_struct *list;
19803 local_irq_disable();
19805 list = __this_cpu_read(tasklet_hi_vec.head);
19806 __this_cpu_write(tasklet_hi_vec.head, NULL);
19807 __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
19809 local_irq_enable();
19812 - struct tasklet_struct *t = list;
19814 - list = list->next;
19816 - if (tasklet_trylock(t)) {
19817 - if (!atomic_read(&t->count)) {
19818 - if (!test_and_clear_bit(TASKLET_STATE_SCHED,
19821 - t->func(t->data);
19822 - tasklet_unlock(t);
19825 - tasklet_unlock(t);
19828 - local_irq_disable();
19830 - *__this_cpu_read(tasklet_hi_vec.tail) = t;
19831 - __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
19832 - __raise_softirq_irqoff(HI_SOFTIRQ);
19833 - local_irq_enable();
19835 + __tasklet_action(a, list);
19838 void tasklet_init(struct tasklet_struct *t,
19839 @@ -586,7 +1083,7 @@ void tasklet_kill(struct tasklet_struct *t)
19841 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
19845 } while (test_bit(TASKLET_STATE_SCHED, &t->state));
19847 tasklet_unlock_wait(t);
19848 @@ -660,25 +1157,26 @@ void __init softirq_init(void)
19849 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
19852 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
19853 +void tasklet_unlock_wait(struct tasklet_struct *t)
19855 + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
19857 + * Hack for now to avoid this busy-loop:
19859 +#ifdef CONFIG_PREEMPT_RT_FULL
19866 +EXPORT_SYMBOL(tasklet_unlock_wait);
19869 static int ksoftirqd_should_run(unsigned int cpu)
19871 - return local_softirq_pending();
19874 -static void run_ksoftirqd(unsigned int cpu)
19876 - local_irq_disable();
19877 - if (local_softirq_pending()) {
19879 - * We can safely run softirq on inline stack, as we are not deep
19880 - * in the task stack here.
19883 - local_irq_enable();
19884 - cond_resched_rcu_qs();
19887 - local_irq_enable();
19888 + return ksoftirqd_softirq_pending();
19891 #ifdef CONFIG_HOTPLUG_CPU
19892 @@ -745,17 +1243,31 @@ static int takeover_tasklets(unsigned int cpu)
19894 static struct smp_hotplug_thread softirq_threads = {
19895 .store = &ksoftirqd,
19896 + .setup = ksoftirqd_set_sched_params,
19897 .thread_should_run = ksoftirqd_should_run,
19898 .thread_fn = run_ksoftirqd,
19899 .thread_comm = "ksoftirqd/%u",
19902 +#ifdef CONFIG_PREEMPT_RT_FULL
19903 +static struct smp_hotplug_thread softirq_timer_threads = {
19904 + .store = &ktimer_softirqd,
19905 + .setup = ktimer_softirqd_set_sched_params,
19906 + .cleanup = ktimer_softirqd_clr_sched_params,
19907 + .thread_should_run = ktimer_softirqd_should_run,
19908 + .thread_fn = run_ksoftirqd,
19909 + .thread_comm = "ktimersoftd/%u",
19913 static __init int spawn_ksoftirqd(void)
19915 cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
19916 takeover_tasklets);
19917 BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
19919 +#ifdef CONFIG_PREEMPT_RT_FULL
19920 + BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads));
19924 early_initcall(spawn_ksoftirqd);
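The second smp_hotplug_thread registered above splits softirq handling on RT: the TIMER and HRTIMER vectors are steered to a per-CPU ktimersoftd/%u thread running at SCHED_FIFO priority 1, while all other vectors stay with ksoftirqd/%u. A sketch of the routing decision (the mask mirrors TIMER_SOFTIRQS from the patch; everything else is illustrative):

    #include <stdio.h>

    enum { HI, TIMER, NET_TX, NET_RX, BLOCK, IRQ_POLL, TASKLET, SCHED, HRTIMER, RCU };

    #define TIMER_SOFTIRQS  ((1U << TIMER) | (1U << HRTIMER))

    /* Pending work for the two per-CPU softirq threads. */
    static unsigned int ksoftirqd_raised;
    static unsigned int ktimer_softirqd_raised;

    static void route_softirq(unsigned int nr)
    {
            unsigned int mask = 1U << nr;

            if (mask & TIMER_SOFTIRQS)
                    ktimer_softirqd_raised |= mask;  /* wakeup_timer_softirqd() */
            else
                    ksoftirqd_raised |= mask;        /* wakeup_softirqd() */
    }

    int main(void)
    {
            route_softirq(TIMER);
            route_softirq(NET_RX);
            printf("ksoftirqd mask %#x, ktimersoftd mask %#x\n",
                   ksoftirqd_raised, ktimer_softirqd_raised);
            return 0;
    }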
19925 diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
19926 index ec9ab2f01489..8b89dbedeaff 100644
19927 --- a/kernel/stop_machine.c
19928 +++ b/kernel/stop_machine.c
19929 @@ -36,7 +36,7 @@ struct cpu_stop_done {
19930 struct cpu_stopper {
19931 struct task_struct *thread;
19934 + raw_spinlock_t lock;
19935 bool enabled; /* is this stopper enabled? */
19936 struct list_head works; /* list of pending works */
19938 @@ -78,14 +78,14 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
19939 unsigned long flags;
19942 - spin_lock_irqsave(&stopper->lock, flags);
19943 + raw_spin_lock_irqsave(&stopper->lock, flags);
19944 enabled = stopper->enabled;
19946 __cpu_stop_queue_work(stopper, work);
19947 else if (work->done)
19948 cpu_stop_signal_done(work->done);
19949 - spin_unlock_irqrestore(&stopper->lock, flags);
19951 + raw_spin_unlock_irqrestore(&stopper->lock, flags);
19955 @@ -231,8 +231,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
19956 struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
19959 - spin_lock_irq(&stopper1->lock);
19960 - spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
19961 + raw_spin_lock_irq(&stopper1->lock);
19962 + raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
19965 if (!stopper1->enabled || !stopper2->enabled)
19966 @@ -255,8 +255,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
19967 __cpu_stop_queue_work(stopper1, work1);
19968 __cpu_stop_queue_work(stopper2, work2);
19970 - spin_unlock(&stopper2->lock);
19971 - spin_unlock_irq(&stopper1->lock);
19972 + raw_spin_unlock(&stopper2->lock);
19973 + raw_spin_unlock_irq(&stopper1->lock);
19975 if (unlikely(err == -EDEADLK)) {
19976 while (stop_cpus_in_progress)
19977 @@ -448,9 +448,9 @@ static int cpu_stop_should_run(unsigned int cpu)
19978 unsigned long flags;
19981 - spin_lock_irqsave(&stopper->lock, flags);
19982 + raw_spin_lock_irqsave(&stopper->lock, flags);
19983 run = !list_empty(&stopper->works);
19984 - spin_unlock_irqrestore(&stopper->lock, flags);
19985 + raw_spin_unlock_irqrestore(&stopper->lock, flags);
19989 @@ -461,13 +461,13 @@ static void cpu_stopper_thread(unsigned int cpu)
19993 - spin_lock_irq(&stopper->lock);
19994 + raw_spin_lock_irq(&stopper->lock);
19995 if (!list_empty(&stopper->works)) {
19996 work = list_first_entry(&stopper->works,
19997 struct cpu_stop_work, list);
19998 list_del_init(&work->list);
20000 - spin_unlock_irq(&stopper->lock);
20001 + raw_spin_unlock_irq(&stopper->lock);
20004 cpu_stop_fn_t fn = work->fn;
20005 @@ -475,6 +475,8 @@ static void cpu_stopper_thread(unsigned int cpu)
20006 struct cpu_stop_done *done = work->done;
20011 /* cpu stop callbacks must not sleep, make in_atomic() == T */
20012 preempt_count_inc();
20014 @@ -541,7 +543,7 @@ static int __init cpu_stop_init(void)
20015 for_each_possible_cpu(cpu) {
20016 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
20018 - spin_lock_init(&stopper->lock);
20019 + raw_spin_lock_init(&stopper->lock);
20020 INIT_LIST_HEAD(&stopper->works);
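The stop_machine changes convert the cpu_stopper lock from spinlock_t to raw_spinlock_t: on PREEMPT_RT a plain spinlock becomes a sleeping rtmutex-based lock, but stopper work is queued from contexts that must stay non-preemptible, so a true spinning lock is required. A loose user-space analogue of the queueing path, with a pthread spinlock standing in for raw_spinlock_t (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    /* Minimal analogue of cpu_stop_queue_work(): a work list guarded by a
     * lock that never sleeps. */
    struct stop_work { struct stop_work *next; int arg; };

    static pthread_spinlock_t stopper_lock;
    static struct stop_work *works;
    static int enabled = 1;

    static int queue_stop_work(struct stop_work *w)
    {
            int queued;

            pthread_spin_lock(&stopper_lock);
            queued = enabled;
            if (queued) {
                    w->next = works;
                    works = w;
            }
            pthread_spin_unlock(&stopper_lock);
            return queued;
    }

    int main(void)
    {
            struct stop_work w = { .arg = 1 };

            pthread_spin_init(&stopper_lock, PTHREAD_PROCESS_PRIVATE);
            printf("queued: %d\n", queue_stop_work(&w));
            pthread_spin_destroy(&stopper_lock);
            return 0;
    }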
20023 diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
20024 index bb5ec425dfe0..8338b14ed3a3 100644
20025 --- a/kernel/time/hrtimer.c
20026 +++ b/kernel/time/hrtimer.c
20028 #include <asm/uaccess.h>
20030 #include <trace/events/timer.h>
20031 +#include <trace/events/hist.h>
20033 #include "tick-internal.h"
20035 @@ -695,6 +696,29 @@ static void hrtimer_switch_to_hres(void)
20036 retrigger_next_event(NULL);
20039 +#ifdef CONFIG_PREEMPT_RT_FULL
20041 +static struct swork_event clock_set_delay_work;
20043 +static void run_clock_set_delay(struct swork_event *event)
20048 +void clock_was_set_delayed(void)
20050 + swork_queue(&clock_set_delay_work);
20053 +static __init int create_clock_set_delay_thread(void)
20055 + WARN_ON(swork_get());
20056 + INIT_SWORK(&clock_set_delay_work, run_clock_set_delay);
20059 +early_initcall(create_clock_set_delay_thread);
20060 +#else /* PREEMPT_RT_FULL */
20062 static void clock_was_set_work(struct work_struct *work)
20065 @@ -710,6 +734,7 @@ void clock_was_set_delayed(void)
20067 schedule_work(&hrtimer_work);
20073 @@ -719,11 +744,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
20074 static inline void hrtimer_switch_to_hres(void) { }
20076 hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
20077 -static inline int hrtimer_reprogram(struct hrtimer *timer,
20078 - struct hrtimer_clock_base *base)
20082 +static inline void hrtimer_reprogram(struct hrtimer *timer,
20083 + struct hrtimer_clock_base *base) { }
20084 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
20085 static inline void retrigger_next_event(void *arg) { }
20087 @@ -855,6 +877,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
20089 EXPORT_SYMBOL_GPL(hrtimer_forward);
20091 +#ifdef CONFIG_PREEMPT_RT_BASE
20092 +# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
20095 + * hrtimer_wait_for_timer - Wait for a running timer
20097 + * @timer: timer to wait for
20099 + * If the timer's callback function is currently executing, the
20100 + * function waits on the waitqueue of the timer base. The
20101 + * waitqueue is woken up after the timer callback function has
20102 + * finished execution.
20104 +void hrtimer_wait_for_timer(const struct hrtimer *timer)
20106 + struct hrtimer_clock_base *base = timer->base;
20108 + if (base && base->cpu_base && !timer->irqsafe)
20109 + wait_event(base->cpu_base->wait,
20110 + !(hrtimer_callback_running(timer)));
20114 +# define wake_up_timer_waiters(b) do { } while (0)
20118 * enqueue_hrtimer - internal function to (re)start a timer
20120 @@ -896,6 +944,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
20121 if (!(state & HRTIMER_STATE_ENQUEUED))
20124 + if (unlikely(!list_empty(&timer->cb_entry))) {
20125 + list_del_init(&timer->cb_entry);
20129 if (!timerqueue_del(&base->active, &timer->node))
20130 cpu_base->active_bases &= ~(1 << base->index);
20132 @@ -991,7 +1044,16 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
20133 new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
20135 timer_stats_hrtimer_set_start_info(timer);
20136 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
20138 + ktime_t now = new_base->get_time();
20140 + if (ktime_to_ns(tim) < ktime_to_ns(now))
20141 + timer->praecox = now;
20143 + timer->praecox = ktime_set(0, 0);
20146 leftmost = enqueue_hrtimer(timer, new_base);
20149 @@ -1063,7 +1125,7 @@ int hrtimer_cancel(struct hrtimer *timer)
20154 + hrtimer_wait_for_timer(timer);
20157 EXPORT_SYMBOL_GPL(hrtimer_cancel);
20158 @@ -1127,6 +1189,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
20160 base = hrtimer_clockid_to_base(clock_id);
20161 timer->base = &cpu_base->clock_base[base];
20162 + INIT_LIST_HEAD(&timer->cb_entry);
20163 timerqueue_init(&timer->node);
20165 #ifdef CONFIG_TIMER_STATS
20166 @@ -1167,6 +1230,7 @@ bool hrtimer_active(const struct hrtimer *timer)
20167 seq = raw_read_seqcount_begin(&cpu_base->seq);
20169 if (timer->state != HRTIMER_STATE_INACTIVE ||
20170 + cpu_base->running_soft == timer ||
20171 cpu_base->running == timer)
20174 @@ -1265,10 +1329,112 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
20175 cpu_base->running = NULL;
20178 +#ifdef CONFIG_PREEMPT_RT_BASE
20179 +static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
20180 + struct hrtimer_clock_base *base)
20184 + if (restart != HRTIMER_NORESTART &&
20185 + !(timer->state & HRTIMER_STATE_ENQUEUED)) {
20187 + leftmost = enqueue_hrtimer(timer, base);
20190 +#ifdef CONFIG_HIGH_RES_TIMERS
20191 + if (!hrtimer_is_hres_active(timer)) {
20193 + * Kick to reschedule the next tick to handle the new timer
20194 + * on dynticks target.
20196 + if (base->cpu_base->nohz_active)
20197 + wake_up_nohz_cpu(base->cpu_base->cpu);
20200 + hrtimer_reprogram(timer, base);
20207 + * The changes in mainline which removed the callback modes from
20208 + * hrtimer are not yet working with -rt. The non wakeup_process()
20209 + * based callbacks which involve sleeping locks need to be treated
20212 +static void hrtimer_rt_run_pending(void)
20214 + enum hrtimer_restart (*fn)(struct hrtimer *);
20215 + struct hrtimer_cpu_base *cpu_base;
20216 + struct hrtimer_clock_base *base;
20217 + struct hrtimer *timer;
20218 + int index, restart;
20220 + local_irq_disable();
20221 + cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
20223 + raw_spin_lock(&cpu_base->lock);
20225 + for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
20226 + base = &cpu_base->clock_base[index];
20228 + while (!list_empty(&base->expired)) {
20229 + timer = list_first_entry(&base->expired,
20230 + struct hrtimer, cb_entry);
20233 + * Same as the above __run_hrtimer function
20234 + * except that we run with interrupts enabled.
20236 + debug_deactivate(timer);
20237 + cpu_base->running_soft = timer;
20238 + raw_write_seqcount_barrier(&cpu_base->seq);
20240 + __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
20241 + timer_stats_account_hrtimer(timer);
20242 + fn = timer->function;
20244 + raw_spin_unlock_irq(&cpu_base->lock);
20245 + restart = fn(timer);
20246 + raw_spin_lock_irq(&cpu_base->lock);
20248 + hrtimer_rt_reprogram(restart, timer, base);
20249 + raw_write_seqcount_barrier(&cpu_base->seq);
20251 + WARN_ON_ONCE(cpu_base->running_soft != timer);
20252 + cpu_base->running_soft = NULL;
20256 + raw_spin_unlock_irq(&cpu_base->lock);
20258 + wake_up_timer_waiters(cpu_base);
20261 +static int hrtimer_rt_defer(struct hrtimer *timer)
20263 + if (timer->irqsafe)
20266 + __remove_hrtimer(timer, timer->base, timer->state, 0);
20267 + list_add_tail(&timer->cb_entry, &timer->base->expired);
20273 +static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
20277 +static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
20279 static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
20281 struct hrtimer_clock_base *base = cpu_base->clock_base;
20282 unsigned int active = cpu_base->active_bases;
20285 for (; active; base++, active >>= 1) {
20286 struct timerqueue_node *node;
20287 @@ -1284,6 +1450,15 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
20289 timer = container_of(node, struct hrtimer, node);
20291 + trace_hrtimer_interrupt(raw_smp_processor_id(),
20292 + ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
20293 + timer->praecox : hrtimer_get_expires(timer),
20296 + timer->function == hrtimer_wakeup ?
20297 + container_of(timer, struct hrtimer_sleeper,
20298 + timer)->task : NULL);
20301 * The immediate goal for using the softexpires is
20302 * minimizing wakeups, not running timers at the
20303 @@ -1299,9 +1474,14 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
20304 if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
20307 - __run_hrtimer(cpu_base, base, timer, &basenow);
20308 + if (!hrtimer_rt_defer(timer))
20309 + __run_hrtimer(cpu_base, base, timer, &basenow);
20315 + raise_softirq_irqoff(HRTIMER_SOFTIRQ);
20318 #ifdef CONFIG_HIGH_RES_TIMERS
20319 @@ -1464,16 +1644,18 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
20320 void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
20322 sl->timer.function = hrtimer_wakeup;
20323 + sl->timer.irqsafe = 1;
20326 EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
20328 -static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
20329 +static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
20330 + unsigned long state)
20332 hrtimer_init_sleeper(t, current);
20335 - set_current_state(TASK_INTERRUPTIBLE);
20336 + set_current_state(state);
20337 hrtimer_start_expires(&t->timer, mode);
20339 if (likely(t->task))
20340 @@ -1515,7 +1697,8 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
20342 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
20344 - if (do_nanosleep(&t, HRTIMER_MODE_ABS))
20345 + /* cpu_chill() does not care about restart state. */
20346 + if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE))
20349 rmtp = restart->nanosleep.rmtp;
20350 @@ -1532,8 +1715,10 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
20354 -long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
20355 - const enum hrtimer_mode mode, const clockid_t clockid)
20357 +__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
20358 + const enum hrtimer_mode mode, const clockid_t clockid,
20359 + unsigned long state)
20361 struct restart_block *restart;
20362 struct hrtimer_sleeper t;
20363 @@ -1546,7 +1731,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
20365 hrtimer_init_on_stack(&t.timer, clockid, mode);
20366 hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
20367 - if (do_nanosleep(&t, mode))
20368 + if (do_nanosleep(&t, mode, state))
20371 /* Absolute timers do not update the rmtp value and restart: */
20372 @@ -1573,6 +1758,12 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
20376 +long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
20377 + const enum hrtimer_mode mode, const clockid_t clockid)
20379 + return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
20382 SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
20383 struct timespec __user *, rmtp)
20385 @@ -1587,6 +1778,26 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
20386 return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
20389 +#ifdef CONFIG_PREEMPT_RT_FULL
20391 + * Sleep for 1 ms in the hope that whoever holds what we want will let it go.
20393 +void cpu_chill(void)
20395 + struct timespec tu = {
20396 + .tv_nsec = NSEC_PER_MSEC,
20398 + unsigned int freeze_flag = current->flags & PF_NOFREEZE;
20400 + current->flags |= PF_NOFREEZE;
20401 + __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
20402 + TASK_UNINTERRUPTIBLE);
20403 + if (!freeze_flag)
20404 + current->flags &= ~PF_NOFREEZE;
20406 +EXPORT_SYMBOL(cpu_chill);
20410 * Functions related to boot-time initialization:
20412 @@ -1598,10 +1809,14 @@ int hrtimers_prepare_cpu(unsigned int cpu)
20413 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
20414 cpu_base->clock_base[i].cpu_base = cpu_base;
20415 timerqueue_init_head(&cpu_base->clock_base[i].active);
20416 + INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
20419 cpu_base->cpu = cpu;
20420 hrtimer_init_hres(cpu_base);
20421 +#ifdef CONFIG_PREEMPT_RT_BASE
20422 + init_waitqueue_head(&cpu_base->wait);
20427 @@ -1671,9 +1886,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
20429 #endif /* CONFIG_HOTPLUG_CPU */
20431 +#ifdef CONFIG_PREEMPT_RT_BASE
20433 +static void run_hrtimer_softirq(struct softirq_action *h)
20435 + hrtimer_rt_run_pending();
20438 +static void hrtimers_open_softirq(void)
20440 + open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
20444 +static void hrtimers_open_softirq(void) { }
20447 void __init hrtimers_init(void)
20449 hrtimers_prepare_cpu(smp_processor_id());
20450 + hrtimers_open_softirq();
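The hrtimer changes that end here split expiry handling on RT: timers marked irqsafe (such as the nanosleep wakeup timer and the tick emulation timer) still run from hard interrupt context, while everything else is moved to the per-base expired list by hrtimer_rt_defer() and executed later from HRTIMER_SOFTIRQ with interrupts enabled. A compact sketch of that dispatch decision (illustrative types, not the kernel structures):

    #include <stdio.h>

    struct fake_hrtimer {
            int irqsafe;                    /* may run with interrupts disabled? */
            struct fake_hrtimer *next;      /* cb_entry stand-in */
            void (*function)(struct fake_hrtimer *);
    };

    static struct fake_hrtimer *expired;    /* base->expired stand-in */

    /* Mirrors hrtimer_rt_defer(): irqsafe timers run now, the rest are queued
     * for the HRTIMER_SOFTIRQ pass. */
    static void expire_timer(struct fake_hrtimer *t)
    {
            if (t->irqsafe) {
                    t->function(t);         /* __run_hrtimer() in hard irq context */
                    return;
            }
            t->next = expired;              /* picked up by hrtimer_rt_run_pending() */
            expired = t;
    }

    static void softirq_pass(void)
    {
            while (expired) {
                    struct fake_hrtimer *t = expired;

                    expired = t->next;
                    t->function(t);         /* runs with interrupts enabled */
            }
    }

    static void cb(struct fake_hrtimer *t)
    {
            printf("callback, irqsafe=%d\n", t->irqsafe);
    }

    int main(void)
    {
            struct fake_hrtimer hard = { .irqsafe = 1, .function = cb };
            struct fake_hrtimer soft = { .irqsafe = 0, .function = cb };

            expire_timer(&hard);
            expire_timer(&soft);
            softirq_pass();
            return 0;
    }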
20454 diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
20455 index 1d5c7204ddc9..184de6751180 100644
20456 --- a/kernel/time/itimer.c
20457 +++ b/kernel/time/itimer.c
20458 @@ -213,6 +213,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
20459 /* We are sharing ->siglock with it_real_fn() */
20460 if (hrtimer_try_to_cancel(timer) < 0) {
20461 spin_unlock_irq(&tsk->sighand->siglock);
20462 + hrtimer_wait_for_timer(&tsk->signal->real_timer);
20465 expires = timeval_to_ktime(value->it_value);
20466 diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
20467 index 555e21f7b966..a5d6435fabbb 100644
20468 --- a/kernel/time/jiffies.c
20469 +++ b/kernel/time/jiffies.c
20470 @@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = {
20474 -__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
20475 +__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
20476 +__cacheline_aligned_in_smp seqcount_t jiffies_seq;
20478 #if (BITS_PER_LONG < 64)
20479 u64 get_jiffies_64(void)
20480 @@ -83,9 +84,9 @@ u64 get_jiffies_64(void)
20484 - seq = read_seqbegin(&jiffies_lock);
20485 + seq = read_seqcount_begin(&jiffies_seq);
20487 - } while (read_seqretry(&jiffies_lock, seq));
20488 + } while (read_seqcount_retry(&jiffies_seq, seq));
20491 EXPORT_SYMBOL(get_jiffies_64);
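Splitting jiffies_lock into a raw spinlock plus a separate seqcount keeps the reader side lockless (the retry loop above) while giving the writer side a lock that does not sleep on RT; a seqlock_t cannot be used directly because it embeds a spinlock_t. A user-space sketch of the same writer-lock-plus-sequence-counter pattern (GCC atomics, simplified memory ordering, illustrative names):

    #include <pthread.h>
    #include <stdio.h>

    /* Writer serialization (raw_spinlock_t stand-in) plus a sequence counter
     * that readers use to retry, mirroring jiffies_lock + jiffies_seq. */
    static pthread_spinlock_t jlock;
    static volatile unsigned int jseq;
    static volatile unsigned long long jiffies64;

    static void write_tick(void)
    {
            pthread_spin_lock(&jlock);
            __atomic_fetch_add(&jseq, 1, __ATOMIC_RELEASE); /* odd: write in progress */
            jiffies64++;
            __atomic_fetch_add(&jseq, 1, __ATOMIC_RELEASE); /* even: write done */
            pthread_spin_unlock(&jlock);
    }

    static unsigned long long read_jiffies64(void)
    {
            unsigned int seq;
            unsigned long long val;

            do {
                    seq = __atomic_load_n(&jseq, __ATOMIC_ACQUIRE);
                    val = jiffies64;
            } while ((seq & 1) || __atomic_load_n(&jseq, __ATOMIC_ACQUIRE) != seq);
            return val;
    }

    int main(void)
    {
            pthread_spin_init(&jlock, PTHREAD_PROCESS_PRIVATE);
            write_tick();
            printf("jiffies64 = %llu\n", read_jiffies64());
            pthread_spin_destroy(&jlock);
            return 0;
    }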
20492 diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
20493 index 6df8927c58a5..05b7391bf9bd 100644
20494 --- a/kernel/time/ntp.c
20495 +++ b/kernel/time/ntp.c
20497 #include <linux/module.h>
20498 #include <linux/rtc.h>
20499 #include <linux/math64.h>
20500 +#include <linux/swork.h>
20502 #include "ntp_internal.h"
20503 #include "timekeeping_internal.h"
20504 @@ -568,10 +569,35 @@ static void sync_cmos_clock(struct work_struct *work)
20505 &sync_cmos_work, timespec64_to_jiffies(&next));
20508 +#ifdef CONFIG_PREEMPT_RT_FULL
20510 +static void run_clock_set_delay(struct swork_event *event)
20512 + queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
20515 +static struct swork_event ntp_cmos_swork;
20517 +void ntp_notify_cmos_timer(void)
20519 + swork_queue(&ntp_cmos_swork);
20522 +static __init int create_cmos_delay_thread(void)
20524 + WARN_ON(swork_get());
20525 + INIT_SWORK(&ntp_cmos_swork, run_clock_set_delay);
20528 +early_initcall(create_cmos_delay_thread);
20532 void ntp_notify_cmos_timer(void)
20534 queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
20536 +#endif /* CONFIG_PREEMPT_RT_FULL */
20539 void ntp_notify_cmos_timer(void) { }
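ntp_notify_cmos_timer() can be reached from contexts where queue_delayed_work() is not safe on RT, so the patch hands the call off to a "simple work" (swork) thread that performs it from schedulable context. A loose pthread analogue of that hand-off (this shows only the shape of the pattern, not the swork API):

    #include <pthread.h>
    #include <stdio.h>

    /* The notifier only sets a flag; the part that may sleep
     * (queue_delayed_work() in the patch) runs in a worker thread. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int cmos_sync_requested;

    static void notify_cmos_timer(void)
    {
            pthread_mutex_lock(&lock);
            cmos_sync_requested = 1;
            pthread_cond_signal(&cond);
            pthread_mutex_unlock(&lock);
    }

    static void *swork_thread(void *arg)
    {
            pthread_mutex_lock(&lock);
            while (!cmos_sync_requested)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
            printf("queueing CMOS sync work from thread context\n");
            return arg;
    }

    int main(void)
    {
            pthread_t tid;

            pthread_create(&tid, NULL, swork_thread, NULL);
            notify_cmos_timer();
            pthread_join(tid, NULL);
            return 0;
    }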
20540 diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
20541 index 39008d78927a..633f4eaca9e7 100644
20542 --- a/kernel/time/posix-cpu-timers.c
20543 +++ b/kernel/time/posix-cpu-timers.c
20547 #include <linux/sched.h>
20548 +#include <linux/sched/rt.h>
20549 #include <linux/posix-timers.h>
20550 #include <linux/errno.h>
20551 #include <linux/math64.h>
20552 @@ -620,7 +621,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
20554 * Disarm any old timer after extracting its expiry time.
20556 - WARN_ON_ONCE(!irqs_disabled());
20557 + WARN_ON_ONCE_NONRT(!irqs_disabled());
20560 old_incr = timer->it.cpu.incr;
20561 @@ -1064,7 +1065,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
20563 * Now re-arm for the new expiry time.
20565 - WARN_ON_ONCE(!irqs_disabled());
20566 + WARN_ON_ONCE_NONRT(!irqs_disabled());
20568 unlock_task_sighand(p, &flags);
20570 @@ -1153,13 +1154,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
20571 * already updated our counts. We need to check if any timers fire now.
20572 * Interrupts are disabled.
20574 -void run_posix_cpu_timers(struct task_struct *tsk)
20575 +static void __run_posix_cpu_timers(struct task_struct *tsk)
20578 struct k_itimer *timer, *next;
20579 unsigned long flags;
20581 - WARN_ON_ONCE(!irqs_disabled());
20582 + WARN_ON_ONCE_NONRT(!irqs_disabled());
20585 * The fast path checks that there are no expired thread or thread
20586 @@ -1213,6 +1214,190 @@ void run_posix_cpu_timers(struct task_struct *tsk)
20590 +#ifdef CONFIG_PREEMPT_RT_BASE
20591 +#include <linux/kthread.h>
20592 +#include <linux/cpu.h>
20593 +DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
20594 +DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
20596 +static int posix_cpu_timers_thread(void *data)
20598 + int cpu = (long)data;
20600 + BUG_ON(per_cpu(posix_timer_task,cpu) != current);
20602 + while (!kthread_should_stop()) {
20603 + struct task_struct *tsk = NULL;
20604 + struct task_struct *next = NULL;
20606 + if (cpu_is_offline(cpu))
20607 + goto wait_to_die;
20609 + /* grab task list */
20610 + raw_local_irq_disable();
20611 + tsk = per_cpu(posix_timer_tasklist, cpu);
20612 + per_cpu(posix_timer_tasklist, cpu) = NULL;
20613 + raw_local_irq_enable();
20615 + /* it's possible the list is empty, just return */
20617 + set_current_state(TASK_INTERRUPTIBLE);
20619 + __set_current_state(TASK_RUNNING);
20623 + /* Process task list */
20626 + next = tsk->posix_timer_list;
20628 + /* run the task timers, clear its ptr and
20631 + __run_posix_cpu_timers(tsk);
20632 + tsk->posix_timer_list = NULL;
20633 + put_task_struct(tsk);
20635 + /* check if this is the last on the list */
20644 + /* Wait for kthread_stop */
20645 + set_current_state(TASK_INTERRUPTIBLE);
20646 + while (!kthread_should_stop()) {
20648 + set_current_state(TASK_INTERRUPTIBLE);
20650 + __set_current_state(TASK_RUNNING);
20654 +static inline int __fastpath_timer_check(struct task_struct *tsk)
20656 + /* tsk == current, ensure it is safe to use ->signal/sighand */
20657 + if (unlikely(tsk->exit_state))
20660 + if (!task_cputime_zero(&tsk->cputime_expires))
20663 + if (!task_cputime_zero(&tsk->signal->cputime_expires))
20669 +void run_posix_cpu_timers(struct task_struct *tsk)
20671 + unsigned long cpu = smp_processor_id();
20672 + struct task_struct *tasklist;
20674 + BUG_ON(!irqs_disabled());
20675 + if(!per_cpu(posix_timer_task, cpu))
20677 + /* get per-cpu references */
20678 + tasklist = per_cpu(posix_timer_tasklist, cpu);
20680 + /* check to see if we're already queued */
20681 + if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
20682 + get_task_struct(tsk);
20684 + tsk->posix_timer_list = tasklist;
20687 + * The list is terminated by a self-pointing
20690 + tsk->posix_timer_list = tsk;
20692 + per_cpu(posix_timer_tasklist, cpu) = tsk;
20694 + wake_up_process(per_cpu(posix_timer_task, cpu));
20699 + * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
20700 + * Here we can start up the per-CPU posix timer thread for the new CPU.
20702 +static int posix_cpu_thread_call(struct notifier_block *nfb,
20703 + unsigned long action, void *hcpu)
20705 + int cpu = (long)hcpu;
20706 + struct task_struct *p;
20707 + struct sched_param param;
20709 + switch (action) {
20710 + case CPU_UP_PREPARE:
20711 + p = kthread_create(posix_cpu_timers_thread, hcpu,
20712 + "posixcputmr/%d",cpu);
20714 + return NOTIFY_BAD;
20715 + p->flags |= PF_NOFREEZE;
20716 + kthread_bind(p, cpu);
20717 + /* Must be high prio to avoid getting starved */
20718 + param.sched_priority = MAX_RT_PRIO-1;
20719 + sched_setscheduler(p, SCHED_FIFO, ¶m);
20720 + per_cpu(posix_timer_task,cpu) = p;
20723 + /* Strictly unnecessary, as first user will wake it. */
20724 + wake_up_process(per_cpu(posix_timer_task,cpu));
20726 +#ifdef CONFIG_HOTPLUG_CPU
20727 + case CPU_UP_CANCELED:
20728 + /* Unbind it from offline cpu so it can run. Fall thru. */
20729 + kthread_bind(per_cpu(posix_timer_task, cpu),
20730 + cpumask_any(cpu_online_mask));
20731 + kthread_stop(per_cpu(posix_timer_task,cpu));
20732 + per_cpu(posix_timer_task,cpu) = NULL;
20735 + kthread_stop(per_cpu(posix_timer_task,cpu));
20736 + per_cpu(posix_timer_task,cpu) = NULL;
20740 + return NOTIFY_OK;
20743 +/* Register at highest priority so that task migration (migrate_all_tasks)
20744 + * happens before everything else.
20746 +static struct notifier_block posix_cpu_thread_notifier = {
20747 + .notifier_call = posix_cpu_thread_call,
20751 +static int __init posix_cpu_thread_init(void)
20753 + void *hcpu = (void *)(long)smp_processor_id();
20754 + /* Start one for boot CPU. */
20755 + unsigned long cpu;
20757 + /* init the per-cpu posix_timer_tasklets */
20758 + for_each_possible_cpu(cpu)
20759 + per_cpu(posix_timer_tasklist, cpu) = NULL;
20761 + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
20762 + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
20763 + register_cpu_notifier(&posix_cpu_thread_notifier);
20766 +early_initcall(posix_cpu_thread_init);
20767 +#else /* CONFIG_PREEMPT_RT_BASE */
20768 +void run_posix_cpu_timers(struct task_struct *tsk)
20770 + __run_posix_cpu_timers(tsk);
20772 +#endif /* CONFIG_PREEMPT_RT_BASE */
20775 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
20776 * The tsk->sighand->siglock must be held by the caller.
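On RT the tick no longer walks expired CPU timers from hard interrupt context: run_posix_cpu_timers() only links the task into a per-CPU, self-terminated singly-linked list and wakes posixcputmr/N, which does the real work from thread context while holding a task reference. A sketch of that self-terminated hand-off list (illustrative names, not the kernel structures):

    #include <stdio.h>

    /* The last element points to itself, so the consumer knows where to stop
     * without a separate count, mirroring tsk->posix_timer_list above. */
    struct fake_task {
            const char *comm;
            struct fake_task *timer_list;
    };

    static struct fake_task *tasklist;      /* per_cpu(posix_timer_tasklist) stand-in */

    static void queue_task(struct fake_task *tsk)
    {
            if (tsk->timer_list)
                    return;                 /* already queued */
            tsk->timer_list = tasklist ? tasklist : tsk;
            tasklist = tsk;
    }

    static void run_timer_thread(void)
    {
            struct fake_task *tsk = tasklist;

            tasklist = NULL;                /* grab the whole list */
            while (tsk) {
                    struct fake_task *next = tsk->timer_list;

                    printf("running posix cpu timers for %s\n", tsk->comm);
                    tsk->timer_list = NULL;
                    tsk = (next == tsk) ? NULL : next;  /* self-pointer ends it */
            }
    }

    int main(void)
    {
            struct fake_task a = { .comm = "a" }, b = { .comm = "b" };

            queue_task(&a);
            queue_task(&b);
            run_timer_thread();
            return 0;
    }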
20777 diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
20778 index f2826c35e918..464a98155a0e 100644
20779 --- a/kernel/time/posix-timers.c
20780 +++ b/kernel/time/posix-timers.c
20781 @@ -506,6 +506,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
20782 static struct pid *good_sigevent(sigevent_t * event)
20784 struct task_struct *rtn = current->group_leader;
20785 + int sig = event->sigev_signo;
20787 if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
20788 (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
20789 @@ -514,7 +515,8 @@ static struct pid *good_sigevent(sigevent_t * event)
20792 if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
20793 - ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
20794 + (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
20795 + sig_kernel_coredump(sig)))
20798 return task_pid(rtn);
20799 @@ -826,6 +828,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
20804 + * Protected by RCU!
20806 +static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
20808 +#ifdef CONFIG_PREEMPT_RT_FULL
20809 + if (kc->timer_set == common_timer_set)
20810 + hrtimer_wait_for_timer(&timr->it.real.timer);
20812 + /* FIXME: Whacky hack for posix-cpu-timers */
20813 + schedule_timeout(1);
20817 /* Set a POSIX.1b interval timer. */
20818 /* timr->it_lock is taken. */
20820 @@ -903,6 +919,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
20825 kc = clockid_to_kclock(timr->it_clock);
20826 if (WARN_ON_ONCE(!kc || !kc->timer_set))
20828 @@ -911,9 +928,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
20830 unlock_timer(timr, flag);
20831 if (error == TIMER_RETRY) {
20832 + timer_wait_for_callback(kc, timr);
20833 rtn = NULL; // We already got the old time...
20834 + rcu_read_unlock();
20837 + rcu_read_unlock();
20839 if (old_setting && !error &&
20840 copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
20841 @@ -951,10 +971,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
20846 if (timer_delete_hook(timer) == TIMER_RETRY) {
20847 unlock_timer(timer, flags);
20848 + timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
20850 + rcu_read_unlock();
20853 + rcu_read_unlock();
20855 spin_lock(¤t->sighand->siglock);
20856 list_del(&timer->list);
20857 @@ -980,8 +1005,18 @@ static void itimer_delete(struct k_itimer *timer)
20859 spin_lock_irqsave(&timer->it_lock, flags);
20861 - if (timer_delete_hook(timer) == TIMER_RETRY) {
20862 + /* On RT we can race with a deletion */
20863 + if (!timer->it_signal) {
20864 unlock_timer(timer, flags);
20868 + if (timer_delete_hook(timer) == TIMER_RETRY) {
20870 + unlock_timer(timer, flags);
20871 + timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
20873 + rcu_read_unlock();
20876 list_del(&timer->list);
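Both timer_settime() and timer_delete() above follow the same RT pattern: if the hook reports TIMER_RETRY because the callback is still running, the timer lock is dropped, timer_wait_for_callback() waits for the callback to finish (under rcu_read_lock() so the timer cannot vanish), and the operation is retried instead of live-locking with the lock held. A stripped-down sketch of that retry loop (purely illustrative):

    #include <stdio.h>

    #define TIMER_RETRY 1

    /* Stand-in state: the timer's callback is currently executing. */
    static int callback_running = 1;

    static int try_delete(void)
    {
            if (callback_running)
                    return TIMER_RETRY;     /* hrtimer_try_to_cancel() saw it running */
            return 0;
    }

    static void wait_for_callback(void)
    {
            /* In the patch: hrtimer_wait_for_timer() blocks on cpu_base->wait. */
            callback_running = 0;
    }

    int main(void)
    {
            int tries = 0;

            while (try_delete() == TIMER_RETRY) {
                    tries++;
                    /* unlock_timer(timer, flags) would happen here */
                    wait_for_callback();
                    /* then the timer is looked up and locked again */
            }
            printf("deleted after %d retr%s\n", tries, tries == 1 ? "y" : "ies");
            return 0;
    }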
20877 diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
20878 index 690b797f522e..fe8ba1619879 100644
20879 --- a/kernel/time/tick-broadcast-hrtimer.c
20880 +++ b/kernel/time/tick-broadcast-hrtimer.c
20881 @@ -107,5 +107,6 @@ void tick_setup_hrtimer_broadcast(void)
20883 hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
20884 bctimer.function = bc_handler;
20885 + bctimer.irqsafe = true;
20886 clockevents_register_device(&ce_broadcast_hrtimer);
20888 diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
20889 index 4fcd99e12aa0..5a47f2e98faf 100644
20890 --- a/kernel/time/tick-common.c
20891 +++ b/kernel/time/tick-common.c
20892 @@ -79,13 +79,15 @@ int tick_is_oneshot_available(void)
20893 static void tick_periodic(int cpu)
20895 if (tick_do_timer_cpu == cpu) {
20896 - write_seqlock(&jiffies_lock);
20897 + raw_spin_lock(&jiffies_lock);
20898 + write_seqcount_begin(&jiffies_seq);
20900 /* Keep track of the next tick event */
20901 tick_next_period = ktime_add(tick_next_period, tick_period);
20904 - write_sequnlock(&jiffies_lock);
20905 + write_seqcount_end(&jiffies_seq);
20906 + raw_spin_unlock(&jiffies_lock);
20907 update_wall_time();
20910 @@ -157,9 +159,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
20914 - seq = read_seqbegin(&jiffies_lock);
20915 + seq = read_seqcount_begin(&jiffies_seq);
20916 next = tick_next_period;
20917 - } while (read_seqretry(&jiffies_lock, seq));
20918 + } while (read_seqcount_retry(&jiffies_seq, seq));
20920 clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
20922 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
20923 index 3bcb61b52f6c..66d85482a96e 100644
20924 --- a/kernel/time/tick-sched.c
20925 +++ b/kernel/time/tick-sched.c
20926 @@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(ktime_t now)
20929 /* Reevaluate with jiffies_lock held */
20930 - write_seqlock(&jiffies_lock);
20931 + raw_spin_lock(&jiffies_lock);
20932 + write_seqcount_begin(&jiffies_seq);
20934 delta = ktime_sub(now, last_jiffies_update);
20935 if (delta.tv64 >= tick_period.tv64) {
20936 @@ -85,10 +86,12 @@ static void tick_do_update_jiffies64(ktime_t now)
20937 /* Keep the tick_next_period variable up to date */
20938 tick_next_period = ktime_add(last_jiffies_update, tick_period);
20940 - write_sequnlock(&jiffies_lock);
20941 + write_seqcount_end(&jiffies_seq);
20942 + raw_spin_unlock(&jiffies_lock);
20945 - write_sequnlock(&jiffies_lock);
20946 + write_seqcount_end(&jiffies_seq);
20947 + raw_spin_unlock(&jiffies_lock);
20948 update_wall_time();
20951 @@ -99,12 +102,14 @@ static ktime_t tick_init_jiffy_update(void)
20955 - write_seqlock(&jiffies_lock);
20956 + raw_spin_lock(&jiffies_lock);
20957 + write_seqcount_begin(&jiffies_seq);
20958 /* Did we start the jiffies update yet ? */
20959 if (last_jiffies_update.tv64 == 0)
20960 last_jiffies_update = tick_next_period;
20961 period = last_jiffies_update;
20962 - write_sequnlock(&jiffies_lock);
20963 + write_seqcount_end(&jiffies_seq);
20964 + raw_spin_unlock(&jiffies_lock);
20968 @@ -215,6 +220,7 @@ static void nohz_full_kick_func(struct irq_work *work)
20970 static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
20971 .func = nohz_full_kick_func,
20972 + .flags = IRQ_WORK_HARD_IRQ,
20976 @@ -673,10 +679,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
20978 /* Read jiffies and the time when jiffies were updated last */
20980 - seq = read_seqbegin(&jiffies_lock);
20981 + seq = read_seqcount_begin(&jiffies_seq);
20982 basemono = last_jiffies_update.tv64;
20983 basejiff = jiffies;
20984 - } while (read_seqretry(&jiffies_lock, seq));
20985 + } while (read_seqcount_retry(&jiffies_seq, seq));
20986 ts->last_jiffies = basejiff;
20988 if (rcu_needs_cpu(basemono, &next_rcu) ||
20989 @@ -877,14 +883,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
20992 if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
20993 - static int ratelimit;
20995 - if (ratelimit < 10 &&
20996 - (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
20997 - pr_warn("NOHZ: local_softirq_pending %02x\n",
20998 - (unsigned int) local_softirq_pending());
21001 + softirq_check_pending_idle();
21005 @@ -1193,6 +1192,7 @@ void tick_setup_sched_timer(void)
21006 * Emulate tick processing via per-CPU hrtimers:
21008 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
21009 + ts->sched_timer.irqsafe = 1;
21010 ts->sched_timer.function = tick_sched_timer;
21012 /* Get the next period (per-CPU) */
21013 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
21014 index 46e312e9be38..fa75cf5d9253 100644
21015 --- a/kernel/time/timekeeping.c
21016 +++ b/kernel/time/timekeeping.c
21017 @@ -2328,8 +2328,10 @@ EXPORT_SYMBOL(hardpps);
21019 void xtime_update(unsigned long ticks)
21021 - write_seqlock(&jiffies_lock);
21022 + raw_spin_lock(&jiffies_lock);
21023 + write_seqcount_begin(&jiffies_seq);
21025 - write_sequnlock(&jiffies_lock);
21026 + write_seqcount_end(&jiffies_seq);
21027 + raw_spin_unlock(&jiffies_lock);
21028 update_wall_time();
21030 diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
21031 index 704f595ce83f..763a3e5121ff 100644
21032 --- a/kernel/time/timekeeping.h
21033 +++ b/kernel/time/timekeeping.h
21034 @@ -19,7 +19,8 @@ extern void timekeeping_resume(void);
21035 extern void do_timer(unsigned long ticks);
21036 extern void update_wall_time(void);
21038 -extern seqlock_t jiffies_lock;
21039 +extern raw_spinlock_t jiffies_lock;
21040 +extern seqcount_t jiffies_seq;
21042 #define CS_NAME_LEN 32
21044 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
21045 index c611c47de884..08a5ab762495 100644
21046 --- a/kernel/time/timer.c
21047 +++ b/kernel/time/timer.c
21048 @@ -193,8 +193,11 @@ EXPORT_SYMBOL(jiffies_64);
21051 struct timer_base {
21053 + raw_spinlock_t lock;
21054 struct timer_list *running_timer;
21055 +#ifdef CONFIG_PREEMPT_RT_FULL
21056 + struct swait_queue_head wait_for_running_timer;
21059 unsigned long next_expiry;
21061 @@ -948,10 +951,10 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
21063 if (!(tf & TIMER_MIGRATING)) {
21064 base = get_timer_base(tf);
21065 - spin_lock_irqsave(&base->lock, *flags);
21066 + raw_spin_lock_irqsave(&base->lock, *flags);
21067 if (timer->flags == tf)
21069 - spin_unlock_irqrestore(&base->lock, *flags);
21070 + raw_spin_unlock_irqrestore(&base->lock, *flags);
21074 @@ -1023,9 +1026,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
21075 /* See the comment in lock_timer_base() */
21076 timer->flags |= TIMER_MIGRATING;
21078 - spin_unlock(&base->lock);
21079 + raw_spin_unlock(&base->lock);
21081 - spin_lock(&base->lock);
21082 + raw_spin_lock(&base->lock);
21083 WRITE_ONCE(timer->flags,
21084 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
21086 @@ -1050,7 +1053,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
21090 - spin_unlock_irqrestore(&base->lock, flags);
21091 + raw_spin_unlock_irqrestore(&base->lock, flags);
21095 @@ -1144,19 +1147,46 @@ void add_timer_on(struct timer_list *timer, int cpu)
21096 if (base != new_base) {
21097 timer->flags |= TIMER_MIGRATING;
21099 - spin_unlock(&base->lock);
21100 + raw_spin_unlock(&base->lock);
21102 - spin_lock(&base->lock);
21103 + raw_spin_lock(&base->lock);
21104 WRITE_ONCE(timer->flags,
21105 (timer->flags & ~TIMER_BASEMASK) | cpu);
21108 debug_activate(timer, timer->expires);
21109 internal_add_timer(base, timer);
21110 - spin_unlock_irqrestore(&base->lock, flags);
21111 + raw_spin_unlock_irqrestore(&base->lock, flags);
21113 EXPORT_SYMBOL_GPL(add_timer_on);
21115 +#ifdef CONFIG_PREEMPT_RT_FULL
21117 + * Wait for a running timer
21119 +static void wait_for_running_timer(struct timer_list *timer)
21121 + struct timer_base *base;
21122 + u32 tf = timer->flags;
21124 + if (tf & TIMER_MIGRATING)
21127 + base = get_timer_base(tf);
21128 + swait_event(base->wait_for_running_timer,
21129 + base->running_timer != timer);
21132 +# define wakeup_timer_waiters(b) swake_up_all(&(b)->wait_for_running_timer)
21134 +static inline void wait_for_running_timer(struct timer_list *timer)
21139 +# define wakeup_timer_waiters(b) do { } while (0)
21143 * del_timer - deactive a timer.
21144 * @timer: the timer to be deactivated
21145 @@ -1180,7 +1210,7 @@ int del_timer(struct timer_list *timer)
21146 if (timer_pending(timer)) {
21147 base = lock_timer_base(timer, &flags);
21148 ret = detach_if_pending(timer, base, true);
21149 - spin_unlock_irqrestore(&base->lock, flags);
21150 + raw_spin_unlock_irqrestore(&base->lock, flags);
21154 @@ -1208,13 +1238,13 @@ int try_to_del_timer_sync(struct timer_list *timer)
21155 timer_stats_timer_clear_start_info(timer);
21156 ret = detach_if_pending(timer, base, true);
21158 - spin_unlock_irqrestore(&base->lock, flags);
21159 + raw_spin_unlock_irqrestore(&base->lock, flags);
21163 EXPORT_SYMBOL(try_to_del_timer_sync);
21166 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
21168 * del_timer_sync - deactivate a timer and wait for the handler to finish.
21169 * @timer: the timer to be deactivated
21170 @@ -1274,7 +1304,7 @@ int del_timer_sync(struct timer_list *timer)
21171 int ret = try_to_del_timer_sync(timer);
21175 + wait_for_running_timer(timer);
21178 EXPORT_SYMBOL(del_timer_sync);
21179 @@ -1339,14 +1369,17 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
21180 fn = timer->function;
21181 data = timer->data;
21183 - if (timer->flags & TIMER_IRQSAFE) {
21184 - spin_unlock(&base->lock);
21185 + if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) &&
21186 + timer->flags & TIMER_IRQSAFE) {
21187 + raw_spin_unlock(&base->lock);
21188 call_timer_fn(timer, fn, data);
21189 - spin_lock(&base->lock);
21190 + base->running_timer = NULL;
21191 + raw_spin_lock(&base->lock);
21193 - spin_unlock_irq(&base->lock);
21194 + raw_spin_unlock_irq(&base->lock);
21195 call_timer_fn(timer, fn, data);
21196 - spin_lock_irq(&base->lock);
21197 + base->running_timer = NULL;
21198 + raw_spin_lock_irq(&base->lock);
21202 @@ -1515,7 +1548,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
21203 if (cpu_is_offline(smp_processor_id()))
21206 - spin_lock(&base->lock);
21207 + raw_spin_lock(&base->lock);
21208 nextevt = __next_timer_interrupt(base);
21209 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
21210 base->next_expiry = nextevt;
21211 @@ -1543,7 +1576,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
21212 if ((expires - basem) > TICK_NSEC)
21213 base->is_idle = true;
21215 - spin_unlock(&base->lock);
21216 + raw_spin_unlock(&base->lock);
21218 return cmp_next_hrtimer_event(basem, expires);
21220 @@ -1608,13 +1641,13 @@ void update_process_times(int user_tick)
21222 /* Note: this timer irq context must be accounted for as well. */
21223 account_process_tick(p, user_tick);
21224 + scheduler_tick();
21225 run_local_timers();
21226 rcu_check_callbacks(user_tick);
21227 -#ifdef CONFIG_IRQ_WORK
21228 +#if defined(CONFIG_IRQ_WORK)
21232 - scheduler_tick();
21233 run_posix_cpu_timers(p);
21236 @@ -1630,7 +1663,7 @@ static inline void __run_timers(struct timer_base *base)
21237 if (!time_after_eq(jiffies, base->clk))
21240 - spin_lock_irq(&base->lock);
21241 + raw_spin_lock_irq(&base->lock);
21243 while (time_after_eq(jiffies, base->clk)) {
21245 @@ -1640,8 +1673,8 @@ static inline void __run_timers(struct timer_base *base)
21247 expire_timers(base, heads + levels);
21249 - base->running_timer = NULL;
21250 - spin_unlock_irq(&base->lock);
21251 + raw_spin_unlock_irq(&base->lock);
21252 + wakeup_timer_waiters(base);
21256 @@ -1651,6 +1684,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
21258 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
21260 + irq_work_tick_soft();
21262 __run_timers(base);
21263 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
21264 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
21265 @@ -1836,16 +1871,16 @@ int timers_dead_cpu(unsigned int cpu)
21266 * The caller is globally serialized and nobody else
21267 * takes two locks at once, deadlock is not possible.
21269 - spin_lock_irq(&new_base->lock);
21270 - spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
21271 + raw_spin_lock_irq(&new_base->lock);
21272 + raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
21274 BUG_ON(old_base->running_timer);
21276 for (i = 0; i < WHEEL_SIZE; i++)
21277 migrate_timer_list(new_base, old_base->vectors + i);
21279 - spin_unlock(&old_base->lock);
21280 - spin_unlock_irq(&new_base->lock);
21281 + raw_spin_unlock(&old_base->lock);
21282 + raw_spin_unlock_irq(&new_base->lock);
21283 put_cpu_ptr(&timer_bases);
21286 @@ -1861,8 +1896,11 @@ static void __init init_timer_cpu(int cpu)
21287 for (i = 0; i < NR_BASES; i++) {
21288 base = per_cpu_ptr(&timer_bases[i], cpu);
21290 - spin_lock_init(&base->lock);
21291 + raw_spin_lock_init(&base->lock);
21292 base->clk = jiffies;
21293 +#ifdef CONFIG_PREEMPT_RT_FULL
21294 + init_swait_queue_head(&base->wait_for_running_timer);
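
Taken together, the timer.c changes make the wheel usable on RT: base->lock becomes a raw spinlock, callbacks run with that lock dropped, and del_timer_sync() no longer spins on a callback that may itself be preempted; instead it sleeps on the per-base swait queue until base->running_timer is cleared (the !PREEMPT_RT stub of wait_for_running_timer(), whose body is among the elided lines, presumably falls back to cpu_relax() as before). Roughly, the resulting synchronization loop looks like this (assembled from the hunks above, surrounding mainline code assumed):

	int del_timer_sync(struct timer_list *timer)
	{
		for (;;) {
			int ret = try_to_del_timer_sync(timer);

			if (ret >= 0)
				return ret;
			/* RT: sleep until the running callback finishes. */
			wait_for_running_timer(timer);
		}
	}
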
21299 diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
21300 index 2a96b063d659..812e37237eb8 100644
21301 --- a/kernel/trace/Kconfig
21302 +++ b/kernel/trace/Kconfig
21303 @@ -182,6 +182,24 @@ config IRQSOFF_TRACER
21304 enabled. This option and the preempt-off timing option can be
21305 used together or separately.)
21307 +config INTERRUPT_OFF_HIST
21308 + bool "Interrupts-off Latency Histogram"
21309 + depends on IRQSOFF_TRACER
21311 + This option generates continuously updated histograms (one per cpu)
21312 + of the duration of time periods with interrupts disabled. The
21313 + histograms are disabled by default. To enable them, write a non-zero
21316 + /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
21318 + If PREEMPT_OFF_HIST is also selected, additional histograms (one
21319 + per cpu) are generated that accumulate the duration of time periods
21320 + when both interrupts and preemption are disabled. The histogram data
21321 + will be located in the debug file system at
21323 + /sys/kernel/debug/tracing/latency_hist/irqsoff
21325 config PREEMPT_TRACER
21326 bool "Preemption-off Latency Tracer"
21328 @@ -206,6 +224,24 @@ config PREEMPT_TRACER
21329 enabled. This option and the irqs-off timing option can be
21330 used together or separately.)
21332 +config PREEMPT_OFF_HIST
21333 + bool "Preemption-off Latency Histogram"
21334 + depends on PREEMPT_TRACER
21336 + This option generates continuously updated histograms (one per cpu)
21337 + of the duration of time periods with preemption disabled. The
21338 + histograms are disabled by default. To enable them, write a non-zero
21341 + /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
21343 + If INTERRUPT_OFF_HIST is also selected, additional histograms (one
21344 + per cpu) are generated that accumulate the duration of time periods
21345 + when both interrupts and preemption are disabled. The histogram data
21346 + will be located in the debug file system at
21348 + /sys/kernel/debug/tracing/latency_hist/preemptoff
21350 config SCHED_TRACER
21351 bool "Scheduling Latency Tracer"
21352 select GENERIC_TRACER
21353 @@ -251,6 +287,74 @@ config HWLAT_TRACER
21354 file. Every time a latency is greater than tracing_thresh, it will
21355 be recorded into the ring buffer.
21357 +config WAKEUP_LATENCY_HIST
21358 + bool "Scheduling Latency Histogram"
21359 + depends on SCHED_TRACER
21361 + This option generates continuously updated histograms (one per cpu)
21362 + of the scheduling latency of the highest priority task.
21363 + The histograms are disabled by default. To enable them, write a
21364 + non-zero number to
21366 + /sys/kernel/debug/tracing/latency_hist/enable/wakeup
21368 + Two different algorithms are used, one to determine the latency of
21369 + processes that exclusively use the highest priority of the system and
21370 + another one to determine the latency of processes that share the
21371 + highest system priority with other processes. The former is used to
21372 + improve hardware and system software, the latter to optimize the
21373 + priority design of a given system. The histogram data will be
21374 + located in the debug file system at
21376 + /sys/kernel/debug/tracing/latency_hist/wakeup
21380 + /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
21382 + If both Scheduling Latency Histogram and Missed Timer Offsets
21383 + Histogram are selected, additional histogram data will be collected
21384 +	  that contains, in addition to the wakeup latency, the timer latency, in
21385 + case the wakeup was triggered by an expired timer. These histograms
21386 + are available in the
21388 + /sys/kernel/debug/tracing/latency_hist/timerandwakeup
21390 + directory. They reflect the apparent interrupt and scheduling latency
21391 +	  and are best suited to determine the worst-case latency of a given
21392 + system. To enable these histograms, write a non-zero number to
21394 + /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
21396 +config MISSED_TIMER_OFFSETS_HIST
21397 + depends on HIGH_RES_TIMERS
21398 + select GENERIC_TRACER
21399 + bool "Missed Timer Offsets Histogram"
21401 + Generate a histogram of missed timer offsets in microseconds. The
21402 + histograms are disabled by default. To enable them, write a non-zero
21405 + /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
21407 + The histogram data will be located in the debug file system at
21409 + /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
21411 + If both Scheduling Latency Histogram and Missed Timer Offsets
21412 + Histogram are selected, additional histogram data will be collected
21413 +	  that contains, in addition to the wakeup latency, the timer latency, in
21414 + case the wakeup was triggered by an expired timer. These histograms
21415 + are available in the
21417 + /sys/kernel/debug/tracing/latency_hist/timerandwakeup
21419 + directory. They reflect the apparent interrupt and scheduling latency
21420 +	  and are best suited to determine the worst-case latency of a given
21421 + system. To enable these histograms, write a non-zero number to
21423 + /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
21425 config ENABLE_DEFAULT_TRACERS
21426 bool "Trace process context switches and events"
21427 depends on !GENERIC_TRACER
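
All four histogram options above share the same control interface: each histogram is off by default and is switched on by writing a non-zero value to its file under /sys/kernel/debug/tracing/latency_hist/enable/, with the per-CPU data appearing in the sibling directories named in the help texts. As a hedged illustration only (paths taken from the help texts; the CPU0 file name follows the "CPU%d" format used by latency_hist.c further down), a minimal user-space reader might look like this:

	#include <stdio.h>

	#define HIST "/sys/kernel/debug/tracing/latency_hist"

	int main(void)
	{
		FILE *f = fopen(HIST "/enable/wakeup", "w");
		char line[256];

		if (!f)
			return 1;
		fputs("1\n", f);			/* non-zero value enables the histogram */
		fclose(f);

		f = fopen(HIST "/wakeup/CPU0", "r");	/* one file per possible CPU */
		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))	/* header lines, then usecs/count rows */
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
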
21428 diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
21429 index e57980845549..83af000b783c 100644
21430 --- a/kernel/trace/Makefile
21431 +++ b/kernel/trace/Makefile
21432 @@ -38,6 +38,10 @@ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
21433 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
21434 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
21435 obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
21436 +obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
21437 +obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
21438 +obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
21439 +obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
21440 obj-$(CONFIG_NOP_TRACER) += trace_nop.o
21441 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
21442 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
21443 diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
21444 new file mode 100644
21445 index 000000000000..7f6ee70dea41
21447 +++ b/kernel/trace/latency_hist.c
21450 + * kernel/trace/latency_hist.c
21452 + * Add support for histograms of preemption-off latency and
21453 + * interrupt-off latency and wakeup latency; it depends on
21454 + * Real-Time Preemption Support.
21456 + * Copyright (C) 2005 MontaVista Software, Inc.
21457 + * Yi Yang <yyang@ch.mvista.com>
21459 + * Converted to work with the new latency tracer.
21460 + * Copyright (C) 2008 Red Hat, Inc.
21461 + * Steven Rostedt <srostedt@redhat.com>
21464 +#include <linux/module.h>
21465 +#include <linux/debugfs.h>
21466 +#include <linux/seq_file.h>
21467 +#include <linux/percpu.h>
21468 +#include <linux/kallsyms.h>
21469 +#include <linux/uaccess.h>
21470 +#include <linux/sched.h>
21471 +#include <linux/sched/rt.h>
21472 +#include <linux/slab.h>
21473 +#include <linux/atomic.h>
21474 +#include <asm/div64.h>
21476 +#include "trace.h"
21477 +#include <trace/events/sched.h>
21479 +#define NSECS_PER_USECS 1000L
21481 +#define CREATE_TRACE_POINTS
21482 +#include <trace/events/hist.h>
21485 + IRQSOFF_LATENCY = 0,
21486 + PREEMPTOFF_LATENCY,
21487 + PREEMPTIRQSOFF_LATENCY,
21489 + WAKEUP_LATENCY_SHAREDPRIO,
21490 + MISSED_TIMER_OFFSETS,
21491 + TIMERANDWAKEUP_LATENCY,
21492 + MAX_LATENCY_TYPE,
21495 +#define MAX_ENTRY_NUM 10240
21497 +struct hist_data {
21498 + atomic_t hist_mode; /* 0 log, 1 don't log */
21499 + long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
21502 + unsigned long long below_hist_bound_samples;
21503 + unsigned long long above_hist_bound_samples;
21504 + long long accumulate_lat;
21505 + unsigned long long total_samples;
21506 + unsigned long long hist_array[MAX_ENTRY_NUM];
21509 +struct enable_data {
21510 + int latency_type;
21514 +static char *latency_hist_dir_root = "latency_hist";
21516 +#ifdef CONFIG_INTERRUPT_OFF_HIST
21517 +static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
21518 +static char *irqsoff_hist_dir = "irqsoff";
21519 +static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
21520 +static DEFINE_PER_CPU(int, hist_irqsoff_counting);
21523 +#ifdef CONFIG_PREEMPT_OFF_HIST
21524 +static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
21525 +static char *preemptoff_hist_dir = "preemptoff";
21526 +static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
21527 +static DEFINE_PER_CPU(int, hist_preemptoff_counting);
21530 +#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
21531 +static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
21532 +static char *preemptirqsoff_hist_dir = "preemptirqsoff";
21533 +static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
21534 +static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
21537 +#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
21538 +static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
21539 +static struct enable_data preemptirqsoff_enabled_data = {
21540 + .latency_type = PREEMPTIRQSOFF_LATENCY,
21545 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
21546 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
21547 +struct maxlatproc_data {
21548 + char comm[FIELD_SIZEOF(struct task_struct, comm)];
21549 + char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
21553 + int current_prio;
21555 + long timeroffset;
21556 + cycle_t timestamp;
21560 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
21561 +static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
21562 +static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
21563 +static char *wakeup_latency_hist_dir = "wakeup";
21564 +static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
21565 +static notrace void probe_wakeup_latency_hist_start(void *v,
21566 + struct task_struct *p);
21567 +static notrace void probe_wakeup_latency_hist_stop(void *v,
21568 + bool preempt, struct task_struct *prev, struct task_struct *next);
21569 +static notrace void probe_sched_migrate_task(void *,
21570 + struct task_struct *task, int cpu);
21571 +static struct enable_data wakeup_latency_enabled_data = {
21572 + .latency_type = WAKEUP_LATENCY,
21575 +static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
21576 +static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
21577 +static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
21578 +static DEFINE_PER_CPU(int, wakeup_sharedprio);
21579 +static unsigned long wakeup_pid;
21582 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
21583 +static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
21584 +static char *missed_timer_offsets_dir = "missed_timer_offsets";
21585 +static notrace void probe_hrtimer_interrupt(void *v, int cpu,
21586 + long long offset, struct task_struct *curr, struct task_struct *task);
21587 +static struct enable_data missed_timer_offsets_enabled_data = {
21588 + .latency_type = MISSED_TIMER_OFFSETS,
21591 +static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
21592 +static unsigned long missed_timer_offsets_pid;
21595 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
21596 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
21597 +static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
21598 +static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
21599 +static struct enable_data timerandwakeup_enabled_data = {
21600 + .latency_type = TIMERANDWAKEUP_LATENCY,
21603 +static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
21606 +void notrace latency_hist(int latency_type, int cpu, long latency,
21607 + long timeroffset, cycle_t stop,
21608 + struct task_struct *p)
21610 + struct hist_data *my_hist;
21611 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
21612 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
21613 + struct maxlatproc_data *mp = NULL;
21616 + if (!cpu_possible(cpu) || latency_type < 0 ||
21617 + latency_type >= MAX_LATENCY_TYPE)
21620 + switch (latency_type) {
21621 +#ifdef CONFIG_INTERRUPT_OFF_HIST
21622 + case IRQSOFF_LATENCY:
21623 + my_hist = &per_cpu(irqsoff_hist, cpu);
21626 +#ifdef CONFIG_PREEMPT_OFF_HIST
21627 + case PREEMPTOFF_LATENCY:
21628 + my_hist = &per_cpu(preemptoff_hist, cpu);
21631 +#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
21632 + case PREEMPTIRQSOFF_LATENCY:
21633 + my_hist = &per_cpu(preemptirqsoff_hist, cpu);
21636 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
21637 + case WAKEUP_LATENCY:
21638 + my_hist = &per_cpu(wakeup_latency_hist, cpu);
21639 + mp = &per_cpu(wakeup_maxlatproc, cpu);
21641 + case WAKEUP_LATENCY_SHAREDPRIO:
21642 + my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
21643 + mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
21646 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
21647 + case MISSED_TIMER_OFFSETS:
21648 + my_hist = &per_cpu(missed_timer_offsets, cpu);
21649 + mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
21652 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
21653 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
21654 + case TIMERANDWAKEUP_LATENCY:
21655 + my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
21656 + mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
21664 + latency += my_hist->offset;
21666 + if (atomic_read(&my_hist->hist_mode) == 0)
21669 + if (latency < 0 || latency >= MAX_ENTRY_NUM) {
21671 + my_hist->below_hist_bound_samples++;
21673 + my_hist->above_hist_bound_samples++;
21675 + my_hist->hist_array[latency]++;
21677 + if (unlikely(latency > my_hist->max_lat ||
21678 + my_hist->min_lat == LONG_MAX)) {
21679 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
21680 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
21681 + if (latency_type == WAKEUP_LATENCY ||
21682 + latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
21683 + latency_type == MISSED_TIMER_OFFSETS ||
21684 + latency_type == TIMERANDWAKEUP_LATENCY) {
21685 + strncpy(mp->comm, p->comm, sizeof(mp->comm));
21686 + strncpy(mp->current_comm, current->comm,
21687 + sizeof(mp->current_comm));
21688 + mp->pid = task_pid_nr(p);
21689 + mp->current_pid = task_pid_nr(current);
21690 + mp->prio = p->prio;
21691 + mp->current_prio = current->prio;
21692 + mp->latency = latency;
21693 + mp->timeroffset = timeroffset;
21694 + mp->timestamp = stop;
21697 + my_hist->max_lat = latency;
21699 + if (unlikely(latency < my_hist->min_lat))
21700 + my_hist->min_lat = latency;
21701 + my_hist->total_samples++;
21702 + my_hist->accumulate_lat += latency;
21705 +static void *l_start(struct seq_file *m, loff_t *pos)
21707 + loff_t *index_ptr = NULL;
21708 + loff_t index = *pos;
21709 + struct hist_data *my_hist = m->private;
21711 + if (index == 0) {
21712 + char minstr[32], avgstr[32], maxstr[32];
21714 + atomic_dec(&my_hist->hist_mode);
21716 + if (likely(my_hist->total_samples)) {
21717 + long avg = (long) div64_s64(my_hist->accumulate_lat,
21718 + my_hist->total_samples);
21719 + snprintf(minstr, sizeof(minstr), "%ld",
21720 + my_hist->min_lat - my_hist->offset);
21721 + snprintf(avgstr, sizeof(avgstr), "%ld",
21722 + avg - my_hist->offset);
21723 + snprintf(maxstr, sizeof(maxstr), "%ld",
21724 + my_hist->max_lat - my_hist->offset);
21726 + strcpy(minstr, "<undef>");
21727 + strcpy(avgstr, minstr);
21728 + strcpy(maxstr, minstr);
21731 + seq_printf(m, "#Minimum latency: %s microseconds\n"
21732 + "#Average latency: %s microseconds\n"
21733 + "#Maximum latency: %s microseconds\n"
21734 + "#Total samples: %llu\n"
21735 + "#There are %llu samples lower than %ld"
21736 + " microseconds.\n"
21737 +			   "#There are %llu samples greater than or equal"
21738 +			   " to %ld microseconds.\n"
21739 + "#usecs\t%16s\n",
21740 + minstr, avgstr, maxstr,
21741 + my_hist->total_samples,
21742 + my_hist->below_hist_bound_samples,
21743 + -my_hist->offset,
21744 + my_hist->above_hist_bound_samples,
21745 + MAX_ENTRY_NUM - my_hist->offset,
21748 + if (index < MAX_ENTRY_NUM) {
21749 + index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
21751 + *index_ptr = index;
21754 + return index_ptr;
21757 +static void *l_next(struct seq_file *m, void *p, loff_t *pos)
21759 + loff_t *index_ptr = p;
21760 + struct hist_data *my_hist = m->private;
21762 + if (++*pos >= MAX_ENTRY_NUM) {
21763 + atomic_inc(&my_hist->hist_mode);
21766 + *index_ptr = *pos;
21767 + return index_ptr;
21770 +static void l_stop(struct seq_file *m, void *p)
21775 +static int l_show(struct seq_file *m, void *p)
21777 + int index = *(loff_t *) p;
21778 + struct hist_data *my_hist = m->private;
21780 + seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
21781 + my_hist->hist_array[index]);
21785 +static const struct seq_operations latency_hist_seq_op = {
21786 + .start = l_start,
21792 +static int latency_hist_open(struct inode *inode, struct file *file)
21796 + ret = seq_open(file, &latency_hist_seq_op);
21798 + struct seq_file *seq = file->private_data;
21799 + seq->private = inode->i_private;
21804 +static const struct file_operations latency_hist_fops = {
21805 + .open = latency_hist_open,
21806 + .read = seq_read,
21807 + .llseek = seq_lseek,
21808 + .release = seq_release,
21811 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
21812 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
21813 +static void clear_maxlatprocdata(struct maxlatproc_data *mp)
21815 + mp->comm[0] = mp->current_comm[0] = '\0';
21816 + mp->prio = mp->current_prio = mp->pid = mp->current_pid =
21817 + mp->latency = mp->timeroffset = -1;
21818 + mp->timestamp = 0;
21822 +static void hist_reset(struct hist_data *hist)
21824 + atomic_dec(&hist->hist_mode);
21826 + memset(hist->hist_array, 0, sizeof(hist->hist_array));
21827 + hist->below_hist_bound_samples = 0ULL;
21828 + hist->above_hist_bound_samples = 0ULL;
21829 + hist->min_lat = LONG_MAX;
21830 + hist->max_lat = LONG_MIN;
21831 + hist->total_samples = 0ULL;
21832 + hist->accumulate_lat = 0LL;
21834 + atomic_inc(&hist->hist_mode);
21838 +latency_hist_reset(struct file *file, const char __user *a,
21839 + size_t size, loff_t *off)
21842 + struct hist_data *hist = NULL;
21843 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
21844 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
21845 + struct maxlatproc_data *mp = NULL;
21847 + off_t latency_type = (off_t) file->private_data;
21849 + for_each_online_cpu(cpu) {
21851 + switch (latency_type) {
21852 +#ifdef CONFIG_PREEMPT_OFF_HIST
21853 + case PREEMPTOFF_LATENCY:
21854 + hist = &per_cpu(preemptoff_hist, cpu);
21857 +#ifdef CONFIG_INTERRUPT_OFF_HIST
21858 + case IRQSOFF_LATENCY:
21859 + hist = &per_cpu(irqsoff_hist, cpu);
21862 +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
21863 + case PREEMPTIRQSOFF_LATENCY:
21864 + hist = &per_cpu(preemptirqsoff_hist, cpu);
21867 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
21868 + case WAKEUP_LATENCY:
21869 + hist = &per_cpu(wakeup_latency_hist, cpu);
21870 + mp = &per_cpu(wakeup_maxlatproc, cpu);
21872 + case WAKEUP_LATENCY_SHAREDPRIO:
21873 + hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
21874 + mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
21877 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
21878 + case MISSED_TIMER_OFFSETS:
21879 + hist = &per_cpu(missed_timer_offsets, cpu);
21880 + mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
21883 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
21884 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
21885 + case TIMERANDWAKEUP_LATENCY:
21886 + hist = &per_cpu(timerandwakeup_latency_hist, cpu);
21887 + mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
21892 + hist_reset(hist);
21893 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
21894 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
21895 + if (latency_type == WAKEUP_LATENCY ||
21896 + latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
21897 + latency_type == MISSED_TIMER_OFFSETS ||
21898 + latency_type == TIMERANDWAKEUP_LATENCY)
21899 + clear_maxlatprocdata(mp);
21906 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
21907 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
21909 +show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
21913 + unsigned long *this_pid = file->private_data;
21915 + r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
21916 + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
21919 +static ssize_t do_pid(struct file *file, const char __user *ubuf,
21920 + size_t cnt, loff_t *ppos)
21923 + unsigned long pid;
21924 + unsigned long *this_pid = file->private_data;
21926 + if (cnt >= sizeof(buf))
21929 + if (copy_from_user(&buf, ubuf, cnt))
21934 + if (kstrtoul(buf, 10, &pid))
21943 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
21944 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
21946 +show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
21949 + struct maxlatproc_data *mp = file->private_data;
21950 + int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
21951 + unsigned long long t;
21952 + unsigned long usecs, secs;
21955 + if (mp->pid == -1 || mp->current_pid == -1) {
21956 + buf = "(none)\n";
21957 + return simple_read_from_buffer(ubuf, cnt, ppos, buf,
21961 + buf = kmalloc(strmaxlen, GFP_KERNEL);
21965 + t = ns2usecs(mp->timestamp);
21966 + usecs = do_div(t, USEC_PER_SEC);
21967 + secs = (unsigned long) t;
21968 + r = snprintf(buf, strmaxlen,
21969 + "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
21970 + MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
21971 + mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
21973 + r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
21980 +show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
21983 + struct enable_data *ed = file->private_data;
21986 + r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
21987 + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
21991 +do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
21995 + struct enable_data *ed = file->private_data;
21997 + if (cnt >= sizeof(buf))
22000 + if (copy_from_user(&buf, ubuf, cnt))
22005 + if (kstrtoul(buf, 10, &enable))
22008 + if ((enable && ed->enabled) || (!enable && !ed->enabled))
22014 + switch (ed->latency_type) {
22015 +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
22016 + case PREEMPTIRQSOFF_LATENCY:
22017 + ret = register_trace_preemptirqsoff_hist(
22018 + probe_preemptirqsoff_hist, NULL);
22020 + pr_info("wakeup trace: Couldn't assign "
22021 + "probe_preemptirqsoff_hist "
22022 + "to trace_preemptirqsoff_hist\n");
22027 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
22028 + case WAKEUP_LATENCY:
22029 + ret = register_trace_sched_wakeup(
22030 + probe_wakeup_latency_hist_start, NULL);
22032 + pr_info("wakeup trace: Couldn't assign "
22033 + "probe_wakeup_latency_hist_start "
22034 + "to trace_sched_wakeup\n");
22037 + ret = register_trace_sched_wakeup_new(
22038 + probe_wakeup_latency_hist_start, NULL);
22040 + pr_info("wakeup trace: Couldn't assign "
22041 + "probe_wakeup_latency_hist_start "
22042 + "to trace_sched_wakeup_new\n");
22043 + unregister_trace_sched_wakeup(
22044 + probe_wakeup_latency_hist_start, NULL);
22047 + ret = register_trace_sched_switch(
22048 + probe_wakeup_latency_hist_stop, NULL);
22050 + pr_info("wakeup trace: Couldn't assign "
22051 + "probe_wakeup_latency_hist_stop "
22052 + "to trace_sched_switch\n");
22053 + unregister_trace_sched_wakeup(
22054 + probe_wakeup_latency_hist_start, NULL);
22055 + unregister_trace_sched_wakeup_new(
22056 + probe_wakeup_latency_hist_start, NULL);
22059 + ret = register_trace_sched_migrate_task(
22060 + probe_sched_migrate_task, NULL);
22062 + pr_info("wakeup trace: Couldn't assign "
22063 + "probe_sched_migrate_task "
22064 + "to trace_sched_migrate_task\n");
22065 + unregister_trace_sched_wakeup(
22066 + probe_wakeup_latency_hist_start, NULL);
22067 + unregister_trace_sched_wakeup_new(
22068 + probe_wakeup_latency_hist_start, NULL);
22069 + unregister_trace_sched_switch(
22070 + probe_wakeup_latency_hist_stop, NULL);
22075 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
22076 + case MISSED_TIMER_OFFSETS:
22077 + ret = register_trace_hrtimer_interrupt(
22078 + probe_hrtimer_interrupt, NULL);
22080 + pr_info("wakeup trace: Couldn't assign "
22081 + "probe_hrtimer_interrupt "
22082 + "to trace_hrtimer_interrupt\n");
22087 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
22088 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
22089 + case TIMERANDWAKEUP_LATENCY:
22090 + if (!wakeup_latency_enabled_data.enabled ||
22091 + !missed_timer_offsets_enabled_data.enabled)
22099 + switch (ed->latency_type) {
22100 +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
22101 + case PREEMPTIRQSOFF_LATENCY:
22105 + unregister_trace_preemptirqsoff_hist(
22106 + probe_preemptirqsoff_hist, NULL);
22107 + for_each_online_cpu(cpu) {
22108 +#ifdef CONFIG_INTERRUPT_OFF_HIST
22109 + per_cpu(hist_irqsoff_counting,
22112 +#ifdef CONFIG_PREEMPT_OFF_HIST
22113 + per_cpu(hist_preemptoff_counting,
22116 +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
22117 + per_cpu(hist_preemptirqsoff_counting,
22124 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
22125 + case WAKEUP_LATENCY:
22129 + unregister_trace_sched_wakeup(
22130 + probe_wakeup_latency_hist_start, NULL);
22131 + unregister_trace_sched_wakeup_new(
22132 + probe_wakeup_latency_hist_start, NULL);
22133 + unregister_trace_sched_switch(
22134 + probe_wakeup_latency_hist_stop, NULL);
22135 + unregister_trace_sched_migrate_task(
22136 + probe_sched_migrate_task, NULL);
22138 + for_each_online_cpu(cpu) {
22139 + per_cpu(wakeup_task, cpu) = NULL;
22140 + per_cpu(wakeup_sharedprio, cpu) = 0;
22143 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
22144 + timerandwakeup_enabled_data.enabled = 0;
22148 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
22149 + case MISSED_TIMER_OFFSETS:
22150 + unregister_trace_hrtimer_interrupt(
22151 + probe_hrtimer_interrupt, NULL);
22152 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
22153 + timerandwakeup_enabled_data.enabled = 0;
22161 + ed->enabled = enable;
22165 +static const struct file_operations latency_hist_reset_fops = {
22166 + .open = tracing_open_generic,
22167 + .write = latency_hist_reset,
22170 +static const struct file_operations enable_fops = {
22171 + .open = tracing_open_generic,
22172 + .read = show_enable,
22173 + .write = do_enable,
22176 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
22177 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
22178 +static const struct file_operations pid_fops = {
22179 + .open = tracing_open_generic,
22180 + .read = show_pid,
22184 +static const struct file_operations maxlatproc_fops = {
22185 + .open = tracing_open_generic,
22186 + .read = show_maxlatproc,
22190 +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
22191 +static notrace void probe_preemptirqsoff_hist(void *v, int reason,
22194 + int cpu = raw_smp_processor_id();
22195 + int time_set = 0;
22198 + cycle_t uninitialized_var(start);
22200 + if (!preempt_count() && !irqs_disabled())
22203 +#ifdef CONFIG_INTERRUPT_OFF_HIST
22204 + if ((reason == IRQS_OFF || reason == TRACE_START) &&
22205 + !per_cpu(hist_irqsoff_counting, cpu)) {
22206 + per_cpu(hist_irqsoff_counting, cpu) = 1;
22207 + start = ftrace_now(cpu);
22209 + per_cpu(hist_irqsoff_start, cpu) = start;
22213 +#ifdef CONFIG_PREEMPT_OFF_HIST
22214 + if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
22215 + !per_cpu(hist_preemptoff_counting, cpu)) {
22216 + per_cpu(hist_preemptoff_counting, cpu) = 1;
22217 + if (!(time_set++))
22218 + start = ftrace_now(cpu);
22219 + per_cpu(hist_preemptoff_start, cpu) = start;
22223 +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
22224 + if (per_cpu(hist_irqsoff_counting, cpu) &&
22225 + per_cpu(hist_preemptoff_counting, cpu) &&
22226 + !per_cpu(hist_preemptirqsoff_counting, cpu)) {
22227 + per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
22229 + start = ftrace_now(cpu);
22230 + per_cpu(hist_preemptirqsoff_start, cpu) = start;
22234 + cycle_t uninitialized_var(stop);
22236 +#ifdef CONFIG_INTERRUPT_OFF_HIST
22237 + if ((reason == IRQS_ON || reason == TRACE_STOP) &&
22238 + per_cpu(hist_irqsoff_counting, cpu)) {
22239 + cycle_t start = per_cpu(hist_irqsoff_start, cpu);
22241 + stop = ftrace_now(cpu);
22244 + long latency = ((long) (stop - start)) /
22247 + latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
22250 + per_cpu(hist_irqsoff_counting, cpu) = 0;
22254 +#ifdef CONFIG_PREEMPT_OFF_HIST
22255 + if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
22256 + per_cpu(hist_preemptoff_counting, cpu)) {
22257 + cycle_t start = per_cpu(hist_preemptoff_start, cpu);
22259 + if (!(time_set++))
22260 + stop = ftrace_now(cpu);
22262 + long latency = ((long) (stop - start)) /
22265 + latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
22268 + per_cpu(hist_preemptoff_counting, cpu) = 0;
22272 +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
22273 + if ((!per_cpu(hist_irqsoff_counting, cpu) ||
22274 + !per_cpu(hist_preemptoff_counting, cpu)) &&
22275 + per_cpu(hist_preemptirqsoff_counting, cpu)) {
22276 + cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
22279 + stop = ftrace_now(cpu);
22281 + long latency = ((long) (stop - start)) /
22284 + latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
22285 + latency, 0, stop, NULL);
22287 + per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
22294 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
22295 +static DEFINE_RAW_SPINLOCK(wakeup_lock);
22296 +static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
22299 + int old_cpu = task_cpu(task);
22301 + if (cpu != old_cpu) {
22302 + unsigned long flags;
22303 + struct task_struct *cpu_wakeup_task;
22305 + raw_spin_lock_irqsave(&wakeup_lock, flags);
22307 + cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
22308 + if (task == cpu_wakeup_task) {
22309 + put_task_struct(cpu_wakeup_task);
22310 + per_cpu(wakeup_task, old_cpu) = NULL;
22311 + cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
22312 + get_task_struct(cpu_wakeup_task);
22315 + raw_spin_unlock_irqrestore(&wakeup_lock, flags);
22319 +static notrace void probe_wakeup_latency_hist_start(void *v,
22320 + struct task_struct *p)
22322 + unsigned long flags;
22323 + struct task_struct *curr = current;
22324 + int cpu = task_cpu(p);
22325 + struct task_struct *cpu_wakeup_task;
22327 + raw_spin_lock_irqsave(&wakeup_lock, flags);
22329 + cpu_wakeup_task = per_cpu(wakeup_task, cpu);
22331 + if (wakeup_pid) {
22332 + if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
22333 + p->prio == curr->prio)
22334 + per_cpu(wakeup_sharedprio, cpu) = 1;
22335 + if (likely(wakeup_pid != task_pid_nr(p)))
22338 + if (likely(!rt_task(p)) ||
22339 + (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
22340 + p->prio > curr->prio)
22342 + if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
22343 + p->prio == curr->prio)
22344 + per_cpu(wakeup_sharedprio, cpu) = 1;
22347 + if (cpu_wakeup_task)
22348 + put_task_struct(cpu_wakeup_task);
22349 + cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
22350 + get_task_struct(cpu_wakeup_task);
22351 + cpu_wakeup_task->preempt_timestamp_hist =
22352 + ftrace_now(raw_smp_processor_id());
22354 + raw_spin_unlock_irqrestore(&wakeup_lock, flags);
22357 +static notrace void probe_wakeup_latency_hist_stop(void *v,
22358 + bool preempt, struct task_struct *prev, struct task_struct *next)
22360 + unsigned long flags;
22361 + int cpu = task_cpu(next);
22364 + struct task_struct *cpu_wakeup_task;
22366 + raw_spin_lock_irqsave(&wakeup_lock, flags);
22368 + cpu_wakeup_task = per_cpu(wakeup_task, cpu);
22370 + if (cpu_wakeup_task == NULL)
22373 + /* Already running? */
22374 + if (unlikely(current == cpu_wakeup_task))
22377 + if (next != cpu_wakeup_task) {
22378 + if (next->prio < cpu_wakeup_task->prio)
22381 + if (next->prio == cpu_wakeup_task->prio)
22382 + per_cpu(wakeup_sharedprio, cpu) = 1;
22387 + if (current->prio == cpu_wakeup_task->prio)
22388 + per_cpu(wakeup_sharedprio, cpu) = 1;
22391 + * The task we are waiting for is about to be switched to.
22392 + * Calculate latency and store it in histogram.
22394 + stop = ftrace_now(raw_smp_processor_id());
22396 + latency = ((long) (stop - next->preempt_timestamp_hist)) /
22399 + if (per_cpu(wakeup_sharedprio, cpu)) {
22400 + latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
22402 + per_cpu(wakeup_sharedprio, cpu) = 0;
22404 + latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
22405 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
22406 + if (timerandwakeup_enabled_data.enabled) {
22407 + latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
22408 + next->timer_offset + latency, next->timer_offset,
22415 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
22416 + next->timer_offset = 0;
22418 + put_task_struct(cpu_wakeup_task);
22419 + per_cpu(wakeup_task, cpu) = NULL;
22421 + raw_spin_unlock_irqrestore(&wakeup_lock, flags);
22425 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
22426 +static notrace void probe_hrtimer_interrupt(void *v, int cpu,
22427 + long long latency_ns, struct task_struct *curr,
22428 + struct task_struct *task)
22430 + if (latency_ns <= 0 && task != NULL && rt_task(task) &&
22431 + (task->prio < curr->prio ||
22432 + (task->prio == curr->prio &&
22433 + !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
22437 + if (missed_timer_offsets_pid) {
22438 + if (likely(missed_timer_offsets_pid !=
22439 + task_pid_nr(task)))
22443 + now = ftrace_now(cpu);
22444 + latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
22445 + latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
22447 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
22448 + task->timer_offset = latency;
22454 +static __init int latency_hist_init(void)
22456 + struct dentry *latency_hist_root = NULL;
22457 + struct dentry *dentry;
22458 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
22459 + struct dentry *dentry_sharedprio;
22461 + struct dentry *entry;
22462 + struct dentry *enable_root;
22464 + struct hist_data *my_hist;
22466 + char *cpufmt = "CPU%d";
22467 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
22468 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
22469 + char *cpufmt_maxlatproc = "max_latency-CPU%d";
22470 + struct maxlatproc_data *mp = NULL;
22473 + dentry = tracing_init_dentry();
22474 + latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
22475 + enable_root = debugfs_create_dir("enable", latency_hist_root);
22477 +#ifdef CONFIG_INTERRUPT_OFF_HIST
22478 + dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
22479 + for_each_possible_cpu(i) {
22480 + sprintf(name, cpufmt, i);
22481 + entry = debugfs_create_file(name, 0444, dentry,
22482 + &per_cpu(irqsoff_hist, i), &latency_hist_fops);
22483 + my_hist = &per_cpu(irqsoff_hist, i);
22484 + atomic_set(&my_hist->hist_mode, 1);
22485 + my_hist->min_lat = LONG_MAX;
22487 + entry = debugfs_create_file("reset", 0644, dentry,
22488 + (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
22491 +#ifdef CONFIG_PREEMPT_OFF_HIST
22492 + dentry = debugfs_create_dir(preemptoff_hist_dir,
22493 + latency_hist_root);
22494 + for_each_possible_cpu(i) {
22495 + sprintf(name, cpufmt, i);
22496 + entry = debugfs_create_file(name, 0444, dentry,
22497 + &per_cpu(preemptoff_hist, i), &latency_hist_fops);
22498 + my_hist = &per_cpu(preemptoff_hist, i);
22499 + atomic_set(&my_hist->hist_mode, 1);
22500 + my_hist->min_lat = LONG_MAX;
22502 + entry = debugfs_create_file("reset", 0644, dentry,
22503 + (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
22506 +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
22507 + dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
22508 + latency_hist_root);
22509 + for_each_possible_cpu(i) {
22510 + sprintf(name, cpufmt, i);
22511 + entry = debugfs_create_file(name, 0444, dentry,
22512 + &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
22513 + my_hist = &per_cpu(preemptirqsoff_hist, i);
22514 + atomic_set(&my_hist->hist_mode, 1);
22515 + my_hist->min_lat = LONG_MAX;
22517 + entry = debugfs_create_file("reset", 0644, dentry,
22518 + (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
22521 +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
22522 + entry = debugfs_create_file("preemptirqsoff", 0644,
22523 + enable_root, (void *)&preemptirqsoff_enabled_data,
22527 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
22528 + dentry = debugfs_create_dir(wakeup_latency_hist_dir,
22529 + latency_hist_root);
22530 + dentry_sharedprio = debugfs_create_dir(
22531 + wakeup_latency_hist_dir_sharedprio, dentry);
22532 + for_each_possible_cpu(i) {
22533 + sprintf(name, cpufmt, i);
22535 + entry = debugfs_create_file(name, 0444, dentry,
22536 + &per_cpu(wakeup_latency_hist, i),
22537 + &latency_hist_fops);
22538 + my_hist = &per_cpu(wakeup_latency_hist, i);
22539 + atomic_set(&my_hist->hist_mode, 1);
22540 + my_hist->min_lat = LONG_MAX;
22542 + entry = debugfs_create_file(name, 0444, dentry_sharedprio,
22543 + &per_cpu(wakeup_latency_hist_sharedprio, i),
22544 + &latency_hist_fops);
22545 + my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
22546 + atomic_set(&my_hist->hist_mode, 1);
22547 + my_hist->min_lat = LONG_MAX;
22549 + sprintf(name, cpufmt_maxlatproc, i);
22551 + mp = &per_cpu(wakeup_maxlatproc, i);
22552 + entry = debugfs_create_file(name, 0444, dentry, mp,
22553 + &maxlatproc_fops);
22554 + clear_maxlatprocdata(mp);
22556 + mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
22557 + entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
22558 + &maxlatproc_fops);
22559 + clear_maxlatprocdata(mp);
22561 + entry = debugfs_create_file("pid", 0644, dentry,
22562 + (void *)&wakeup_pid, &pid_fops);
22563 + entry = debugfs_create_file("reset", 0644, dentry,
22564 + (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
22565 + entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
22566 + (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
22567 + entry = debugfs_create_file("wakeup", 0644,
22568 + enable_root, (void *)&wakeup_latency_enabled_data,
22572 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
22573 + dentry = debugfs_create_dir(missed_timer_offsets_dir,
22574 + latency_hist_root);
22575 + for_each_possible_cpu(i) {
22576 + sprintf(name, cpufmt, i);
22577 + entry = debugfs_create_file(name, 0444, dentry,
22578 + &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
22579 + my_hist = &per_cpu(missed_timer_offsets, i);
22580 + atomic_set(&my_hist->hist_mode, 1);
22581 + my_hist->min_lat = LONG_MAX;
22583 + sprintf(name, cpufmt_maxlatproc, i);
22584 + mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
22585 + entry = debugfs_create_file(name, 0444, dentry, mp,
22586 + &maxlatproc_fops);
22587 + clear_maxlatprocdata(mp);
22589 + entry = debugfs_create_file("pid", 0644, dentry,
22590 + (void *)&missed_timer_offsets_pid, &pid_fops);
22591 + entry = debugfs_create_file("reset", 0644, dentry,
22592 + (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
22593 + entry = debugfs_create_file("missed_timer_offsets", 0644,
22594 + enable_root, (void *)&missed_timer_offsets_enabled_data,
22598 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
22599 + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
22600 + dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
22601 + latency_hist_root);
22602 + for_each_possible_cpu(i) {
22603 + sprintf(name, cpufmt, i);
22604 + entry = debugfs_create_file(name, 0444, dentry,
22605 + &per_cpu(timerandwakeup_latency_hist, i),
22606 + &latency_hist_fops);
22607 + my_hist = &per_cpu(timerandwakeup_latency_hist, i);
22608 + atomic_set(&my_hist->hist_mode, 1);
22609 + my_hist->min_lat = LONG_MAX;
22611 + sprintf(name, cpufmt_maxlatproc, i);
22612 + mp = &per_cpu(timerandwakeup_maxlatproc, i);
22613 + entry = debugfs_create_file(name, 0444, dentry, mp,
22614 + &maxlatproc_fops);
22615 + clear_maxlatprocdata(mp);
22617 + entry = debugfs_create_file("reset", 0644, dentry,
22618 + (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
22619 + entry = debugfs_create_file("timerandwakeup", 0644,
22620 + enable_root, (void *)&timerandwakeup_enabled_data,
22626 +device_initcall(latency_hist_init);
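
For orientation when reading the files this code creates: each histogram has MAX_ENTRY_NUM (10240) one-microsecond buckets, l_show() prints each bucket as index minus offset next to its count, and l_start() derives the summary header from the accumulated fields. With the default offset of 0 this amounts to the following (a restatement of the code above, values in microseconds):

	long avg = (long) div64_s64(my_hist->accumulate_lat, my_hist->total_samples)
		   - my_hist->offset;			/* "#Average latency" */
	long min = my_hist->min_lat - my_hist->offset;	/* "#Minimum latency" */
	long max = my_hist->max_lat - my_hist->offset;	/* "#Maximum latency" */
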
22627 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
22628 index 83c60f9013cb..6fb207964a84 100644
22629 --- a/kernel/trace/trace.c
22630 +++ b/kernel/trace/trace.c
22631 @@ -1897,6 +1897,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
22632 struct task_struct *tsk = current;
22634 entry->preempt_count = pc & 0xff;
22635 + entry->preempt_lazy_count = preempt_lazy_count();
22636 entry->pid = (tsk) ? tsk->pid : 0;
22638 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
22639 @@ -1907,8 +1908,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
22640 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
22641 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
22642 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
22643 - (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
22644 + (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
22645 + (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
22646 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
22648 + entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
22650 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
22652 @@ -2892,14 +2896,17 @@ get_total_entries(struct trace_buffer *buf,
22654 static void print_lat_help_header(struct seq_file *m)
22656 - seq_puts(m, "# _------=> CPU# \n"
22657 - "# / _-----=> irqs-off \n"
22658 - "# | / _----=> need-resched \n"
22659 - "# || / _---=> hardirq/softirq \n"
22660 - "# ||| / _--=> preempt-depth \n"
22661 - "# |||| / delay \n"
22662 - "# cmd pid ||||| time | caller \n"
22663 - "# \\ / ||||| \\ | / \n");
22664 + seq_puts(m, "# _--------=> CPU# \n"
22665 + "# / _-------=> irqs-off \n"
22666 + "# | / _------=> need-resched \n"
22667 + "# || / _-----=> need-resched_lazy \n"
22668 + "# ||| / _----=> hardirq/softirq \n"
22669 + "# |||| / _---=> preempt-depth \n"
22670 + "# ||||| / _--=> preempt-lazy-depth\n"
22671 + "# |||||| / _-=> migrate-disable \n"
22672 + "# ||||||| / delay \n"
22673 + "# cmd pid |||||||| time | caller \n"
22674 + "# \\ / |||||||| \\ | / \n");
22677 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
22678 @@ -2925,11 +2932,14 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
22679 print_event_info(buf, m);
22680 seq_puts(m, "# _-----=> irqs-off\n"
22681 "# / _----=> need-resched\n"
22682 - "# | / _---=> hardirq/softirq\n"
22683 - "# || / _--=> preempt-depth\n"
22684 - "# ||| / delay\n"
22685 - "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
22686 - "# | | | |||| | |\n");
22687 + "# |/ _-----=> need-resched_lazy\n"
22688 + "# || / _---=> hardirq/softirq\n"
22689 + "# ||| / _--=> preempt-depth\n"
22690 + "# |||| / _-=> preempt-lazy-depth\n"
22691 + "# ||||| / _-=> migrate-disable \n"
22692 + "# |||||| / delay\n"
22693 + "# TASK-PID CPU# ||||||| TIMESTAMP FUNCTION\n"
22694 + "# | | | ||||||| | |\n");
22698 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
22699 index b0d8576c27ae..702b9376b278 100644
22700 --- a/kernel/trace/trace.h
22701 +++ b/kernel/trace/trace.h
22702 @@ -124,6 +124,7 @@ struct kretprobe_trace_entry_head {
22703 * NEED_RESCHED - reschedule is requested
22704 * HARDIRQ - inside an interrupt handler
22705 * SOFTIRQ - inside a softirq handler
22706 + * NEED_RESCHED_LAZY - lazy reschedule is requested
22708 enum trace_flag_type {
22709 TRACE_FLAG_IRQS_OFF = 0x01,
22710 @@ -133,6 +134,7 @@ enum trace_flag_type {
22711 TRACE_FLAG_SOFTIRQ = 0x10,
22712 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
22713 TRACE_FLAG_NMI = 0x40,
22714 + TRACE_FLAG_NEED_RESCHED_LAZY = 0x80,
22717 #define TRACE_BUF_SIZE 1024
22718 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
22719 index 03c0a48c3ac4..0b85d516b491 100644
22720 --- a/kernel/trace/trace_events.c
22721 +++ b/kernel/trace/trace_events.c
22722 @@ -187,6 +187,8 @@ static int trace_define_common_fields(void)
22723 __common_field(unsigned char, flags);
22724 __common_field(unsigned char, preempt_count);
22725 __common_field(int, pid);
22726 + __common_field(unsigned short, migrate_disable);
22727 + __common_field(unsigned short, padding);
22731 diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
22732 index 03cdff84d026..940bd10b4406 100644
22733 --- a/kernel/trace/trace_irqsoff.c
22734 +++ b/kernel/trace/trace_irqsoff.c
22736 #include <linux/uaccess.h>
22737 #include <linux/module.h>
22738 #include <linux/ftrace.h>
22739 +#include <trace/events/hist.h>
22743 @@ -424,11 +425,13 @@ void start_critical_timings(void)
22745 if (preempt_trace() || irq_trace())
22746 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
22747 + trace_preemptirqsoff_hist_rcuidle(TRACE_START, 1);
22749 EXPORT_SYMBOL_GPL(start_critical_timings);
22751 void stop_critical_timings(void)
22753 + trace_preemptirqsoff_hist_rcuidle(TRACE_STOP, 0);
22754 if (preempt_trace() || irq_trace())
22755 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
22757 @@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
22758 #ifdef CONFIG_PROVE_LOCKING
22759 void time_hardirqs_on(unsigned long a0, unsigned long a1)
22761 + trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
22762 if (!preempt_trace() && irq_trace())
22763 stop_critical_timing(a0, a1);
22765 @@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
22767 if (!preempt_trace() && irq_trace())
22768 start_critical_timing(a0, a1);
22769 + trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
22772 #else /* !CONFIG_PROVE_LOCKING */
22773 @@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct task_struct *curr)
22775 void trace_hardirqs_on(void)
22777 + trace_preemptirqsoff_hist(IRQS_ON, 0);
22778 if (!preempt_trace() && irq_trace())
22779 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
22781 @@ -480,11 +486,13 @@ void trace_hardirqs_off(void)
22783 if (!preempt_trace() && irq_trace())
22784 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
22785 + trace_preemptirqsoff_hist(IRQS_OFF, 1);
22787 EXPORT_SYMBOL(trace_hardirqs_off);
22789 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
22791 + trace_preemptirqsoff_hist(IRQS_ON, 0);
22792 if (!preempt_trace() && irq_trace())
22793 stop_critical_timing(CALLER_ADDR0, caller_addr);
22795 @@ -494,6 +502,7 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
22797 if (!preempt_trace() && irq_trace())
22798 start_critical_timing(CALLER_ADDR0, caller_addr);
22799 + trace_preemptirqsoff_hist(IRQS_OFF, 1);
22801 EXPORT_SYMBOL(trace_hardirqs_off_caller);
22803 @@ -503,12 +512,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
22804 #ifdef CONFIG_PREEMPT_TRACER
22805 void trace_preempt_on(unsigned long a0, unsigned long a1)
22807 + trace_preemptirqsoff_hist(PREEMPT_ON, 0);
22808 if (preempt_trace() && !irq_trace())
22809 stop_critical_timing(a0, a1);
22812 void trace_preempt_off(unsigned long a0, unsigned long a1)
22814 + trace_preemptirqsoff_hist(PREEMPT_ON, 1);
22815 if (preempt_trace() && !irq_trace())
22816 start_critical_timing(a0, a1);
22818 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
22819 index 3fc20422c166..65a6dde71a7d 100644
22820 --- a/kernel/trace/trace_output.c
22821 +++ b/kernel/trace/trace_output.c
22822 @@ -386,6 +386,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
22826 + char need_resched_lazy;
22830 @@ -416,6 +417,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
22834 + need_resched_lazy =
22835 + (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
22838 (nmi && hardirq) ? 'Z' :
22840 @@ -424,14 +428,25 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
22844 - trace_seq_printf(s, "%c%c%c",
22845 - irqs_off, need_resched, hardsoft_irq);
22846 + trace_seq_printf(s, "%c%c%c%c",
22847 + irqs_off, need_resched, need_resched_lazy,
22850 if (entry->preempt_count)
22851 trace_seq_printf(s, "%x", entry->preempt_count);
22853 trace_seq_putc(s, '.');
22855 + if (entry->preempt_lazy_count)
22856 + trace_seq_printf(s, "%x", entry->preempt_lazy_count);
22858 + trace_seq_putc(s, '.');
22860 + if (entry->migrate_disable)
22861 + trace_seq_printf(s, "%x", entry->migrate_disable);
22863 + trace_seq_putc(s, '.');
22865 return !trace_seq_has_overflowed(s);
22868 diff --git a/kernel/user.c b/kernel/user.c
22869 index b069ccbfb0b0..1a2e88e98b5e 100644
22870 --- a/kernel/user.c
22871 +++ b/kernel/user.c
22872 @@ -161,11 +161,11 @@ void free_uid(struct user_struct *up)
22876 - local_irq_save(flags);
22877 + local_irq_save_nort(flags);
22878 if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
22879 free_user(up, flags);
22881 - local_irq_restore(flags);
22882 + local_irq_restore_nort(flags);
22885 struct user_struct *alloc_uid(kuid_t uid)
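For orientation, a minimal sketch of the _nort pattern applied to free_uid() above, assuming the -rt convention that local_irq_save_nort()/local_irq_restore_nort() behave like plain local_irq_save()/restore() on non-RT kernels while avoiding hard interrupt disabling on PREEMPT_RT_FULL (the helpers come from this patch set, not mainline). The lock, refcount and freed object are illustrative, not part of the patch.

#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(example_hash_lock);

/* Drop a reference; free the object once the count reaches zero. */
static void example_put(atomic_t *refcount, void *obj)
{
	unsigned long flags;

	local_irq_save_nort(flags);	/* -rt helper from this series */
	if (atomic_dec_and_lock(refcount, &example_hash_lock)) {
		spin_unlock(&example_hash_lock);
		kfree(obj);
	}
	local_irq_restore_nort(flags);
}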
22886 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
22887 index 6d1020c03d41..70c6a2f79f7e 100644
22888 --- a/kernel/watchdog.c
22889 +++ b/kernel/watchdog.c
22890 @@ -315,6 +315,8 @@ static int is_softlockup(unsigned long touch_ts)
22892 #ifdef CONFIG_HARDLOCKUP_DETECTOR
22894 +static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
22896 static struct perf_event_attr wd_hw_attr = {
22897 .type = PERF_TYPE_HARDWARE,
22898 .config = PERF_COUNT_HW_CPU_CYCLES,
22899 @@ -348,6 +350,13 @@ static void watchdog_overflow_callback(struct perf_event *event,
22900 /* only print hardlockups once */
22901 if (__this_cpu_read(hard_watchdog_warn) == true)
22904 + * If early-printk is enabled then make sure we do not
22905 + * lock up in printk() and kill console logging:
22909 + raw_spin_lock(&watchdog_output_lock);
22911 pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
22913 @@ -365,6 +374,7 @@ static void watchdog_overflow_callback(struct perf_event *event,
22914 !test_and_set_bit(0, &hardlockup_allcpu_dumped))
22915 trigger_allbutself_cpu_backtrace();
22917 + raw_spin_unlock(&watchdog_output_lock);
22918 if (hardlockup_panic)
22919 nmi_panic(regs, "Hard LOCKUP");
22921 @@ -512,6 +522,7 @@ static void watchdog_enable(unsigned int cpu)
22922 /* kick off the timer for the hardlockup detector */
22923 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
22924 hrtimer->function = watchdog_timer_fn;
22925 + hrtimer->irqsafe = 1;
22927 /* Enable the perf event */
22928 watchdog_nmi_enable(cpu);
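A minimal illustration of the locking idea added above: a raw spinlock, which remains a spinning lock even on PREEMPT_RT_FULL, serializing multi-line emergency output issued from NMI/hard-IRQ context. The function name and message are illustrative only.

#include <linux/spinlock.h>
#include <linux/printk.h>

static DEFINE_RAW_SPINLOCK(example_output_lock);

static void example_report_lockup(int cpu)
{
	raw_spin_lock(&example_output_lock);
	pr_emerg("example: hard lockup detected on cpu %d\n", cpu);
	dump_stack();	/* keep the whole report in one contiguous block */
	raw_spin_unlock(&example_output_lock);
}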
22929 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
22930 index 479d840db286..24eba6620a45 100644
22931 --- a/kernel/workqueue.c
22932 +++ b/kernel/workqueue.c
22934 #include <linux/nodemask.h>
22935 #include <linux/moduleparam.h>
22936 #include <linux/uaccess.h>
22937 +#include <linux/locallock.h>
22938 +#include <linux/delay.h>
22940 #include "workqueue_internal.h"
22942 @@ -121,11 +123,16 @@ enum {
22943 * cpu or grabbing pool->lock is enough for read access. If
22944 * POOL_DISASSOCIATED is set, it's identical to L.
22946 + * On RT we need the extra protection via rt_lock_idle_list() for
22947 + * the list manipulations against read access from
22948 + * wq_worker_sleeping(). All other places are nicely serialized via
22949 + * pool->lock.
22951 * A: pool->attach_mutex protected.
22953 * PL: wq_pool_mutex protected.
22955 - * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
22956 + * PR: wq_pool_mutex protected for writes. RCU protected for reads.
22958 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
22960 @@ -134,7 +141,7 @@ enum {
22962 * WQ: wq->mutex protected.
22964 - * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
22965 + * WR: wq->mutex protected for writes. RCU protected for reads.
22967 * MD: wq_mayday_lock protected.
22969 @@ -185,7 +192,7 @@ struct worker_pool {
22970 atomic_t nr_running ____cacheline_aligned_in_smp;
22973 - * Destruction of pool is sched-RCU protected to allow dereferences
22974 + * Destruction of pool is RCU protected to allow dereferences
22975 * from get_work_pool().
22977 struct rcu_head rcu;
22978 @@ -214,7 +221,7 @@ struct pool_workqueue {
22980 * Release of unbound pwq is punted to system_wq. See put_pwq()
22981 * and pwq_unbound_release_workfn() for details. pool_workqueue
22982 - * itself is also sched-RCU protected so that the first pwq can be
22983 + * itself is also RCU protected so that the first pwq can be
22984 * determined without grabbing wq->mutex.
22986 struct work_struct unbound_release_work;
22987 @@ -348,6 +355,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
22988 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
22989 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
22991 +static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
22993 static int worker_thread(void *__worker);
22994 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
22996 @@ -355,20 +364,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
22997 #include <trace/events/workqueue.h>
22999 #define assert_rcu_or_pool_mutex() \
23000 - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
23001 + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
23002 !lockdep_is_held(&wq_pool_mutex), \
23003 - "sched RCU or wq_pool_mutex should be held")
23004 + "RCU or wq_pool_mutex should be held")
23006 #define assert_rcu_or_wq_mutex(wq) \
23007 - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
23008 + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
23009 !lockdep_is_held(&wq->mutex), \
23010 - "sched RCU or wq->mutex should be held")
23011 + "RCU or wq->mutex should be held")
23013 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
23014 - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
23015 + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
23016 !lockdep_is_held(&wq->mutex) && \
23017 !lockdep_is_held(&wq_pool_mutex), \
23018 - "sched RCU, wq->mutex or wq_pool_mutex should be held")
23019 + "RCU, wq->mutex or wq_pool_mutex should be held")
23021 #define for_each_cpu_worker_pool(pool, cpu) \
23022 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
23023 @@ -380,7 +389,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
23024 * @pool: iteration cursor
23025 * @pi: integer used for iteration
23027 - * This must be called either with wq_pool_mutex held or sched RCU read
23028 + * This must be called either with wq_pool_mutex held or RCU read
23029 * locked. If the pool needs to be used beyond the locking in effect, the
23030 * caller is responsible for guaranteeing that the pool stays online.
23032 @@ -412,7 +421,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
23033 * @pwq: iteration cursor
23034 * @wq: the target workqueue
23036 - * This must be called either with wq->mutex held or sched RCU read locked.
23037 + * This must be called either with wq->mutex held or RCU read locked.
23038 * If the pwq needs to be used beyond the locking in effect, the caller is
23039 * responsible for guaranteeing that the pwq stays online.
23041 @@ -424,6 +433,31 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
23042 if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
23045 +#ifdef CONFIG_PREEMPT_RT_BASE
23046 +static inline void rt_lock_idle_list(struct worker_pool *pool)
23048 + preempt_disable();
23050 +static inline void rt_unlock_idle_list(struct worker_pool *pool)
23052 + preempt_enable();
23054 +static inline void sched_lock_idle_list(struct worker_pool *pool) { }
23055 +static inline void sched_unlock_idle_list(struct worker_pool *pool) { }
23057 +static inline void rt_lock_idle_list(struct worker_pool *pool) { }
23058 +static inline void rt_unlock_idle_list(struct worker_pool *pool) { }
23059 +static inline void sched_lock_idle_list(struct worker_pool *pool)
23061 + spin_lock_irq(&pool->lock);
23063 +static inline void sched_unlock_idle_list(struct worker_pool *pool)
23065 + spin_unlock_irq(&pool->lock);
23070 #ifdef CONFIG_DEBUG_OBJECTS_WORK
23072 static struct debug_obj_descr work_debug_descr;
23073 @@ -548,7 +582,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
23074 * @wq: the target workqueue
23075 * @node: the node ID
23077 - * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
23078 + * This must be called with any of wq_pool_mutex, wq->mutex or RCU
23080 * If the pwq needs to be used beyond the locking in effect, the caller is
23081 * responsible for guaranteeing that the pwq stays online.
23082 @@ -692,8 +726,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
23083 * @work: the work item of interest
23085 * Pools are created and destroyed under wq_pool_mutex, and allows read
23086 - * access under sched-RCU read lock. As such, this function should be
23087 - * called under wq_pool_mutex or with preemption disabled.
23088 + * access under RCU read lock. As such, this function should be
23089 + * called under wq_pool_mutex or inside a rcu_read_lock() region.
23091 * All fields of the returned pool are accessible as long as the above
23092 * mentioned locking is in effect. If the returned pool needs to be used
23093 @@ -830,50 +864,45 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
23095 static void wake_up_worker(struct worker_pool *pool)
23097 - struct worker *worker = first_idle_worker(pool);
23098 + struct worker *worker;
23100 + rt_lock_idle_list(pool);
23102 + worker = first_idle_worker(pool);
23104 if (likely(worker))
23105 wake_up_process(worker->task);
23107 + rt_unlock_idle_list(pool);
23111 - * wq_worker_waking_up - a worker is waking up
23112 + * wq_worker_running - a worker is running again
23113 * @task: task waking up
23114 - * @cpu: CPU @task is waking up to
23116 - * This function is called during try_to_wake_up() when a worker is
23120 - * spin_lock_irq(rq->lock)
23121 + * This function is called when a worker returns from schedule()
23123 -void wq_worker_waking_up(struct task_struct *task, int cpu)
23124 +void wq_worker_running(struct task_struct *task)
23126 struct worker *worker = kthread_data(task);
23128 - if (!(worker->flags & WORKER_NOT_RUNNING)) {
23129 - WARN_ON_ONCE(worker->pool->cpu != cpu);
23130 + if (!worker->sleeping)
23131 + return;
23132 + if (!(worker->flags & WORKER_NOT_RUNNING))
23133 atomic_inc(&worker->pool->nr_running);
23135 + worker->sleeping = 0;
23139 * wq_worker_sleeping - a worker is going to sleep
23140 * @task: task going to sleep
23142 - * This function is called during schedule() when a busy worker is
23143 - * going to sleep. Worker on the same cpu can be woken up by
23144 - * returning pointer to its task.
23147 - * spin_lock_irq(rq->lock)
23150 - * Worker task on @cpu to wake up, %NULL if none.
23151 + * This function is called from schedule() when a busy worker is
23152 + * going to sleep.
23154 -struct task_struct *wq_worker_sleeping(struct task_struct *task)
23155 +void wq_worker_sleeping(struct task_struct *task)
23157 - struct worker *worker = kthread_data(task), *to_wakeup = NULL;
23158 + struct worker *worker = kthread_data(task);
23159 struct worker_pool *pool;
23162 @@ -882,29 +911,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
23163 * checking NOT_RUNNING.
23165 if (worker->flags & WORKER_NOT_RUNNING)
23169 pool = worker->pool;
23171 - /* this can only happen on the local cpu */
23172 - if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
23173 - return NULL;
23174 + if (WARN_ON_ONCE(worker->sleeping))
23175 + return;
23177 + worker->sleeping = 1;
23180 * The counterpart of the following dec_and_test, implied mb,
23181 * worklist not empty test sequence is in insert_work().
23182 * Please read comment there.
23184 - * NOT_RUNNING is clear. This means that we're bound to and
23185 - * running on the local cpu w/ rq lock held and preemption
23186 - * disabled, which in turn means that none else could be
23187 - * manipulating idle_list, so dereferencing idle_list without pool
23188 - * lock is safe.
23190 if (atomic_dec_and_test(&pool->nr_running) &&
23191 - !list_empty(&pool->worklist))
23192 - to_wakeup = first_idle_worker(pool);
23193 - return to_wakeup ? to_wakeup->task : NULL;
23194 + !list_empty(&pool->worklist)) {
23195 + sched_lock_idle_list(pool);
23196 + wake_up_worker(pool);
23197 + sched_unlock_idle_list(pool);
23202 @@ -1098,12 +1124,14 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
23206 - * As both pwqs and pools are sched-RCU protected, the
23207 + * As both pwqs and pools are RCU protected, the
23208 * following lock operations are safe.
23210 - spin_lock_irq(&pwq->pool->lock);
23212 + local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
23214 - spin_unlock_irq(&pwq->pool->lock);
23215 + local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
23216 + rcu_read_unlock();
23220 @@ -1207,7 +1235,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
23221 struct worker_pool *pool;
23222 struct pool_workqueue *pwq;
23224 - local_irq_save(*flags);
23225 + local_lock_irqsave(pendingb_lock, *flags);
23227 /* try to steal the timer if it exists */
23229 @@ -1226,6 +1254,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
23230 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
23235 * The queueing is in progress, or it is already queued. Try to
23236 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
23237 @@ -1264,14 +1293,16 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
23238 set_work_pool_and_keep_pending(work, pool->id);
23240 spin_unlock(&pool->lock);
23241 + rcu_read_unlock();
23244 spin_unlock(&pool->lock);
23246 - local_irq_restore(*flags);
23247 + rcu_read_unlock();
23248 + local_unlock_irqrestore(pendingb_lock, *flags);
23249 if (work_is_canceling(work))
23256 @@ -1373,7 +1404,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
23257 * queued or lose PENDING. Grabbing PENDING and queueing should
23258 * happen with IRQ disabled.
23260 - WARN_ON_ONCE(!irqs_disabled());
23261 + WARN_ON_ONCE_NONRT(!irqs_disabled());
23263 debug_work_activate(work);
23265 @@ -1381,6 +1412,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
23266 if (unlikely(wq->flags & __WQ_DRAINING) &&
23267 WARN_ON_ONCE(!is_chained_work(wq)))
23271 if (req_cpu == WORK_CPU_UNBOUND)
23272 cpu = wq_select_unbound_cpu(raw_smp_processor_id());
23273 @@ -1437,10 +1469,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
23274 /* pwq determined, queue */
23275 trace_workqueue_queue_work(req_cpu, pwq, work);
23277 - if (WARN_ON(!list_empty(&work->entry))) {
23278 - spin_unlock(&pwq->pool->lock);
23281 + if (WARN_ON(!list_empty(&work->entry)))
23284 pwq->nr_in_flight[pwq->work_color]++;
23285 work_flags = work_color_to_flags(pwq->work_color);
23286 @@ -1458,7 +1488,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
23288 insert_work(pwq, work, worklist, work_flags);
23291 spin_unlock(&pwq->pool->lock);
23292 + rcu_read_unlock();
23296 @@ -1478,14 +1510,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
23298 unsigned long flags;
23300 - local_irq_save(flags);
23301 + local_lock_irqsave(pendingb_lock,flags);
23303 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
23304 __queue_work(cpu, wq, work);
23308 - local_irq_restore(flags);
23309 + local_unlock_irqrestore(pendingb_lock, flags);
23312 EXPORT_SYMBOL(queue_work_on);
23313 @@ -1552,14 +1584,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
23314 unsigned long flags;
23316 /* read the comment in __queue_work() */
23317 - local_irq_save(flags);
23318 + local_lock_irqsave(pendingb_lock, flags);
23320 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
23321 __queue_delayed_work(cpu, wq, dwork, delay);
23325 - local_irq_restore(flags);
23326 + local_unlock_irqrestore(pendingb_lock, flags);
23329 EXPORT_SYMBOL(queue_delayed_work_on);
23330 @@ -1594,7 +1626,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
23332 if (likely(ret >= 0)) {
23333 __queue_delayed_work(cpu, wq, dwork, delay);
23334 - local_irq_restore(flags);
23335 + local_unlock_irqrestore(pendingb_lock, flags);
23338 /* -ENOENT from try_to_grab_pending() becomes %true */
23339 @@ -1627,7 +1659,9 @@ static void worker_enter_idle(struct worker *worker)
23340 worker->last_active = jiffies;
23342 /* idle_list is LIFO */
23343 + rt_lock_idle_list(pool);
23344 list_add(&worker->entry, &pool->idle_list);
23345 + rt_unlock_idle_list(pool);
23347 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
23348 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
23349 @@ -1660,7 +1694,9 @@ static void worker_leave_idle(struct worker *worker)
23351 worker_clr_flags(worker, WORKER_IDLE);
23353 + rt_lock_idle_list(pool);
23354 list_del_init(&worker->entry);
23355 + rt_unlock_idle_list(pool);
23358 static struct worker *alloc_worker(int node)
23359 @@ -1826,7 +1862,9 @@ static void destroy_worker(struct worker *worker)
23360 pool->nr_workers--;
23363 + rt_lock_idle_list(pool);
23364 list_del_init(&worker->entry);
23365 + rt_unlock_idle_list(pool);
23366 worker->flags |= WORKER_DIE;
23367 wake_up_process(worker->task);
23369 @@ -2785,14 +2823,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
23373 - local_irq_disable();
23375 pool = get_work_pool(work);
23377 - local_irq_enable();
23378 + rcu_read_unlock();
23382 - spin_lock(&pool->lock);
23383 + spin_lock_irq(&pool->lock);
23384 /* see the comment in try_to_grab_pending() with the same code */
23385 pwq = get_work_pwq(work);
23387 @@ -2821,10 +2859,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
23389 lock_map_acquire_read(&pwq->wq->lockdep_map);
23390 lock_map_release(&pwq->wq->lockdep_map);
23392 + rcu_read_unlock();
23395 spin_unlock_irq(&pool->lock);
23396 + rcu_read_unlock();
23400 @@ -2911,7 +2950,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
23402 /* tell other tasks trying to grab @work to back off */
23403 mark_work_canceling(work);
23404 - local_irq_restore(flags);
23405 + local_unlock_irqrestore(pendingb_lock, flags);
23408 clear_work_data(work);
23409 @@ -2966,10 +3005,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
23411 bool flush_delayed_work(struct delayed_work *dwork)
23413 - local_irq_disable();
23414 + local_lock_irq(pendingb_lock);
23415 if (del_timer_sync(&dwork->timer))
23416 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
23417 - local_irq_enable();
23418 + local_unlock_irq(pendingb_lock);
23419 return flush_work(&dwork->work);
23421 EXPORT_SYMBOL(flush_delayed_work);
23422 @@ -2987,7 +3026,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
23425 set_work_pool_and_clear_pending(work, get_work_pool_id(work));
23426 - local_irq_restore(flags);
23427 + local_unlock_irqrestore(pendingb_lock, flags);
23431 @@ -3245,7 +3284,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
23432 * put_unbound_pool - put a worker_pool
23433 * @pool: worker_pool to put
23435 - * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
23436 + * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
23437 * safe manner. get_unbound_pool() calls this function on its failure path
23438 * and this function should be able to release pools which went through,
23439 * successfully or not, init_worker_pool().
23440 @@ -3299,8 +3338,8 @@ static void put_unbound_pool(struct worker_pool *pool)
23441 del_timer_sync(&pool->idle_timer);
23442 del_timer_sync(&pool->mayday_timer);
23444 - /* sched-RCU protected to allow dereferences from get_work_pool() */
23445 - call_rcu_sched(&pool->rcu, rcu_free_pool);
23446 + /* RCU protected to allow dereferences from get_work_pool() */
23447 + call_rcu(&pool->rcu, rcu_free_pool);
23451 @@ -3407,14 +3446,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
23452 put_unbound_pool(pool);
23453 mutex_unlock(&wq_pool_mutex);
23455 - call_rcu_sched(&pwq->rcu, rcu_free_pwq);
23456 + call_rcu(&pwq->rcu, rcu_free_pwq);
23459 * If we're the last pwq going away, @wq is already dead and no one
23460 * is gonna access it anymore. Schedule RCU free.
23463 - call_rcu_sched(&wq->rcu, rcu_free_wq);
23464 + call_rcu(&wq->rcu, rcu_free_wq);
23468 @@ -4064,7 +4103,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
23469 * The base ref is never dropped on per-cpu pwqs. Directly
23470 * schedule RCU free.
23472 - call_rcu_sched(&wq->rcu, rcu_free_wq);
23473 + call_rcu(&wq->rcu, rcu_free_wq);
23476 * We're the sole accessor of @wq at this point. Directly
23477 @@ -4157,7 +4196,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
23478 struct pool_workqueue *pwq;
23481 - rcu_read_lock_sched();
23483 + preempt_disable();
23485 if (cpu == WORK_CPU_UNBOUND)
23486 cpu = smp_processor_id();
23487 @@ -4168,7 +4208,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
23488 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
23490 ret = !list_empty(&pwq->delayed_works);
23491 - rcu_read_unlock_sched();
23492 + preempt_enable();
23493 + rcu_read_unlock();
23497 @@ -4194,15 +4235,15 @@ unsigned int work_busy(struct work_struct *work)
23498 if (work_pending(work))
23499 ret |= WORK_BUSY_PENDING;
23501 - local_irq_save(flags);
23503 pool = get_work_pool(work);
23505 - spin_lock(&pool->lock);
23506 + spin_lock_irqsave(&pool->lock, flags);
23507 if (find_worker_executing_work(pool, work))
23508 ret |= WORK_BUSY_RUNNING;
23509 - spin_unlock(&pool->lock);
23510 + spin_unlock_irqrestore(&pool->lock, flags);
23512 - local_irq_restore(flags);
23513 + rcu_read_unlock();
23517 @@ -4391,7 +4432,7 @@ void show_workqueue_state(void)
23518 unsigned long flags;
23521 - rcu_read_lock_sched();
23524 pr_info("Showing busy workqueues and worker pools:\n");
23526 @@ -4444,7 +4485,7 @@ void show_workqueue_state(void)
23527 spin_unlock_irqrestore(&pool->lock, flags);
23530 - rcu_read_unlock_sched();
23531 + rcu_read_unlock();
23535 @@ -4782,16 +4823,16 @@ bool freeze_workqueues_busy(void)
23536 * nr_active is monotonically decreasing. It's safe
23537 * to peek without lock.
23539 - rcu_read_lock_sched();
23541 for_each_pwq(pwq, wq) {
23542 WARN_ON_ONCE(pwq->nr_active < 0);
23543 if (pwq->nr_active) {
23545 - rcu_read_unlock_sched();
23546 + rcu_read_unlock();
23550 - rcu_read_unlock_sched();
23551 + rcu_read_unlock();
23554 mutex_unlock(&wq_pool_mutex);
23555 @@ -4981,7 +5022,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
23556 const char *delim = "";
23557 int node, written = 0;
23559 - rcu_read_lock_sched();
23560 + get_online_cpus();
23562 for_each_node(node) {
23563 written += scnprintf(buf + written, PAGE_SIZE - written,
23564 "%s%d:%d", delim, node,
23565 @@ -4989,7 +5031,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
23568 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
23569 - rcu_read_unlock_sched();
23570 + rcu_read_unlock();
23571 + put_online_cpus();
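Since almost every local_irq_save()/restore() pair in this file is converted to pendingb_lock, here is a minimal sketch of the local-lock idiom itself, assuming the linux/locallock.h API introduced by this patch set: on non-RT kernels the calls collapse to IRQ disabling, while on PREEMPT_RT_FULL they take a per-CPU sleeping lock, so the section stays preemptible yet per-CPU serialized. The lock name and the bit manipulation are illustrative.

#include <linux/locallock.h>
#include <linux/bitops.h>

static DEFINE_LOCAL_IRQ_LOCK(example_pending_lock);

static void example_mark_pending(unsigned long *pending_word, int bit)
{
	unsigned long flags;

	local_lock_irqsave(example_pending_lock, flags);
	__set_bit(bit, pending_word);	/* per-CPU state protected by the local lock */
	local_unlock_irqrestore(example_pending_lock, flags);
}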
23575 diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
23576 index 8635417c587b..f000c4d6917e 100644
23577 --- a/kernel/workqueue_internal.h
23578 +++ b/kernel/workqueue_internal.h
23579 @@ -43,6 +43,7 @@ struct worker {
23580 unsigned long last_active; /* L: last active timestamp */
23581 unsigned int flags; /* X: flags */
23582 int id; /* I: worker id */
23583 + int sleeping; /* None */
23586 * Opaque string set with work_set_desc(). Printed out with task
23587 @@ -68,7 +69,7 @@ static inline struct worker *current_wq_worker(void)
23588 * Scheduler hooks for concurrency managed workqueue. Only to be used from
23589 * sched/core.c and workqueue.c.
23591 -void wq_worker_waking_up(struct task_struct *task, int cpu);
23592 -struct task_struct *wq_worker_sleeping(struct task_struct *task);
23593 +void wq_worker_running(struct task_struct *task);
23594 +void wq_worker_sleeping(struct task_struct *task);
23596 #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
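The renamed hooks are driven from the scheduler; the actual call sites are modified in kernel/sched/core.c elsewhere in this series. A hedged sketch of the expected pairing, purely illustrative:

#include <linux/sched.h>
#include "workqueue_internal.h"

static void example_schedule_as_worker(void)
{
	struct task_struct *tsk = current;

	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_sleeping(tsk);	/* before the worker blocks */

	schedule();

	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_running(tsk);		/* once it is back on a CPU */
}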
23597 diff --git a/lib/Kconfig b/lib/Kconfig
23598 index 260a80e313b9..b06becb3f477 100644
23601 @@ -400,6 +400,7 @@ config CHECK_SIGNATURE
23603 config CPUMASK_OFFSTACK
23604 bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
23605 + depends on !PREEMPT_RT_FULL
23607 Use dynamic allocation for cpumask_var_t, instead of putting
23608 them on the stack. This is a bit more expensive, but avoids
23609 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
23610 index 056052dc8e91..d8494e126de8 100644
23611 --- a/lib/debugobjects.c
23612 +++ b/lib/debugobjects.c
23613 @@ -308,7 +308,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
23614 struct debug_obj *obj;
23615 unsigned long flags;
23618 +#ifdef CONFIG_PREEMPT_RT_FULL
23619 + if (preempt_count() == 0 && !irqs_disabled())
23623 db = get_bucket((unsigned long) addr);
23625 diff --git a/lib/idr.c b/lib/idr.c
23626 index 6098336df267..9decbe914595 100644
23630 #include <linux/idr.h>
23631 #include <linux/spinlock.h>
23632 #include <linux/percpu.h>
23633 +#include <linux/locallock.h>
23635 #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
23636 #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
23637 @@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
23638 static DEFINE_PER_CPU(int, idr_preload_cnt);
23639 static DEFINE_SPINLOCK(simple_ida_lock);
23641 +#ifdef CONFIG_PREEMPT_RT_FULL
23642 +static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
23644 +static inline void idr_preload_lock(void)
23646 + local_lock(idr_lock);
23649 +static inline void idr_preload_unlock(void)
23651 + local_unlock(idr_lock);
23654 +void idr_preload_end(void)
23656 + idr_preload_unlock();
23658 +EXPORT_SYMBOL(idr_preload_end);
23660 +static inline void idr_preload_lock(void)
23662 + preempt_disable();
23665 +static inline void idr_preload_unlock(void)
23667 + preempt_enable();
23672 /* the maximum ID which can be allocated given idr->layers */
23673 static int idr_max(int layers)
23675 @@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
23676 * context. See idr_preload() for details.
23678 if (!in_interrupt()) {
23679 - preempt_disable();
23680 + idr_preload_lock();
23681 new = __this_cpu_read(idr_preload_head);
23683 __this_cpu_write(idr_preload_head, new->ary[0]);
23684 __this_cpu_dec(idr_preload_cnt);
23685 new->ary[0] = NULL;
23687 - preempt_enable();
23688 + idr_preload_unlock();
23692 @@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
23693 idr_mark_full(pa, id);
23698 * idr_preload - preload for idr_alloc()
23699 * @gfp_mask: allocation mask to use for preloading
23700 @@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask)
23701 WARN_ON_ONCE(in_interrupt());
23702 might_sleep_if(gfpflags_allow_blocking(gfp_mask));
23704 - preempt_disable();
23705 + idr_preload_lock();
23708 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
23709 @@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask)
23710 while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
23711 struct idr_layer *new;
23713 - preempt_enable();
23714 + idr_preload_unlock();
23715 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
23716 - preempt_disable();
23717 + idr_preload_lock();
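For context, the caller-side protocol these helpers protect is the usual idr_preload()/idr_alloc()/idr_preload_end() sequence; with this change the preload section is covered by idr_lock on -rt instead of a bare preempt_disable(). The idr instance and stored pointer below are illustrative.

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);

static int example_store(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* enters the preload section */
	id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);
	idr_preload_end();		/* leaves it again */

	return id;			/* >= 0 on success, negative errno otherwise */
}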
23721 diff --git a/lib/irq_poll.c b/lib/irq_poll.c
23722 index 1d6565e81030..b23a79761df7 100644
23723 --- a/lib/irq_poll.c
23724 +++ b/lib/irq_poll.c
23725 @@ -36,6 +36,7 @@ void irq_poll_sched(struct irq_poll *iop)
23726 list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
23727 __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
23728 local_irq_restore(flags);
23729 + preempt_check_resched_rt();
23731 EXPORT_SYMBOL(irq_poll_sched);
23733 @@ -71,6 +72,7 @@ void irq_poll_complete(struct irq_poll *iop)
23734 local_irq_save(flags);
23735 __irq_poll_complete(iop);
23736 local_irq_restore(flags);
23737 + preempt_check_resched_rt();
23739 EXPORT_SYMBOL(irq_poll_complete);
23741 @@ -95,6 +97,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
23744 local_irq_enable();
23745 + preempt_check_resched_rt();
23747 /* Even though interrupts have been re-enabled, this
23748 * access is safe because interrupts can only add new
23749 @@ -132,6 +135,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
23750 __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
23752 local_irq_enable();
23753 + preempt_check_resched_rt();
23757 @@ -195,6 +199,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
23758 this_cpu_ptr(&blk_cpu_iopoll));
23759 __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
23760 local_irq_enable();
23761 + preempt_check_resched_rt();
23765 diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
23766 index f3a217ea0388..4611b156ef79 100644
23767 --- a/lib/locking-selftest.c
23768 +++ b/lib/locking-selftest.c
23769 @@ -590,6 +590,8 @@ GENERATE_TESTCASE(init_held_rsem)
23770 #include "locking-selftest-spin-hardirq.h"
23771 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
23773 +#ifndef CONFIG_PREEMPT_RT_FULL
23775 #include "locking-selftest-rlock-hardirq.h"
23776 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
23778 @@ -605,9 +607,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
23779 #include "locking-selftest-wlock-softirq.h"
23780 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
23787 +#ifndef CONFIG_PREEMPT_RT_FULL
23789 * Enabling hardirqs with a softirq-safe lock held:
23791 @@ -640,6 +645,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
23798 * Enabling irqs with an irq-safe lock held:
23800 @@ -663,6 +670,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
23801 #include "locking-selftest-spin-hardirq.h"
23802 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
23804 +#ifndef CONFIG_PREEMPT_RT_FULL
23806 #include "locking-selftest-rlock-hardirq.h"
23807 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
23809 @@ -678,6 +687,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
23810 #include "locking-selftest-wlock-softirq.h"
23811 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
23818 @@ -709,6 +720,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
23819 #include "locking-selftest-spin-hardirq.h"
23820 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
23822 +#ifndef CONFIG_PREEMPT_RT_FULL
23824 #include "locking-selftest-rlock-hardirq.h"
23825 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
23827 @@ -724,6 +737,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
23828 #include "locking-selftest-wlock-softirq.h"
23829 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
23836 @@ -757,6 +772,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
23837 #include "locking-selftest-spin-hardirq.h"
23838 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
23840 +#ifndef CONFIG_PREEMPT_RT_FULL
23842 #include "locking-selftest-rlock-hardirq.h"
23843 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
23845 @@ -772,10 +789,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
23846 #include "locking-selftest-wlock-softirq.h"
23847 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
23855 +#ifndef CONFIG_PREEMPT_RT_FULL
23858 * read-lock / write-lock irq inversion.
23860 @@ -838,6 +859,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
23866 +#ifndef CONFIG_PREEMPT_RT_FULL
23869 * read-lock / write-lock recursion that is actually safe.
23871 @@ -876,6 +901,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
23878 * read-lock / write-lock recursion that is unsafe.
23880 @@ -1858,6 +1885,7 @@ void locking_selftest(void)
23882 printk(" --------------------------------------------------------------------------\n");
23884 +#ifndef CONFIG_PREEMPT_RT_FULL
23886 * irq-context testcases:
23888 @@ -1870,6 +1898,28 @@ void locking_selftest(void)
23890 DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
23891 // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
23893 + /* On -rt, we only do hardirq context test for raw spinlock */
23894 + DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
23895 + DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
23897 + DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
23898 + DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
23900 + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
23901 + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
23902 + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
23903 + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
23904 + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
23905 + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
23907 + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
23908 + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
23909 + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
23910 + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
23911 + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
23912 + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
23917 diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
23918 index 6d40944960de..822a2c027e72 100644
23919 --- a/lib/percpu_ida.c
23920 +++ b/lib/percpu_ida.c
23922 #include <linux/string.h>
23923 #include <linux/spinlock.h>
23924 #include <linux/percpu_ida.h>
23925 +#include <linux/locallock.h>
23927 +static DEFINE_LOCAL_IRQ_LOCK(irq_off_lock);
23929 struct percpu_ida_cpu {
23931 @@ -148,13 +151,13 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
23932 unsigned long flags;
23935 - local_irq_save(flags);
23936 + local_lock_irqsave(irq_off_lock, flags);
23937 tags = this_cpu_ptr(pool->tag_cpu);
23940 tag = alloc_local_tag(tags);
23941 if (likely(tag >= 0)) {
23942 - local_irq_restore(flags);
23943 + local_unlock_irqrestore(irq_off_lock, flags);
23947 @@ -173,6 +176,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
23949 if (!tags->nr_free)
23950 alloc_global_tags(pool, tags);
23952 if (!tags->nr_free)
23953 steal_tags(pool, tags);
23955 @@ -184,7 +188,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
23958 spin_unlock(&pool->lock);
23959 - local_irq_restore(flags);
23960 + local_unlock_irqrestore(irq_off_lock, flags);
23962 if (tag >= 0 || state == TASK_RUNNING)
23964 @@ -196,7 +200,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
23968 - local_irq_save(flags);
23969 + local_lock_irqsave(irq_off_lock, flags);
23970 tags = this_cpu_ptr(pool->tag_cpu);
23972 if (state != TASK_RUNNING)
23973 @@ -221,7 +225,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
23975 BUG_ON(tag >= pool->nr_tags);
23977 - local_irq_save(flags);
23978 + local_lock_irqsave(irq_off_lock, flags);
23979 tags = this_cpu_ptr(pool->tag_cpu);
23981 spin_lock(&tags->lock);
23982 @@ -253,7 +257,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
23983 spin_unlock(&pool->lock);
23986 - local_irq_restore(flags);
23987 + local_unlock_irqrestore(irq_off_lock, flags);
23989 EXPORT_SYMBOL_GPL(percpu_ida_free);
23991 @@ -345,7 +349,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
23992 struct percpu_ida_cpu *remote;
23993 unsigned cpu, i, err = 0;
23995 - local_irq_save(flags);
23996 + local_lock_irqsave(irq_off_lock, flags);
23997 for_each_possible_cpu(cpu) {
23998 remote = per_cpu_ptr(pool->tag_cpu, cpu);
23999 spin_lock(&remote->lock);
24000 @@ -367,7 +371,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
24002 spin_unlock(&pool->lock);
24004 - local_irq_restore(flags);
24005 + local_unlock_irqrestore(irq_off_lock, flags);
24008 EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
24009 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
24010 index 8e6d552c40dd..741da5a77fd5 100644
24011 --- a/lib/radix-tree.c
24012 +++ b/lib/radix-tree.c
24014 #include <linux/bitops.h>
24015 #include <linux/rcupdate.h>
24016 #include <linux/preempt.h> /* in_interrupt() */
24018 +#include <linux/locallock.h>
24020 /* Number of nodes in fully populated tree of given height */
24021 static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
24022 @@ -68,6 +68,7 @@ struct radix_tree_preload {
24023 struct radix_tree_node *nodes;
24025 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
24026 +static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);
24028 static inline void *node_to_entry(void *ptr)
24030 @@ -290,13 +291,14 @@ radix_tree_node_alloc(struct radix_tree_root *root)
24031 * succeed in getting a node here (and never reach
24032 * kmem_cache_alloc)
24034 - rtp = this_cpu_ptr(&radix_tree_preloads);
24035 + rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
24038 rtp->nodes = ret->private_data;
24039 ret->private_data = NULL;
24042 + put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
24044 * Update the allocation stack trace as this is more useful
24046 @@ -357,14 +359,14 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr)
24048 gfp_mask &= ~__GFP_ACCOUNT;
24050 - preempt_disable();
24051 + local_lock(radix_tree_preloads_lock);
24052 rtp = this_cpu_ptr(&radix_tree_preloads);
24053 while (rtp->nr < nr) {
24054 - preempt_enable();
24055 + local_unlock(radix_tree_preloads_lock);
24056 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
24059 - preempt_disable();
24060 + local_lock(radix_tree_preloads_lock);
24061 rtp = this_cpu_ptr(&radix_tree_preloads);
24062 if (rtp->nr < nr) {
24063 node->private_data = rtp->nodes;
24064 @@ -406,7 +408,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
24065 if (gfpflags_allow_blocking(gfp_mask))
24066 return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
24067 /* Preloading doesn't help anything with this gfp mask, skip it */
24068 - preempt_disable();
24069 + local_lock(radix_tree_preloads_lock);
24072 EXPORT_SYMBOL(radix_tree_maybe_preload);
24073 @@ -422,7 +424,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
24075 /* Preloading doesn't help anything with this gfp mask, skip it */
24076 if (!gfpflags_allow_blocking(gfp_mask)) {
24077 - preempt_disable();
24078 + local_lock(radix_tree_preloads_lock);
24082 @@ -456,6 +458,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
24083 return __radix_tree_preload(gfp_mask, nr_nodes);
24086 +void radix_tree_preload_end(void)
24088 + local_unlock(radix_tree_preloads_lock);
24090 +EXPORT_SYMBOL(radix_tree_preload_end);
24093 * The maximum index which can be stored in a radix tree
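The caller-side pattern is unchanged; only the preload section is now backed by radix_tree_preloads_lock. A minimal sketch of that pattern, with an illustrative tree and lock:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(example_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(example_tree_lock);

static int example_insert(unsigned long index, void *item)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* opens the preload section */
	if (err)
		return err;

	spin_lock(&example_tree_lock);
	err = radix_tree_insert(&example_tree, index, item);
	spin_unlock(&example_tree_lock);

	radix_tree_preload_end();		/* closes it again */
	return err;
}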
24095 diff --git a/lib/scatterlist.c b/lib/scatterlist.c
24096 index 004fc70fc56a..ccc46992a517 100644
24097 --- a/lib/scatterlist.c
24098 +++ b/lib/scatterlist.c
24099 @@ -620,7 +620,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
24100 flush_kernel_dcache_page(miter->page);
24102 if (miter->__flags & SG_MITER_ATOMIC) {
24103 - WARN_ON_ONCE(preemptible());
24104 + WARN_ON_ONCE(!pagefault_disabled());
24105 kunmap_atomic(miter->addr);
24107 kunmap(miter->page);
24108 @@ -664,7 +664,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
24109 if (!sg_miter_skip(&miter, skip))
24112 - local_irq_save(flags);
24113 + local_irq_save_nort(flags);
24115 while (sg_miter_next(&miter) && offset < buflen) {
24117 @@ -681,7 +681,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
24119 sg_miter_stop(&miter);
24121 - local_irq_restore(flags);
24122 + local_irq_restore_nort(flags);
24125 EXPORT_SYMBOL(sg_copy_buffer);
24126 diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
24127 index 1afec32de6f2..11fa431046a8 100644
24128 --- a/lib/smp_processor_id.c
24129 +++ b/lib/smp_processor_id.c
24130 @@ -39,8 +39,9 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
24131 if (!printk_ratelimit())
24134 - printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
24135 - what1, what2, preempt_count() - 1, current->comm, current->pid);
24136 + printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n",
24137 + what1, what2, preempt_count() - 1, __migrate_disabled(current),
24138 + current->comm, current->pid);
24140 print_symbol("caller is %s\n", (long)__builtin_return_address(0));
24142 diff --git a/mm/Kconfig b/mm/Kconfig
24143 index 86e3e0e74d20..77e5862a1ed2 100644
24146 @@ -410,7 +410,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
24148 config TRANSPARENT_HUGEPAGE
24149 bool "Transparent Hugepage Support"
24150 - depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
24151 + depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
24153 select RADIX_TREE_MULTIORDER
24155 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
24156 index 6ff2d7744223..b5a91dd53b5f 100644
24157 --- a/mm/backing-dev.c
24158 +++ b/mm/backing-dev.c
24159 @@ -457,9 +457,9 @@ void wb_congested_put(struct bdi_writeback_congested *congested)
24161 unsigned long flags;
24163 - local_irq_save(flags);
24164 + local_irq_save_nort(flags);
24165 if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
24166 - local_irq_restore(flags);
24167 + local_irq_restore_nort(flags);
24171 diff --git a/mm/compaction.c b/mm/compaction.c
24172 index 70e6bec46dc2..6678ed58b7c6 100644
24173 --- a/mm/compaction.c
24174 +++ b/mm/compaction.c
24175 @@ -1593,10 +1593,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
24176 block_start_pfn(cc->migrate_pfn, cc->order);
24178 if (cc->last_migrated_pfn < current_block_start) {
24180 + cpu = get_cpu_light();
24181 + local_lock_irq(swapvec_lock);
24182 lru_add_drain_cpu(cpu);
24183 + local_unlock_irq(swapvec_lock);
24184 drain_local_pages(zone);
24187 /* No more flushing until we migrate again */
24188 cc->last_migrated_pfn = 0;
24190 diff --git a/mm/filemap.c b/mm/filemap.c
24191 index edfb90e3830c..a8d2c7a73d54 100644
24194 @@ -159,9 +159,12 @@ static int page_cache_tree_insert(struct address_space *mapping,
24195 * node->private_list is protected by
24196 * mapping->tree_lock.
24198 - if (!list_empty(&node->private_list))
24199 - list_lru_del(&workingset_shadow_nodes,
24200 + if (!list_empty(&node->private_list)) {
24201 + local_lock(workingset_shadow_lock);
24202 + list_lru_del(&__workingset_shadow_nodes,
24203 &node->private_list);
24204 + local_unlock(workingset_shadow_lock);
24209 @@ -217,8 +220,10 @@ static void page_cache_tree_delete(struct address_space *mapping,
24210 if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
24211 list_empty(&node->private_list)) {
24212 node->private_data = mapping;
24213 - list_lru_add(&workingset_shadow_nodes,
24214 - &node->private_list);
24215 + local_lock(workingset_shadow_lock);
24216 + list_lru_add(&__workingset_shadow_nodes,
24217 + &node->private_list);
24218 + local_unlock(workingset_shadow_lock);
24222 diff --git a/mm/highmem.c b/mm/highmem.c
24223 index 50b4ca6787f0..77518a3b35a1 100644
24226 @@ -29,10 +29,11 @@
24227 #include <linux/kgdb.h>
24228 #include <asm/tlbflush.h>
24231 +#ifndef CONFIG_PREEMPT_RT_FULL
24232 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
24233 DEFINE_PER_CPU(int, __kmap_atomic_idx);
24238 * Virtual_count is not a pure "count".
24239 @@ -107,8 +108,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
24240 unsigned long totalhigh_pages __read_mostly;
24241 EXPORT_SYMBOL(totalhigh_pages);
24244 +#ifndef CONFIG_PREEMPT_RT_FULL
24245 EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
24248 unsigned int nr_free_highpages (void)
24250 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
24251 index 47559cc0cdcc..1f2ebc924916 100644
24252 --- a/mm/memcontrol.c
24253 +++ b/mm/memcontrol.c
24255 #include <net/sock.h>
24256 #include <net/ip.h>
24258 +#include <linux/locallock.h>
24260 #include <asm/uaccess.h>
24262 @@ -92,6 +93,8 @@ int do_swap_account __read_mostly;
24263 #define do_swap_account 0
24266 +static DEFINE_LOCAL_IRQ_LOCK(event_lock);
24268 /* Whether legacy memory+swap accounting is active */
24269 static bool do_memsw_account(void)
24271 @@ -1692,6 +1695,7 @@ struct memcg_stock_pcp {
24272 #define FLUSHING_CACHED_CHARGE 0
24274 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
24275 +static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll);
24276 static DEFINE_MUTEX(percpu_charge_mutex);
24279 @@ -1714,7 +1718,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
24280 if (nr_pages > CHARGE_BATCH)
24283 - local_irq_save(flags);
24284 + local_lock_irqsave(memcg_stock_ll, flags);
24286 stock = this_cpu_ptr(&memcg_stock);
24287 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
24288 @@ -1722,7 +1726,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
24292 - local_irq_restore(flags);
24293 + local_unlock_irqrestore(memcg_stock_ll, flags);
24297 @@ -1749,13 +1753,13 @@ static void drain_local_stock(struct work_struct *dummy)
24298 struct memcg_stock_pcp *stock;
24299 unsigned long flags;
24301 - local_irq_save(flags);
24302 + local_lock_irqsave(memcg_stock_ll, flags);
24304 stock = this_cpu_ptr(&memcg_stock);
24305 drain_stock(stock);
24306 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
24308 - local_irq_restore(flags);
24309 + local_unlock_irqrestore(memcg_stock_ll, flags);
24313 @@ -1767,7 +1771,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
24314 struct memcg_stock_pcp *stock;
24315 unsigned long flags;
24317 - local_irq_save(flags);
24318 + local_lock_irqsave(memcg_stock_ll, flags);
24320 stock = this_cpu_ptr(&memcg_stock);
24321 if (stock->cached != memcg) { /* reset if necessary */
24322 @@ -1776,7 +1780,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
24324 stock->nr_pages += nr_pages;
24326 - local_irq_restore(flags);
24327 + local_unlock_irqrestore(memcg_stock_ll, flags);
24331 @@ -1792,7 +1796,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
24333 /* Notify other cpus that system-wide "drain" is running */
24335 - curcpu = get_cpu();
24336 + curcpu = get_cpu_light();
24337 for_each_online_cpu(cpu) {
24338 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
24339 struct mem_cgroup *memcg;
24340 @@ -1809,7 +1813,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
24341 schedule_work_on(cpu, &stock->work);
24347 mutex_unlock(&percpu_charge_mutex);
24349 @@ -4555,12 +4559,12 @@ static int mem_cgroup_move_account(struct page *page,
24353 - local_irq_disable();
24354 + local_lock_irq(event_lock);
24355 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
24356 memcg_check_events(to, page);
24357 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
24358 memcg_check_events(from, page);
24359 - local_irq_enable();
24360 + local_unlock_irq(event_lock);
24364 @@ -5435,10 +5439,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
24366 commit_charge(page, memcg, lrucare);
24368 - local_irq_disable();
24369 + local_lock_irq(event_lock);
24370 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
24371 memcg_check_events(memcg, page);
24372 - local_irq_enable();
24373 + local_unlock_irq(event_lock);
24375 if (do_memsw_account() && PageSwapCache(page)) {
24376 swp_entry_t entry = { .val = page_private(page) };
24377 @@ -5494,14 +5498,14 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
24378 memcg_oom_recover(memcg);
24381 - local_irq_save(flags);
24382 + local_lock_irqsave(event_lock, flags);
24383 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
24384 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
24385 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
24386 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
24387 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
24388 memcg_check_events(memcg, dummy_page);
24389 - local_irq_restore(flags);
24390 + local_unlock_irqrestore(event_lock, flags);
24392 if (!mem_cgroup_is_root(memcg))
24393 css_put_many(&memcg->css, nr_pages);
24394 @@ -5656,10 +5660,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
24396 commit_charge(newpage, memcg, false);
24398 - local_irq_save(flags);
24399 + local_lock_irqsave(event_lock, flags);
24400 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
24401 memcg_check_events(memcg, newpage);
24402 - local_irq_restore(flags);
24403 + local_unlock_irqrestore(event_lock, flags);
24406 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
24407 @@ -5850,6 +5854,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
24409 struct mem_cgroup *memcg, *swap_memcg;
24410 unsigned short oldid;
24411 + unsigned long flags;
24413 VM_BUG_ON_PAGE(PageLRU(page), page);
24414 VM_BUG_ON_PAGE(page_count(page), page);
24415 @@ -5890,12 +5895,16 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
24416 * important here to have the interrupts disabled because it is the
24417 * only synchronisation we have for updating the per-CPU variables.
24419 + local_lock_irqsave(event_lock, flags);
24420 +#ifndef CONFIG_PREEMPT_RT_BASE
24421 VM_BUG_ON(!irqs_disabled());
24423 mem_cgroup_charge_statistics(memcg, page, false, -1);
24424 memcg_check_events(memcg, page);
24426 if (!mem_cgroup_is_root(memcg))
24427 css_put(&memcg->css);
24428 + local_unlock_irqrestore(event_lock, flags);
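drain_all_stock() above switches from get_cpu() to get_cpu_light(), the -rt helper that in this series returns a stable CPU number by disabling migration rather than preemption. A hedged sketch of that usage, assuming put_cpu_light() as the matching release; the per-CPU work items are illustrative.

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

static void example_kick_other_cpus(struct work_struct __percpu *works)
{
	int curcpu, cpu;

	curcpu = get_cpu_light();
	for_each_online_cpu(cpu) {
		if (cpu == curcpu)
			continue;	/* the local CPU is handled directly */
		schedule_work_on(cpu, per_cpu_ptr(works, cpu));
	}
	put_cpu_light();
}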
24432 diff --git a/mm/mmu_context.c b/mm/mmu_context.c
24433 index 6f4d27c5bb32..5cd25c745a8f 100644
24434 --- a/mm/mmu_context.c
24435 +++ b/mm/mmu_context.c
24436 @@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm)
24437 struct task_struct *tsk = current;
24440 + preempt_disable_rt();
24441 active_mm = tsk->active_mm;
24442 if (active_mm != mm) {
24443 atomic_inc(&mm->mm_count);
24444 @@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm)
24447 switch_mm(active_mm, mm, tsk);
24448 + preempt_enable_rt();
24450 #ifdef finish_arch_post_lock_switch
24451 finish_arch_post_lock_switch();
24452 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
24453 index 5b06fb385dd7..86457a2fca20 100644
24454 --- a/mm/page_alloc.c
24455 +++ b/mm/page_alloc.c
24457 #include <linux/page_ext.h>
24458 #include <linux/hugetlb.h>
24459 #include <linux/sched/rt.h>
24460 +#include <linux/locallock.h>
24461 #include <linux/page_owner.h>
24462 #include <linux/kthread.h>
24463 #include <linux/memcontrol.h>
24464 @@ -281,6 +282,18 @@ EXPORT_SYMBOL(nr_node_ids);
24465 EXPORT_SYMBOL(nr_online_nodes);
24468 +static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
24470 +#ifdef CONFIG_PREEMPT_RT_BASE
24471 +# define cpu_lock_irqsave(cpu, flags) \
24472 + local_lock_irqsave_on(pa_lock, flags, cpu)
24473 +# define cpu_unlock_irqrestore(cpu, flags) \
24474 + local_unlock_irqrestore_on(pa_lock, flags, cpu)
24476 +# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
24477 +# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
24480 int page_group_by_mobility_disabled __read_mostly;
24482 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
24483 @@ -1072,7 +1085,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
24484 #endif /* CONFIG_DEBUG_VM */
24487 - * Frees a number of pages from the PCP lists
24488 + * Frees a number of pages which have been collected from the pcp lists.
24489 * Assumes all pages on list are in same zone, and of same order.
24490 * count is the number of pages to free.
24492 @@ -1083,19 +1096,58 @@ static bool bulkfree_pcp_prepare(struct page *page)
24493 * pinned" detection logic.
24495 static void free_pcppages_bulk(struct zone *zone, int count,
24496 - struct per_cpu_pages *pcp)
24497 + struct list_head *list)
24499 - int migratetype = 0;
24500 - int batch_free = 0;
24501 unsigned long nr_scanned;
24502 bool isolated_pageblocks;
24503 + unsigned long flags;
24505 + spin_lock_irqsave(&zone->lock, flags);
24507 - spin_lock(&zone->lock);
24508 isolated_pageblocks = has_isolate_pageblock(zone);
24509 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
24511 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
24513 + while (!list_empty(list)) {
24514 + struct page *page;
24515 + int mt; /* migratetype of the to-be-freed page */
24517 + page = list_first_entry(list, struct page, lru);
24518 + /* must delete as __free_one_page list manipulates */
24519 + list_del(&page->lru);
24521 + mt = get_pcppage_migratetype(page);
24522 + /* MIGRATE_ISOLATE page should not go to pcplists */
24523 + VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
24524 + /* Pageblock could have been isolated meanwhile */
24525 + if (unlikely(isolated_pageblocks))
24526 + mt = get_pageblock_migratetype(page);
24528 + if (bulkfree_pcp_prepare(page))
24531 + __free_one_page(page, page_to_pfn(page), zone, 0, mt);
24532 + trace_mm_page_pcpu_drain(page, 0, mt);
24535 + WARN_ON(count != 0);
24536 + spin_unlock_irqrestore(&zone->lock, flags);
23540 + * Moves a number of pages from the PCP lists to a separate list,
23541 + * which is then freed outside of the locked region.
24543 + * Assumes all pages on list are in same zone, and of same order.
24544 + * count is the number of pages to free.
24546 +static void isolate_pcp_pages(int count, struct per_cpu_pages *src,
24547 + struct list_head *dst)
24549 + int migratetype = 0;
24550 + int batch_free = 0;
24554 struct list_head *list;
24555 @@ -1111,7 +1163,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
24557 if (++migratetype == MIGRATE_PCPTYPES)
24559 - list = &pcp->lists[migratetype];
24560 + list = &src->lists[migratetype];
24561 } while (list_empty(list));
24563 /* This is the only non-empty list. Free them all. */
24564 @@ -1119,27 +1171,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
24565 batch_free = count;
24568 - int mt; /* migratetype of the to-be-freed page */
24570 page = list_last_entry(list, struct page, lru);
24571 - /* must delete as __free_one_page list manipulates */
24572 list_del(&page->lru);
24574 - mt = get_pcppage_migratetype(page);
24575 - /* MIGRATE_ISOLATE page should not go to pcplists */
24576 - VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
24577 - /* Pageblock could have been isolated meanwhile */
24578 - if (unlikely(isolated_pageblocks))
24579 - mt = get_pageblock_migratetype(page);
24581 - if (bulkfree_pcp_prepare(page))
24584 - __free_one_page(page, page_to_pfn(page), zone, 0, mt);
24585 - trace_mm_page_pcpu_drain(page, 0, mt);
24586 + list_add(&page->lru, dst);
24587 } while (--count && --batch_free && !list_empty(list));
24589 - spin_unlock(&zone->lock);
24592 static void free_one_page(struct zone *zone,
24593 @@ -1148,7 +1185,9 @@ static void free_one_page(struct zone *zone,
24596 unsigned long nr_scanned;
24597 - spin_lock(&zone->lock);
24598 + unsigned long flags;
24600 + spin_lock_irqsave(&zone->lock, flags);
24601 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
24603 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
24604 @@ -1158,7 +1197,7 @@ static void free_one_page(struct zone *zone,
24605 migratetype = get_pfnblock_migratetype(page, pfn);
24607 __free_one_page(page, pfn, zone, order, migratetype);
24608 - spin_unlock(&zone->lock);
24609 + spin_unlock_irqrestore(&zone->lock, flags);
24612 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
24613 @@ -1244,10 +1283,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
24616 migratetype = get_pfnblock_migratetype(page, pfn);
24617 - local_irq_save(flags);
24618 + local_lock_irqsave(pa_lock, flags);
24619 __count_vm_events(PGFREE, 1 << order);
24620 free_one_page(page_zone(page), page, pfn, order, migratetype);
24621 - local_irq_restore(flags);
24622 + local_unlock_irqrestore(pa_lock, flags);
24625 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
24626 @@ -2246,16 +2285,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
24627 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
24629 unsigned long flags;
24631 int to_drain, batch;
24633 - local_irq_save(flags);
24634 + local_lock_irqsave(pa_lock, flags);
24635 batch = READ_ONCE(pcp->batch);
24636 to_drain = min(pcp->count, batch);
24637 if (to_drain > 0) {
24638 - free_pcppages_bulk(zone, to_drain, pcp);
24639 + isolate_pcp_pages(to_drain, pcp, &dst);
24640 pcp->count -= to_drain;
24642 - local_irq_restore(flags);
24643 + local_unlock_irqrestore(pa_lock, flags);
24644 + free_pcppages_bulk(zone, to_drain, &dst);
24648 @@ -2271,16 +2312,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
24649 unsigned long flags;
24650 struct per_cpu_pageset *pset;
24651 struct per_cpu_pages *pcp;
24655 - local_irq_save(flags);
24656 + cpu_lock_irqsave(cpu, flags);
24657 pset = per_cpu_ptr(zone->pageset, cpu);
24660 - if (pcp->count) {
24661 - free_pcppages_bulk(zone, pcp->count, pcp);
24662 + count = pcp->count;
24664 + isolate_pcp_pages(count, pcp, &dst);
24667 - local_irq_restore(flags);
24668 + cpu_unlock_irqrestore(cpu, flags);
24670 + free_pcppages_bulk(zone, count, &dst);
24674 @@ -2366,8 +2412,17 @@ void drain_all_pages(struct zone *zone)
24676 cpumask_clear_cpu(cpu, &cpus_with_pcps);
24678 +#ifndef CONFIG_PREEMPT_RT_BASE
24679 on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
24682 + for_each_cpu(cpu, &cpus_with_pcps) {
24684 + drain_pages_zone(cpu, zone);
24686 + drain_pages(cpu);
24691 #ifdef CONFIG_HIBERNATION
24692 @@ -2427,7 +2482,7 @@ void free_hot_cold_page(struct page *page, bool cold)
24694 migratetype = get_pfnblock_migratetype(page, pfn);
24695 set_pcppage_migratetype(page, migratetype);
24696 - local_irq_save(flags);
24697 + local_lock_irqsave(pa_lock, flags);
24698 __count_vm_event(PGFREE);
24701 @@ -2453,12 +2508,17 @@ void free_hot_cold_page(struct page *page, bool cold)
24703 if (pcp->count >= pcp->high) {
24704 unsigned long batch = READ_ONCE(pcp->batch);
24705 - free_pcppages_bulk(zone, batch, pcp);
24708 + isolate_pcp_pages(batch, pcp, &dst);
24709 pcp->count -= batch;
24710 + local_unlock_irqrestore(pa_lock, flags);
24711 + free_pcppages_bulk(zone, batch, &dst);
24716 - local_irq_restore(flags);
24717 + local_unlock_irqrestore(pa_lock, flags);
24721 @@ -2600,7 +2660,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
24722 struct per_cpu_pages *pcp;
24723 struct list_head *list;
24725 - local_irq_save(flags);
24726 + local_lock_irqsave(pa_lock, flags);
24728 pcp = &this_cpu_ptr(zone->pageset)->pcp;
24729 list = &pcp->lists[migratetype];
24730 @@ -2627,7 +2687,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
24731 * allocate greater than order-1 page units with __GFP_NOFAIL.
24733 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
24734 - spin_lock_irqsave(&zone->lock, flags);
24735 + local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
24739 @@ -2639,22 +2699,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
24741 page = __rmqueue(zone, order, migratetype);
24742 } while (page && check_new_pages(page, order));
24743 - spin_unlock(&zone->lock);
24746 + spin_unlock(&zone->lock);
24749 __mod_zone_freepage_state(zone, -(1 << order),
24750 get_pcppage_migratetype(page));
24751 + spin_unlock(&zone->lock);
24754 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
24755 zone_statistics(preferred_zone, zone, gfp_flags);
24756 - local_irq_restore(flags);
24757 + local_unlock_irqrestore(pa_lock, flags);
24759 VM_BUG_ON_PAGE(bad_range(zone, page), page);
24763 - local_irq_restore(flags);
24764 + local_unlock_irqrestore(pa_lock, flags);
24768 @@ -6532,7 +6594,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
24769 int cpu = (unsigned long)hcpu;
24771 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
24772 + local_lock_irq_on(swapvec_lock, cpu);
24773 lru_add_drain_cpu(cpu);
24774 + local_unlock_irq_on(swapvec_lock, cpu);
24778 @@ -6558,6 +6622,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
24779 void __init page_alloc_init(void)
24781 hotcpu_notifier(page_alloc_cpu_notify, 0);
24782 + local_irq_lock_init(pa_lock);
24786 @@ -7386,7 +7451,7 @@ void zone_pcp_reset(struct zone *zone)
24787 struct per_cpu_pageset *pset;
24789 /* avoid races with drain_pages() */
24790 - local_irq_save(flags);
24791 + local_lock_irqsave(pa_lock, flags);
24792 if (zone->pageset != &boot_pageset) {
24793 for_each_online_cpu(cpu) {
24794 pset = per_cpu_ptr(zone->pageset, cpu);
24795 @@ -7395,7 +7460,7 @@ void zone_pcp_reset(struct zone *zone)
24796 free_percpu(zone->pageset);
24797 zone->pageset = &boot_pageset;
24799 - local_irq_restore(flags);
24800 + local_unlock_irqrestore(pa_lock, flags);
24803 #ifdef CONFIG_MEMORY_HOTREMOVE
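What the mm/page_alloc.c hunks above have in common: the per-CPU page lists are drained in two steps, with pages detached while the (now IRQ-safe) zone->lock or the pa_lock local lock is held, and handed back to the buddy allocator only after the lock is dropped. As a condensed, schematic restatement of the changed drain_zone_pages() — not the literal patch text — the flow is roughly:

    static void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
    {
            unsigned long flags;
            LIST_HEAD(dst);
            int to_drain, batch;

            /* pa_lock: per-CPU local lock on RT, plain IRQ-off otherwise */
            local_lock_irqsave(pa_lock, flags);
            batch = READ_ONCE(pcp->batch);
            to_drain = min(pcp->count, batch);
            if (to_drain > 0) {
                    /* detach pages while the lock is held ... */
                    isolate_pcp_pages(to_drain, pcp, &dst);
                    pcp->count -= to_drain;
            }
            local_unlock_irqrestore(pa_lock, flags);
            /* ... and return them to the buddy lists outside of it */
            free_pcppages_bulk(zone, to_drain, &dst);
    }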
24804 diff --git a/mm/percpu.c b/mm/percpu.c
24805 index f014cebbf405..4e739fcf91bf 100644
24808 @@ -1283,6 +1283,31 @@ void free_percpu(void __percpu *ptr)
24810 EXPORT_SYMBOL_GPL(free_percpu);
24812 +bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
24815 + const size_t static_size = __per_cpu_end - __per_cpu_start;
24816 + void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
24817 + unsigned int cpu;
24819 + for_each_possible_cpu(cpu) {
24820 + void *start = per_cpu_ptr(base, cpu);
24821 + void *va = (void *)addr;
24823 + if (va >= start && va < start + static_size) {
24825 + *can_addr = (unsigned long) (va - start);
24826 + *can_addr += (unsigned long)
24827 + per_cpu_ptr(base, get_boot_cpu_id());
24833 + /* on UP, can't distinguish from other static vars, always false */
24838 * is_kernel_percpu_address - test whether address is from static percpu area
24839 * @addr: address to test
24840 @@ -1296,20 +1321,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
24842 bool is_kernel_percpu_address(unsigned long addr)
24845 - const size_t static_size = __per_cpu_end - __per_cpu_start;
24846 - void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
24847 - unsigned int cpu;
24849 - for_each_possible_cpu(cpu) {
24850 - void *start = per_cpu_ptr(base, cpu);
24852 - if ((void *)addr >= start && (void *)addr < start + static_size)
24856 - /* on UP, can't distinguish from other static vars, always false */
24858 + return __is_kernel_percpu_address(addr, NULL);
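The new __is_kernel_percpu_address() keeps the old yes/no answer but can also report, through the optional can_addr argument, the address of the boot CPU's copy of the same object, so that every CPU's instance of one static per-CPU variable maps to a single canonical address. A hedged usage sketch (the caller shown here is illustrative and not part of this patch; using it to give per-CPU locks one lockdep identity is the presumed consumer, not stated in these hunks):

    unsigned long canon;

    if (__is_kernel_percpu_address((unsigned long)ptr, &canon)) {
            /*
             * 'canon' now identifies the boot-CPU instance of *ptr,
             * giving all CPUs' copies a common identity.
             */
    }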
24862 diff --git a/mm/slab.h b/mm/slab.h
24863 index ceb7d70cdb76..dfd281e43fbe 100644
24866 @@ -426,7 +426,11 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
24867 * The slab lists for all objects.
24869 struct kmem_cache_node {
24870 +#ifdef CONFIG_SLUB
24871 + raw_spinlock_t list_lock;
24873 spinlock_t list_lock;
24877 struct list_head slabs_partial; /* partial list first, better asm code */
24878 diff --git a/mm/slub.c b/mm/slub.c
24879 index 58c7526f8de2..6d72b7f87129 100644
24882 @@ -1141,7 +1141,7 @@ static noinline int free_debug_processing(
24883 unsigned long uninitialized_var(flags);
24886 - spin_lock_irqsave(&n->list_lock, flags);
24887 + raw_spin_lock_irqsave(&n->list_lock, flags);
24890 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
24891 @@ -1176,7 +1176,7 @@ static noinline int free_debug_processing(
24895 - spin_unlock_irqrestore(&n->list_lock, flags);
24896 + raw_spin_unlock_irqrestore(&n->list_lock, flags);
24898 slab_fix(s, "Object at 0x%p not freed", object);
24900 @@ -1304,6 +1304,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
24902 #endif /* CONFIG_SLUB_DEBUG */
24904 +struct slub_free_list {
24905 + raw_spinlock_t lock;
24906 + struct list_head list;
24908 +static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
24911 * Hooks for other subsystems that check memory allocations. In a typical
24912 * production configuration these hooks all should produce no code at all.
24913 @@ -1527,10 +1533,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
24917 + bool enableirqs = false;
24919 flags &= gfp_allowed_mask;
24921 if (gfpflags_allow_blocking(flags))
24922 + enableirqs = true;
24923 +#ifdef CONFIG_PREEMPT_RT_FULL
24924 + if (system_state == SYSTEM_RUNNING)
24925 + enableirqs = true;
24928 local_irq_enable();
24930 flags |= s->allocflags;
24931 @@ -1605,7 +1618,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
24935 - if (gfpflags_allow_blocking(flags))
24937 local_irq_disable();
24940 @@ -1664,6 +1677,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
24941 __free_pages(page, order);
24944 +static void free_delayed(struct list_head *h)
24946 + while (!list_empty(h)) {
24947 + struct page *page = list_first_entry(h, struct page, lru);
24949 + list_del(&page->lru);
24950 + __free_slab(page->slab_cache, page);
24954 #define need_reserve_slab_rcu \
24955 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
24957 @@ -1695,6 +1718,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
24960 call_rcu(head, rcu_free_slab);
24961 + } else if (irqs_disabled()) {
24962 + struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
24964 + raw_spin_lock(&f->lock);
24965 + list_add(&page->lru, &f->list);
24966 + raw_spin_unlock(&f->lock);
24968 __free_slab(s, page);
24970 @@ -1802,7 +1831,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
24971 if (!n || !n->nr_partial)
24974 - spin_lock(&n->list_lock);
24975 + raw_spin_lock(&n->list_lock);
24976 list_for_each_entry_safe(page, page2, &n->partial, lru) {
24979 @@ -1827,7 +1856,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
24983 - spin_unlock(&n->list_lock);
24984 + raw_spin_unlock(&n->list_lock);
24988 @@ -2073,7 +2102,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
24989 * that acquire_slab() will see a slab page that
24992 - spin_lock(&n->list_lock);
24993 + raw_spin_lock(&n->list_lock);
24997 @@ -2084,7 +2113,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
24998 * slabs from diagnostic functions will not see
24999 * any frozen slabs.
25001 - spin_lock(&n->list_lock);
25002 + raw_spin_lock(&n->list_lock);
25006 @@ -2119,7 +2148,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
25010 - spin_unlock(&n->list_lock);
25011 + raw_spin_unlock(&n->list_lock);
25014 stat(s, DEACTIVATE_EMPTY);
25015 @@ -2151,10 +2180,10 @@ static void unfreeze_partials(struct kmem_cache *s,
25016 n2 = get_node(s, page_to_nid(page));
25019 - spin_unlock(&n->list_lock);
25020 + raw_spin_unlock(&n->list_lock);
25023 - spin_lock(&n->list_lock);
25024 + raw_spin_lock(&n->list_lock);
25028 @@ -2183,7 +2212,7 @@ static void unfreeze_partials(struct kmem_cache *s,
25032 - spin_unlock(&n->list_lock);
25033 + raw_spin_unlock(&n->list_lock);
25035 while (discard_page) {
25036 page = discard_page;
25037 @@ -2222,14 +2251,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
25038 pobjects = oldpage->pobjects;
25039 pages = oldpage->pages;
25040 if (drain && pobjects > s->cpu_partial) {
25041 + struct slub_free_list *f;
25042 unsigned long flags;
25043 + LIST_HEAD(tofree);
25045 * partial array is full. Move the existing
25046 * set to the per node partial list.
25048 local_irq_save(flags);
25049 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
25050 + f = this_cpu_ptr(&slub_free_list);
25051 + raw_spin_lock(&f->lock);
25052 + list_splice_init(&f->list, &tofree);
25053 + raw_spin_unlock(&f->lock);
25054 local_irq_restore(flags);
25055 + free_delayed(&tofree);
25059 @@ -2301,7 +2337,22 @@ static bool has_cpu_slab(int cpu, void *info)
25061 static void flush_all(struct kmem_cache *s)
25063 + LIST_HEAD(tofree);
25066 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
25067 + for_each_online_cpu(cpu) {
25068 + struct slub_free_list *f;
25070 + if (!has_cpu_slab(cpu, s))
25073 + f = &per_cpu(slub_free_list, cpu);
25074 + raw_spin_lock_irq(&f->lock);
25075 + list_splice_init(&f->list, &tofree);
25076 + raw_spin_unlock_irq(&f->lock);
25077 + free_delayed(&tofree);
25082 @@ -2356,10 +2407,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
25083 unsigned long x = 0;
25086 - spin_lock_irqsave(&n->list_lock, flags);
25087 + raw_spin_lock_irqsave(&n->list_lock, flags);
25088 list_for_each_entry(page, &n->partial, lru)
25089 x += get_count(page);
25090 - spin_unlock_irqrestore(&n->list_lock, flags);
25091 + raw_spin_unlock_irqrestore(&n->list_lock, flags);
25094 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
25095 @@ -2497,8 +2548,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
25096 * already disabled (which is the case for bulk allocation).
25098 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
25099 - unsigned long addr, struct kmem_cache_cpu *c)
25100 + unsigned long addr, struct kmem_cache_cpu *c,
25101 + struct list_head *to_free)
25103 + struct slub_free_list *f;
25107 @@ -2558,6 +2611,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
25108 VM_BUG_ON(!c->page->frozen);
25109 c->freelist = get_freepointer(s, freelist);
25110 c->tid = next_tid(c->tid);
25113 + f = this_cpu_ptr(&slub_free_list);
25114 + raw_spin_lock(&f->lock);
25115 + list_splice_init(&f->list, to_free);
25116 + raw_spin_unlock(&f->lock);
25121 @@ -2589,7 +2649,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
25122 deactivate_slab(s, page, get_freepointer(s, freelist));
25124 c->freelist = NULL;
25130 @@ -2601,6 +2661,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
25133 unsigned long flags;
25134 + LIST_HEAD(tofree);
25136 local_irq_save(flags);
25137 #ifdef CONFIG_PREEMPT
25138 @@ -2612,8 +2673,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
25139 c = this_cpu_ptr(s->cpu_slab);
25142 - p = ___slab_alloc(s, gfpflags, node, addr, c);
25143 + p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
25144 local_irq_restore(flags);
25145 + free_delayed(&tofree);
25149 @@ -2799,7 +2861,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
25153 - spin_unlock_irqrestore(&n->list_lock, flags);
25154 + raw_spin_unlock_irqrestore(&n->list_lock, flags);
25157 prior = page->freelist;
25158 @@ -2831,7 +2893,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
25159 * Otherwise the list_lock will synchronize with
25160 * other processors updating the list of slabs.
25162 - spin_lock_irqsave(&n->list_lock, flags);
25163 + raw_spin_lock_irqsave(&n->list_lock, flags);
25167 @@ -2873,7 +2935,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
25168 add_partial(n, page, DEACTIVATE_TO_TAIL);
25169 stat(s, FREE_ADD_PARTIAL);
25171 - spin_unlock_irqrestore(&n->list_lock, flags);
25172 + raw_spin_unlock_irqrestore(&n->list_lock, flags);
25176 @@ -2888,7 +2950,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
25177 remove_full(s, n, page);
25180 - spin_unlock_irqrestore(&n->list_lock, flags);
25181 + raw_spin_unlock_irqrestore(&n->list_lock, flags);
25182 stat(s, FREE_SLAB);
25183 discard_slab(s, page);
25185 @@ -3093,6 +3155,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
25188 struct kmem_cache_cpu *c;
25189 + LIST_HEAD(to_free);
25192 /* memcg and kmem_cache debug support */
25193 @@ -3116,7 +3179,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
25194 * of re-populating per CPU c->freelist
25196 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
25198 + _RET_IP_, c, &to_free);
25199 if (unlikely(!p[i]))
25202 @@ -3128,6 +3191,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
25204 c->tid = next_tid(c->tid);
25205 local_irq_enable();
25206 + free_delayed(&to_free);
25208 /* Clear memory outside IRQ disabled fastpath loop */
25209 if (unlikely(flags & __GFP_ZERO)) {
25210 @@ -3275,7 +3339,7 @@ static void
25211 init_kmem_cache_node(struct kmem_cache_node *n)
25214 - spin_lock_init(&n->list_lock);
25215 + raw_spin_lock_init(&n->list_lock);
25216 INIT_LIST_HEAD(&n->partial);
25217 #ifdef CONFIG_SLUB_DEBUG
25218 atomic_long_set(&n->nr_slabs, 0);
25219 @@ -3619,6 +3683,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
25222 #ifdef CONFIG_SLUB_DEBUG
25223 +#ifdef CONFIG_PREEMPT_RT_BASE
25224 + /* XXX move out of irq-off section */
25225 + slab_err(s, page, text, s->name);
25227 void *addr = page_address(page);
25229 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
25230 @@ -3639,6 +3707,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
25238 @@ -3652,7 +3721,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
25239 struct page *page, *h;
25241 BUG_ON(irqs_disabled());
25242 - spin_lock_irq(&n->list_lock);
25243 + raw_spin_lock_irq(&n->list_lock);
25244 list_for_each_entry_safe(page, h, &n->partial, lru) {
25245 if (!page->inuse) {
25246 remove_partial(n, page);
25247 @@ -3662,7 +3731,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
25248 "Objects remaining in %s on __kmem_cache_shutdown()");
25251 - spin_unlock_irq(&n->list_lock);
25252 + raw_spin_unlock_irq(&n->list_lock);
25254 list_for_each_entry_safe(page, h, &discard, lru)
25255 discard_slab(s, page);
25256 @@ -3905,7 +3974,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
25257 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
25258 INIT_LIST_HEAD(promote + i);
25260 - spin_lock_irqsave(&n->list_lock, flags);
25261 + raw_spin_lock_irqsave(&n->list_lock, flags);
25264 * Build lists of slabs to discard or promote.
25265 @@ -3936,7 +4005,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
25266 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
25267 list_splice(promote + i, &n->partial);
25269 - spin_unlock_irqrestore(&n->list_lock, flags);
25270 + raw_spin_unlock_irqrestore(&n->list_lock, flags);
25272 /* Release empty slabs */
25273 list_for_each_entry_safe(page, t, &discard, lru)
25274 @@ -4112,6 +4181,12 @@ void __init kmem_cache_init(void)
25276 static __initdata struct kmem_cache boot_kmem_cache,
25277 boot_kmem_cache_node;
25280 + for_each_possible_cpu(cpu) {
25281 + raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
25282 + INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
25285 if (debug_guardpage_minorder())
25286 slub_max_order = 0;
25287 @@ -4320,7 +4395,7 @@ static int validate_slab_node(struct kmem_cache *s,
25289 unsigned long flags;
25291 - spin_lock_irqsave(&n->list_lock, flags);
25292 + raw_spin_lock_irqsave(&n->list_lock, flags);
25294 list_for_each_entry(page, &n->partial, lru) {
25295 validate_slab_slab(s, page, map);
25296 @@ -4342,7 +4417,7 @@ static int validate_slab_node(struct kmem_cache *s,
25297 s->name, count, atomic_long_read(&n->nr_slabs));
25300 - spin_unlock_irqrestore(&n->list_lock, flags);
25301 + raw_spin_unlock_irqrestore(&n->list_lock, flags);
25305 @@ -4530,12 +4605,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
25306 if (!atomic_long_read(&n->nr_slabs))
25309 - spin_lock_irqsave(&n->list_lock, flags);
25310 + raw_spin_lock_irqsave(&n->list_lock, flags);
25311 list_for_each_entry(page, &n->partial, lru)
25312 process_slab(&t, s, page, alloc, map);
25313 list_for_each_entry(page, &n->full, lru)
25314 process_slab(&t, s, page, alloc, map);
25315 - spin_unlock_irqrestore(&n->list_lock, flags);
25316 + raw_spin_unlock_irqrestore(&n->list_lock, flags);
25319 for (i = 0; i < t.count; i++) {
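The SLUB changes do two related things: kmem_cache_node.list_lock becomes a raw spinlock (so it stays a real spinlock on RT), and slabs that would otherwise be returned to the page allocator with interrupts disabled are parked on a per-CPU slub_free_list and released later by free_delayed() once interrupts are enabled again. Schematically, condensed from the hunks above rather than literal patch text:

    struct slub_free_list {
            raw_spinlock_t lock;
            struct list_head list;
    };
    static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);

    /* free path, possibly running with IRQs off */
    if (irqs_disabled()) {
            struct slub_free_list *f = this_cpu_ptr(&slub_free_list);

            raw_spin_lock(&f->lock);
            list_add(&page->lru, &f->list);         /* defer the free */
            raw_spin_unlock(&f->lock);
    } else {
            __free_slab(s, page);
    }

    /* later, with IRQs on (after ___slab_alloc(), flush_all(), ...) */
    static void free_delayed(struct list_head *h)
    {
            while (!list_empty(h)) {
                    struct page *page = list_first_entry(h, struct page, lru);

                    list_del(&page->lru);
                    __free_slab(page->slab_cache, page);
            }
    }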
25320 diff --git a/mm/swap.c b/mm/swap.c
25321 index 4dcf852e1e6d..69c3a5b24060 100644
25325 #include <linux/memcontrol.h>
25326 #include <linux/gfp.h>
25327 #include <linux/uio.h>
25328 +#include <linux/locallock.h>
25329 #include <linux/hugetlb.h>
25330 #include <linux/page_idle.h>
25332 @@ -50,6 +51,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
25334 static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
25336 +static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
25337 +DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
25340 * This path almost never happens for VM activity - pages are normally
25341 @@ -240,11 +243,11 @@ void rotate_reclaimable_page(struct page *page)
25342 unsigned long flags;
25345 - local_irq_save(flags);
25346 + local_lock_irqsave(rotate_lock, flags);
25347 pvec = this_cpu_ptr(&lru_rotate_pvecs);
25348 if (!pagevec_add(pvec, page) || PageCompound(page))
25349 pagevec_move_tail(pvec);
25350 - local_irq_restore(flags);
25351 + local_unlock_irqrestore(rotate_lock, flags);
25355 @@ -294,12 +297,13 @@ void activate_page(struct page *page)
25357 page = compound_head(page);
25358 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
25359 - struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
25360 + struct pagevec *pvec = &get_locked_var(swapvec_lock,
25361 + activate_page_pvecs);
25364 if (!pagevec_add(pvec, page) || PageCompound(page))
25365 pagevec_lru_move_fn(pvec, __activate_page, NULL);
25366 - put_cpu_var(activate_page_pvecs);
25367 + put_locked_var(swapvec_lock, activate_page_pvecs);
25371 @@ -326,7 +330,7 @@ void activate_page(struct page *page)
25373 static void __lru_cache_activate_page(struct page *page)
25375 - struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
25376 + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
25380 @@ -348,7 +352,7 @@ static void __lru_cache_activate_page(struct page *page)
25384 - put_cpu_var(lru_add_pvec);
25385 + put_locked_var(swapvec_lock, lru_add_pvec);
25389 @@ -390,12 +394,12 @@ EXPORT_SYMBOL(mark_page_accessed);
25391 static void __lru_cache_add(struct page *page)
25393 - struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
25394 + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
25397 if (!pagevec_add(pvec, page) || PageCompound(page))
25398 __pagevec_lru_add(pvec);
25399 - put_cpu_var(lru_add_pvec);
25400 + put_locked_var(swapvec_lock, lru_add_pvec);
25404 @@ -593,9 +597,15 @@ void lru_add_drain_cpu(int cpu)
25405 unsigned long flags;
25407 /* No harm done if a racing interrupt already did this */
25408 - local_irq_save(flags);
25409 +#ifdef CONFIG_PREEMPT_RT_BASE
25410 + local_lock_irqsave_on(rotate_lock, flags, cpu);
25411 pagevec_move_tail(pvec);
25412 - local_irq_restore(flags);
25413 + local_unlock_irqrestore_on(rotate_lock, flags, cpu);
25415 + local_lock_irqsave(rotate_lock, flags);
25416 + pagevec_move_tail(pvec);
25417 + local_unlock_irqrestore(rotate_lock, flags);
25421 pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
25422 @@ -627,11 +637,12 @@ void deactivate_file_page(struct page *page)
25425 if (likely(get_page_unless_zero(page))) {
25426 - struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
25427 + struct pagevec *pvec = &get_locked_var(swapvec_lock,
25428 + lru_deactivate_file_pvecs);
25430 if (!pagevec_add(pvec, page) || PageCompound(page))
25431 pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
25432 - put_cpu_var(lru_deactivate_file_pvecs);
25433 + put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
25437 @@ -646,27 +657,31 @@ void deactivate_file_page(struct page *page)
25438 void deactivate_page(struct page *page)
25440 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
25441 - struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
25442 + struct pagevec *pvec = &get_locked_var(swapvec_lock,
25443 + lru_deactivate_pvecs);
25446 if (!pagevec_add(pvec, page) || PageCompound(page))
25447 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
25448 - put_cpu_var(lru_deactivate_pvecs);
25449 + put_locked_var(swapvec_lock, lru_deactivate_pvecs);
25453 void lru_add_drain(void)
25455 - lru_add_drain_cpu(get_cpu());
25457 + lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
25458 + local_unlock_cpu(swapvec_lock);
25461 -static void lru_add_drain_per_cpu(struct work_struct *dummy)
25462 +#ifdef CONFIG_PREEMPT_RT_BASE
25463 +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
25466 + local_lock_on(swapvec_lock, cpu);
25467 + lru_add_drain_cpu(cpu);
25468 + local_unlock_on(swapvec_lock, cpu);
25471 -static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
25475 * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
25476 @@ -686,6 +701,22 @@ static int __init lru_init(void)
25478 early_initcall(lru_init);
25480 +static void lru_add_drain_per_cpu(struct work_struct *dummy)
25485 +static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
25486 +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
25488 + struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
25490 + INIT_WORK(work, lru_add_drain_per_cpu);
25491 + queue_work_on(cpu, lru_add_drain_wq, work);
25492 + cpumask_set_cpu(cpu, has_work);
25496 void lru_add_drain_all(void)
25498 static DEFINE_MUTEX(lock);
25499 @@ -697,21 +728,18 @@ void lru_add_drain_all(void)
25500 cpumask_clear(&has_work);
25502 for_each_online_cpu(cpu) {
25503 - struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
25505 if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
25506 pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
25507 pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
25508 pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
25509 - need_activate_page_drain(cpu)) {
25510 - INIT_WORK(work, lru_add_drain_per_cpu);
25511 - queue_work_on(cpu, lru_add_drain_wq, work);
25512 - cpumask_set_cpu(cpu, &has_work);
25514 + need_activate_page_drain(cpu))
25515 + remote_lru_add_drain(cpu, &has_work);
25518 +#ifndef CONFIG_PREEMPT_RT_BASE
25519 for_each_cpu(cpu, &has_work)
25520 flush_work(&per_cpu(lru_add_drain_work, cpu));
25524 mutex_unlock(&lock);
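In mm/swap.c the per-CPU pagevecs stop relying on get_cpu_var()/put_cpu_var() (i.e. on disabling preemption) and are instead serialized by the swapvec_lock and rotate_lock local locks, which behave like the old preempt/IRQ disabling on a non-RT build and become per-CPU sleeping locks on RT. The conversion is mechanical; a minimal sketch of the locking pattern, with unrelated details elided:

    static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);

    static void __lru_cache_add(struct page *page)
    {
            /* was: struct pagevec *pvec = &get_cpu_var(lru_add_pvec); */
            struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);

            if (!pagevec_add(pvec, page) || PageCompound(page))
                    __pagevec_lru_add(pvec);

            /* was: put_cpu_var(lru_add_pvec); */
            put_locked_var(swapvec_lock, lru_add_pvec);
    }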
25525 diff --git a/mm/truncate.c b/mm/truncate.c
25526 index 8d8c62d89e6d..5bf1bd25d077 100644
25527 --- a/mm/truncate.c
25528 +++ b/mm/truncate.c
25529 @@ -62,9 +62,12 @@ static void clear_exceptional_entry(struct address_space *mapping,
25530 * protected by mapping->tree_lock.
25532 if (!workingset_node_shadows(node) &&
25533 - !list_empty(&node->private_list))
25534 - list_lru_del(&workingset_shadow_nodes,
25535 + !list_empty(&node->private_list)) {
25536 + local_lock(workingset_shadow_lock);
25537 + list_lru_del(&__workingset_shadow_nodes,
25538 &node->private_list);
25539 + local_unlock(workingset_shadow_lock);
25541 __radix_tree_delete_node(&mapping->page_tree, node);
25543 spin_unlock_irq(&mapping->tree_lock);
25544 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
25545 index f2481cb4e6b2..db4de08fa97c 100644
25548 @@ -845,7 +845,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
25549 struct vmap_block *vb;
25550 struct vmap_area *va;
25551 unsigned long vb_idx;
25553 + int node, err, cpu;
25556 node = numa_node_id();
25557 @@ -888,11 +888,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
25559 radix_tree_preload_end();
25561 - vbq = &get_cpu_var(vmap_block_queue);
25562 + cpu = get_cpu_light();
25563 + vbq = this_cpu_ptr(&vmap_block_queue);
25564 spin_lock(&vbq->lock);
25565 list_add_tail_rcu(&vb->free_list, &vbq->free);
25566 spin_unlock(&vbq->lock);
25567 - put_cpu_var(vmap_block_queue);
25572 @@ -961,6 +962,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
25573 struct vmap_block *vb;
25574 void *vaddr = NULL;
25575 unsigned int order;
25578 BUG_ON(offset_in_page(size));
25579 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
25580 @@ -975,7 +977,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
25581 order = get_order(size);
25584 - vbq = &get_cpu_var(vmap_block_queue);
25585 + cpu = get_cpu_light();
25586 + vbq = this_cpu_ptr(&vmap_block_queue);
25587 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
25588 unsigned long pages_off;
25590 @@ -998,7 +1001,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
25594 - put_cpu_var(vmap_block_queue);
25598 /* Allocate new block if nothing was found */
25599 diff --git a/mm/vmstat.c b/mm/vmstat.c
25600 index 604f26a4f696..312006d2db50 100644
25603 @@ -245,6 +245,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
25607 + preempt_disable_rt();
25608 x = delta + __this_cpu_read(*p);
25610 t = __this_cpu_read(pcp->stat_threshold);
25611 @@ -254,6 +255,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
25614 __this_cpu_write(*p, x);
25615 + preempt_enable_rt();
25617 EXPORT_SYMBOL(__mod_zone_page_state);
25619 @@ -265,6 +267,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
25623 + preempt_disable_rt();
25624 x = delta + __this_cpu_read(*p);
25626 t = __this_cpu_read(pcp->stat_threshold);
25627 @@ -274,6 +277,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
25630 __this_cpu_write(*p, x);
25631 + preempt_enable_rt();
25633 EXPORT_SYMBOL(__mod_node_page_state);
25635 @@ -306,6 +310,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
25636 s8 __percpu *p = pcp->vm_stat_diff + item;
25639 + preempt_disable_rt();
25640 v = __this_cpu_inc_return(*p);
25641 t = __this_cpu_read(pcp->stat_threshold);
25642 if (unlikely(v > t)) {
25643 @@ -314,6 +319,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
25644 zone_page_state_add(v + overstep, zone, item);
25645 __this_cpu_write(*p, -overstep);
25647 + preempt_enable_rt();
25650 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
25651 @@ -322,6 +328,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
25652 s8 __percpu *p = pcp->vm_node_stat_diff + item;
25655 + preempt_disable_rt();
25656 v = __this_cpu_inc_return(*p);
25657 t = __this_cpu_read(pcp->stat_threshold);
25658 if (unlikely(v > t)) {
25659 @@ -330,6 +337,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
25660 node_page_state_add(v + overstep, pgdat, item);
25661 __this_cpu_write(*p, -overstep);
25663 + preempt_enable_rt();
25666 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
25667 @@ -350,6 +358,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
25668 s8 __percpu *p = pcp->vm_stat_diff + item;
25671 + preempt_disable_rt();
25672 v = __this_cpu_dec_return(*p);
25673 t = __this_cpu_read(pcp->stat_threshold);
25674 if (unlikely(v < - t)) {
25675 @@ -358,6 +367,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
25676 zone_page_state_add(v - overstep, zone, item);
25677 __this_cpu_write(*p, overstep);
25679 + preempt_enable_rt();
25682 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
25683 @@ -366,6 +376,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
25684 s8 __percpu *p = pcp->vm_node_stat_diff + item;
25687 + preempt_disable_rt();
25688 v = __this_cpu_dec_return(*p);
25689 t = __this_cpu_read(pcp->stat_threshold);
25690 if (unlikely(v < - t)) {
25691 @@ -374,6 +385,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
25692 node_page_state_add(v - overstep, pgdat, item);
25693 __this_cpu_write(*p, overstep);
25695 + preempt_enable_rt();
25698 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
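The vmstat counters are updated with a read-modify-write sequence on this_cpu data that was previously safe only because all callers ran with preemption or interrupts off; the added preempt_disable_rt()/preempt_enable_rt() pair is intended to compile away on non-RT kernels and to disable preemption only on RT, where those callers may now be preemptible. Condensed from __mod_zone_page_state() above:

    preempt_disable_rt();
    x = delta + __this_cpu_read(*p);
    t = __this_cpu_read(pcp->stat_threshold);
    if (unlikely(x > t || x < -t)) {
            zone_page_state_add(x, zone, item);
            x = 0;
    }
    __this_cpu_write(*p, x);
    preempt_enable_rt();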
25699 diff --git a/mm/workingset.c b/mm/workingset.c
25700 index 4c4f05655e6e..b97b1e87b54c 100644
25701 --- a/mm/workingset.c
25702 +++ b/mm/workingset.c
25703 @@ -334,7 +334,8 @@ void workingset_activation(struct page *page)
25704 * point where they would still be useful.
25707 -struct list_lru workingset_shadow_nodes;
25708 +struct list_lru __workingset_shadow_nodes;
25709 +DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
25711 static unsigned long count_shadow_nodes(struct shrinker *shrinker,
25712 struct shrink_control *sc)
25713 @@ -344,9 +345,9 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
25714 unsigned long pages;
25716 /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
25717 - local_irq_disable();
25718 - shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
25719 - local_irq_enable();
25720 + local_lock_irq(workingset_shadow_lock);
25721 + shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc);
25722 + local_unlock_irq(workingset_shadow_lock);
25725 pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
25726 @@ -438,9 +439,9 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
25727 spin_unlock(&mapping->tree_lock);
25728 ret = LRU_REMOVED_RETRY;
25730 - local_irq_enable();
25731 + local_unlock_irq(workingset_shadow_lock);
25733 - local_irq_disable();
25734 + local_lock_irq(workingset_shadow_lock);
25735 spin_lock(lru_lock);
25738 @@ -451,10 +452,10 @@ static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
25741 /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
25742 - local_irq_disable();
25743 - ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
25744 + local_lock_irq(workingset_shadow_lock);
25745 + ret = list_lru_shrink_walk(&__workingset_shadow_nodes, sc,
25746 shadow_lru_isolate, NULL);
25747 - local_irq_enable();
25748 + local_unlock_irq(workingset_shadow_lock);
25752 @@ -492,7 +493,7 @@ static int __init workingset_init(void)
25753 pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
25754 timestamp_bits, max_order, bucket_order);
25756 - ret = __list_lru_init(&workingset_shadow_nodes, true, &shadow_nodes_key);
25757 + ret = __list_lru_init(&__workingset_shadow_nodes, true, &shadow_nodes_key);
25760 ret = register_shrinker(&workingset_shadow_shrinker);
25761 @@ -500,7 +501,7 @@ static int __init workingset_init(void)
25765 - list_lru_destroy(&workingset_shadow_nodes);
25766 + list_lru_destroy(&__workingset_shadow_nodes);
25770 diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
25771 index 1689bb58e0d1..e52a8cb6aa5a 100644
25772 --- a/mm/zsmalloc.c
25773 +++ b/mm/zsmalloc.c
25775 #include <linux/mount.h>
25776 #include <linux/migrate.h>
25777 #include <linux/pagemap.h>
25778 +#include <linux/locallock.h>
25780 #define ZSPAGE_MAGIC 0x58
25784 #define ZS_MAX_ZSPAGE_ORDER 2
25785 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
25787 #define ZS_HANDLE_SIZE (sizeof(unsigned long))
25789 +#ifdef CONFIG_PREEMPT_RT_FULL
25791 +struct zsmalloc_handle {
25792 + unsigned long addr;
25793 + struct mutex lock;
25796 +#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
25800 +#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long))
25804 * Object location (<PFN>, <obj_idx>) is encoded as
25805 * as single (unsigned long) handle value.
25806 @@ -327,7 +341,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
25808 static int create_cache(struct zs_pool *pool)
25810 - pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
25811 + pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE,
25813 if (!pool->handle_cachep)
25815 @@ -351,10 +365,27 @@ static void destroy_cache(struct zs_pool *pool)
25817 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
25819 - return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
25820 - gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
25823 + p = kmem_cache_alloc(pool->handle_cachep,
25824 + gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
25825 +#ifdef CONFIG_PREEMPT_RT_FULL
25827 + struct zsmalloc_handle *zh = p;
25829 + mutex_init(&zh->lock);
25832 + return (unsigned long)p;
25835 +#ifdef CONFIG_PREEMPT_RT_FULL
25836 +static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
25838 + return (void *)(handle & ~((1 << OBJ_TAG_BITS) - 1));
25842 static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
25844 kmem_cache_free(pool->handle_cachep, (void *)handle);
25845 @@ -373,12 +404,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
25847 static void record_obj(unsigned long handle, unsigned long obj)
25849 +#ifdef CONFIG_PREEMPT_RT_FULL
25850 + struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
25852 + WRITE_ONCE(zh->addr, obj);
25855 * lsb of @obj represents handle lock while other bits
25856 * represent object value the handle is pointing so
25857 * updating shouldn't do store tearing.
25859 WRITE_ONCE(*(unsigned long *)handle, obj);
25864 @@ -467,6 +504,7 @@ MODULE_ALIAS("zpool-zsmalloc");
25866 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
25867 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
25868 +static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock);
25870 static bool is_zspage_isolated(struct zspage *zspage)
25872 @@ -902,7 +940,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
25874 static unsigned long handle_to_obj(unsigned long handle)
25876 +#ifdef CONFIG_PREEMPT_RT_FULL
25877 + struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
25881 return *(unsigned long *)handle;
25885 static unsigned long obj_to_head(struct page *page, void *obj)
25886 @@ -916,22 +960,46 @@ static unsigned long obj_to_head(struct page *page, void *obj)
25888 static inline int testpin_tag(unsigned long handle)
25890 +#ifdef CONFIG_PREEMPT_RT_FULL
25891 + struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
25893 + return mutex_is_locked(&zh->lock);
25895 return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
25899 static inline int trypin_tag(unsigned long handle)
25901 +#ifdef CONFIG_PREEMPT_RT_FULL
25902 + struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
25904 + return mutex_trylock(&zh->lock);
25906 return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
25910 static void pin_tag(unsigned long handle)
25912 +#ifdef CONFIG_PREEMPT_RT_FULL
25913 + struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
25915 + return mutex_lock(&zh->lock);
25917 bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
25921 static void unpin_tag(unsigned long handle)
25923 +#ifdef CONFIG_PREEMPT_RT_FULL
25924 + struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
25926 + return mutex_unlock(&zh->lock);
25928 bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
25932 static void reset_page(struct page *page)
25933 @@ -1423,7 +1491,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
25934 class = pool->size_class[class_idx];
25935 off = (class->size * obj_idx) & ~PAGE_MASK;
25937 - area = &get_cpu_var(zs_map_area);
25938 + area = &get_locked_var(zs_map_area_lock, zs_map_area);
25940 if (off + class->size <= PAGE_SIZE) {
25941 /* this object is contained entirely within a page */
25942 @@ -1477,7 +1545,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
25944 __zs_unmap_object(area, pages, off, class->size);
25946 - put_cpu_var(zs_map_area);
25947 + put_locked_var(zs_map_area_lock, zs_map_area);
25949 migrate_read_unlock(zspage);
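On PREEMPT_RT_FULL a zsmalloc handle is no longer a bare unsigned long whose HANDLE_PIN_BIT doubles as a bit spinlock; the handle cache instead allocates a small structure holding the encoded object plus a mutex, and the pin/unpin helpers become mutex operations, since a bit spinlock cannot be replaced by a sleeping lock on RT. In outline:

    struct zsmalloc_handle {
            unsigned long addr;     /* encoded object location */
            struct mutex lock;      /* replaces the HANDLE_PIN_BIT bit spinlock */
    };

    static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
    {
            return (void *)(handle & ~((1 << OBJ_TAG_BITS) - 1));
    }

    static void pin_tag(unsigned long handle)
    {
            mutex_lock(&zs_get_pure_handle(handle)->lock);
    }

    static void unpin_tag(unsigned long handle)
    {
            mutex_unlock(&zs_get_pure_handle(handle)->lock);
    }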
25951 diff --git a/net/core/dev.c b/net/core/dev.c
25952 index 2e04fd188081..3ba60ef8c79e 100644
25953 --- a/net/core/dev.c
25954 +++ b/net/core/dev.c
25955 @@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPUS;
25956 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
25958 static seqcount_t devnet_rename_seq;
25959 +static DEFINE_MUTEX(devnet_rename_mutex);
25961 static inline void dev_base_seq_inc(struct net *net)
25963 @@ -211,14 +212,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
25964 static inline void rps_lock(struct softnet_data *sd)
25967 - spin_lock(&sd->input_pkt_queue.lock);
25968 + raw_spin_lock(&sd->input_pkt_queue.raw_lock);
25972 static inline void rps_unlock(struct softnet_data *sd)
25975 - spin_unlock(&sd->input_pkt_queue.lock);
25976 + raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
25980 @@ -888,7 +889,8 @@ int netdev_get_name(struct net *net, char *name, int ifindex)
25981 strcpy(name, dev->name);
25983 if (read_seqcount_retry(&devnet_rename_seq, seq)) {
25985 + mutex_lock(&devnet_rename_mutex);
25986 + mutex_unlock(&devnet_rename_mutex);
25990 @@ -1157,20 +1159,17 @@ int dev_change_name(struct net_device *dev, const char *newname)
25991 if (dev->flags & IFF_UP)
25994 - write_seqcount_begin(&devnet_rename_seq);
25995 + mutex_lock(&devnet_rename_mutex);
25996 + __raw_write_seqcount_begin(&devnet_rename_seq);
25998 - if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
25999 - write_seqcount_end(&devnet_rename_seq);
26002 + if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
26005 memcpy(oldname, dev->name, IFNAMSIZ);
26007 err = dev_get_valid_name(net, dev, newname);
26009 - write_seqcount_end(&devnet_rename_seq);
26015 if (oldname[0] && !strchr(oldname, '%'))
26016 netdev_info(dev, "renamed from %s\n", oldname);
26017 @@ -1183,11 +1182,12 @@ int dev_change_name(struct net_device *dev, const char *newname)
26019 memcpy(dev->name, oldname, IFNAMSIZ);
26020 dev->name_assign_type = old_assign_type;
26021 - write_seqcount_end(&devnet_rename_seq);
26027 - write_seqcount_end(&devnet_rename_seq);
26028 + __raw_write_seqcount_end(&devnet_rename_seq);
26029 + mutex_unlock(&devnet_rename_mutex);
26031 netdev_adjacent_rename_links(dev, oldname);
26033 @@ -1208,7 +1208,8 @@ int dev_change_name(struct net_device *dev, const char *newname)
26034 /* err >= 0 after dev_alloc_name() or stores the first errno */
26037 - write_seqcount_begin(&devnet_rename_seq);
26038 + mutex_lock(&devnet_rename_mutex);
26039 + __raw_write_seqcount_begin(&devnet_rename_seq);
26040 memcpy(dev->name, oldname, IFNAMSIZ);
26041 memcpy(oldname, newname, IFNAMSIZ);
26042 dev->name_assign_type = old_assign_type;
26043 @@ -1221,6 +1222,11 @@ int dev_change_name(struct net_device *dev, const char *newname)
26049 + __raw_write_seqcount_end(&devnet_rename_seq);
26050 + mutex_unlock(&devnet_rename_mutex);
26055 @@ -2285,6 +2291,7 @@ static void __netif_reschedule(struct Qdisc *q)
26056 sd->output_queue_tailp = &q->next_sched;
26057 raise_softirq_irqoff(NET_TX_SOFTIRQ);
26058 local_irq_restore(flags);
26059 + preempt_check_resched_rt();
26062 void __netif_schedule(struct Qdisc *q)
26063 @@ -2366,6 +2373,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
26064 __this_cpu_write(softnet_data.completion_queue, skb);
26065 raise_softirq_irqoff(NET_TX_SOFTIRQ);
26066 local_irq_restore(flags);
26067 + preempt_check_resched_rt();
26069 EXPORT_SYMBOL(__dev_kfree_skb_irq);
26071 @@ -3100,7 +3108,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
26072 * This permits qdisc->running owner to get the lock more
26073 * often and dequeue packets faster.
26075 +#ifdef CONFIG_PREEMPT_RT_FULL
26076 + contended = true;
26078 contended = qdisc_is_running(q);
26080 if (unlikely(contended))
26081 spin_lock(&q->busylock);
26083 @@ -3163,8 +3175,10 @@ static void skb_update_prio(struct sk_buff *skb)
26084 #define skb_update_prio(skb)
26087 +#ifndef CONFIG_PREEMPT_RT_FULL
26088 DEFINE_PER_CPU(int, xmit_recursion);
26089 EXPORT_SYMBOL(xmit_recursion);
26093 * dev_loopback_xmit - loop back @skb
26094 @@ -3398,8 +3412,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
26095 int cpu = smp_processor_id(); /* ok because BHs are off */
26097 if (txq->xmit_lock_owner != cpu) {
26098 - if (unlikely(__this_cpu_read(xmit_recursion) >
26099 - XMIT_RECURSION_LIMIT))
26100 + if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
26101 goto recursion_alert;
26103 skb = validate_xmit_skb(skb, dev);
26104 @@ -3409,9 +3422,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
26105 HARD_TX_LOCK(dev, txq, cpu);
26107 if (!netif_xmit_stopped(txq)) {
26108 - __this_cpu_inc(xmit_recursion);
26110 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
26111 - __this_cpu_dec(xmit_recursion);
26113 if (dev_xmit_complete(rc)) {
26114 HARD_TX_UNLOCK(dev, txq);
26116 @@ -3785,6 +3798,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
26119 local_irq_restore(flags);
26120 + preempt_check_resched_rt();
26122 atomic_long_inc(&skb->dev->rx_dropped);
26124 @@ -3803,7 +3817,7 @@ static int netif_rx_internal(struct sk_buff *skb)
26125 struct rps_dev_flow voidflow, *rflow = &voidflow;
26128 - preempt_disable();
26129 + migrate_disable();
26132 cpu = get_rps_cpu(skb->dev, skb, &rflow);
26133 @@ -3813,13 +3827,13 @@ static int netif_rx_internal(struct sk_buff *skb)
26134 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
26137 - preempt_enable();
26138 + migrate_enable();
26142 unsigned int qtail;
26143 - ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
26145 + ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
26150 @@ -3853,11 +3867,9 @@ int netif_rx_ni(struct sk_buff *skb)
26152 trace_netif_rx_ni_entry(skb);
26154 - preempt_disable();
26155 + local_bh_disable();
26156 err = netif_rx_internal(skb);
26157 - if (local_softirq_pending())
26159 - preempt_enable();
26160 + local_bh_enable();
26164 @@ -4336,7 +4348,7 @@ static void flush_backlog(struct work_struct *work)
26165 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
26166 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
26167 __skb_unlink(skb, &sd->input_pkt_queue);
26169 + __skb_queue_tail(&sd->tofree_queue, skb);
26170 input_queue_head_incr(sd);
26173 @@ -4346,11 +4358,14 @@ static void flush_backlog(struct work_struct *work)
26174 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
26175 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
26176 __skb_unlink(skb, &sd->process_queue);
26178 + __skb_queue_tail(&sd->tofree_queue, skb);
26179 input_queue_head_incr(sd);
26182 + if (!skb_queue_empty(&sd->tofree_queue))
26183 + raise_softirq_irqoff(NET_RX_SOFTIRQ);
26188 static void flush_all_backlogs(void)
26189 @@ -4831,6 +4846,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
26190 sd->rps_ipi_list = NULL;
26192 local_irq_enable();
26193 + preempt_check_resched_rt();
26195 /* Send pending IPI's to kick RPS processing on remote cpus. */
26197 @@ -4844,6 +4860,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
26200 local_irq_enable();
26201 + preempt_check_resched_rt();
26204 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
26205 @@ -4873,7 +4890,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
26207 struct sk_buff *skb;
26209 + local_irq_disable();
26210 while ((skb = __skb_dequeue(&sd->process_queue))) {
26211 + local_irq_enable();
26213 __netif_receive_skb(skb);
26215 @@ -4881,9 +4900,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
26216 if (++work >= quota)
26219 + local_irq_disable();
26222 - local_irq_disable();
26224 if (skb_queue_empty(&sd->input_pkt_queue)) {
26226 @@ -4921,9 +4940,11 @@ void __napi_schedule(struct napi_struct *n)
26227 local_irq_save(flags);
26228 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
26229 local_irq_restore(flags);
26230 + preempt_check_resched_rt();
26232 EXPORT_SYMBOL(__napi_schedule);
26234 +#ifndef CONFIG_PREEMPT_RT_FULL
26236 * __napi_schedule_irqoff - schedule for receive
26237 * @n: entry to schedule
26238 @@ -4935,6 +4956,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
26239 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
26241 EXPORT_SYMBOL(__napi_schedule_irqoff);
26244 void __napi_complete(struct napi_struct *n)
26246 @@ -5224,13 +5246,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
26247 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
26248 unsigned long time_limit = jiffies + 2;
26249 int budget = netdev_budget;
26250 + struct sk_buff_head tofree_q;
26251 + struct sk_buff *skb;
26255 + __skb_queue_head_init(&tofree_q);
26257 local_irq_disable();
26258 + skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
26259 list_splice_init(&sd->poll_list, &list);
26260 local_irq_enable();
26262 + while ((skb = __skb_dequeue(&tofree_q)))
26266 struct napi_struct *n;
26268 @@ -5261,7 +5291,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
26269 list_splice_tail(&repoll, &list);
26270 list_splice(&list, &sd->poll_list);
26271 if (!list_empty(&sd->poll_list))
26272 - __raise_softirq_irqoff(NET_RX_SOFTIRQ);
26273 + __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
26275 net_rps_action_and_irq_enable(sd);
26277 @@ -8022,16 +8052,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
26279 raise_softirq_irqoff(NET_TX_SOFTIRQ);
26280 local_irq_enable();
26281 + preempt_check_resched_rt();
26283 /* Process offline CPU's input_pkt_queue */
26284 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
26286 input_queue_head_incr(oldsd);
26288 - while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
26289 + while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
26291 input_queue_head_incr(oldsd);
26293 + while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
26299 @@ -8336,8 +8370,9 @@ static int __init net_dev_init(void)
26301 INIT_WORK(flush, flush_backlog);
26303 - skb_queue_head_init(&sd->input_pkt_queue);
26304 - skb_queue_head_init(&sd->process_queue);
26305 + skb_queue_head_init_raw(&sd->input_pkt_queue);
26306 + skb_queue_head_init_raw(&sd->process_queue);
26307 + skb_queue_head_init_raw(&sd->tofree_queue);
26308 INIT_LIST_HEAD(&sd->poll_list);
26309 sd->output_queue_tailp = &sd->output_queue;
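Several of the net/core/dev.c hunks share a theme: work that used to run under local_irq_save() either moves behind the raw input_pkt_queue lock or is deferred until interrupts are enabled again, with preempt_check_resched_rt() added after the IRQ-enable points. One example is the new softnet tofree_queue: skbs unlinked from the backlog queues while the queue lock is held are only queued for freeing, and net_rx_action() drains and frees them later with interrupts on (the final kfree_skb() of each drained skb is the step elided in the hunks above). Condensed:

    /* flush_backlog(): defer the actual free */
    __skb_unlink(skb, &sd->input_pkt_queue);
    __skb_queue_tail(&sd->tofree_queue, skb);
    if (!skb_queue_empty(&sd->tofree_queue))
            raise_softirq_irqoff(NET_RX_SOFTIRQ);

    /* net_rx_action(): pick the deferred skbs up and free them */
    __skb_queue_head_init(&tofree_q);
    local_irq_disable();
    skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
    list_splice_init(&sd->poll_list, &list);
    local_irq_enable();
    while ((skb = __skb_dequeue(&tofree_q)))
            kfree_skb(skb);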
26311 diff --git a/net/core/filter.c b/net/core/filter.c
26312 index b391209838ef..b86e9681a88e 100644
26313 --- a/net/core/filter.c
26314 +++ b/net/core/filter.c
26315 @@ -1645,7 +1645,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
26319 - if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
26320 + if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) {
26321 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
26324 @@ -1653,9 +1653,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
26328 - __this_cpu_inc(xmit_recursion);
26330 ret = dev_queue_xmit(skb);
26331 - __this_cpu_dec(xmit_recursion);
26336 diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
26337 index cad8e791f28e..2a9364fe62a5 100644
26338 --- a/net/core/gen_estimator.c
26339 +++ b/net/core/gen_estimator.c
26340 @@ -84,7 +84,7 @@ struct gen_estimator
26341 struct gnet_stats_basic_packed *bstats;
26342 struct gnet_stats_rate_est64 *rate_est;
26343 spinlock_t *stats_lock;
26344 - seqcount_t *running;
26345 + net_seqlock_t *running;
26348 unsigned long avpps;
26349 @@ -213,7 +213,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
26350 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
26351 struct gnet_stats_rate_est64 *rate_est,
26352 spinlock_t *stats_lock,
26353 - seqcount_t *running,
26354 + net_seqlock_t *running,
26355 struct nlattr *opt)
26357 struct gen_estimator *est;
26358 @@ -309,7 +309,7 @@ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
26359 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
26360 struct gnet_stats_rate_est64 *rate_est,
26361 spinlock_t *stats_lock,
26362 - seqcount_t *running, struct nlattr *opt)
26363 + net_seqlock_t *running, struct nlattr *opt)
26365 gen_kill_estimator(bstats, rate_est);
26366 return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
26367 diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
26368 index 508e051304fb..bc3b17b78c94 100644
26369 --- a/net/core/gen_stats.c
26370 +++ b/net/core/gen_stats.c
26371 @@ -130,7 +130,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
26375 -__gnet_stats_copy_basic(const seqcount_t *running,
26376 +__gnet_stats_copy_basic(net_seqlock_t *running,
26377 struct gnet_stats_basic_packed *bstats,
26378 struct gnet_stats_basic_cpu __percpu *cpu,
26379 struct gnet_stats_basic_packed *b)
26380 @@ -143,10 +143,10 @@ __gnet_stats_copy_basic(const seqcount_t *running,
26384 - seq = read_seqcount_begin(running);
26385 + seq = net_seq_begin(running);
26386 bstats->bytes = b->bytes;
26387 bstats->packets = b->packets;
26388 - } while (running && read_seqcount_retry(running, seq));
26389 + } while (running && net_seq_retry(running, seq));
26391 EXPORT_SYMBOL(__gnet_stats_copy_basic);
26393 @@ -164,7 +164,7 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
26394 * if the room in the socket buffer was not sufficient.
26397 -gnet_stats_copy_basic(const seqcount_t *running,
26398 +gnet_stats_copy_basic(net_seqlock_t *running,
26399 struct gnet_dump *d,
26400 struct gnet_stats_basic_cpu __percpu *cpu,
26401 struct gnet_stats_basic_packed *b)
26402 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
26403 index fe008f1bd930..9fa6bea3dd3f 100644
26404 --- a/net/core/skbuff.c
26405 +++ b/net/core/skbuff.c
26407 #include <linux/errqueue.h>
26408 #include <linux/prefetch.h>
26409 #include <linux/if_vlan.h>
26410 +#include <linux/locallock.h>
26412 #include <net/protocol.h>
26413 #include <net/dst.h>
26414 @@ -360,6 +361,8 @@ struct napi_alloc_cache {
26416 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
26417 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
26418 +static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
26419 +static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
26421 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
26423 @@ -367,10 +370,10 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
26424 unsigned long flags;
26427 - local_irq_save(flags);
26428 + local_lock_irqsave(netdev_alloc_lock, flags);
26429 nc = this_cpu_ptr(&netdev_alloc_cache);
26430 data = __alloc_page_frag(nc, fragsz, gfp_mask);
26431 - local_irq_restore(flags);
26432 + local_unlock_irqrestore(netdev_alloc_lock, flags);
26436 @@ -389,9 +392,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
26438 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
26440 - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
26441 + struct napi_alloc_cache *nc;
26444 - return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
26445 + nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
26446 + data = __alloc_page_frag(&nc->page, fragsz, gfp_mask);
26447 + put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
26451 void *napi_alloc_frag(unsigned int fragsz)
26452 @@ -438,13 +445,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
26453 if (sk_memalloc_socks())
26454 gfp_mask |= __GFP_MEMALLOC;
26456 - local_irq_save(flags);
26457 + local_lock_irqsave(netdev_alloc_lock, flags);
26459 nc = this_cpu_ptr(&netdev_alloc_cache);
26460 data = __alloc_page_frag(nc, len, gfp_mask);
26461 pfmemalloc = nc->pfmemalloc;
26463 - local_irq_restore(flags);
26464 + local_unlock_irqrestore(netdev_alloc_lock, flags);
26466 if (unlikely(!data))
26468 @@ -485,9 +492,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
26469 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
26472 - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
26473 + struct napi_alloc_cache *nc;
26474 struct sk_buff *skb;
26478 len += NET_SKB_PAD + NET_IP_ALIGN;
26480 @@ -505,7 +513,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
26481 if (sk_memalloc_socks())
26482 gfp_mask |= __GFP_MEMALLOC;
26484 + nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
26485 data = __alloc_page_frag(&nc->page, len, gfp_mask);
26486 + pfmemalloc = nc->page.pfmemalloc;
26487 + put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
26488 if (unlikely(!data))
26491 @@ -516,7 +527,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
26494 /* use OR instead of assignment to avoid clearing of bits in mask */
26495 - if (nc->page.pfmemalloc)
26497 skb->pfmemalloc = 1;
26498 skb->head_frag = 1;
26500 @@ -760,23 +771,26 @@ EXPORT_SYMBOL(consume_skb);
26502 void __kfree_skb_flush(void)
26504 - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
26505 + struct napi_alloc_cache *nc;
26507 + nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
26508 /* flush skb_cache if containing objects */
26509 if (nc->skb_count) {
26510 kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
26514 + put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
26517 static inline void _kfree_skb_defer(struct sk_buff *skb)
26519 - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
26520 + struct napi_alloc_cache *nc;
26522 /* drop skb->head and call any destructors for packet */
26523 skb_release_all(skb);
26525 + nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
26526 /* record skb to CPU local list */
26527 nc->skb_cache[nc->skb_count++] = skb;
26529 @@ -791,6 +805,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb)
26533 + put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
26535 void __kfree_skb_defer(struct sk_buff *skb)
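
The skbuff.c hunks above move the per-CPU netdev/napi allocation caches from local_irq_save()/plain this_cpu_ptr() protection to the local-lock primitives used throughout this series, so that on PREEMPT_RT the critical section is guarded by a per-CPU lock instead of hard interrupt disabling. A minimal sketch of both flavours, with illustrative structure and function names; the non-RT fallback described in the comments is an assumption about how linux/locallock.h maps these helpers:

        #include <linux/locallock.h>
        #include <linux/percpu.h>

        struct frag_cache {
                void *va;                       /* illustrative per-CPU state */
        };

        static DEFINE_PER_CPU(struct frag_cache, example_cache);
        static DEFINE_LOCAL_IRQ_LOCK(example_cache_lock);

        static void *example_alloc_irq(void)
        {
                struct frag_cache *nc;
                unsigned long flags;
                void *va;

                /* replaces local_irq_save(flags) in the original code */
                local_lock_irqsave(example_cache_lock, flags);
                nc = this_cpu_ptr(&example_cache);
                va = nc->va;
                local_unlock_irqrestore(example_cache_lock, flags);
                return va;
        }

        static void *example_alloc_bh(void)
        {
                struct frag_cache *nc;
                void *va;

                /* replaces a bare this_cpu_ptr() access in softirq context */
                nc = &get_locked_var(example_cache_lock, example_cache);
                va = nc->va;
                put_locked_var(example_cache_lock, example_cache);
                return va;
        }

On a non-RT build these helpers are expected to collapse back to the previous interrupt/preemption disabling; on PREEMPT_RT they take a per-CPU lock that keeps the section preemptible while still serializing all users of the cache on that CPU.
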
26537 diff --git a/net/core/sock.c b/net/core/sock.c
26538 index 470a2043b846..2b09a5a33d8d 100644
26539 --- a/net/core/sock.c
26540 +++ b/net/core/sock.c
26541 @@ -2499,12 +2499,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
26542 if (sk->sk_lock.owned)
26544 sk->sk_lock.owned = 1;
26545 - spin_unlock(&sk->sk_lock.slock);
26546 + spin_unlock_bh(&sk->sk_lock.slock);
26548 * The sk_lock has mutex_lock() semantics here:
26550 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
26551 - local_bh_enable();
26553 EXPORT_SYMBOL(lock_sock_nested);
26555 diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
26556 index 48734ee6293f..e6864ff11352 100644
26557 --- a/net/ipv4/icmp.c
26558 +++ b/net/ipv4/icmp.c
26560 #include <linux/jiffies.h>
26561 #include <linux/kernel.h>
26562 #include <linux/fcntl.h>
26563 +#include <linux/sysrq.h>
26564 #include <linux/socket.h>
26565 #include <linux/in.h>
26566 #include <linux/inet.h>
26568 #include <linux/string.h>
26569 #include <linux/netfilter_ipv4.h>
26570 #include <linux/slab.h>
26571 +#include <linux/locallock.h>
26572 #include <net/snmp.h>
26573 #include <net/ip.h>
26574 #include <net/route.h>
26575 @@ -204,6 +206,8 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
26577 * On SMP we have one ICMP socket per-cpu.
26579 +static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock);
26581 static struct sock *icmp_sk(struct net *net)
26583 return *this_cpu_ptr(net->ipv4.icmp_sk);
26584 @@ -215,12 +219,14 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
26586 local_bh_disable();
26588 + local_lock(icmp_sk_lock);
26591 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
26592 /* This can happen if the output path signals a
26593 * dst_link_failure() for an outgoing ICMP packet.
26595 + local_unlock(icmp_sk_lock);
26599 @@ -230,6 +236,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
26600 static inline void icmp_xmit_unlock(struct sock *sk)
26602 spin_unlock_bh(&sk->sk_lock.slock);
26603 + local_unlock(icmp_sk_lock);
26606 int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
26607 @@ -358,6 +365,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
26609 struct sk_buff *skb;
26611 + local_lock(icmp_sk_lock);
26612 sk = icmp_sk(dev_net((*rt)->dst.dev));
26613 if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
26614 icmp_param->data_len+icmp_param->head_len,
26615 @@ -380,6 +388,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
26616 skb->ip_summed = CHECKSUM_NONE;
26617 ip_push_pending_frames(sk, fl4);
26619 + local_unlock(icmp_sk_lock);
26623 @@ -891,6 +900,30 @@ static bool icmp_redirect(struct sk_buff *skb)
26627 + * 32bit and 64bit have different timestamp lengths, so we check for
26628 + * the cookie at offset 20 and verify it is repeated at offset 50
26630 +#define CO_POS0 20
26631 +#define CO_POS1 50
26632 +#define CO_SIZE sizeof(int)
26633 +#define ICMP_SYSRQ_SIZE 57
26636 + * We got an ICMP_SYSRQ_SIZE sized ping request. Check for the cookie
26637 + * pattern and, if it matches, send the next byte as a trigger to sysrq.
26639 +static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
26641 + int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
26642 + char *p = skb->data;
26644 + if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) &&
26645 + !memcmp(&cookie, p + CO_POS1, CO_SIZE) &&
26646 + p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE])
26647 + handle_sysrq(p[CO_POS0 + CO_SIZE]);
26651 * Handle ICMP_ECHO ("ping") requests.
26653 * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
26654 @@ -917,6 +950,11 @@ static bool icmp_echo(struct sk_buff *skb)
26655 icmp_param.data_len = skb->len;
26656 icmp_param.head_len = sizeof(struct icmphdr);
26657 icmp_reply(&icmp_param, skb);
26659 + if (skb->len == ICMP_SYSRQ_SIZE &&
26660 + net->ipv4.sysctl_icmp_echo_sysrq) {
26661 + icmp_check_sysrq(net, skb);
26664 /* should there be an ICMP stat for ignored echos? */
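
The cookie check added above works because the 57-byte echo payload produced by the documented patterned ping repeats a five-byte sequence (the four cookie bytes followed by the SysRq key). Offsets 20 and 50 are both multiples of that pattern length, so each holds a full cookie copy with the command key right behind it, and both lie past the leading bytes that ping overwrites with its (32-bit or 64-bit sized) timestamp. A small user-space sketch of that layout, a hypothetical helper rather than part of the patch:

        #include <arpa/inet.h>
        #include <assert.h>
        #include <stdint.h>
        #include <string.h>

        int main(void)
        {
                unsigned char pattern[5] = { 0x01, 0x02, 0x03, 0x04, 0x68 }; /* 0x68 = 'h', SysRq help */
                unsigned char payload[57];
                uint32_t cookie = htonl(0x01020304);
                int i;

                /* a patterned ping fills the whole data area with the repeated pattern */
                for (i = 0; i < 57; i++)
                        payload[i] = pattern[i % 5];

                /* the offsets checked by icmp_check_sysrq(): cookie at 20 and 50,
                 * identical command key byte right after each copy
                 */
                assert(!memcmp(&cookie, payload + 20, sizeof(cookie)));
                assert(!memcmp(&cookie, payload + 50, sizeof(cookie)));
                assert(payload[24] == payload[54] && payload[24] == 0x68);
                return 0;
        }
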
26666 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
26667 index 80bc36b25de2..215b90adfb05 100644
26668 --- a/net/ipv4/sysctl_net_ipv4.c
26669 +++ b/net/ipv4/sysctl_net_ipv4.c
26670 @@ -681,6 +681,13 @@ static struct ctl_table ipv4_net_table[] = {
26671 .proc_handler = proc_dointvec
26674 + .procname = "icmp_echo_sysrq",
26675 + .data = &init_net.ipv4.sysctl_icmp_echo_sysrq,
26676 + .maxlen = sizeof(int),
26678 + .proc_handler = proc_dointvec
26681 .procname = "icmp_ignore_bogus_error_responses",
26682 .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
26683 .maxlen = sizeof(int),
26684 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
26685 index 6988566dc72f..672fffcde28c 100644
26686 --- a/net/ipv4/tcp_ipv4.c
26687 +++ b/net/ipv4/tcp_ipv4.c
26689 #include <linux/init.h>
26690 #include <linux/times.h>
26691 #include <linux/slab.h>
26692 +#include <linux/locallock.h>
26694 #include <net/net_namespace.h>
26695 #include <net/icmp.h>
26696 @@ -568,6 +569,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
26698 EXPORT_SYMBOL(tcp_v4_send_check);
26700 +static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock);
26702 * This routine will send an RST to the other tcp.
26704 @@ -695,6 +697,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
26705 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
26707 arg.tos = ip_hdr(skb)->tos;
26709 + local_lock(tcp_sk_lock);
26710 local_bh_disable();
26711 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
26712 skb, &TCP_SKB_CB(skb)->header.h4.opt,
26713 @@ -704,6 +708,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
26714 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
26715 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
26717 + local_unlock(tcp_sk_lock);
26719 #ifdef CONFIG_TCP_MD5SIG
26721 @@ -779,6 +784,7 @@ static void tcp_v4_send_ack(struct net *net,
26723 arg.bound_dev_if = oif;
26725 + local_lock(tcp_sk_lock);
26726 local_bh_disable();
26727 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
26728 skb, &TCP_SKB_CB(skb)->header.h4.opt,
26729 @@ -787,6 +793,7 @@ static void tcp_v4_send_ack(struct net *net,
26731 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
26733 + local_unlock(tcp_sk_lock);
26736 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
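
The ICMP and TCP hunks follow the same scheme: the per-CPU reply sockets were previously serialized only by local_bh_disable(), which on PREEMPT_RT no longer guarantees exclusive access to per-CPU data, so an explicit local lock now brackets every user of the per-CPU socket. A condensed sketch of the scheme, with an illustrative function name (the non-RT behaviour noted in the comment is an assumption about linux/locallock.h):

        #include <linux/locallock.h>

        static DEFINE_LOCAL_IRQ_LOCK(reply_sk_lock);

        static void send_reply_on_this_cpu(void)
        {
                /* serialize all users of the per-CPU reply socket; assumed to
                 * reduce to preempt_disable() on !RT, a per-CPU lock on RT
                 */
                local_lock(reply_sk_lock);
                local_bh_disable();
                /* ... look up *this_cpu_ptr(...icmp_sk / tcp_sk) and transmit ... */
                local_bh_enable();
                local_unlock(reply_sk_lock);
        }
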
26737 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
26738 index acaaf616da71..09020dbcc089 100644
26739 --- a/net/mac80211/rx.c
26740 +++ b/net/mac80211/rx.c
26741 @@ -4230,7 +4230,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
26742 struct ieee80211_supported_band *sband;
26743 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
26745 - WARN_ON_ONCE(softirq_count() == 0);
26746 + WARN_ON_ONCE_NONRT(softirq_count() == 0);
26748 if (WARN_ON(status->band >= NUM_NL80211_BANDS))
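
WARN_ON_ONCE_NONRT() is used here because on PREEMPT_RT the receive path may legitimately run in thread context with softirq_count() == 0, so the assertion is only meaningful on non-RT kernels. A rough sketch of what such a wrapper amounts to; the exact definition lives elsewhere in this series and is assumed here:

        #ifndef CONFIG_PREEMPT_RT_FULL
        # define WARN_ON_ONCE_NONRT(condition)  WARN_ON_ONCE(condition)
        #else
        # define WARN_ON_ONCE_NONRT(condition)  do { } while (0)
        #endif
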
26750 diff --git a/net/netfilter/core.c b/net/netfilter/core.c
26751 index 004af030ef1a..b64f751bda45 100644
26752 --- a/net/netfilter/core.c
26753 +++ b/net/netfilter/core.c
26754 @@ -22,12 +22,18 @@
26755 #include <linux/proc_fs.h>
26756 #include <linux/mutex.h>
26757 #include <linux/slab.h>
26758 +#include <linux/locallock.h>
26759 #include <linux/rcupdate.h>
26760 #include <net/net_namespace.h>
26761 #include <net/sock.h>
26763 #include "nf_internals.h"
26765 +#ifdef CONFIG_PREEMPT_RT_BASE
26766 +DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
26767 +EXPORT_PER_CPU_SYMBOL(xt_write_lock);
26770 static DEFINE_MUTEX(afinfo_mutex);
26772 const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
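
The xt_write_lock defined and exported above only exists for PREEMPT_RT_BASE; the expectation, an assumption here since the corresponding header change is not part of this excerpt, is that the xtables write-side sequence counter helpers take it so that writers on the same CPU stay serialized even though the section becomes preemptible on RT. Sketch of such a caller:

        #ifdef CONFIG_PREEMPT_RT_BASE
        DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);

        static inline void example_xt_write_section(void)
        {
                local_lock(xt_write_lock);
                /* ... bump the per-CPU xt_recseq and update rule counters ... */
                local_unlock(xt_write_lock);
        }
        #endif
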
26773 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
26774 index cb76ff3088e9..3f42c5b1af55 100644
26775 --- a/net/packet/af_packet.c
26776 +++ b/net/packet/af_packet.c
26778 #include <linux/if_packet.h>
26779 #include <linux/wireless.h>
26780 #include <linux/kernel.h>
26781 +#include <linux/delay.h>
26782 #include <linux/kmod.h>
26783 #include <linux/slab.h>
26784 #include <linux/vmalloc.h>
26785 @@ -694,7 +695,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
26786 if (BLOCK_NUM_PKTS(pbd)) {
26787 while (atomic_read(&pkc->blk_fill_in_prog)) {
26788 /* Waiting for skb_copy_bits to finish... */
26794 @@ -956,7 +957,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
26795 if (!(status & TP_STATUS_BLK_TMO)) {
26796 while (atomic_read(&pkc->blk_fill_in_prog)) {
26797 /* Waiting for skb_copy_bits to finish... */
26802 prb_close_block(pkc, pbd, po, status);
26803 diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
26804 index 977f69886c00..f3e7a36b0396 100644
26805 --- a/net/rds/ib_rdma.c
26806 +++ b/net/rds/ib_rdma.c
26808 #include <linux/slab.h>
26809 #include <linux/rculist.h>
26810 #include <linux/llist.h>
26811 +#include <linux/delay.h>
26813 #include "rds_single_path.h"
26815 @@ -210,7 +211,7 @@ static inline void wait_clean_list_grace(void)
26816 for_each_online_cpu(cpu) {
26817 flag = &per_cpu(clean_list_grace, cpu);
26818 while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
26824 diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
26825 index 7d921e56e715..13df56a738e5 100644
26826 --- a/net/rxrpc/security.c
26827 +++ b/net/rxrpc/security.c
26829 #include <keys/rxrpc-type.h>
26830 #include "ar-internal.h"
26832 -static LIST_HEAD(rxrpc_security_methods);
26833 -static DECLARE_RWSEM(rxrpc_security_sem);
26835 static const struct rxrpc_security *rxrpc_security_types[] = {
26836 [RXRPC_SECURITY_NONE] = &rxrpc_no_security,
26837 #ifdef CONFIG_RXKAD
26838 diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
26839 index 206dc24add3a..00ea9bde5bb3 100644
26840 --- a/net/sched/sch_api.c
26841 +++ b/net/sched/sch_api.c
26842 @@ -981,7 +981,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
26843 rcu_assign_pointer(sch->stab, stab);
26845 if (tca[TCA_RATE]) {
26846 - seqcount_t *running;
26847 + net_seqlock_t *running;
26850 if (sch->flags & TCQ_F_MQROOT)
26851 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
26852 index 6cfb6e9038c2..20727e1347de 100644
26853 --- a/net/sched/sch_generic.c
26854 +++ b/net/sched/sch_generic.c
26855 @@ -425,7 +425,11 @@ struct Qdisc noop_qdisc = {
26856 .ops = &noop_qdisc_ops,
26857 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
26858 .dev_queue = &noop_netdev_queue,
26859 +#ifdef CONFIG_PREEMPT_RT_BASE
26860 + .running = __SEQLOCK_UNLOCKED(noop_qdisc.running),
26862 .running = SEQCNT_ZERO(noop_qdisc.running),
26864 .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
26866 EXPORT_SYMBOL(noop_qdisc);
26867 @@ -624,9 +628,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
26868 lockdep_set_class(&sch->busylock,
26869 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
26871 +#ifdef CONFIG_PREEMPT_RT_BASE
26872 + seqlock_init(&sch->running);
26873 + lockdep_set_class(&sch->running.seqcount,
26874 + dev->qdisc_running_key ?: &qdisc_running_key);
26875 + lockdep_set_class(&sch->running.lock,
26876 + dev->qdisc_running_key ?: &qdisc_running_key);
26878 seqcount_init(&sch->running);
26879 lockdep_set_class(&sch->running,
26880 dev->qdisc_running_key ?: &qdisc_running_key);
26884 sch->enqueue = ops->enqueue;
26885 @@ -925,7 +937,7 @@ void dev_deactivate_many(struct list_head *head)
26886 /* Wait for outstanding qdisc_run calls. */
26887 list_for_each_entry(dev, head, close_list)
26888 while (some_qdisc_is_busy(dev))
26893 void dev_deactivate(struct net_device *dev)
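
In the sch_api.c and sch_generic.c hunks the qdisc "running" member changes type from seqcount_t to net_seqlock_t. The type itself is introduced elsewhere in the series; the hunks above imply it is a full seqlock_t on PREEMPT_RT_BASE (hence __SEQLOCK_UNLOCKED, seqlock_init() and the two lockdep classes for .seqcount and .lock) and a plain seqcount_t otherwise. A sketch of the wrapper this presupposes, assumed rather than shown in this excerpt:

        #ifdef CONFIG_PREEMPT_RT_BASE
        typedef seqlock_t   net_seqlock_t;      /* writer side takes a real lock on RT */
        #else
        typedef seqcount_t  net_seqlock_t;      /* writer relies on qdisc serialization */
        #endif
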
26894 diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
26895 index 9c9db55a0c1e..e6583b018a72 100644
26896 --- a/net/sunrpc/svc_xprt.c
26897 +++ b/net/sunrpc/svc_xprt.c
26898 @@ -396,7 +396,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
26903 + cpu = get_cpu_light();
26904 pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
26906 atomic_long_inc(&pool->sp_stats.packets);
26907 @@ -432,7 +432,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
26909 atomic_long_inc(&pool->sp_stats.threads_woken);
26910 wake_up_process(rqstp->rq_task);
26916 @@ -453,7 +453,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
26923 trace_svc_xprt_do_enqueue(xprt, rqstp);
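
svc_xprt_do_enqueue() only needs a stable CPU number to pick a pool, not a preempt-disabled region around the wake-up, so the per-CPU pool selection switches to get_cpu_light(). The sketch below shows the semantics this relies on; the definition is assumed, and on non-RT builds the _light variants are expected to behave exactly like get_cpu()/put_cpu():

        #ifndef CONFIG_PREEMPT_RT_FULL
        # define get_cpu_light()        get_cpu()
        # define put_cpu_light()        put_cpu()
        #else
        /* pin the task to this CPU but stay preemptible, so the code in
         * between may take sleeping locks and wake other threads
         */
        # define get_cpu_light()        ({ migrate_disable(); smp_processor_id(); })
        # define put_cpu_light()        migrate_enable()
        #endif
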
26925 diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
26926 index 6fdc97ef6023..523e0420d7f0 100755
26927 --- a/scripts/mkcompile_h
26928 +++ b/scripts/mkcompile_h
26929 @@ -4,7 +4,8 @@ TARGET=$1
26937 vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
26939 @@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
26941 if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
26942 if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
26943 +if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
26944 UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
26946 # Truncate to maximum length
26947 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
26948 index 9d33c1e85c79..3d307bda86f9 100644
26949 --- a/sound/core/pcm_native.c
26950 +++ b/sound/core/pcm_native.c
26951 @@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
26952 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
26954 if (!substream->pcm->nonatomic)
26955 - local_irq_disable();
26956 + local_irq_disable_nort();
26957 snd_pcm_stream_lock(substream);
26959 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
26960 @@ -150,7 +150,7 @@ void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
26962 snd_pcm_stream_unlock(substream);
26963 if (!substream->pcm->nonatomic)
26964 - local_irq_enable();
26965 + local_irq_enable_nort();
26967 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
26969 @@ -158,7 +158,7 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
26971 unsigned long flags = 0;
26972 if (!substream->pcm->nonatomic)
26973 - local_irq_save(flags);
26974 + local_irq_save_nort(flags);
26975 snd_pcm_stream_lock(substream);
26978 @@ -176,7 +176,7 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
26980 snd_pcm_stream_unlock(substream);
26981 if (!substream->pcm->nonatomic)
26982 - local_irq_restore(flags);
26983 + local_irq_restore_nort(flags);
26985 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
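
The _nort variants used in the pcm_native.c hunks keep hard interrupt disabling on non-RT kernels but become no-ops on PREEMPT_RT_FULL, where the spinlock taken inside snd_pcm_stream_lock() is a sleeping lock and must not be acquired with interrupts disabled. A sketch of the assumed mapping; the real definitions live elsewhere in this series:

        #ifdef CONFIG_PREEMPT_RT_FULL
        # define local_irq_disable_nort()       do { } while (0)
        # define local_irq_enable_nort()        do { } while (0)
        # define local_irq_save_nort(flags)     local_save_flags(flags)
        # define local_irq_restore_nort(flags)  (void)(flags)
        #else
        # define local_irq_disable_nort()       local_irq_disable()
        # define local_irq_enable_nort()        local_irq_enable()
        # define local_irq_save_nort(flags)     local_irq_save(flags)
        # define local_irq_restore_nort(flags)  local_irq_restore(flags)
        #endif
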