1 diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
2 index 3a3b30ac2a75..9e0745cafbd8 100644
3 --- a/Documentation/sysrq.txt
4 +++ b/Documentation/sysrq.txt
5 @@ -59,10 +59,17 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
6  On other - If you know of the key combos for other architectures, please
7             let me know so I can add them to this section.
8  
9 -On all -  write a character to /proc/sysrq-trigger.  e.g.:
10 -
11 +On all -  write a character to /proc/sysrq-trigger, e.g.:
12                 echo t > /proc/sysrq-trigger
13  
14 +On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
15 +               echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
16 +        Send an ICMP echo request with this pattern plus the particular
17 +        SysRq command key. Example:
18 +               # ping -c1 -s57 -p0102030468
19 +        will trigger the SysRq-H (help) command.
20 +
21 +
22  *  What are the 'command' keys?
23  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24  'b'     - Will immediately reboot the system without syncing or unmounting
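A minimal end-to-end sketch of the network SysRq interface documented above. The cookie value and the target host are placeholders; this assumes SysRq is enabled via /proc/sys/kernel/sysrq and that the icmp_echo_sysrq sysctl added by this patch is present:

        # On the machine to be debugged: pick an arbitrary 4-byte cookie
        # and write it to the new sysctl (0x01020304 is only an example).
        echo 1 > /proc/sys/kernel/sysrq
        echo 0x01020304 > /proc/sys/net/ipv4/icmp_echo_sysrq

        # From another host (<target> is a placeholder): send one ICMP echo
        # whose payload pattern is the cookie followed by the command key.
        # 0x74 is 't', so this requests SysRq-T (dump task list);
        # -s57 keeps the payload size used in the example above.
        ping -c1 -s57 -p0102030474 <target>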
25 diff --git a/Documentation/trace/histograms.txt b/Documentation/trace/histograms.txt
26 new file mode 100644
27 index 000000000000..6f2aeabf7faa
28 --- /dev/null
29 +++ b/Documentation/trace/histograms.txt
30 @@ -0,0 +1,186 @@
31 +               Using the Linux Kernel Latency Histograms
32 +
33 +
34 +This document gives a short explanation of how to enable, configure and use
35 +latency histograms. Latency histograms are primarily relevant in the
36 +context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
37 +and are used in the quality management of the Linux real-time
38 +capabilities.
39 +
40 +
41 +* Purpose of latency histograms
42 +
43 +A latency histogram continuously accumulates the frequencies of latency
44 +data. There are two types of histograms:
45 +- potential sources of latencies
46 +- effective latencies
47 +
48 +
49 +* Potential sources of latencies
50 +
51 +Potential sources of latencies are code segments where interrupts,
52 +preemption or both are disabled (aka critical sections). To create
53 +histograms of potential sources of latency, the kernel stores the time
54 +stamp at the start of a critical section, determines the time elapsed
55 +when the end of the section is reached, and increments the frequency
56 +counter of that latency value - irrespective of whether any concurrently
57 +running process is affected by latency or not.
58 +- Configuration items (in the Kernel hacking/Tracers submenu)
59 +  CONFIG_INTERRUPT_OFF_LATENCY
60 +  CONFIG_PREEMPT_OFF_LATENCY
61 +
62 +
63 +* Effective latencies
64 +
65 +Effective latencies actually occur during the wakeup of a process. To
66 +determine effective latencies, the kernel stores the time stamp when a
67 +process is scheduled to be woken up, and determines the duration of the
68 +wakeup time shortly before control is passed over to this process. Note
69 +that the apparent latency in user space may be somewhat longer, since the
70 +process may be interrupted after control is passed over to it but before
71 +the execution in user space takes place. Simply measuring the interval
72 +between enqueuing and wakeup may also not be appropriate in cases where a
73 +process is scheduled as a result of a timer expiration. The timer may have
74 +missed its deadline, e.g. due to disabled interrupts, but this latency
75 +would not be registered. Therefore, the offsets of missed timers are
76 +recorded in a separate histogram. If both wakeup latency and missed timer
77 +offsets are configured and enabled, a third histogram may be enabled that
78 +records the overall latency as a sum of the timer latency, if any, and the
79 +wakeup latency. This histogram is called "timerandwakeup".
80 +- Configuration items (in the Kernel hacking/Tracers submenu)
81 +  CONFIG_WAKEUP_LATENCY
82 +  CONFIG_MISSED_TIMER_OFFSETS
83 +
84 +
85 +* Usage
86 +
87 +The interface to the administration of the latency histograms is located
88 +in the debugfs file system. To mount it, either enter
89 +
90 +mount -t sysfs nodev /sys
91 +mount -t debugfs nodev /sys/kernel/debug
92 +
93 +from the shell command line, or add
94 +
95 +nodev  /sys                    sysfs   defaults        0 0
96 +nodev  /sys/kernel/debug       debugfs defaults        0 0
97 +
98 +to the file /etc/fstab. All latency histogram related files are then
99 +available in the directory /sys/kernel/debug/tracing/latency_hist. A
100 +particular histogram type is enabled by writing non-zero to the related
101 +variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
102 +Select "preemptirqsoff" for the histograms of potential sources of
103 +latencies and "wakeup" for histograms of effective latencies etc. The
104 +histogram data - one per CPU - are available in the files
105 +
106 +/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
107 +/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
108 +/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
109 +/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
110 +/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
111 +/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
112 +/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
113 +
114 +The histograms are reset by writing non-zero to the file "reset" in a
115 +particular latency directory. To reset all latency data, use
116 +
117 +#!/bin/sh
118 +
119 +TRACINGDIR=/sys/kernel/debug/tracing
120 +HISTDIR=$TRACINGDIR/latency_hist
121 +
122 +if test -d $HISTDIR
123 +then
124 +  cd $HISTDIR
125 +  for i in `find . | grep /reset$`
126 +  do
127 +    echo 1 >$i
128 +  done
129 +fi
130 +
131 +
132 +* Data format
133 +
134 +Latency data are stored with a resolution of one microsecond. The
135 +maximum latency is 10,240 microseconds. The data are only valid if the
136 +overflow register is empty. Every output line contains the latency in
137 +microseconds in the first column and the number of samples in the
138 +second column. To display only lines with a positive latency count,
139 +use, for example,
140 +
141 +grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
142 +
143 +#Minimum latency: 0 microseconds.
144 +#Average latency: 0 microseconds.
145 +#Maximum latency: 25 microseconds.
146 +#Total samples: 3104770694
147 +#There are 0 samples greater or equal than 10240 microseconds
148 +#usecs          samples
149 +    0        2984486876
150 +    1          49843506
151 +    2          58219047
152 +    3           5348126
153 +    4           2187960
154 +    5           3388262
155 +    6            959289
156 +    7            208294
157 +    8             40420
158 +    9              4485
159 +   10             14918
160 +   11             18340
161 +   12             25052
162 +   13             19455
163 +   14              5602
164 +   15               969
165 +   16                47
166 +   17                18
167 +   18                14
168 +   19                 1
169 +   20                 3
170 +   21                 2
171 +   22                 5
172 +   23                 2
173 +   25                 1
174 +
175 +
176 +* Wakeup latency of a selected process
177 +
178 +To only collect wakeup latency data of a particular process, write the
179 +PID of the requested process to
180 +
181 +/sys/kernel/debug/tracing/latency_hist/wakeup/pid
182 +
183 +If this variable is set to 0, no PID filtering takes place.
184 +
185 +
186 +* Details of the process with the highest wakeup latency so far
187 +
188 +Selected data of the process that suffered from the highest wakeup
189 +latency that occurred on a particular CPU are available in the file
190 +
191 +/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
192 +
193 +In addition, other relevant system data at the time when the
194 +latency occurred are given.
195 +
196 +The format of the data is (all in one line):
197 +<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
198 +<- <PID> <Priority> <Command> <Timestamp>
199 +
200 +The value of <Timeroffset> is only relevant in the combined timer
201 +and wakeup latency recording. In the wakeup recording, it is
202 +always 0; in the missed_timer_offsets recording, it is the same
203 +as <Latency>.
204 +
205 +When retrospectively searching for the origin of a latency while
206 +tracing was not enabled, it may be helpful to know the name and
207 +some basic data of the task that (finally) switched to the
208 +late real-time task. In addition to the victim's data, the
209 +data of the possible culprit are therefore also displayed after
210 +the "<-" symbol.
211 +
212 +Finally, the timestamp of the time when the latency occurred
213 +in <seconds>.<microseconds> after the most recent system boot
214 +is provided.
215 +
216 +These data are also reset when the wakeup histogram is reset.
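A short shell walk-through of the latency_hist interface described in the file added above; a sketch only, assuming debugfs is mounted at /sys/kernel/debug, the wakeup latency histograms are compiled in, and PID 1234 is a placeholder:

        HIST=/sys/kernel/debug/tracing/latency_hist

        # Mount debugfs if it is not mounted yet (harmless if it already is).
        mount -t debugfs nodev /sys/kernel/debug 2>/dev/null

        # Enable the wakeup latency histograms.
        echo 1 > $HIST/enable/wakeup

        # Optionally restrict data collection to one process;
        # writing 0 removes the PID filter again.
        echo 1234 > $HIST/wakeup/pid

        # Run the workload, then show only latencies that actually occurred.
        grep -v " 0$" $HIST/wakeup/CPU0

        # Victim/culprit details of the worst wakeup latency seen on CPU0.
        cat $HIST/wakeup/max_latency-CPU0

        # Reset all histograms, as in the reset script above.
        for f in $(find $HIST -name reset); do
                echo 1 > $f
        done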
217 diff --git a/arch/Kconfig b/arch/Kconfig
218 index 659bdd079277..099fc0f5155e 100644
219 --- a/arch/Kconfig
220 +++ b/arch/Kconfig
221 @@ -9,6 +9,7 @@ config OPROFILE
222         tristate "OProfile system profiling"
223         depends on PROFILING
224         depends on HAVE_OPROFILE
225 +       depends on !PREEMPT_RT_FULL
226         select RING_BUFFER
227         select RING_BUFFER_ALLOW_SWAP
228         help
229 @@ -52,6 +53,7 @@ config KPROBES
230  config JUMP_LABEL
231         bool "Optimize very unlikely/likely branches"
232         depends on HAVE_ARCH_JUMP_LABEL
233 +       depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
234         help
235           This option enables a transparent branch optimization that
236          makes certain almost-always-true or almost-always-false branch
237 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
238 index b5d529fdffab..5715844e83e3 100644
239 --- a/arch/arm/Kconfig
240 +++ b/arch/arm/Kconfig
241 @@ -36,7 +36,7 @@ config ARM
242         select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
243         select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
244         select HAVE_ARCH_HARDENED_USERCOPY
245 -       select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
246 +       select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
247         select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
248         select HAVE_ARCH_MMAP_RND_BITS if MMU
249         select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
250 @@ -75,6 +75,7 @@ config ARM
251         select HAVE_PERF_EVENTS
252         select HAVE_PERF_REGS
253         select HAVE_PERF_USER_STACK_DUMP
254 +       select HAVE_PREEMPT_LAZY
255         select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
256         select HAVE_REGS_AND_STACK_ACCESS_API
257         select HAVE_SYSCALL_TRACEPOINTS
258 diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
259 index e53638c8ed8a..6095a1649865 100644
260 --- a/arch/arm/include/asm/irq.h
261 +++ b/arch/arm/include/asm/irq.h
262 @@ -22,6 +22,8 @@
263  #endif
264  
265  #ifndef __ASSEMBLY__
266 +#include <linux/cpumask.h>
267 +
268  struct irqaction;
269  struct pt_regs;
270  extern void migrate_irqs(void);
271 diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
272 index 12ebfcc1d539..c962084605bc 100644
273 --- a/arch/arm/include/asm/switch_to.h
274 +++ b/arch/arm/include/asm/switch_to.h
275 @@ -3,6 +3,13 @@
276  
277  #include <linux/thread_info.h>
278  
279 +#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
280 +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
281 +#else
282 +static inline void
283 +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
284 +#endif
285 +
286  /*
287   * For v7 SMP cores running a preemptible kernel we may be pre-empted
288   * during a TLB maintenance operation, so execute an inner-shareable dsb
289 @@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
290  #define switch_to(prev,next,last)                                      \
291  do {                                                                   \
292         __complete_pending_tlbi();                                      \
293 +       switch_kmaps(prev, next);                                       \
294         last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));        \
295  } while (0)
296  
297 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
298 index 776757d1604a..1f36a4eccc72 100644
299 --- a/arch/arm/include/asm/thread_info.h
300 +++ b/arch/arm/include/asm/thread_info.h
301 @@ -49,6 +49,7 @@ struct cpu_context_save {
302  struct thread_info {
303         unsigned long           flags;          /* low level flags */
304         int                     preempt_count;  /* 0 => preemptable, <0 => bug */
305 +       int                     preempt_lazy_count; /* 0 => preemptable, <0 => bug */
306         mm_segment_t            addr_limit;     /* address limit */
307         struct task_struct      *task;          /* main task structure */
308         __u32                   cpu;            /* cpu */
309 @@ -142,7 +143,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
310  #define TIF_SYSCALL_TRACE      4       /* syscall trace active */
311  #define TIF_SYSCALL_AUDIT      5       /* syscall auditing active */
312  #define TIF_SYSCALL_TRACEPOINT 6       /* syscall tracepoint instrumentation */
313 -#define TIF_SECCOMP            7       /* seccomp syscall filtering active */
314 +#define TIF_SECCOMP            8       /* seccomp syscall filtering active */
315 +#define TIF_NEED_RESCHED_LAZY  7
316  
317  #define TIF_NOHZ               12      /* in adaptive nohz mode */
318  #define TIF_USING_IWMMXT       17
319 @@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
320  #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
321  #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
322  #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
323 +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
324  #define _TIF_UPROBE            (1 << TIF_UPROBE)
325  #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
326  #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
327 @@ -167,7 +170,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
328   * Change these and you break ASM code in entry-common.S
329   */
330  #define _TIF_WORK_MASK         (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
331 -                                _TIF_NOTIFY_RESUME | _TIF_UPROBE)
332 +                                _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
333 +                                _TIF_NEED_RESCHED_LAZY)
334  
335  #endif /* __KERNEL__ */
336  #endif /* __ASM_ARM_THREAD_INFO_H */
337 diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
338 index 608008229c7d..3866da3f7bb7 100644
339 --- a/arch/arm/kernel/asm-offsets.c
340 +++ b/arch/arm/kernel/asm-offsets.c
341 @@ -65,6 +65,7 @@ int main(void)
342    BLANK();
343    DEFINE(TI_FLAGS,             offsetof(struct thread_info, flags));
344    DEFINE(TI_PREEMPT,           offsetof(struct thread_info, preempt_count));
345 +  DEFINE(TI_PREEMPT_LAZY,      offsetof(struct thread_info, preempt_lazy_count));
346    DEFINE(TI_ADDR_LIMIT,                offsetof(struct thread_info, addr_limit));
347    DEFINE(TI_TASK,              offsetof(struct thread_info, task));
348    DEFINE(TI_CPU,               offsetof(struct thread_info, cpu));
349 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
350 index 9f157e7c51e7..468e224d76aa 100644
351 --- a/arch/arm/kernel/entry-armv.S
352 +++ b/arch/arm/kernel/entry-armv.S
353 @@ -220,11 +220,18 @@ ENDPROC(__dabt_svc)
354  
355  #ifdef CONFIG_PREEMPT
356         ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
357 -       ldr     r0, [tsk, #TI_FLAGS]            @ get flags
358         teq     r8, #0                          @ if preempt count != 0
359 +       bne     1f                              @ return from exception
360 +       ldr     r0, [tsk, #TI_FLAGS]            @ get flags
361 +       tst     r0, #_TIF_NEED_RESCHED          @ if NEED_RESCHED is set
362 +       blne    svc_preempt                     @ preempt!
363 +
364 +       ldr     r8, [tsk, #TI_PREEMPT_LAZY]     @ get preempt lazy count
365 +       teq     r8, #0                          @ if preempt lazy count != 0
366         movne   r0, #0                          @ force flags to 0
367 -       tst     r0, #_TIF_NEED_RESCHED
368 +       tst     r0, #_TIF_NEED_RESCHED_LAZY
369         blne    svc_preempt
370 +1:
371  #endif
372  
373         svc_exit r5, irq = 1                    @ return from exception
374 @@ -239,8 +246,14 @@ ENDPROC(__irq_svc)
375  1:     bl      preempt_schedule_irq            @ irq en/disable is done inside
376         ldr     r0, [tsk, #TI_FLAGS]            @ get new tasks TI_FLAGS
377         tst     r0, #_TIF_NEED_RESCHED
378 +       bne     1b
379 +       tst     r0, #_TIF_NEED_RESCHED_LAZY
380         reteq   r8                              @ go again
381 -       b       1b
382 +       ldr     r0, [tsk, #TI_PREEMPT_LAZY]     @ get preempt lazy count
383 +       teq     r0, #0                          @ if preempt lazy count != 0
384 +       beq     1b
385 +       ret     r8                              @ go again
386 +
387  #endif
388  
389  __und_fault:
390 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
391 index 10c3283d6c19..8872937862cc 100644
392 --- a/arch/arm/kernel/entry-common.S
393 +++ b/arch/arm/kernel/entry-common.S
394 @@ -36,7 +36,9 @@
395   UNWIND(.cantunwind    )
396         disable_irq_notrace                     @ disable interrupts
397         ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
398 -       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
399 +       tst     r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
400 +       bne     fast_work_pending
401 +       tst     r1, #_TIF_SECCOMP
402         bne     fast_work_pending
403  
404         /* perform architecture specific actions before user return */
405 @@ -62,8 +64,11 @@ ENDPROC(ret_fast_syscall)
406         str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
407         disable_irq_notrace                     @ disable interrupts
408         ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
409 -       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
410 +       tst     r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
411 +       bne     do_slower_path
412 +       tst     r1, #_TIF_SECCOMP
413         beq     no_work_pending
414 +do_slower_path:
415   UNWIND(.fnend         )
416  ENDPROC(ret_fast_syscall)
417  
418 diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
419 index 69bda1a5707e..1f665acaa6a9 100644
420 --- a/arch/arm/kernel/patch.c
421 +++ b/arch/arm/kernel/patch.c
422 @@ -15,7 +15,7 @@ struct patch {
423         unsigned int insn;
424  };
425  
426 -static DEFINE_SPINLOCK(patch_lock);
427 +static DEFINE_RAW_SPINLOCK(patch_lock);
428  
429  static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
430         __acquires(&patch_lock)
431 @@ -32,7 +32,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
432                 return addr;
433  
434         if (flags)
435 -               spin_lock_irqsave(&patch_lock, *flags);
436 +               raw_spin_lock_irqsave(&patch_lock, *flags);
437         else
438                 __acquire(&patch_lock);
439  
440 @@ -47,7 +47,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
441         clear_fixmap(fixmap);
442  
443         if (flags)
444 -               spin_unlock_irqrestore(&patch_lock, *flags);
445 +               raw_spin_unlock_irqrestore(&patch_lock, *flags);
446         else
447                 __release(&patch_lock);
448  }
449 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
450 index 91d2d5b01414..750550098b59 100644
451 --- a/arch/arm/kernel/process.c
452 +++ b/arch/arm/kernel/process.c
453 @@ -322,6 +322,30 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
454  }
455  
456  #ifdef CONFIG_MMU
457 +/*
458 + * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock.  If the lock is not
459 + * initialized by pgtable_page_ctor() then a coredump of the vector page will
460 + * fail.
461 + */
462 +static int __init vectors_user_mapping_init_page(void)
463 +{
464 +       struct page *page;
465 +       unsigned long addr = 0xffff0000;
466 +       pgd_t *pgd;
467 +       pud_t *pud;
468 +       pmd_t *pmd;
469 +
470 +       pgd = pgd_offset_k(addr);
471 +       pud = pud_offset(pgd, addr);
472 +       pmd = pmd_offset(pud, addr);
473 +       page = pmd_page(*(pmd));
474 +
475 +       pgtable_page_ctor(page);
476 +
477 +       return 0;
478 +}
479 +late_initcall(vectors_user_mapping_init_page);
480 +
481  #ifdef CONFIG_KUSER_HELPERS
482  /*
483   * The vectors page is always readable from user space for the
484 diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
485 index 7b8f2141427b..96541e00b74a 100644
486 --- a/arch/arm/kernel/signal.c
487 +++ b/arch/arm/kernel/signal.c
488 @@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
489          */
490         trace_hardirqs_off();
491         do {
492 -               if (likely(thread_flags & _TIF_NEED_RESCHED)) {
493 +               if (likely(thread_flags & (_TIF_NEED_RESCHED |
494 +                                          _TIF_NEED_RESCHED_LAZY))) {
495                         schedule();
496                 } else {
497                         if (unlikely(!user_mode(regs)))
498 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
499 index 7dd14e8395e6..4cd7e3d98035 100644
500 --- a/arch/arm/kernel/smp.c
501 +++ b/arch/arm/kernel/smp.c
502 @@ -234,8 +234,6 @@ int __cpu_disable(void)
503         flush_cache_louis();
504         local_flush_tlb_all();
505  
506 -       clear_tasks_mm_cpumask(cpu);
507 -
508         return 0;
509  }
510  
511 @@ -251,6 +249,9 @@ void __cpu_die(unsigned int cpu)
512                 pr_err("CPU%u: cpu didn't die\n", cpu);
513                 return;
514         }
515 +
516 +       clear_tasks_mm_cpumask(cpu);
517 +
518         pr_notice("CPU%u: shutdown\n", cpu);
519  
520         /*
521 diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
522 index 0bee233fef9a..314cfb232a63 100644
523 --- a/arch/arm/kernel/unwind.c
524 +++ b/arch/arm/kernel/unwind.c
525 @@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
526  static const struct unwind_idx *__origin_unwind_idx;
527  extern const struct unwind_idx __stop_unwind_idx[];
528  
529 -static DEFINE_SPINLOCK(unwind_lock);
530 +static DEFINE_RAW_SPINLOCK(unwind_lock);
531  static LIST_HEAD(unwind_tables);
532  
533  /* Convert a prel31 symbol to an absolute address */
534 @@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
535                 /* module unwind tables */
536                 struct unwind_table *table;
537  
538 -               spin_lock_irqsave(&unwind_lock, flags);
539 +               raw_spin_lock_irqsave(&unwind_lock, flags);
540                 list_for_each_entry(table, &unwind_tables, list) {
541                         if (addr >= table->begin_addr &&
542                             addr < table->end_addr) {
543 @@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
544                                 break;
545                         }
546                 }
547 -               spin_unlock_irqrestore(&unwind_lock, flags);
548 +               raw_spin_unlock_irqrestore(&unwind_lock, flags);
549         }
550  
551         pr_debug("%s: idx = %p\n", __func__, idx);
552 @@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
553         tab->begin_addr = text_addr;
554         tab->end_addr = text_addr + text_size;
555  
556 -       spin_lock_irqsave(&unwind_lock, flags);
557 +       raw_spin_lock_irqsave(&unwind_lock, flags);
558         list_add_tail(&tab->list, &unwind_tables);
559 -       spin_unlock_irqrestore(&unwind_lock, flags);
560 +       raw_spin_unlock_irqrestore(&unwind_lock, flags);
561  
562         return tab;
563  }
564 @@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
565         if (!tab)
566                 return;
567  
568 -       spin_lock_irqsave(&unwind_lock, flags);
569 +       raw_spin_lock_irqsave(&unwind_lock, flags);
570         list_del(&tab->list);
571 -       spin_unlock_irqrestore(&unwind_lock, flags);
572 +       raw_spin_unlock_irqrestore(&unwind_lock, flags);
573  
574         kfree(tab);
575  }
576 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
577 index 19b5f5c1c0ff..82aa639e6737 100644
578 --- a/arch/arm/kvm/arm.c
579 +++ b/arch/arm/kvm/arm.c
580 @@ -619,7 +619,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
581                  * involves poking the GIC, which must be done in a
582                  * non-preemptible context.
583                  */
584 -               preempt_disable();
585 +               migrate_disable();
586                 kvm_pmu_flush_hwstate(vcpu);
587                 kvm_timer_flush_hwstate(vcpu);
588                 kvm_vgic_flush_hwstate(vcpu);
589 @@ -640,7 +640,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
590                         kvm_pmu_sync_hwstate(vcpu);
591                         kvm_timer_sync_hwstate(vcpu);
592                         kvm_vgic_sync_hwstate(vcpu);
593 -                       preempt_enable();
594 +                       migrate_enable();
595                         continue;
596                 }
597  
598 @@ -696,7 +696,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
599  
600                 kvm_vgic_sync_hwstate(vcpu);
601  
602 -               preempt_enable();
603 +               migrate_enable();
604  
605                 ret = handle_exit(vcpu, run, ret);
606         }
607 diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
608 index 98ffe1e62ad5..df9769ddece5 100644
609 --- a/arch/arm/mach-exynos/platsmp.c
610 +++ b/arch/arm/mach-exynos/platsmp.c
611 @@ -229,7 +229,7 @@ static void __iomem *scu_base_addr(void)
612         return (void __iomem *)(S5P_VA_SCU);
613  }
614  
615 -static DEFINE_SPINLOCK(boot_lock);
616 +static DEFINE_RAW_SPINLOCK(boot_lock);
617  
618  static void exynos_secondary_init(unsigned int cpu)
619  {
620 @@ -242,8 +242,8 @@ static void exynos_secondary_init(unsigned int cpu)
621         /*
622          * Synchronise with the boot thread.
623          */
624 -       spin_lock(&boot_lock);
625 -       spin_unlock(&boot_lock);
626 +       raw_spin_lock(&boot_lock);
627 +       raw_spin_unlock(&boot_lock);
628  }
629  
630  int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
631 @@ -307,7 +307,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
632          * Set synchronisation state between this boot processor
633          * and the secondary one
634          */
635 -       spin_lock(&boot_lock);
636 +       raw_spin_lock(&boot_lock);
637  
638         /*
639          * The secondary processor is waiting to be released from
640 @@ -334,7 +334,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
641  
642                 if (timeout == 0) {
643                         printk(KERN_ERR "cpu1 power enable failed");
644 -                       spin_unlock(&boot_lock);
645 +                       raw_spin_unlock(&boot_lock);
646                         return -ETIMEDOUT;
647                 }
648         }
649 @@ -380,7 +380,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
650          * calibrations, then wait for it to finish
651          */
652  fail:
653 -       spin_unlock(&boot_lock);
654 +       raw_spin_unlock(&boot_lock);
655  
656         return pen_release != -1 ? ret : 0;
657  }
658 diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c
659 index 4b653a8cb75c..b03d5a922cb1 100644
660 --- a/arch/arm/mach-hisi/platmcpm.c
661 +++ b/arch/arm/mach-hisi/platmcpm.c
662 @@ -61,7 +61,7 @@
663  
664  static void __iomem *sysctrl, *fabric;
665  static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
666 -static DEFINE_SPINLOCK(boot_lock);
667 +static DEFINE_RAW_SPINLOCK(boot_lock);
668  static u32 fabric_phys_addr;
669  /*
670   * [0]: bootwrapper physical address
671 @@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
672         if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
673                 return -EINVAL;
674  
675 -       spin_lock_irq(&boot_lock);
676 +       raw_spin_lock_irq(&boot_lock);
677  
678         if (hip04_cpu_table[cluster][cpu])
679                 goto out;
680 @@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
681  
682  out:
683         hip04_cpu_table[cluster][cpu]++;
684 -       spin_unlock_irq(&boot_lock);
685 +       raw_spin_unlock_irq(&boot_lock);
686  
687         return 0;
688  }
689 @@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu)
690         cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
691         cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
692  
693 -       spin_lock(&boot_lock);
694 +       raw_spin_lock(&boot_lock);
695         hip04_cpu_table[cluster][cpu]--;
696         if (hip04_cpu_table[cluster][cpu] == 1) {
697                 /* A power_up request went ahead of us. */
698 -               spin_unlock(&boot_lock);
699 +               raw_spin_unlock(&boot_lock);
700                 return;
701         } else if (hip04_cpu_table[cluster][cpu] > 1) {
702                 pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
703 @@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu)
704         }
705  
706         last_man = hip04_cluster_is_down(cluster);
707 -       spin_unlock(&boot_lock);
708 +       raw_spin_unlock(&boot_lock);
709         if (last_man) {
710                 /* Since it's Cortex A15, disable L2 prefetching. */
711                 asm volatile(
712 @@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu)
713                cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
714  
715         count = TIMEOUT_MSEC / POLL_MSEC;
716 -       spin_lock_irq(&boot_lock);
717 +       raw_spin_lock_irq(&boot_lock);
718         for (tries = 0; tries < count; tries++) {
719                 if (hip04_cpu_table[cluster][cpu])
720                         goto err;
721 @@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
722                 data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
723                 if (data & CORE_WFI_STATUS(cpu))
724                         break;
725 -               spin_unlock_irq(&boot_lock);
726 +               raw_spin_unlock_irq(&boot_lock);
727                 /* Wait for clean L2 when the whole cluster is down. */
728                 msleep(POLL_MSEC);
729 -               spin_lock_irq(&boot_lock);
730 +               raw_spin_lock_irq(&boot_lock);
731         }
732         if (tries >= count)
733                 goto err;
734 @@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
735                 goto err;
736         if (hip04_cluster_is_down(cluster))
737                 hip04_set_snoop_filter(cluster, 0);
738 -       spin_unlock_irq(&boot_lock);
739 +       raw_spin_unlock_irq(&boot_lock);
740         return 1;
741  err:
742 -       spin_unlock_irq(&boot_lock);
743 +       raw_spin_unlock_irq(&boot_lock);
744         return 0;
745  }
746  #endif
747 diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
748 index b4de3da6dffa..b52893319d75 100644
749 --- a/arch/arm/mach-omap2/omap-smp.c
750 +++ b/arch/arm/mach-omap2/omap-smp.c
751 @@ -64,7 +64,7 @@ static const struct omap_smp_config omap5_cfg __initconst = {
752         .startup_addr = omap5_secondary_startup,
753  };
754  
755 -static DEFINE_SPINLOCK(boot_lock);
756 +static DEFINE_RAW_SPINLOCK(boot_lock);
757  
758  void __iomem *omap4_get_scu_base(void)
759  {
760 @@ -131,8 +131,8 @@ static void omap4_secondary_init(unsigned int cpu)
761         /*
762          * Synchronise with the boot thread.
763          */
764 -       spin_lock(&boot_lock);
765 -       spin_unlock(&boot_lock);
766 +       raw_spin_lock(&boot_lock);
767 +       raw_spin_unlock(&boot_lock);
768  }
769  
770  static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
771 @@ -146,7 +146,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
772          * Set synchronisation state between this boot processor
773          * and the secondary one
774          */
775 -       spin_lock(&boot_lock);
776 +       raw_spin_lock(&boot_lock);
777  
778         /*
779          * Update the AuxCoreBoot0 with boot state for secondary core.
780 @@ -223,7 +223,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
781          * Now the secondary core is starting up let it run its
782          * calibrations, then wait for it to finish
783          */
784 -       spin_unlock(&boot_lock);
785 +       raw_spin_unlock(&boot_lock);
786  
787         return 0;
788  }
789 diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
790 index 0875b99add18..18b6d98d2581 100644
791 --- a/arch/arm/mach-prima2/platsmp.c
792 +++ b/arch/arm/mach-prima2/platsmp.c
793 @@ -22,7 +22,7 @@
794  
795  static void __iomem *clk_base;
796  
797 -static DEFINE_SPINLOCK(boot_lock);
798 +static DEFINE_RAW_SPINLOCK(boot_lock);
799  
800  static void sirfsoc_secondary_init(unsigned int cpu)
801  {
802 @@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu)
803         /*
804          * Synchronise with the boot thread.
805          */
806 -       spin_lock(&boot_lock);
807 -       spin_unlock(&boot_lock);
808 +       raw_spin_lock(&boot_lock);
809 +       raw_spin_unlock(&boot_lock);
810  }
811  
812  static const struct of_device_id clk_ids[]  = {
813 @@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
814         /* make sure write buffer is drained */
815         mb();
816  
817 -       spin_lock(&boot_lock);
818 +       raw_spin_lock(&boot_lock);
819  
820         /*
821          * The secondary processor is waiting to be released from
822 @@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
823          * now the secondary core is starting up let it run its
824          * calibrations, then wait for it to finish
825          */
826 -       spin_unlock(&boot_lock);
827 +       raw_spin_unlock(&boot_lock);
828  
829         return pen_release != -1 ? -ENOSYS : 0;
830  }
831 diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
832 index 5494c9e0c909..e8ce157d3548 100644
833 --- a/arch/arm/mach-qcom/platsmp.c
834 +++ b/arch/arm/mach-qcom/platsmp.c
835 @@ -46,7 +46,7 @@
836  
837  extern void secondary_startup_arm(void);
838  
839 -static DEFINE_SPINLOCK(boot_lock);
840 +static DEFINE_RAW_SPINLOCK(boot_lock);
841  
842  #ifdef CONFIG_HOTPLUG_CPU
843  static void qcom_cpu_die(unsigned int cpu)
844 @@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu)
845         /*
846          * Synchronise with the boot thread.
847          */
848 -       spin_lock(&boot_lock);
849 -       spin_unlock(&boot_lock);
850 +       raw_spin_lock(&boot_lock);
851 +       raw_spin_unlock(&boot_lock);
852  }
853  
854  static int scss_release_secondary(unsigned int cpu)
855 @@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
856          * set synchronisation state between this boot processor
857          * and the secondary one
858          */
859 -       spin_lock(&boot_lock);
860 +       raw_spin_lock(&boot_lock);
861  
862         /*
863          * Send the secondary CPU a soft interrupt, thereby causing
864 @@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
865          * now the secondary core is starting up let it run its
866          * calibrations, then wait for it to finish
867          */
868 -       spin_unlock(&boot_lock);
869 +       raw_spin_unlock(&boot_lock);
870  
871         return ret;
872  }
873 diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
874 index 8d1e2d551786..7fa56cc78118 100644
875 --- a/arch/arm/mach-spear/platsmp.c
876 +++ b/arch/arm/mach-spear/platsmp.c
877 @@ -32,7 +32,7 @@ static void write_pen_release(int val)
878         sync_cache_w(&pen_release);
879  }
880  
881 -static DEFINE_SPINLOCK(boot_lock);
882 +static DEFINE_RAW_SPINLOCK(boot_lock);
883  
884  static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
885  
886 @@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu)
887         /*
888          * Synchronise with the boot thread.
889          */
890 -       spin_lock(&boot_lock);
891 -       spin_unlock(&boot_lock);
892 +       raw_spin_lock(&boot_lock);
893 +       raw_spin_unlock(&boot_lock);
894  }
895  
896  static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
897 @@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
898          * set synchronisation state between this boot processor
899          * and the secondary one
900          */
901 -       spin_lock(&boot_lock);
902 +       raw_spin_lock(&boot_lock);
903  
904         /*
905          * The secondary processor is waiting to be released from
906 @@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
907          * now the secondary core is starting up let it run its
908          * calibrations, then wait for it to finish
909          */
910 -       spin_unlock(&boot_lock);
911 +       raw_spin_unlock(&boot_lock);
912  
913         return pen_release != -1 ? -ENOSYS : 0;
914  }
915 diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
916 index ea5a2277ee46..b988e081ac79 100644
917 --- a/arch/arm/mach-sti/platsmp.c
918 +++ b/arch/arm/mach-sti/platsmp.c
919 @@ -35,7 +35,7 @@ static void write_pen_release(int val)
920         sync_cache_w(&pen_release);
921  }
922  
923 -static DEFINE_SPINLOCK(boot_lock);
924 +static DEFINE_RAW_SPINLOCK(boot_lock);
925  
926  static void sti_secondary_init(unsigned int cpu)
927  {
928 @@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu)
929         /*
930          * Synchronise with the boot thread.
931          */
932 -       spin_lock(&boot_lock);
933 -       spin_unlock(&boot_lock);
934 +       raw_spin_lock(&boot_lock);
935 +       raw_spin_unlock(&boot_lock);
936  }
937  
938  static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
939 @@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
940          * set synchronisation state between this boot processor
941          * and the secondary one
942          */
943 -       spin_lock(&boot_lock);
944 +       raw_spin_lock(&boot_lock);
945  
946         /*
947          * The secondary processor is waiting to be released from
948 @@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
949          * now the secondary core is starting up let it run its
950          * calibrations, then wait for it to finish
951          */
952 -       spin_unlock(&boot_lock);
953 +       raw_spin_unlock(&boot_lock);
954  
955         return pen_release != -1 ? -ENOSYS : 0;
956  }
957 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
958 index 0122ad1a6027..926b1be48043 100644
959 --- a/arch/arm/mm/fault.c
960 +++ b/arch/arm/mm/fault.c
961 @@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
962         if (addr < TASK_SIZE)
963                 return do_page_fault(addr, fsr, regs);
964  
965 +       if (interrupts_enabled(regs))
966 +               local_irq_enable();
967 +
968         if (user_mode(regs))
969                 goto bad_area;
970  
971 @@ -497,6 +500,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
972  static int
973  do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
974  {
975 +       if (interrupts_enabled(regs))
976 +               local_irq_enable();
977 +
978         do_bad_area(addr, fsr, regs);
979         return 0;
980  }
981 diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
982 index d02f8187b1cc..542692dbd40a 100644
983 --- a/arch/arm/mm/highmem.c
984 +++ b/arch/arm/mm/highmem.c
985 @@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
986         return *ptep;
987  }
988  
989 +static unsigned int fixmap_idx(int type)
990 +{
991 +       return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
992 +}
993 +
994  void *kmap(struct page *page)
995  {
996         might_sleep();
997 @@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap);
998  
999  void *kmap_atomic(struct page *page)
1000  {
1001 +       pte_t pte = mk_pte(page, kmap_prot);
1002         unsigned int idx;
1003         unsigned long vaddr;
1004         void *kmap;
1005         int type;
1006  
1007 -       preempt_disable();
1008 +       preempt_disable_nort();
1009         pagefault_disable();
1010         if (!PageHighMem(page))
1011                 return page_address(page);
1012 @@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page)
1013  
1014         type = kmap_atomic_idx_push();
1015  
1016 -       idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
1017 +       idx = fixmap_idx(type);
1018         vaddr = __fix_to_virt(idx);
1019  #ifdef CONFIG_DEBUG_HIGHMEM
1020         /*
1021 @@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page)
1022          * in place, so the contained TLB flush ensures the TLB is updated
1023          * with the new mapping.
1024          */
1025 -       set_fixmap_pte(idx, mk_pte(page, kmap_prot));
1026 +#ifdef CONFIG_PREEMPT_RT_FULL
1027 +       current->kmap_pte[type] = pte;
1028 +#endif
1029 +       set_fixmap_pte(idx, pte);
1030  
1031         return (void *)vaddr;
1032  }
1033 @@ -106,44 +115,75 @@ void __kunmap_atomic(void *kvaddr)
1034  
1035         if (kvaddr >= (void *)FIXADDR_START) {
1036                 type = kmap_atomic_idx();
1037 -               idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
1038 +               idx = fixmap_idx(type);
1039  
1040                 if (cache_is_vivt())
1041                         __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
1042 +#ifdef CONFIG_PREEMPT_RT_FULL
1043 +               current->kmap_pte[type] = __pte(0);
1044 +#endif
1045  #ifdef CONFIG_DEBUG_HIGHMEM
1046                 BUG_ON(vaddr != __fix_to_virt(idx));
1047 -               set_fixmap_pte(idx, __pte(0));
1048  #else
1049                 (void) idx;  /* to kill a warning */
1050  #endif
1051 +               set_fixmap_pte(idx, __pte(0));
1052                 kmap_atomic_idx_pop();
1053         } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
1054                 /* this address was obtained through kmap_high_get() */
1055                 kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
1056         }
1057         pagefault_enable();
1058 -       preempt_enable();
1059 +       preempt_enable_nort();
1060  }
1061  EXPORT_SYMBOL(__kunmap_atomic);
1062  
1063  void *kmap_atomic_pfn(unsigned long pfn)
1064  {
1065 +       pte_t pte = pfn_pte(pfn, kmap_prot);
1066         unsigned long vaddr;
1067         int idx, type;
1068         struct page *page = pfn_to_page(pfn);
1069  
1070 -       preempt_disable();
1071 +       preempt_disable_nort();
1072         pagefault_disable();
1073         if (!PageHighMem(page))
1074                 return page_address(page);
1075  
1076         type = kmap_atomic_idx_push();
1077 -       idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
1078 +       idx = fixmap_idx(type);
1079         vaddr = __fix_to_virt(idx);
1080  #ifdef CONFIG_DEBUG_HIGHMEM
1081         BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
1082  #endif
1083 -       set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
1084 +#ifdef CONFIG_PREEMPT_RT_FULL
1085 +       current->kmap_pte[type] = pte;
1086 +#endif
1087 +       set_fixmap_pte(idx, pte);
1088  
1089         return (void *)vaddr;
1090  }
1091 +#if defined CONFIG_PREEMPT_RT_FULL
1092 +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
1093 +{
1094 +       int i;
1095 +
1096 +       /*
1097 +        * Clear @prev's kmap_atomic mappings
1098 +        */
1099 +       for (i = 0; i < prev_p->kmap_idx; i++) {
1100 +               int idx = fixmap_idx(i);
1101 +
1102 +               set_fixmap_pte(idx, __pte(0));
1103 +       }
1104 +       /*
1105 +        * Restore @next_p's kmap_atomic mappings
1106 +        */
1107 +       for (i = 0; i < next_p->kmap_idx; i++) {
1108 +               int idx = fixmap_idx(i);
1109 +
1110 +               if (!pte_none(next_p->kmap_pte[i]))
1111 +                       set_fixmap_pte(idx, next_p->kmap_pte[i]);
1112 +       }
1113 +}
1114 +#endif
1115 diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
1116 index c2366510187a..6b60f582b738 100644
1117 --- a/arch/arm/plat-versatile/platsmp.c
1118 +++ b/arch/arm/plat-versatile/platsmp.c
1119 @@ -32,7 +32,7 @@ static void write_pen_release(int val)
1120         sync_cache_w(&pen_release);
1121  }
1122  
1123 -static DEFINE_SPINLOCK(boot_lock);
1124 +static DEFINE_RAW_SPINLOCK(boot_lock);
1125  
1126  void versatile_secondary_init(unsigned int cpu)
1127  {
1128 @@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu)
1129         /*
1130          * Synchronise with the boot thread.
1131          */
1132 -       spin_lock(&boot_lock);
1133 -       spin_unlock(&boot_lock);
1134 +       raw_spin_lock(&boot_lock);
1135 +       raw_spin_unlock(&boot_lock);
1136  }
1137  
1138  int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
1139 @@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
1140          * Set synchronisation state between this boot processor
1141          * and the secondary one
1142          */
1143 -       spin_lock(&boot_lock);
1144 +       raw_spin_lock(&boot_lock);
1145  
1146         /*
1147          * This is really belt and braces; we hold unintended secondary
1148 @@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
1149          * now the secondary core is starting up let it run its
1150          * calibrations, then wait for it to finish
1151          */
1152 -       spin_unlock(&boot_lock);
1153 +       raw_spin_unlock(&boot_lock);
1154  
1155         return pen_release != -1 ? -ENOSYS : 0;
1156  }
1157 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
1158 index 969ef880d234..1182fe883771 100644
1159 --- a/arch/arm64/Kconfig
1160 +++ b/arch/arm64/Kconfig
1161 @@ -91,6 +91,7 @@ config ARM64
1162         select HAVE_PERF_EVENTS
1163         select HAVE_PERF_REGS
1164         select HAVE_PERF_USER_STACK_DUMP
1165 +       select HAVE_PREEMPT_LAZY
1166         select HAVE_REGS_AND_STACK_ACCESS_API
1167         select HAVE_RCU_TABLE_FREE
1168         select HAVE_SYSCALL_TRACEPOINTS
1169 @@ -694,7 +695,7 @@ config XEN_DOM0
1170  
1171  config XEN
1172         bool "Xen guest support on ARM64"
1173 -       depends on ARM64 && OF
1174 +       depends on ARM64 && OF && !PREEMPT_RT_FULL
1175         select SWIOTLB_XEN
1176         select PARAVIRT
1177         help
1178 diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
1179 index e9ea5a6bd449..6c500ad63c6a 100644
1180 --- a/arch/arm64/include/asm/thread_info.h
1181 +++ b/arch/arm64/include/asm/thread_info.h
1182 @@ -49,6 +49,7 @@ struct thread_info {
1183         mm_segment_t            addr_limit;     /* address limit */
1184         struct task_struct      *task;          /* main task structure */
1185         int                     preempt_count;  /* 0 => preemptable, <0 => bug */
1186 +       int                     preempt_lazy_count; /* 0 => preemptable, <0 => bug */
1187         int                     cpu;            /* cpu */
1188  };
1189  
1190 @@ -112,6 +113,7 @@ static inline struct thread_info *current_thread_info(void)
1191  #define TIF_NEED_RESCHED       1
1192  #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
1193  #define TIF_FOREIGN_FPSTATE    3       /* CPU's FP state is not current's */
1194 +#define TIF_NEED_RESCHED_LAZY  4
1195  #define TIF_NOHZ               7
1196  #define TIF_SYSCALL_TRACE      8
1197  #define TIF_SYSCALL_AUDIT      9
1198 @@ -127,6 +129,7 @@ static inline struct thread_info *current_thread_info(void)
1199  #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
1200  #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
1201  #define _TIF_FOREIGN_FPSTATE   (1 << TIF_FOREIGN_FPSTATE)
1202 +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
1203  #define _TIF_NOHZ              (1 << TIF_NOHZ)
1204  #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
1205  #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
1206 @@ -135,7 +138,9 @@ static inline struct thread_info *current_thread_info(void)
1207  #define _TIF_32BIT             (1 << TIF_32BIT)
1208  
1209  #define _TIF_WORK_MASK         (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
1210 -                                _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
1211 +                                _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
1212 +                                _TIF_NEED_RESCHED_LAZY)
1213 +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
1214  
1215  #define _TIF_SYSCALL_WORK      (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1216                                  _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
1217 diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
1218 index 4a2f0f0fef32..6bf2bc17c400 100644
1219 --- a/arch/arm64/kernel/asm-offsets.c
1220 +++ b/arch/arm64/kernel/asm-offsets.c
1221 @@ -38,6 +38,7 @@ int main(void)
1222    BLANK();
1223    DEFINE(TI_FLAGS,             offsetof(struct thread_info, flags));
1224    DEFINE(TI_PREEMPT,           offsetof(struct thread_info, preempt_count));
1225 +  DEFINE(TI_PREEMPT_LAZY,      offsetof(struct thread_info, preempt_lazy_count));
1226    DEFINE(TI_ADDR_LIMIT,                offsetof(struct thread_info, addr_limit));
1227    DEFINE(TI_TASK,              offsetof(struct thread_info, task));
1228    DEFINE(TI_CPU,               offsetof(struct thread_info, cpu));
1229 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
1230 index 79b0fe24d5b7..f3c959ade308 100644
1231 --- a/arch/arm64/kernel/entry.S
1232 +++ b/arch/arm64/kernel/entry.S
1233 @@ -428,11 +428,16 @@ ENDPROC(el1_sync)
1234  
1235  #ifdef CONFIG_PREEMPT
1236         ldr     w24, [tsk, #TI_PREEMPT]         // get preempt count
1237 -       cbnz    w24, 1f                         // preempt count != 0
1238 +       cbnz    w24, 2f                         // preempt count != 0
1239         ldr     x0, [tsk, #TI_FLAGS]            // get flags
1240 -       tbz     x0, #TIF_NEED_RESCHED, 1f       // needs rescheduling?
1241 -       bl      el1_preempt
1242 +       tbnz    x0, #TIF_NEED_RESCHED, 1f       // needs rescheduling?
1243 +
1244 +       ldr     w24, [tsk, #TI_PREEMPT_LAZY]    // get preempt lazy count
1245 +       cbnz    w24, 2f                         // preempt lazy count != 0
1246 +       tbz     x0, #TIF_NEED_RESCHED_LAZY, 2f  // needs rescheduling?
1247  1:
1248 +       bl      el1_preempt
1249 +2:
1250  #endif
1251  #ifdef CONFIG_TRACE_IRQFLAGS
1252         bl      trace_hardirqs_on
1253 @@ -446,6 +451,7 @@ ENDPROC(el1_irq)
1254  1:     bl      preempt_schedule_irq            // irq en/disable is done inside
1255         ldr     x0, [tsk, #TI_FLAGS]            // get new tasks TI_FLAGS
1256         tbnz    x0, #TIF_NEED_RESCHED, 1b       // needs rescheduling?
1257 +       tbnz    x0, #TIF_NEED_RESCHED_LAZY, 1b  // needs rescheduling?
1258         ret     x24
1259  #endif
1260  
1261 diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
1262 index 404dd67080b9..639dc6d12e72 100644
1263 --- a/arch/arm64/kernel/signal.c
1264 +++ b/arch/arm64/kernel/signal.c
1265 @@ -409,7 +409,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
1266          */
1267         trace_hardirqs_off();
1268         do {
1269 -               if (thread_flags & _TIF_NEED_RESCHED) {
1270 +               if (thread_flags & _TIF_NEED_RESCHED_MASK) {
1271                         schedule();
1272                 } else {
1273                         local_irq_enable();
1274 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
1275 index b3c5bde43d34..8122bf058de0 100644
1276 --- a/arch/mips/Kconfig
1277 +++ b/arch/mips/Kconfig
1278 @@ -2514,7 +2514,7 @@ config MIPS_ASID_BITS_VARIABLE
1279  #
1280  config HIGHMEM
1281         bool "High Memory Support"
1282 -       depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
1283 +       depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
1284  
1285  config CPU_SUPPORTS_HIGHMEM
1286         bool
1287 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
1288 index 65fba4c34cd7..4b5ba68910e0 100644
1289 --- a/arch/powerpc/Kconfig
1290 +++ b/arch/powerpc/Kconfig
1291 @@ -52,10 +52,11 @@ config LOCKDEP_SUPPORT
1292  
1293  config RWSEM_GENERIC_SPINLOCK
1294         bool
1295 +       default y if PREEMPT_RT_FULL
1296  
1297  config RWSEM_XCHGADD_ALGORITHM
1298         bool
1299 -       default y
1300 +       default y if !PREEMPT_RT_FULL
1301  
1302  config GENERIC_LOCKBREAK
1303         bool
1304 @@ -134,6 +135,7 @@ config PPC
1305         select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
1306         select GENERIC_STRNCPY_FROM_USER
1307         select GENERIC_STRNLEN_USER
1308 +       select HAVE_PREEMPT_LAZY
1309         select HAVE_MOD_ARCH_SPECIFIC
1310         select MODULES_USE_ELF_RELA
1311         select CLONE_BACKWARDS
1312 @@ -321,7 +323,7 @@ menu "Kernel options"
1313  
1314  config HIGHMEM
1315         bool "High memory support"
1316 -       depends on PPC32
1317 +       depends on PPC32 && !PREEMPT_RT_FULL
1318  
1319  source kernel/Kconfig.hz
1320  source kernel/Kconfig.preempt
1321 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
1322 index 87e4b2d8dcd4..981e501a4359 100644
1323 --- a/arch/powerpc/include/asm/thread_info.h
1324 +++ b/arch/powerpc/include/asm/thread_info.h
1325 @@ -43,6 +43,8 @@ struct thread_info {
1326         int             cpu;                    /* cpu we're on */
1327         int             preempt_count;          /* 0 => preemptable,
1328                                                    <0 => BUG */
1329 +       int             preempt_lazy_count;     /* 0 => preemptable,
1330 +                                                  <0 => BUG */
1331         unsigned long   local_flags;            /* private flags for thread */
1332  #ifdef CONFIG_LIVEPATCH
1333         unsigned long *livepatch_sp;
1334 @@ -88,8 +90,7 @@ static inline struct thread_info *current_thread_info(void)
1335  #define TIF_SYSCALL_TRACE      0       /* syscall trace active */
1336  #define TIF_SIGPENDING         1       /* signal pending */
1337  #define TIF_NEED_RESCHED       2       /* rescheduling necessary */
1338 -#define TIF_POLLING_NRFLAG     3       /* true if poll_idle() is polling
1339 -                                          TIF_NEED_RESCHED */
1340 +#define TIF_NEED_RESCHED_LAZY  3       /* lazy rescheduling necessary */
1341  #define TIF_32BIT              4       /* 32 bit binary */
1342  #define TIF_RESTORE_TM         5       /* need to restore TM FP/VEC/VSX */
1343  #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
1344 @@ -107,6 +108,8 @@ static inline struct thread_info *current_thread_info(void)
1345  #if defined(CONFIG_PPC64)
1346  #define TIF_ELF2ABI            18      /* function descriptors must die! */
1347  #endif
1348 +#define TIF_POLLING_NRFLAG     19      /* true if poll_idle() is polling
1349 +                                          TIF_NEED_RESCHED */
1350  
1351  /* as above, but as bit values */
1352  #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
1353 @@ -125,14 +128,16 @@ static inline struct thread_info *current_thread_info(void)
1354  #define _TIF_SYSCALL_TRACEPOINT        (1<<TIF_SYSCALL_TRACEPOINT)
1355  #define _TIF_EMULATE_STACK_STORE       (1<<TIF_EMULATE_STACK_STORE)
1356  #define _TIF_NOHZ              (1<<TIF_NOHZ)
1357 +#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
1358  #define _TIF_SYSCALL_DOTRACE   (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1359                                  _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
1360                                  _TIF_NOHZ)
1361  
1362  #define _TIF_USER_WORK_MASK    (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
1363                                  _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
1364 -                                _TIF_RESTORE_TM)
1365 +                                _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY)
1366  #define _TIF_PERSYSCALL_MASK   (_TIF_RESTOREALL|_TIF_NOERROR)
1367 +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
1368  
1369  /* Bits in local_flags */
1370  /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
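
The thread_info.h hunk above introduces TIF_NEED_RESCHED_LAZY next to TIF_NEED_RESCHED, relocates TIF_POLLING_NRFLAG, and folds both resched bits into _TIF_NEED_RESCHED_MASK so the return paths can test a single mask. A minimal stand-alone sketch of that mask construction (bit values copied from the hunk; the helper name is invented for illustration):

    /* Illustrative rendering of the mask built above. */
    #define TIF_NEED_RESCHED        2
    #define TIF_NEED_RESCHED_LAZY   3

    #define _TIF_NEED_RESCHED       (1UL << TIF_NEED_RESCHED)
    #define _TIF_NEED_RESCHED_LAZY  (1UL << TIF_NEED_RESCHED_LAZY)
    #define _TIF_NEED_RESCHED_MASK  (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

    /* One test now covers both the immediate and the lazy request. */
    static inline int resched_requested(unsigned long flags)
    {
            return (flags & _TIF_NEED_RESCHED_MASK) != 0;
    }
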
1371 diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
1372 index c833d88c423d..96e9fbc3f684 100644
1373 --- a/arch/powerpc/kernel/asm-offsets.c
1374 +++ b/arch/powerpc/kernel/asm-offsets.c
1375 @@ -156,6 +156,7 @@ int main(void)
1376         DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
1377         DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
1378         DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
1379 +       DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
1380         DEFINE(TI_TASK, offsetof(struct thread_info, task));
1381         DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
1382  
1383 diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
1384 index 3841d749a430..6dbaeff192b9 100644
1385 --- a/arch/powerpc/kernel/entry_32.S
1386 +++ b/arch/powerpc/kernel/entry_32.S
1387 @@ -835,7 +835,14 @@ user_exc_return:           /* r10 contains MSR_KERNEL here */
1388         cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
1389         bne     restore
1390         andi.   r8,r8,_TIF_NEED_RESCHED
1391 +       bne+    1f
1392 +       lwz     r0,TI_PREEMPT_LAZY(r9)
1393 +       cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
1394 +       bne     restore
1395 +       lwz     r0,TI_FLAGS(r9)
1396 +       andi.   r0,r0,_TIF_NEED_RESCHED_LAZY
1397         beq+    restore
1398 +1:
1399         lwz     r3,_MSR(r1)
1400         andi.   r0,r3,MSR_EE    /* interrupts off? */
1401         beq     restore         /* don't schedule if so */
1402 @@ -846,11 +853,11 @@ user_exc_return:          /* r10 contains MSR_KERNEL here */
1403          */
1404         bl      trace_hardirqs_off
1405  #endif
1406 -1:     bl      preempt_schedule_irq
1407 +2:     bl      preempt_schedule_irq
1408         CURRENT_THREAD_INFO(r9, r1)
1409         lwz     r3,TI_FLAGS(r9)
1410 -       andi.   r0,r3,_TIF_NEED_RESCHED
1411 -       bne-    1b
1412 +       andi.   r0,r3,_TIF_NEED_RESCHED_MASK
1413 +       bne-    2b
1414  #ifdef CONFIG_TRACE_IRQFLAGS
1415         /* And now, to properly rebalance the above, we tell lockdep they
1416          * are being turned back on, which will happen when we return
1417 @@ -1171,7 +1178,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
1418  #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1419  
1420  do_work:                       /* r10 contains MSR_KERNEL here */
1421 -       andi.   r0,r9,_TIF_NEED_RESCHED
1422 +       andi.   r0,r9,_TIF_NEED_RESCHED_MASK
1423         beq     do_user_signal
1424  
1425  do_resched:                    /* r10 contains MSR_KERNEL here */
1426 @@ -1192,7 +1199,7 @@ do_resched:                       /* r10 contains MSR_KERNEL here */
1427         MTMSRD(r10)             /* disable interrupts */
1428         CURRENT_THREAD_INFO(r9, r1)
1429         lwz     r9,TI_FLAGS(r9)
1430 -       andi.   r0,r9,_TIF_NEED_RESCHED
1431 +       andi.   r0,r9,_TIF_NEED_RESCHED_MASK
1432         bne-    do_resched
1433         andi.   r0,r9,_TIF_USER_WORK_MASK
1434         beq     restore_user
1435 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
1436 index 6432d4bf08c8..5509a26f1070 100644
1437 --- a/arch/powerpc/kernel/entry_64.S
1438 +++ b/arch/powerpc/kernel/entry_64.S
1439 @@ -656,7 +656,7 @@ _GLOBAL(ret_from_except_lite)
1440         bl      restore_math
1441         b       restore
1442  #endif
1443 -1:     andi.   r0,r4,_TIF_NEED_RESCHED
1444 +1:     andi.   r0,r4,_TIF_NEED_RESCHED_MASK
1445         beq     2f
1446         bl      restore_interrupts
1447         SCHEDULE_USER
1448 @@ -718,10 +718,18 @@ _GLOBAL(ret_from_except_lite)
1449  
1450  #ifdef CONFIG_PREEMPT
1451         /* Check if we need to preempt */
1452 -       andi.   r0,r4,_TIF_NEED_RESCHED
1453 -       beq+    restore
1454 -       /* Check that preempt_count() == 0 and interrupts are enabled */
1455         lwz     r8,TI_PREEMPT(r9)
1456 +       cmpwi   0,r8,0          /* if non-zero, just restore regs and return */
1457 +       bne     restore
1458 +       andi.   r0,r4,_TIF_NEED_RESCHED
1459 +       bne+    check_count
1460 +
1461 +       andi.   r0,r4,_TIF_NEED_RESCHED_LAZY
1462 +       beq+    restore
1463 +       lwz     r8,TI_PREEMPT_LAZY(r9)
1464 +
1465 +       /* Check that preempt_count() == 0 and interrupts are enabled */
1466 +check_count:
1467         cmpwi   cr1,r8,0
1468         ld      r0,SOFTE(r1)
1469         cmpdi   r0,0
1470 @@ -738,7 +746,7 @@ _GLOBAL(ret_from_except_lite)
1471         /* Re-test flags and eventually loop */
1472         CURRENT_THREAD_INFO(r9, r1)
1473         ld      r4,TI_FLAGS(r9)
1474 -       andi.   r0,r4,_TIF_NEED_RESCHED
1475 +       andi.   r0,r4,_TIF_NEED_RESCHED_MASK
1476         bne     1b
1477  
1478         /*
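
The entry_32.S/entry_64.S changes above encode the lazy-preemption policy in assembly: preempt at interrupt return when TIF_NEED_RESCHED is set, and honour TIF_NEED_RESCHED_LAZY only if both preempt_count and preempt_lazy_count are zero. A rough C rendering of that control flow, offered as a sketch rather than the kernel's actual code (the struct and function names are invented; the interrupts-enabled MSR_EE/SOFTE test is left out):

    #define _TIF_NEED_RESCHED       (1UL << 2)
    #define _TIF_NEED_RESCHED_LAZY  (1UL << 3)

    struct ti_sketch {
            unsigned long flags;
            int preempt_count;
            int preempt_lazy_count;
    };

    static int should_preempt_irq(const struct ti_sketch *ti)
    {
            if (ti->preempt_count)
                    return 0;       /* preemption disabled */
            if (ti->flags & _TIF_NEED_RESCHED)
                    return 1;       /* immediate resched request */
            if (ti->preempt_lazy_count)
                    return 0;       /* lazy preemption disabled */
            return (ti->flags & _TIF_NEED_RESCHED_LAZY) != 0;
    }
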
1479 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
1480 index 3c05c311e35e..f83f6ac1274d 100644
1481 --- a/arch/powerpc/kernel/irq.c
1482 +++ b/arch/powerpc/kernel/irq.c
1483 @@ -638,6 +638,7 @@ void irq_ctx_init(void)
1484         }
1485  }
1486  
1487 +#ifndef CONFIG_PREEMPT_RT_FULL
1488  void do_softirq_own_stack(void)
1489  {
1490         struct thread_info *curtp, *irqtp;
1491 @@ -655,6 +656,7 @@ void do_softirq_own_stack(void)
1492         if (irqtp->flags)
1493                 set_bits(irqtp->flags, &curtp->flags);
1494  }
1495 +#endif
1496  
1497  irq_hw_number_t virq_to_hw(unsigned int virq)
1498  {
1499 diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
1500 index 030d72df5dd5..b471a709e100 100644
1501 --- a/arch/powerpc/kernel/misc_32.S
1502 +++ b/arch/powerpc/kernel/misc_32.S
1503 @@ -41,6 +41,7 @@
1504   * We store the saved ksp_limit in the unused part
1505   * of the STACK_FRAME_OVERHEAD
1506   */
1507 +#ifndef CONFIG_PREEMPT_RT_FULL
1508  _GLOBAL(call_do_softirq)
1509         mflr    r0
1510         stw     r0,4(r1)
1511 @@ -57,6 +58,7 @@ _GLOBAL(call_do_softirq)
1512         stw     r10,THREAD+KSP_LIMIT(r2)
1513         mtlr    r0
1514         blr
1515 +#endif
1516  
1517  /*
1518   * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
1519 diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
1520 index 4f178671f230..39e7d84a3492 100644
1521 --- a/arch/powerpc/kernel/misc_64.S
1522 +++ b/arch/powerpc/kernel/misc_64.S
1523 @@ -31,6 +31,7 @@
1524  
1525         .text
1526  
1527 +#ifndef CONFIG_PREEMPT_RT_FULL
1528  _GLOBAL(call_do_softirq)
1529         mflr    r0
1530         std     r0,16(r1)
1531 @@ -41,6 +42,7 @@ _GLOBAL(call_do_softirq)
1532         ld      r0,16(r1)
1533         mtlr    r0
1534         blr
1535 +#endif
1536  
1537  _GLOBAL(call_do_irq)
1538         mflr    r0
1539 diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
1540 index 029be26b5a17..9528089ea142 100644
1541 --- a/arch/powerpc/kvm/Kconfig
1542 +++ b/arch/powerpc/kvm/Kconfig
1543 @@ -175,6 +175,7 @@ config KVM_E500MC
1544  config KVM_MPIC
1545         bool "KVM in-kernel MPIC emulation"
1546         depends on KVM && E500
1547 +       depends on !PREEMPT_RT_FULL
1548         select HAVE_KVM_IRQCHIP
1549         select HAVE_KVM_IRQFD
1550         select HAVE_KVM_IRQ_ROUTING
1551 diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
1552 index e48462447ff0..2670cee66064 100644
1553 --- a/arch/powerpc/platforms/ps3/device-init.c
1554 +++ b/arch/powerpc/platforms/ps3/device-init.c
1555 @@ -752,7 +752,7 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev,
1556         }
1557         pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
1558  
1559 -       res = wait_event_interruptible(dev->done.wait,
1560 +       res = swait_event_interruptible(dev->done.wait,
1561                                        dev->done.done || kthread_should_stop());
1562         if (kthread_should_stop())
1563                 res = -EINTR;
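
dev->done is a completion, and the RT tree converts completions to simple waitqueues (swait), so the open-coded wait above has to use swait_event_interruptible() instead of wait_event_interruptible(). A minimal sketch of the swait API on its own (the demo_* names are invented; the swait calls match the 4.9-era interface):

    #include <linux/swait.h>

    static DECLARE_SWAIT_QUEUE_HEAD(demo_wait);
    static bool demo_done;  /* real code needs proper ordering/locking */

    /* waiter side: sleeps until demo_done is true or a signal arrives */
    static int demo_wait_for_event(void)
    {
            return swait_event_interruptible(demo_wait, demo_done);
    }

    /* waker side: swait wakeups are safe from IRQ context */
    static void demo_complete(void)
    {
            demo_done = true;
            swake_up(&demo_wait);
    }
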
1564 diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
1565 index 6c0378c0b8b5..abd58b4dff97 100644
1566 --- a/arch/sh/kernel/irq.c
1567 +++ b/arch/sh/kernel/irq.c
1568 @@ -147,6 +147,7 @@ void irq_ctx_exit(int cpu)
1569         hardirq_ctx[cpu] = NULL;
1570  }
1571  
1572 +#ifndef CONFIG_PREEMPT_RT_FULL
1573  void do_softirq_own_stack(void)
1574  {
1575         struct thread_info *curctx;
1576 @@ -174,6 +175,7 @@ void do_softirq_own_stack(void)
1577                   "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
1578         );
1579  }
1580 +#endif
1581  #else
1582  static inline void handle_one_irq(unsigned int irq)
1583  {
1584 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
1585 index 165ecdd24d22..b68a464a22be 100644
1586 --- a/arch/sparc/Kconfig
1587 +++ b/arch/sparc/Kconfig
1588 @@ -194,12 +194,10 @@ config NR_CPUS
1589  source kernel/Kconfig.hz
1590  
1591  config RWSEM_GENERIC_SPINLOCK
1592 -       bool
1593 -       default y if SPARC32
1594 +       def_bool PREEMPT_RT_FULL
1595  
1596  config RWSEM_XCHGADD_ALGORITHM
1597 -       bool
1598 -       default y if SPARC64
1599 +       def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
1600  
1601  config GENERIC_HWEIGHT
1602         bool
1603 diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
1604 index 34a7930b76ef..773740521008 100644
1605 --- a/arch/sparc/kernel/irq_64.c
1606 +++ b/arch/sparc/kernel/irq_64.c
1607 @@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
1608         set_irq_regs(old_regs);
1609  }
1610  
1611 +#ifndef CONFIG_PREEMPT_RT_FULL
1612  void do_softirq_own_stack(void)
1613  {
1614         void *orig_sp, *sp = softirq_stack[smp_processor_id()];
1615 @@ -868,6 +869,7 @@ void do_softirq_own_stack(void)
1616         __asm__ __volatile__("mov %0, %%sp"
1617                              : : "r" (orig_sp));
1618  }
1619 +#endif
1620  
1621  #ifdef CONFIG_HOTPLUG_CPU
1622  void fixup_irqs(void)
1623 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
1624 index bada636d1065..f8a995c90c01 100644
1625 --- a/arch/x86/Kconfig
1626 +++ b/arch/x86/Kconfig
1627 @@ -17,6 +17,7 @@ config X86_64
1628  ### Arch settings
1629  config X86
1630         def_bool y
1631 +       select HAVE_PREEMPT_LAZY
1632         select ACPI_LEGACY_TABLES_LOOKUP        if ACPI
1633         select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
1634         select ANON_INODES
1635 @@ -232,8 +233,11 @@ config ARCH_MAY_HAVE_PC_FDC
1636         def_bool y
1637         depends on ISA_DMA_API
1638  
1639 +config RWSEM_GENERIC_SPINLOCK
1640 +       def_bool PREEMPT_RT_FULL
1641 +
1642  config RWSEM_XCHGADD_ALGORITHM
1643 -       def_bool y
1644 +       def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
1645  
1646  config GENERIC_CALIBRATE_DELAY
1647         def_bool y
1648 @@ -897,7 +901,7 @@ config IOMMU_HELPER
1649  config MAXSMP
1650         bool "Enable Maximum number of SMP Processors and NUMA Nodes"
1651         depends on X86_64 && SMP && DEBUG_KERNEL
1652 -       select CPUMASK_OFFSTACK
1653 +       select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
1654         ---help---
1655           Enable maximum number of CPUS and NUMA Nodes for this architecture.
1656           If unsure, say N.
1657 diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
1658 index aa8b0672f87a..2429414bfc71 100644
1659 --- a/arch/x86/crypto/aesni-intel_glue.c
1660 +++ b/arch/x86/crypto/aesni-intel_glue.c
1661 @@ -372,14 +372,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
1662         err = blkcipher_walk_virt(desc, &walk);
1663         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1664  
1665 -       kernel_fpu_begin();
1666         while ((nbytes = walk.nbytes)) {
1667 +               kernel_fpu_begin();
1668                 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1669 -                             nbytes & AES_BLOCK_MASK);
1670 +                               nbytes & AES_BLOCK_MASK);
1671 +               kernel_fpu_end();
1672                 nbytes &= AES_BLOCK_SIZE - 1;
1673                 err = blkcipher_walk_done(desc, &walk, nbytes);
1674         }
1675 -       kernel_fpu_end();
1676  
1677         return err;
1678  }
1679 @@ -396,14 +396,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
1680         err = blkcipher_walk_virt(desc, &walk);
1681         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1682  
1683 -       kernel_fpu_begin();
1684         while ((nbytes = walk.nbytes)) {
1685 +               kernel_fpu_begin();
1686                 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1687                               nbytes & AES_BLOCK_MASK);
1688 +               kernel_fpu_end();
1689                 nbytes &= AES_BLOCK_SIZE - 1;
1690                 err = blkcipher_walk_done(desc, &walk, nbytes);
1691         }
1692 -       kernel_fpu_end();
1693  
1694         return err;
1695  }
1696 @@ -420,14 +420,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
1697         err = blkcipher_walk_virt(desc, &walk);
1698         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1699  
1700 -       kernel_fpu_begin();
1701         while ((nbytes = walk.nbytes)) {
1702 +               kernel_fpu_begin();
1703                 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1704                               nbytes & AES_BLOCK_MASK, walk.iv);
1705 +               kernel_fpu_end();
1706                 nbytes &= AES_BLOCK_SIZE - 1;
1707                 err = blkcipher_walk_done(desc, &walk, nbytes);
1708         }
1709 -       kernel_fpu_end();
1710  
1711         return err;
1712  }
1713 @@ -444,14 +444,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
1714         err = blkcipher_walk_virt(desc, &walk);
1715         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1716  
1717 -       kernel_fpu_begin();
1718         while ((nbytes = walk.nbytes)) {
1719 +               kernel_fpu_begin();
1720                 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1721                               nbytes & AES_BLOCK_MASK, walk.iv);
1722 +               kernel_fpu_end();
1723                 nbytes &= AES_BLOCK_SIZE - 1;
1724                 err = blkcipher_walk_done(desc, &walk, nbytes);
1725         }
1726 -       kernel_fpu_end();
1727  
1728         return err;
1729  }
1730 @@ -503,18 +503,20 @@ static int ctr_crypt(struct blkcipher_desc *desc,
1731         err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
1732         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1733  
1734 -       kernel_fpu_begin();
1735         while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
1736 +               kernel_fpu_begin();
1737                 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1738                                       nbytes & AES_BLOCK_MASK, walk.iv);
1739 +               kernel_fpu_end();
1740                 nbytes &= AES_BLOCK_SIZE - 1;
1741                 err = blkcipher_walk_done(desc, &walk, nbytes);
1742         }
1743         if (walk.nbytes) {
1744 +               kernel_fpu_begin();
1745                 ctr_crypt_final(ctx, &walk);
1746 +               kernel_fpu_end();
1747                 err = blkcipher_walk_done(desc, &walk, 0);
1748         }
1749 -       kernel_fpu_end();
1750  
1751         return err;
1752  }
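
The aesni hunks above shrink each kernel_fpu_begin()/kernel_fpu_end() section to a single walk step instead of spanning the whole loop, which bounds the time preemption stays disabled by one chunk of work. The same scoping idea as a plain user-space sketch (enter_atomic()/leave_atomic() are stand-ins, not real kernel APIs):

    #include <stdio.h>

    /* stand-ins for kernel_fpu_begin()/kernel_fpu_end(); illustrative only */
    static void enter_atomic(void) { /* kernel: disables preemption */ }
    static void leave_atomic(void) { /* kernel: re-enables preemption */ }

    static void process_chunk(int i) { printf("chunk %d\n", i); }

    int main(void)
    {
            /*
             * Before: one long non-preemptible section around the whole loop.
             * After (as in the patch): a short section per iteration, so the
             * worst-case preempt-off time is bounded by one chunk.
             */
            for (int i = 0; i < 8; i++) {
                    enter_atomic();
                    process_chunk(i);
                    leave_atomic();
            }
            return 0;
    }
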
1753 diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
1754 index 8648158f3916..d7699130ee36 100644
1755 --- a/arch/x86/crypto/cast5_avx_glue.c
1756 +++ b/arch/x86/crypto/cast5_avx_glue.c
1757 @@ -59,7 +59,7 @@ static inline void cast5_fpu_end(bool fpu_enabled)
1758  static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
1759                      bool enc)
1760  {
1761 -       bool fpu_enabled = false;
1762 +       bool fpu_enabled;
1763         struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
1764         const unsigned int bsize = CAST5_BLOCK_SIZE;
1765         unsigned int nbytes;
1766 @@ -75,7 +75,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
1767                 u8 *wsrc = walk->src.virt.addr;
1768                 u8 *wdst = walk->dst.virt.addr;
1769  
1770 -               fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
1771 +               fpu_enabled = cast5_fpu_begin(false, nbytes);
1772  
1773                 /* Process multi-block batch */
1774                 if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
1775 @@ -103,10 +103,9 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
1776                 } while (nbytes >= bsize);
1777  
1778  done:
1779 +               cast5_fpu_end(fpu_enabled);
1780                 err = blkcipher_walk_done(desc, walk, nbytes);
1781         }
1782 -
1783 -       cast5_fpu_end(fpu_enabled);
1784         return err;
1785  }
1786  
1787 @@ -227,7 +226,7 @@ static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
1788  static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1789                        struct scatterlist *src, unsigned int nbytes)
1790  {
1791 -       bool fpu_enabled = false;
1792 +       bool fpu_enabled;
1793         struct blkcipher_walk walk;
1794         int err;
1795  
1796 @@ -236,12 +235,11 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1797         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1798  
1799         while ((nbytes = walk.nbytes)) {
1800 -               fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
1801 +               fpu_enabled = cast5_fpu_begin(false, nbytes);
1802                 nbytes = __cbc_decrypt(desc, &walk);
1803 +               cast5_fpu_end(fpu_enabled);
1804                 err = blkcipher_walk_done(desc, &walk, nbytes);
1805         }
1806 -
1807 -       cast5_fpu_end(fpu_enabled);
1808         return err;
1809  }
1810  
1811 @@ -311,7 +309,7 @@ static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
1812  static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1813                      struct scatterlist *src, unsigned int nbytes)
1814  {
1815 -       bool fpu_enabled = false;
1816 +       bool fpu_enabled;
1817         struct blkcipher_walk walk;
1818         int err;
1819  
1820 @@ -320,13 +318,12 @@ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1821         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1822  
1823         while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
1824 -               fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
1825 +               fpu_enabled = cast5_fpu_begin(false, nbytes);
1826                 nbytes = __ctr_crypt(desc, &walk);
1827 +               cast5_fpu_end(fpu_enabled);
1828                 err = blkcipher_walk_done(desc, &walk, nbytes);
1829         }
1830  
1831 -       cast5_fpu_end(fpu_enabled);
1832 -
1833         if (walk.nbytes) {
1834                 ctr_crypt_final(desc, &walk);
1835                 err = blkcipher_walk_done(desc, &walk, 0);
1836 diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
1837 index 6a85598931b5..3a506ce7ed93 100644
1838 --- a/arch/x86/crypto/glue_helper.c
1839 +++ b/arch/x86/crypto/glue_helper.c
1840 @@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
1841         void *ctx = crypto_blkcipher_ctx(desc->tfm);
1842         const unsigned int bsize = 128 / 8;
1843         unsigned int nbytes, i, func_bytes;
1844 -       bool fpu_enabled = false;
1845 +       bool fpu_enabled;
1846         int err;
1847  
1848         err = blkcipher_walk_virt(desc, walk);
1849 @@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
1850                 u8 *wdst = walk->dst.virt.addr;
1851  
1852                 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
1853 -                                            desc, fpu_enabled, nbytes);
1854 +                                            desc, false, nbytes);
1855  
1856                 for (i = 0; i < gctx->num_funcs; i++) {
1857                         func_bytes = bsize * gctx->funcs[i].num_blocks;
1858 @@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
1859                 }
1860  
1861  done:
1862 +               glue_fpu_end(fpu_enabled);
1863                 err = blkcipher_walk_done(desc, walk, nbytes);
1864         }
1865  
1866 -       glue_fpu_end(fpu_enabled);
1867         return err;
1868  }
1869  
1870 @@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
1871                             struct scatterlist *src, unsigned int nbytes)
1872  {
1873         const unsigned int bsize = 128 / 8;
1874 -       bool fpu_enabled = false;
1875 +       bool fpu_enabled;
1876         struct blkcipher_walk walk;
1877         int err;
1878  
1879 @@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
1880  
1881         while ((nbytes = walk.nbytes)) {
1882                 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
1883 -                                            desc, fpu_enabled, nbytes);
1884 +                                            desc, false, nbytes);
1885                 nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
1886 +               glue_fpu_end(fpu_enabled);
1887                 err = blkcipher_walk_done(desc, &walk, nbytes);
1888         }
1889  
1890 -       glue_fpu_end(fpu_enabled);
1891         return err;
1892  }
1893  EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
1894 @@ -277,7 +277,7 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
1895                           struct scatterlist *src, unsigned int nbytes)
1896  {
1897         const unsigned int bsize = 128 / 8;
1898 -       bool fpu_enabled = false;
1899 +       bool fpu_enabled;
1900         struct blkcipher_walk walk;
1901         int err;
1902  
1903 @@ -286,13 +286,12 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
1904  
1905         while ((nbytes = walk.nbytes) >= bsize) {
1906                 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
1907 -                                            desc, fpu_enabled, nbytes);
1908 +                                            desc, false, nbytes);
1909                 nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
1910 +               glue_fpu_end(fpu_enabled);
1911                 err = blkcipher_walk_done(desc, &walk, nbytes);
1912         }
1913  
1914 -       glue_fpu_end(fpu_enabled);
1915 -
1916         if (walk.nbytes) {
1917                 glue_ctr_crypt_final_128bit(
1918                         gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
1919 @@ -347,7 +346,7 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
1920                           void *tweak_ctx, void *crypt_ctx)
1921  {
1922         const unsigned int bsize = 128 / 8;
1923 -       bool fpu_enabled = false;
1924 +       bool fpu_enabled;
1925         struct blkcipher_walk walk;
1926         int err;
1927  
1928 @@ -360,21 +359,21 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
1929  
1930         /* set minimum length to bsize, for tweak_fn */
1931         fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
1932 -                                    desc, fpu_enabled,
1933 +                                    desc, false,
1934                                      nbytes < bsize ? bsize : nbytes);
1935 -
1936         /* calculate first value of T */
1937         tweak_fn(tweak_ctx, walk.iv, walk.iv);
1938 +       glue_fpu_end(fpu_enabled);
1939  
1940         while (nbytes) {
1941 +               fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
1942 +                               desc, false, nbytes);
1943                 nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
1944  
1945 +               glue_fpu_end(fpu_enabled);
1946                 err = blkcipher_walk_done(desc, &walk, nbytes);
1947                 nbytes = walk.nbytes;
1948         }
1949 -
1950 -       glue_fpu_end(fpu_enabled);
1951 -
1952         return err;
1953  }
1954  EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
1955 diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
1956 index bdd9cc59d20f..56d01a339ba4 100644
1957 --- a/arch/x86/entry/common.c
1958 +++ b/arch/x86/entry/common.c
1959 @@ -129,7 +129,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
1960  
1961  #define EXIT_TO_USERMODE_LOOP_FLAGS                            \
1962         (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |   \
1963 -        _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
1964 +        _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY)
1965  
1966  static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
1967  {
1968 @@ -145,9 +145,16 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
1969                 /* We have work to do. */
1970                 local_irq_enable();
1971  
1972 -               if (cached_flags & _TIF_NEED_RESCHED)
1973 +               if (cached_flags & _TIF_NEED_RESCHED_MASK)
1974                         schedule();
1975  
1976 +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
1977 +               if (unlikely(current->forced_info.si_signo)) {
1978 +                       struct task_struct *t = current;
1979 +                       force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
1980 +                       t->forced_info.si_signo = 0;
1981 +               }
1982 +#endif
1983                 if (cached_flags & _TIF_UPROBE)
1984                         uprobe_notify_resume(regs);
1985  
1986 diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
1987 index edba8606b99a..4a3389535fc6 100644
1988 --- a/arch/x86/entry/entry_32.S
1989 +++ b/arch/x86/entry/entry_32.S
1990 @@ -308,8 +308,25 @@ END(ret_from_exception)
1991  ENTRY(resume_kernel)
1992         DISABLE_INTERRUPTS(CLBR_ANY)
1993  need_resched:
1994 +       # preempt count == 0 + NEED_RS set?
1995         cmpl    $0, PER_CPU_VAR(__preempt_count)
1996 +#ifndef CONFIG_PREEMPT_LAZY
1997         jnz     restore_all
1998 +#else
1999 +       jz test_int_off
2000 +
2001 +       # at least preempt count == 0 ?
2002 +       cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
2003 +       jne restore_all
2004 +
2005 +       movl    PER_CPU_VAR(current_task), %ebp
2006 +       cmpl $0,TASK_TI_preempt_lazy_count(%ebp)        # non-zero preempt_lazy_count ?
2007 +       jnz restore_all
2008 +
2009 +       testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
2010 +       jz restore_all
2011 +test_int_off:
2012 +#endif
2013         testl   $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
2014         jz      restore_all
2015         call    preempt_schedule_irq
2016 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
2017 index ef766a358b37..28401f826ab1 100644
2018 --- a/arch/x86/entry/entry_64.S
2019 +++ b/arch/x86/entry/entry_64.S
2020 @@ -546,7 +546,23 @@ GLOBAL(retint_user)
2021         bt      $9, EFLAGS(%rsp)                /* were interrupts off? */
2022         jnc     1f
2023  0:     cmpl    $0, PER_CPU_VAR(__preempt_count)
2024 +#ifndef CONFIG_PREEMPT_LAZY
2025         jnz     1f
2026 +#else
2027 +       jz      do_preempt_schedule_irq
2028 +
2029 +       # at least preempt count == 0 ?
2030 +       cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
2031 +       jnz     1f
2032 +
2033 +       movq    PER_CPU_VAR(current_task), %rcx
2034 +       cmpl    $0, TASK_TI_preempt_lazy_count(%rcx)
2035 +       jnz     1f
2036 +
2037 +       bt      $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
2038 +       jnc     1f
2039 +do_preempt_schedule_irq:
2040 +#endif
2041         call    preempt_schedule_irq
2042         jmp     0b
2043  1:
2044 @@ -894,6 +910,7 @@ EXPORT_SYMBOL(native_load_gs_index)
2045         jmp     2b
2046         .previous
2047  
2048 +#ifndef CONFIG_PREEMPT_RT_FULL
2049  /* Call softirq on interrupt stack. Interrupts are off. */
2050  ENTRY(do_softirq_own_stack)
2051         pushq   %rbp
2052 @@ -906,6 +923,7 @@ ENTRY(do_softirq_own_stack)
2053         decl    PER_CPU_VAR(irq_count)
2054         ret
2055  END(do_softirq_own_stack)
2056 +#endif
2057  
2058  #ifdef CONFIG_XEN
2059  idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
2060 diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
2061 index 17f218645701..11bd1b7ee6eb 100644
2062 --- a/arch/x86/include/asm/preempt.h
2063 +++ b/arch/x86/include/asm/preempt.h
2064 @@ -79,17 +79,46 @@ static __always_inline void __preempt_count_sub(int val)
2065   * a decrement which hits zero means we have no preempt_count and should
2066   * reschedule.
2067   */
2068 -static __always_inline bool __preempt_count_dec_and_test(void)
2069 +static __always_inline bool ____preempt_count_dec_and_test(void)
2070  {
2071         GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
2072  }
2073  
2074 +static __always_inline bool __preempt_count_dec_and_test(void)
2075 +{
2076 +       if (____preempt_count_dec_and_test())
2077 +               return true;
2078 +#ifdef CONFIG_PREEMPT_LAZY
2079 +       if (current_thread_info()->preempt_lazy_count)
2080 +               return false;
2081 +       return test_thread_flag(TIF_NEED_RESCHED_LAZY);
2082 +#else
2083 +       return false;
2084 +#endif
2085 +}
2086 +
2087  /*
2088   * Returns true when we need to resched and can (barring IRQ state).
2089   */
2090  static __always_inline bool should_resched(int preempt_offset)
2091  {
2092 +#ifdef CONFIG_PREEMPT_LAZY
2093 +       u32 tmp;
2094 +
2095 +       tmp = raw_cpu_read_4(__preempt_count);
2096 +       if (tmp == preempt_offset)
2097 +               return true;
2098 +
2099 +       /* preempt count == 0 ? */
2100 +       tmp &= ~PREEMPT_NEED_RESCHED;
2101 +       if (tmp)
2102 +               return false;
2103 +       if (current_thread_info()->preempt_lazy_count)
2104 +               return false;
2105 +       return test_thread_flag(TIF_NEED_RESCHED_LAZY);
2106 +#else
2107         return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
2108 +#endif
2109  }
2110  
2111  #ifdef CONFIG_PREEMPT
2112 diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
2113 index 8af22be0fe61..d1328789b759 100644
2114 --- a/arch/x86/include/asm/signal.h
2115 +++ b/arch/x86/include/asm/signal.h
2116 @@ -27,6 +27,19 @@ typedef struct {
2117  #define SA_IA32_ABI    0x02000000u
2118  #define SA_X32_ABI     0x01000000u
2119  
2120 +/*
2121 + * Because some traps use the IST stack, we must keep preemption
2122 + * disabled while calling do_trap(), but do_trap() may call
2123 + * force_sig_info() which will grab the signal spin_locks for the
2124 + * task, which in PREEMPT_RT_FULL are mutexes.  By defining
2125 + * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
2126 + * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
2127 + * trap.
2128 + */
2129 +#if defined(CONFIG_PREEMPT_RT_FULL)
2130 +#define ARCH_RT_DELAYS_SIGNAL_SEND
2131 +#endif
2132 +
2133  #ifndef CONFIG_COMPAT
2134  typedef sigset_t compat_sigset_t;
2135  #endif
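
The comment above describes the deferral mechanism: with ARCH_RT_DELAYS_SIGNAL_SEND defined, force_sig_info() called from a trap records the siginfo in task->forced_info rather than taking signal locks that sleep on RT, and the arch/x86/entry/common.c hunk earlier replays it on the way back to user space. A condensed sketch of that record-now/deliver-later shape (helper names are invented; the fields and calls follow the patch, not the full implementation):

    #include <linux/sched.h>
    #include <linux/signal.h>

    /* trap context (preemption disabled): just record the signal */
    static void rt_defer_signal(struct task_struct *t, int signo,
                                struct siginfo *info)
    {
            t->forced_info = *info;                 /* field added by the RT patch */
            t->forced_info.si_signo = signo;
            set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);  /* replay on exit */
    }

    /* return-to-user path (may sleep): deliver what was recorded */
    static void rt_replay_signal(struct task_struct *t)
    {
            if (t->forced_info.si_signo) {
                    force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
                    t->forced_info.si_signo = 0;
            }
    }
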
2136 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
2137 index 58505f01962f..02fa39652cd6 100644
2138 --- a/arch/x86/include/asm/stackprotector.h
2139 +++ b/arch/x86/include/asm/stackprotector.h
2140 @@ -59,7 +59,7 @@
2141   */
2142  static __always_inline void boot_init_stack_canary(void)
2143  {
2144 -       u64 canary;
2145 +       u64 uninitialized_var(canary);
2146         u64 tsc;
2147  
2148  #ifdef CONFIG_X86_64
2149 @@ -70,8 +70,15 @@ static __always_inline void boot_init_stack_canary(void)
2150          * of randomness. The TSC only matters for very early init,
2151          * there it already has some randomness on most systems. Later
2152          * on during the bootup the random pool has true entropy too.
2153 +        *
2154 +        * For preempt-rt we need to weaken the randomness a bit, as
2155 +        * we can't call into the random generator from atomic context
2156 +        * due to locking constraints. We just leave canary
2157 +        * uninitialized and use the TSC based randomness on top of it.
2158          */
2159 +#ifndef CONFIG_PREEMPT_RT_FULL
2160         get_random_bytes(&canary, sizeof(canary));
2161 +#endif
2162         tsc = rdtsc();
2163         canary += tsc + (tsc << 32UL);
2164  
2165 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
2166 index ad6f5eb07a95..5ceb3a1c2b1a 100644
2167 --- a/arch/x86/include/asm/thread_info.h
2168 +++ b/arch/x86/include/asm/thread_info.h
2169 @@ -54,11 +54,14 @@ struct task_struct;
2170  
2171  struct thread_info {
2172         unsigned long           flags;          /* low level flags */
2173 +       int                     preempt_lazy_count;     /* 0 => lazy preemptable
2174 +                                                          <0 => BUG */
2175  };
2176  
2177  #define INIT_THREAD_INFO(tsk)                  \
2178  {                                              \
2179         .flags          = 0,                    \
2180 +       .preempt_lazy_count = 0,                \
2181  }
2182  
2183  #define init_stack             (init_thread_union.stack)
2184 @@ -67,6 +70,10 @@ struct thread_info {
2185  
2186  #include <asm/asm-offsets.h>
2187  
2188 +#define GET_THREAD_INFO(reg) \
2189 +       _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
2190 +       _ASM_SUB $(THREAD_SIZE),reg ;
2191 +
2192  #endif
2193  
2194  /*
2195 @@ -85,6 +92,7 @@ struct thread_info {
2196  #define TIF_SYSCALL_EMU                6       /* syscall emulation active */
2197  #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
2198  #define TIF_SECCOMP            8       /* secure computing */
2199 +#define TIF_NEED_RESCHED_LAZY  9       /* lazy rescheduling necessary */
2200  #define TIF_USER_RETURN_NOTIFY 11      /* notify kernel of userspace return */
2201  #define TIF_UPROBE             12      /* breakpointed or singlestepping */
2202  #define TIF_NOTSC              16      /* TSC is not accessible in userland */
2203 @@ -108,6 +116,7 @@ struct thread_info {
2204  #define _TIF_SYSCALL_EMU       (1 << TIF_SYSCALL_EMU)
2205  #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
2206  #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
2207 +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
2208  #define _TIF_USER_RETURN_NOTIFY        (1 << TIF_USER_RETURN_NOTIFY)
2209  #define _TIF_UPROBE            (1 << TIF_UPROBE)
2210  #define _TIF_NOTSC             (1 << TIF_NOTSC)
2211 @@ -143,6 +152,8 @@ struct thread_info {
2212  #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
2213  #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
2214  
2215 +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
2216 +
2217  #define STACK_WARN             (THREAD_SIZE/8)
2218  
2219  /*
2220 diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
2221 index 57ab86d94d64..35d25e27180f 100644
2222 --- a/arch/x86/include/asm/uv/uv_bau.h
2223 +++ b/arch/x86/include/asm/uv/uv_bau.h
2224 @@ -624,9 +624,9 @@ struct bau_control {
2225         cycles_t                send_message;
2226         cycles_t                period_end;
2227         cycles_t                period_time;
2228 -       spinlock_t              uvhub_lock;
2229 -       spinlock_t              queue_lock;
2230 -       spinlock_t              disable_lock;
2231 +       raw_spinlock_t          uvhub_lock;
2232 +       raw_spinlock_t          queue_lock;
2233 +       raw_spinlock_t          disable_lock;
2234         /* tunables */
2235         int                     max_concurr;
2236         int                     max_concurr_const;
2237 @@ -815,15 +815,15 @@ static inline int atom_asr(short i, struct atomic_short *v)
2238   * to be lowered below the current 'v'.  atomic_add_unless can only stop
2239   * on equal.
2240   */
2241 -static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
2242 +static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
2243  {
2244 -       spin_lock(lock);
2245 +       raw_spin_lock(lock);
2246         if (atomic_read(v) >= u) {
2247 -               spin_unlock(lock);
2248 +               raw_spin_unlock(lock);
2249                 return 0;
2250         }
2251         atomic_inc(v);
2252 -       spin_unlock(lock);
2253 +       raw_spin_unlock(lock);
2254         return 1;
2255  }
2256  
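
The BAU locks above are taken from contexts that must not sleep, and with PREEMPT_RT_FULL an ordinary spinlock_t becomes a sleeping lock, hence the conversion to raw_spinlock_t. A minimal sketch of declaring and using a raw spinlock (the demo names are illustrative; the raw_spin_* calls are the real API):

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(demo_lock);
    static int demo_counter;

    /* raw_spin_lock_irqsave() never sleeps, even with PREEMPT_RT_FULL */
    static void demo_inc(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&demo_lock, flags);
            demo_counter++;
            raw_spin_unlock_irqrestore(&demo_lock, flags);
    }
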
2257 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
2258 index 931ced8ca345..167975ac8af7 100644
2259 --- a/arch/x86/kernel/acpi/boot.c
2260 +++ b/arch/x86/kernel/acpi/boot.c
2261 @@ -87,7 +87,9 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
2262   *             ->ioapic_mutex
2263   *                     ->ioapic_lock
2264   */
2265 +#ifdef CONFIG_X86_IO_APIC
2266  static DEFINE_MUTEX(acpi_ioapic_lock);
2267 +#endif
2268  
2269  /* --------------------------------------------------------------------------
2270                                Boot-time Configuration
2271 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
2272 index d1e25564b3c1..67e585fa801f 100644
2273 --- a/arch/x86/kernel/apic/io_apic.c
2274 +++ b/arch/x86/kernel/apic/io_apic.c
2275 @@ -1712,7 +1712,8 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
2276  static inline bool ioapic_irqd_mask(struct irq_data *data)
2277  {
2278         /* If we are moving the irq we need to mask it */
2279 -       if (unlikely(irqd_is_setaffinity_pending(data))) {
2280 +       if (unlikely(irqd_is_setaffinity_pending(data) &&
2281 +                    !irqd_irq_inprogress(data))) {
2282                 mask_ioapic_irq(data);
2283                 return true;
2284         }
2285 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
2286 index c62e015b126c..0cc71257fca6 100644
2287 --- a/arch/x86/kernel/asm-offsets.c
2288 +++ b/arch/x86/kernel/asm-offsets.c
2289 @@ -36,6 +36,7 @@ void common(void) {
2290  
2291         BLANK();
2292         OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
2293 +       OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count);
2294         OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
2295  
2296         BLANK();
2297 @@ -91,4 +92,5 @@ void common(void) {
2298  
2299         BLANK();
2300         DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
2301 +       DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
2302  }
2303 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
2304 index a7fdf453d895..e3a0e969a66e 100644
2305 --- a/arch/x86/kernel/cpu/mcheck/mce.c
2306 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
2307 @@ -41,6 +41,8 @@
2308  #include <linux/debugfs.h>
2309  #include <linux/irq_work.h>
2310  #include <linux/export.h>
2311 +#include <linux/jiffies.h>
2312 +#include <linux/swork.h>
2313  #include <linux/jump_label.h>
2314  
2315  #include <asm/processor.h>
2316 @@ -1317,7 +1319,7 @@ void mce_log_therm_throt_event(__u64 status)
2317  static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
2318  
2319  static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
2320 -static DEFINE_PER_CPU(struct timer_list, mce_timer);
2321 +static DEFINE_PER_CPU(struct hrtimer, mce_timer);
2322  
2323  static unsigned long mce_adjust_timer_default(unsigned long interval)
2324  {
2325 @@ -1326,32 +1328,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
2326  
2327  static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
2328  
2329 -static void __restart_timer(struct timer_list *t, unsigned long interval)
2330 +static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval)
2331  {
2332 -       unsigned long when = jiffies + interval;
2333 -       unsigned long flags;
2334 -
2335 -       local_irq_save(flags);
2336 -
2337 -       if (timer_pending(t)) {
2338 -               if (time_before(when, t->expires))
2339 -                       mod_timer(t, when);
2340 -       } else {
2341 -               t->expires = round_jiffies(when);
2342 -               add_timer_on(t, smp_processor_id());
2343 -       }
2344 -
2345 -       local_irq_restore(flags);
2346 +       if (!interval)
2347 +               return HRTIMER_NORESTART;
2348 +       hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval)));
2349 +       return HRTIMER_RESTART;
2350  }
2351  
2352 -static void mce_timer_fn(unsigned long data)
2353 +static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
2354  {
2355 -       struct timer_list *t = this_cpu_ptr(&mce_timer);
2356 -       int cpu = smp_processor_id();
2357         unsigned long iv;
2358  
2359 -       WARN_ON(cpu != data);
2360 -
2361         iv = __this_cpu_read(mce_next_interval);
2362  
2363         if (mce_available(this_cpu_ptr(&cpu_info))) {
2364 @@ -1374,7 +1362,7 @@ static void mce_timer_fn(unsigned long data)
2365  
2366  done:
2367         __this_cpu_write(mce_next_interval, iv);
2368 -       __restart_timer(t, iv);
2369 +       return __restart_timer(timer, iv);
2370  }
2371  
2372  /*
2373 @@ -1382,7 +1370,7 @@ static void mce_timer_fn(unsigned long data)
2374   */
2375  void mce_timer_kick(unsigned long interval)
2376  {
2377 -       struct timer_list *t = this_cpu_ptr(&mce_timer);
2378 +       struct hrtimer *t = this_cpu_ptr(&mce_timer);
2379         unsigned long iv = __this_cpu_read(mce_next_interval);
2380  
2381         __restart_timer(t, interval);
2382 @@ -1397,7 +1385,7 @@ static void mce_timer_delete_all(void)
2383         int cpu;
2384  
2385         for_each_online_cpu(cpu)
2386 -               del_timer_sync(&per_cpu(mce_timer, cpu));
2387 +               hrtimer_cancel(&per_cpu(mce_timer, cpu));
2388  }
2389  
2390  static void mce_do_trigger(struct work_struct *work)
2391 @@ -1407,6 +1395,56 @@ static void mce_do_trigger(struct work_struct *work)
2392  
2393  static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
2394  
2395 +static void __mce_notify_work(struct swork_event *event)
2396 +{
2397 +       /* Not more than two messages every minute */
2398 +       static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
2399 +
2400 +       /* wake processes polling /dev/mcelog */
2401 +       wake_up_interruptible(&mce_chrdev_wait);
2402 +
2403 +       /*
2404 +        * There is no risk of missing notifications because
2405 +        * work_pending is always cleared before the function is
2406 +        * executed.
2407 +        */
2408 +       if (mce_helper[0] && !work_pending(&mce_trigger_work))
2409 +               schedule_work(&mce_trigger_work);
2410 +
2411 +       if (__ratelimit(&ratelimit))
2412 +               pr_info(HW_ERR "Machine check events logged\n");
2413 +}
2414 +
2415 +#ifdef CONFIG_PREEMPT_RT_FULL
2416 +static bool notify_work_ready __read_mostly;
2417 +static struct swork_event notify_work;
2418 +
2419 +static int mce_notify_work_init(void)
2420 +{
2421 +       int err;
2422 +
2423 +       err = swork_get();
2424 +       if (err)
2425 +               return err;
2426 +
2427 +       INIT_SWORK(&notify_work, __mce_notify_work);
2428 +       notify_work_ready = true;
2429 +       return 0;
2430 +}
2431 +
2432 +static void mce_notify_work(void)
2433 +{
2434 +       if (notify_work_ready)
2435 +               swork_queue(&notify_work);
2436 +}
2437 +#else
2438 +static void mce_notify_work(void)
2439 +{
2440 +       __mce_notify_work(NULL);
2441 +}
2442 +static inline int mce_notify_work_init(void) { return 0; }
2443 +#endif
2444 +
2445  /*
2446   * Notify the user(s) about new machine check events.
2447   * Can be called from interrupt context, but not from machine check/NMI
2448 @@ -1414,19 +1452,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
2449   */
2450  int mce_notify_irq(void)
2451  {
2452 -       /* Not more than two messages every minute */
2453 -       static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
2454 -
2455         if (test_and_clear_bit(0, &mce_need_notify)) {
2456 -               /* wake processes polling /dev/mcelog */
2457 -               wake_up_interruptible(&mce_chrdev_wait);
2458 -
2459 -               if (mce_helper[0])
2460 -                       schedule_work(&mce_trigger_work);
2461 -
2462 -               if (__ratelimit(&ratelimit))
2463 -                       pr_info(HW_ERR "Machine check events logged\n");
2464 -
2465 +               mce_notify_work();
2466                 return 1;
2467         }
2468         return 0;
2469 @@ -1732,7 +1759,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
2470         }
2471  }
2472  
2473 -static void mce_start_timer(unsigned int cpu, struct timer_list *t)
2474 +static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
2475  {
2476         unsigned long iv = check_interval * HZ;
2477  
2478 @@ -1741,16 +1768,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
2479  
2480         per_cpu(mce_next_interval, cpu) = iv;
2481  
2482 -       t->expires = round_jiffies(jiffies + iv);
2483 -       add_timer_on(t, cpu);
2484 +       hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
2485 +                       0, HRTIMER_MODE_REL_PINNED);
2486  }
2487  
2488  static void __mcheck_cpu_init_timer(void)
2489  {
2490 -       struct timer_list *t = this_cpu_ptr(&mce_timer);
2491 +       struct hrtimer *t = this_cpu_ptr(&mce_timer);
2492         unsigned int cpu = smp_processor_id();
2493  
2494 -       setup_pinned_timer(t, mce_timer_fn, cpu);
2495 +       hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2496 +       t->function = mce_timer_fn;
2497         mce_start_timer(cpu, t);
2498  }
2499  
2500 @@ -2475,6 +2503,8 @@ static void mce_disable_cpu(void *h)
2501         if (!mce_available(raw_cpu_ptr(&cpu_info)))
2502                 return;
2503  
2504 +       hrtimer_cancel(this_cpu_ptr(&mce_timer));
2505 +
2506         if (!(action & CPU_TASKS_FROZEN))
2507                 cmci_clear();
2508  
2509 @@ -2497,6 +2527,7 @@ static void mce_reenable_cpu(void *h)
2510                 if (b->init)
2511                         wrmsrl(msr_ops.ctl(i), b->ctl);
2512         }
2513 +       __mcheck_cpu_init_timer();
2514  }
2515  
2516  /* Get notified when a cpu comes on/off. Be hotplug friendly. */
2517 @@ -2504,7 +2535,6 @@ static int
2518  mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2519  {
2520         unsigned int cpu = (unsigned long)hcpu;
2521 -       struct timer_list *t = &per_cpu(mce_timer, cpu);
2522  
2523         switch (action & ~CPU_TASKS_FROZEN) {
2524         case CPU_ONLINE:
2525 @@ -2524,11 +2554,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2526                 break;
2527         case CPU_DOWN_PREPARE:
2528                 smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
2529 -               del_timer_sync(t);
2530                 break;
2531         case CPU_DOWN_FAILED:
2532                 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
2533 -               mce_start_timer(cpu, t);
2534                 break;
2535         }
2536  
2537 @@ -2567,6 +2595,10 @@ static __init int mcheck_init_device(void)
2538                 goto err_out;
2539         }
2540  
2541 +       err = mce_notify_work_init();
2542 +       if (err)
2543 +               goto err_out;
2544 +
2545         if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2546                 err = -ENOMEM;
2547                 goto err_out;
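
The MCE hunks above replace the per-CPU timer_list with an hrtimer whose callback re-arms itself: instead of mod_timer()/add_timer_on(), the handler calls hrtimer_forward_now() and returns HRTIMER_RESTART. A minimal kernel-style sketch of that self-rearming pattern, detached from the MCE specifics (the demo names are invented; the hrtimer calls match the 4.9 API):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer demo_timer;

    static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
    {
            /* ... do the periodic work here ... */

            /* push the expiry forward and keep the timer running */
            hrtimer_forward_now(timer, ms_to_ktime(1000));
            return HRTIMER_RESTART;
    }

    static void demo_timer_start(void)
    {
            hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            demo_timer.function = demo_timer_fn;
            hrtimer_start(&demo_timer, ms_to_ktime(1000), HRTIMER_MODE_REL);
    }
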
2548 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
2549 index 1f38d9a4d9de..053bf3b2ef39 100644
2550 --- a/arch/x86/kernel/irq_32.c
2551 +++ b/arch/x86/kernel/irq_32.c
2552 @@ -127,6 +127,7 @@ void irq_ctx_init(int cpu)
2553                cpu, per_cpu(hardirq_stack, cpu),  per_cpu(softirq_stack, cpu));
2554  }
2555  
2556 +#ifndef CONFIG_PREEMPT_RT_FULL
2557  void do_softirq_own_stack(void)
2558  {
2559         struct irq_stack *irqstk;
2560 @@ -143,6 +144,7 @@ void do_softirq_own_stack(void)
2561  
2562         call_on_stack(__do_softirq, isp);
2563  }
2564 +#endif
2565  
2566  bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
2567  {
2568 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
2569 index bd7be8efdc4c..b3b0a7f7b1ca 100644
2570 --- a/arch/x86/kernel/process_32.c
2571 +++ b/arch/x86/kernel/process_32.c
2572 @@ -35,6 +35,7 @@
2573  #include <linux/uaccess.h>
2574  #include <linux/io.h>
2575  #include <linux/kdebug.h>
2576 +#include <linux/highmem.h>
2577  
2578  #include <asm/pgtable.h>
2579  #include <asm/ldt.h>
2580 @@ -195,6 +196,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
2581  }
2582  EXPORT_SYMBOL_GPL(start_thread);
2583  
2584 +#ifdef CONFIG_PREEMPT_RT_FULL
2585 +static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
2586 +{
2587 +       int i;
2588 +
2589 +       /*
2590 +        * Clear @prev's kmap_atomic mappings
2591 +        */
2592 +       for (i = 0; i < prev_p->kmap_idx; i++) {
2593 +               int idx = i + KM_TYPE_NR * smp_processor_id();
2594 +               pte_t *ptep = kmap_pte - idx;
2595 +
2596 +               kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
2597 +       }
2598 +       /*
2599 +        * Restore @next_p's kmap_atomic mappings
2600 +        */
2601 +       for (i = 0; i < next_p->kmap_idx; i++) {
2602 +               int idx = i + KM_TYPE_NR * smp_processor_id();
2603 +
2604 +               if (!pte_none(next_p->kmap_pte[i]))
2605 +                       set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
2606 +       }
2607 +}
2608 +#else
2609 +static inline void
2610 +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
2611 +#endif
2612 +
2613  
2614  /*
2615   *     switch_to(x,y) should switch tasks from x to y.
2616 @@ -271,6 +301,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
2617                      task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
2618                 __switch_to_xtra(prev_p, next_p, tss);
2619  
2620 +       switch_kmaps(prev_p, next_p);
2621 +
2622         /*
2623          * Leave lazy mode, flushing any hypercalls made here.
2624          * This must be done before restoring TLS segments so
2625 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
2626 index 3f05c044720b..fe68afd37162 100644
2627 --- a/arch/x86/kvm/lapic.c
2628 +++ b/arch/x86/kvm/lapic.c
2629 @@ -1939,6 +1939,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
2630         hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2631                      HRTIMER_MODE_ABS_PINNED);
2632         apic->lapic_timer.timer.function = apic_timer_fn;
2633 +       apic->lapic_timer.timer.irqsafe = 1;
2634  
2635         /*
2636          * APIC is created enabled. This will prevent kvm_lapic_set_base from
2637 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2638 index 731044efb195..d2905d9881f0 100644
2639 --- a/arch/x86/kvm/x86.c
2640 +++ b/arch/x86/kvm/x86.c
2641 @@ -5933,6 +5933,13 @@ int kvm_arch_init(void *opaque)
2642                 goto out;
2643         }
2644  
2645 +#ifdef CONFIG_PREEMPT_RT_FULL
2646 +       if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
2647 +               printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
2648 +               return -EOPNOTSUPP;
2649 +       }
2650 +#endif
2651 +
2652         r = kvm_mmu_module_init();
2653         if (r)
2654                 goto out_free_percpu;
2655 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
2656 index 6d18b70ed5a9..f752724c22e8 100644
2657 --- a/arch/x86/mm/highmem_32.c
2658 +++ b/arch/x86/mm/highmem_32.c
2659 @@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap);
2660   */
2661  void *kmap_atomic_prot(struct page *page, pgprot_t prot)
2662  {
2663 +       pte_t pte = mk_pte(page, prot);
2664         unsigned long vaddr;
2665         int idx, type;
2666  
2667 -       preempt_disable();
2668 +       preempt_disable_nort();
2669         pagefault_disable();
2670  
2671         if (!PageHighMem(page))
2672 @@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
2673         idx = type + KM_TYPE_NR*smp_processor_id();
2674         vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
2675         BUG_ON(!pte_none(*(kmap_pte-idx)));
2676 -       set_pte(kmap_pte-idx, mk_pte(page, prot));
2677 +#ifdef CONFIG_PREEMPT_RT_FULL
2678 +       current->kmap_pte[type] = pte;
2679 +#endif
2680 +       set_pte(kmap_pte-idx, pte);
2681         arch_flush_lazy_mmu_mode();
2682  
2683         return (void *)vaddr;
2684 @@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr)
2685                  * is a bad idea also, in case the page changes cacheability
2686                  * attributes or becomes a protected page in a hypervisor.
2687                  */
2688 +#ifdef CONFIG_PREEMPT_RT_FULL
2689 +               current->kmap_pte[type] = __pte(0);
2690 +#endif
2691                 kpte_clear_flush(kmap_pte-idx, vaddr);
2692                 kmap_atomic_idx_pop();
2693                 arch_flush_lazy_mmu_mode();
2694 @@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr)
2695  #endif
2696  
2697         pagefault_enable();
2698 -       preempt_enable();
2699 +       preempt_enable_nort();
2700  }
2701  EXPORT_SYMBOL(__kunmap_atomic);
2702  
2703 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
2704 index ada98b39b8ad..585f6829653b 100644
2705 --- a/arch/x86/mm/iomap_32.c
2706 +++ b/arch/x86/mm/iomap_32.c
2707 @@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free);
2708  
2709  void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
2710  {
2711 +       pte_t pte = pfn_pte(pfn, prot);
2712         unsigned long vaddr;
2713         int idx, type;
2714  
2715 @@ -65,7 +66,12 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
2716         type = kmap_atomic_idx_push();
2717         idx = type + KM_TYPE_NR * smp_processor_id();
2718         vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
2719 -       set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
2720 +       WARN_ON(!pte_none(*(kmap_pte - idx)));
2721 +
2722 +#ifdef CONFIG_PREEMPT_RT_FULL
2723 +       current->kmap_pte[type] = pte;
2724 +#endif
2725 +       set_pte(kmap_pte - idx, pte);
2726         arch_flush_lazy_mmu_mode();
2727  
2728         return (void *)vaddr;
2729 @@ -113,6 +119,9 @@ iounmap_atomic(void __iomem *kvaddr)
2730                  * is a bad idea also, in case the page changes cacheability
2731                  * attributes or becomes a protected page in a hypervisor.
2732                  */
2733 +#ifdef CONFIG_PREEMPT_RT_FULL
2734 +               current->kmap_pte[type] = __pte(0);
2735 +#endif
2736                 kpte_clear_flush(kmap_pte-idx, vaddr);
2737                 kmap_atomic_idx_pop();
2738         }
2739 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
2740 index e3353c97d086..01664968555c 100644
2741 --- a/arch/x86/mm/pageattr.c
2742 +++ b/arch/x86/mm/pageattr.c
2743 @@ -214,7 +214,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
2744                             int in_flags, struct page **pages)
2745  {
2746         unsigned int i, level;
2747 +#ifdef CONFIG_PREEMPT
2748 +       /*
2749 +        * Avoid wbinvd() because it causes latencies on all CPUs,
2750 +        * regardless of any CPU isolation that may be in effect.
2751 +        */
2752 +       unsigned long do_wbinvd = 0;
2753 +#else
2754         unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
2755 +#endif
2756  
2757         BUG_ON(irqs_disabled());
2758  
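
A compact restatement of the pageattr.c hunk above, as a hypothetical helper: under CONFIG_PREEMPT the wbinvd() heuristic is compiled out, so large attribute changes are flushed per page instead of stalling every CPU with a full cache writeback.

#include <linux/types.h>

static bool use_wbinvd(bool cache, int numpages)        /* hypothetical helper */
{
#ifdef CONFIG_PREEMPT
        return false;                           /* wbinvd() stalls all CPUs */
#else
        return cache && numpages >= 1024;       /* original 4M threshold */
#endif
}
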
2759 diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
2760 index 9e42842e924a..5398f97172f9 100644
2761 --- a/arch/x86/platform/uv/tlb_uv.c
2762 +++ b/arch/x86/platform/uv/tlb_uv.c
2763 @@ -748,9 +748,9 @@ static void destination_plugged(struct bau_desc *bau_desc,
2764  
2765                 quiesce_local_uvhub(hmaster);
2766  
2767 -               spin_lock(&hmaster->queue_lock);
2768 +               raw_spin_lock(&hmaster->queue_lock);
2769                 reset_with_ipi(&bau_desc->distribution, bcp);
2770 -               spin_unlock(&hmaster->queue_lock);
2771 +               raw_spin_unlock(&hmaster->queue_lock);
2772  
2773                 end_uvhub_quiesce(hmaster);
2774  
2775 @@ -770,9 +770,9 @@ static void destination_timeout(struct bau_desc *bau_desc,
2776  
2777                 quiesce_local_uvhub(hmaster);
2778  
2779 -               spin_lock(&hmaster->queue_lock);
2780 +               raw_spin_lock(&hmaster->queue_lock);
2781                 reset_with_ipi(&bau_desc->distribution, bcp);
2782 -               spin_unlock(&hmaster->queue_lock);
2783 +               raw_spin_unlock(&hmaster->queue_lock);
2784  
2785                 end_uvhub_quiesce(hmaster);
2786  
2787 @@ -793,7 +793,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
2788         cycles_t tm1;
2789  
2790         hmaster = bcp->uvhub_master;
2791 -       spin_lock(&hmaster->disable_lock);
2792 +       raw_spin_lock(&hmaster->disable_lock);
2793         if (!bcp->baudisabled) {
2794                 stat->s_bau_disabled++;
2795                 tm1 = get_cycles();
2796 @@ -806,7 +806,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
2797                         }
2798                 }
2799         }
2800 -       spin_unlock(&hmaster->disable_lock);
2801 +       raw_spin_unlock(&hmaster->disable_lock);
2802  }
2803  
2804  static void count_max_concurr(int stat, struct bau_control *bcp,
2805 @@ -869,7 +869,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
2806   */
2807  static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
2808  {
2809 -       spinlock_t *lock = &hmaster->uvhub_lock;
2810 +       raw_spinlock_t *lock = &hmaster->uvhub_lock;
2811         atomic_t *v;
2812  
2813         v = &hmaster->active_descriptor_count;
2814 @@ -1002,7 +1002,7 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
2815         struct bau_control *hmaster;
2816  
2817         hmaster = bcp->uvhub_master;
2818 -       spin_lock(&hmaster->disable_lock);
2819 +       raw_spin_lock(&hmaster->disable_lock);
2820         if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
2821                 stat->s_bau_reenabled++;
2822                 for_each_present_cpu(tcpu) {
2823 @@ -1014,10 +1014,10 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
2824                                 tbcp->period_giveups = 0;
2825                         }
2826                 }
2827 -               spin_unlock(&hmaster->disable_lock);
2828 +               raw_spin_unlock(&hmaster->disable_lock);
2829                 return 0;
2830         }
2831 -       spin_unlock(&hmaster->disable_lock);
2832 +       raw_spin_unlock(&hmaster->disable_lock);
2833         return -1;
2834  }
2835  
2836 @@ -1940,9 +1940,9 @@ static void __init init_per_cpu_tunables(void)
2837                 bcp->cong_reps                  = congested_reps;
2838                 bcp->disabled_period            = sec_2_cycles(disabled_period);
2839                 bcp->giveup_limit               = giveup_limit;
2840 -               spin_lock_init(&bcp->queue_lock);
2841 -               spin_lock_init(&bcp->uvhub_lock);
2842 -               spin_lock_init(&bcp->disable_lock);
2843 +               raw_spin_lock_init(&bcp->queue_lock);
2844 +               raw_spin_lock_init(&bcp->uvhub_lock);
2845 +               raw_spin_lock_init(&bcp->disable_lock);
2846         }
2847  }
2848  
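The tlb_uv.c conversions above follow the general RT rule: spinlock_t becomes a sleeping lock on PREEMPT_RT, so locks taken from non-preemptible contexts have to stay raw. A minimal sketch of the pattern, with a hypothetical lock:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);          /* hypothetical */

static void demo_critical_section(void)
{
        raw_spin_lock(&demo_lock);
        /* keep this short and non-sleeping: raw locks still spin on RT */
        raw_spin_unlock(&demo_lock);
}
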
2849 diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
2850 index b333fc45f9ec..8b85916e6986 100644
2851 --- a/arch/x86/platform/uv/uv_time.c
2852 +++ b/arch/x86/platform/uv/uv_time.c
2853 @@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
2854  
2855  /* There is one of these allocated per node */
2856  struct uv_rtc_timer_head {
2857 -       spinlock_t      lock;
2858 +       raw_spinlock_t  lock;
2859         /* next cpu waiting for timer, local node relative: */
2860         int             next_cpu;
2861         /* number of cpus on this node: */
2862 @@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers(void)
2863                                 uv_rtc_deallocate_timers();
2864                                 return -ENOMEM;
2865                         }
2866 -                       spin_lock_init(&head->lock);
2867 +                       raw_spin_lock_init(&head->lock);
2868                         head->ncpus = uv_blade_nr_possible_cpus(bid);
2869                         head->next_cpu = -1;
2870                         blade_info[bid] = head;
2871 @@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
2872         unsigned long flags;
2873         int next_cpu;
2874  
2875 -       spin_lock_irqsave(&head->lock, flags);
2876 +       raw_spin_lock_irqsave(&head->lock, flags);
2877  
2878         next_cpu = head->next_cpu;
2879         *t = expires;
2880 @@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
2881                 if (uv_setup_intr(cpu, expires)) {
2882                         *t = ULLONG_MAX;
2883                         uv_rtc_find_next_timer(head, pnode);
2884 -                       spin_unlock_irqrestore(&head->lock, flags);
2885 +                       raw_spin_unlock_irqrestore(&head->lock, flags);
2886                         return -ETIME;
2887                 }
2888         }
2889  
2890 -       spin_unlock_irqrestore(&head->lock, flags);
2891 +       raw_spin_unlock_irqrestore(&head->lock, flags);
2892         return 0;
2893  }
2894  
2895 @@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
2896         unsigned long flags;
2897         int rc = 0;
2898  
2899 -       spin_lock_irqsave(&head->lock, flags);
2900 +       raw_spin_lock_irqsave(&head->lock, flags);
2901  
2902         if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
2903                 rc = 1;
2904 @@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
2905                         uv_rtc_find_next_timer(head, pnode);
2906         }
2907  
2908 -       spin_unlock_irqrestore(&head->lock, flags);
2909 +       raw_spin_unlock_irqrestore(&head->lock, flags);
2910  
2911         return rc;
2912  }
2913 @@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, int force)
2914  static cycle_t uv_read_rtc(struct clocksource *cs)
2915  {
2916         unsigned long offset;
2917 +       cycle_t cycles;
2918  
2919 +       preempt_disable();
2920         if (uv_get_min_hub_revision_id() == 1)
2921                 offset = 0;
2922         else
2923                 offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
2924  
2925 -       return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
2926 +       cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
2927 +       preempt_enable();
2928 +
2929 +       return cycles;
2930  }
2931  
2932  /*
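
Besides making the rtc timer lock raw, uv_read_rtc() above now disables preemption around the MMR read so the task cannot migrate between computing the per-CPU offset and using it. The same pattern in isolation, with a hypothetical per-CPU variable:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(u64, demo_counter);       /* hypothetical */

static u64 demo_read(void)
{
        u64 val;

        preempt_disable();              /* pin the task to this CPU ... */
        val = __this_cpu_read(demo_counter);
        preempt_enable();               /* ... only around the access */

        return val;
}
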
2933 diff --git a/block/blk-core.c b/block/blk-core.c
2934 index 14d7c0740dc0..dfd905bea77c 100644
2935 --- a/block/blk-core.c
2936 +++ b/block/blk-core.c
2937 @@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
2938  
2939         INIT_LIST_HEAD(&rq->queuelist);
2940         INIT_LIST_HEAD(&rq->timeout_list);
2941 +#ifdef CONFIG_PREEMPT_RT_FULL
2942 +       INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
2943 +#endif
2944         rq->cpu = -1;
2945         rq->q = q;
2946         rq->__sector = (sector_t) -1;
2947 @@ -233,7 +236,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
2948   **/
2949  void blk_start_queue(struct request_queue *q)
2950  {
2951 -       WARN_ON(!irqs_disabled());
2952 +       WARN_ON_NONRT(!irqs_disabled());
2953  
2954         queue_flag_clear(QUEUE_FLAG_STOPPED, q);
2955         __blk_run_queue(q);
2956 @@ -659,7 +662,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
2957                 if (nowait)
2958                         return -EBUSY;
2959  
2960 -               ret = wait_event_interruptible(q->mq_freeze_wq,
2961 +               ret = swait_event_interruptible(q->mq_freeze_wq,
2962                                 !atomic_read(&q->mq_freeze_depth) ||
2963                                 blk_queue_dying(q));
2964                 if (blk_queue_dying(q))
2965 @@ -679,7 +682,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
2966         struct request_queue *q =
2967                 container_of(ref, struct request_queue, q_usage_counter);
2968  
2969 -       wake_up_all(&q->mq_freeze_wq);
2970 +       swake_up_all(&q->mq_freeze_wq);
2971  }
2972  
2973  static void blk_rq_timed_out_timer(unsigned long data)
2974 @@ -748,7 +751,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
2975         q->bypass_depth = 1;
2976         __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
2977  
2978 -       init_waitqueue_head(&q->mq_freeze_wq);
2979 +       init_swait_queue_head(&q->mq_freeze_wq);
2980  
2981         /*
2982          * Init percpu_ref in atomic mode so that it's faster to shutdown.
2983 @@ -3177,7 +3180,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
2984                 blk_run_queue_async(q);
2985         else
2986                 __blk_run_queue(q);
2987 -       spin_unlock(q->queue_lock);
2988 +       spin_unlock_irq(q->queue_lock);
2989  }
2990  
2991  static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
2992 @@ -3225,7 +3228,6 @@ EXPORT_SYMBOL(blk_check_plugged);
2993  void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2994  {
2995         struct request_queue *q;
2996 -       unsigned long flags;
2997         struct request *rq;
2998         LIST_HEAD(list);
2999         unsigned int depth;
3000 @@ -3245,11 +3247,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3001         q = NULL;
3002         depth = 0;
3003  
3004 -       /*
3005 -        * Save and disable interrupts here, to avoid doing it for every
3006 -        * queue lock we have to take.
3007 -        */
3008 -       local_irq_save(flags);
3009         while (!list_empty(&list)) {
3010                 rq = list_entry_rq(list.next);
3011                 list_del_init(&rq->queuelist);
3012 @@ -3262,7 +3259,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3013                                 queue_unplugged(q, depth, from_schedule);
3014                         q = rq->q;
3015                         depth = 0;
3016 -                       spin_lock(q->queue_lock);
3017 +                       spin_lock_irq(q->queue_lock);
3018                 }
3019  
3020                 /*
3021 @@ -3289,8 +3286,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3022          */
3023         if (q)
3024                 queue_unplugged(q, depth, from_schedule);
3025 -
3026 -       local_irq_restore(flags);
3027  }
3028  
3029  void blk_finish_plug(struct blk_plug *plug)
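
In blk-core.c the mq_freeze_wq waitqueue becomes a simple waitqueue, whose wake path is short and RT-friendly, and the plug flush takes queue_lock with spin_lock_irq() per queue instead of keeping interrupts disabled across the whole loop. A hedged sketch of the swait API used above (queue and flag are made up):

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);       /* hypothetical */
static bool demo_done;

static void demo_wait_for_done(void)
{
        swait_event(demo_wq, demo_done);        /* sleep until the condition holds */
}

static void demo_mark_done(void)
{
        demo_done = true;
        swake_up_all(&demo_wq);                 /* wake every waiter */
}
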
3030 diff --git a/block/blk-ioc.c b/block/blk-ioc.c
3031 index 381cb50a673c..dc8785233d94 100644
3032 --- a/block/blk-ioc.c
3033 +++ b/block/blk-ioc.c
3034 @@ -7,6 +7,7 @@
3035  #include <linux/bio.h>
3036  #include <linux/blkdev.h>
3037  #include <linux/slab.h>
3038 +#include <linux/delay.h>
3039  
3040  #include "blk.h"
3041  
3042 @@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_struct *work)
3043                         spin_unlock(q->queue_lock);
3044                 } else {
3045                         spin_unlock_irqrestore(&ioc->lock, flags);
3046 -                       cpu_relax();
3047 +                       cpu_chill();
3048                         spin_lock_irqsave_nested(&ioc->lock, flags, 1);
3049                 }
3050         }
3051 @@ -187,7 +188,7 @@ void put_io_context_active(struct io_context *ioc)
3052                         spin_unlock(icq->q->queue_lock);
3053                 } else {
3054                         spin_unlock_irqrestore(&ioc->lock, flags);
3055 -                       cpu_relax();
3056 +                       cpu_chill();
3057                         goto retry;
3058                 }
3059         }
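
cpu_chill() is the RT replacement for cpu_relax() in trylock retry loops: instead of busy-waiting, which on RT could keep a lower-priority lock holder off the CPU indefinitely, the task sleeps briefly and retries. The shape of such a loop with hypothetical locks (cpu_chill() is added by this patch in <linux/delay.h> and falls back to cpu_relax() on !RT):

#include <linux/spinlock.h>
#include <linux/delay.h>

static void demo_take_both(spinlock_t *outer, spinlock_t *inner)
{
retry:
        spin_lock(outer);
        if (!spin_trylock(inner)) {
                spin_unlock(outer);
                cpu_chill();            /* RT: short sleep; !RT: cpu_relax() */
                goto retry;
        }
        /* work under both locks */
        spin_unlock(inner);
        spin_unlock(outer);
}
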
3060 diff --git a/block/blk-mq.c b/block/blk-mq.c
3061 index 81caceb96c3c..b12b0ab005a9 100644
3062 --- a/block/blk-mq.c
3063 +++ b/block/blk-mq.c
3064 @@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
3065  
3066  static void blk_mq_freeze_queue_wait(struct request_queue *q)
3067  {
3068 -       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
3069 +       swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
3070  }
3071  
3072  /*
3073 @@ -110,7 +110,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
3074         WARN_ON_ONCE(freeze_depth < 0);
3075         if (!freeze_depth) {
3076                 percpu_ref_reinit(&q->q_usage_counter);
3077 -               wake_up_all(&q->mq_freeze_wq);
3078 +               swake_up_all(&q->mq_freeze_wq);
3079         }
3080  }
3081  EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
3082 @@ -129,7 +129,7 @@ void blk_mq_wake_waiters(struct request_queue *q)
3083          * dying, we need to ensure that processes currently waiting on
3084          * the queue are notified as well.
3085          */
3086 -       wake_up_all(&q->mq_freeze_wq);
3087 +       swake_up_all(&q->mq_freeze_wq);
3088  }
3089  
3090  bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
3091 @@ -177,6 +177,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
3092         rq->resid_len = 0;
3093         rq->sense = NULL;
3094  
3095 +#ifdef CONFIG_PREEMPT_RT_FULL
3096 +       INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
3097 +#endif
3098         INIT_LIST_HEAD(&rq->timeout_list);
3099         rq->timeout = 0;
3100  
3101 @@ -345,6 +348,17 @@ void blk_mq_end_request(struct request *rq, int error)
3102  }
3103  EXPORT_SYMBOL(blk_mq_end_request);
3104  
3105 +#ifdef CONFIG_PREEMPT_RT_FULL
3106 +
3107 +void __blk_mq_complete_request_remote_work(struct work_struct *work)
3108 +{
3109 +       struct request *rq = container_of(work, struct request, work);
3110 +
3111 +       rq->q->softirq_done_fn(rq);
3112 +}
3113 +
3114 +#else
3115 +
3116  static void __blk_mq_complete_request_remote(void *data)
3117  {
3118         struct request *rq = data;
3119 @@ -352,6 +366,8 @@ static void __blk_mq_complete_request_remote(void *data)
3120         rq->q->softirq_done_fn(rq);
3121  }
3122  
3123 +#endif
3124 +
3125  static void blk_mq_ipi_complete_request(struct request *rq)
3126  {
3127         struct blk_mq_ctx *ctx = rq->mq_ctx;
3128 @@ -363,19 +379,23 @@ static void blk_mq_ipi_complete_request(struct request *rq)
3129                 return;
3130         }
3131  
3132 -       cpu = get_cpu();
3133 +       cpu = get_cpu_light();
3134         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
3135                 shared = cpus_share_cache(cpu, ctx->cpu);
3136  
3137         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
3138 +#ifdef CONFIG_PREEMPT_RT_FULL
3139 +               schedule_work_on(ctx->cpu, &rq->work);
3140 +#else
3141                 rq->csd.func = __blk_mq_complete_request_remote;
3142                 rq->csd.info = rq;
3143                 rq->csd.flags = 0;
3144                 smp_call_function_single_async(ctx->cpu, &rq->csd);
3145 +#endif
3146         } else {
3147                 rq->q->softirq_done_fn(rq);
3148         }
3149 -       put_cpu();
3150 +       put_cpu_light();
3151  }
3152  
3153  static void __blk_mq_complete_request(struct request *rq)
3154 @@ -915,14 +935,14 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
3155                 return;
3156  
3157         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
3158 -               int cpu = get_cpu();
3159 +               int cpu = get_cpu_light();
3160                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
3161                         __blk_mq_run_hw_queue(hctx);
3162 -                       put_cpu();
3163 +                       put_cpu_light();
3164                         return;
3165                 }
3166  
3167 -               put_cpu();
3168 +               put_cpu_light();
3169         }
3170  
3171         kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
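
Two RT changes meet in blk-mq.c above: remote completions are punted to a per-request work item instead of an IPI, so softirq_done_fn() runs in a schedulable context, and get_cpu()/put_cpu() become get_cpu_light()/put_cpu_light(), which on RT only disable migration. A sketch assuming the helpers this patch introduces:

#include <linux/smp.h>
#include <linux/printk.h>

static void demo_per_cpu_work(void)
{
        int cpu = get_cpu_light();      /* RT: migration off; !RT: get_cpu() */

        /*
         * smp_processor_id() is stable here, but on RT the task may still
         * be preempted, so sleeping locks remain legal in this section.
         */
        pr_debug("running on CPU %d\n", cpu);

        put_cpu_light();
}
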
3172 diff --git a/block/blk-mq.h b/block/blk-mq.h
3173 index e5d25249028c..1e846b842eab 100644
3174 --- a/block/blk-mq.h
3175 +++ b/block/blk-mq.h
3176 @@ -72,12 +72,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
3177   */
3178  static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
3179  {
3180 -       return __blk_mq_get_ctx(q, get_cpu());
3181 +       return __blk_mq_get_ctx(q, get_cpu_light());
3182  }
3183  
3184  static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
3185  {
3186 -       put_cpu();
3187 +       put_cpu_light();
3188  }
3189  
3190  struct blk_mq_alloc_data {
3191 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
3192 index 06cf9807f49a..c40342643ca0 100644
3193 --- a/block/blk-softirq.c
3194 +++ b/block/blk-softirq.c
3195 @@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
3196                 raise_softirq_irqoff(BLOCK_SOFTIRQ);
3197  
3198         local_irq_restore(flags);
3199 +       preempt_check_resched_rt();
3200  }
3201  
3202  /*
3203 @@ -89,6 +90,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
3204                          this_cpu_ptr(&blk_cpu_done));
3205         raise_softirq_irqoff(BLOCK_SOFTIRQ);
3206         local_irq_enable();
3207 +       preempt_check_resched_rt();
3208  
3209         return 0;
3210  }
3211 @@ -141,6 +143,7 @@ void __blk_complete_request(struct request *req)
3212                 goto do_local;
3213  
3214         local_irq_restore(flags);
3215 +       preempt_check_resched_rt();
3216  }
3217  
3218  /**
3219 diff --git a/block/bounce.c b/block/bounce.c
3220 index 1cb5dd3a5da1..2f1ec8a67cbe 100644
3221 --- a/block/bounce.c
3222 +++ b/block/bounce.c
3223 @@ -55,11 +55,11 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
3224         unsigned long flags;
3225         unsigned char *vto;
3226  
3227 -       local_irq_save(flags);
3228 +       local_irq_save_nort(flags);
3229         vto = kmap_atomic(to->bv_page);
3230         memcpy(vto + to->bv_offset, vfrom, to->bv_len);
3231         kunmap_atomic(vto);
3232 -       local_irq_restore(flags);
3233 +       local_irq_restore_nort(flags);
3234  }
3235  
3236  #else /* CONFIG_HIGHMEM */
3237 diff --git a/crypto/algapi.c b/crypto/algapi.c
3238 index 1fad2a6b3bbb..ecb7315426a9 100644
3239 --- a/crypto/algapi.c
3240 +++ b/crypto/algapi.c
3241 @@ -719,13 +719,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
3242  
3243  int crypto_register_notifier(struct notifier_block *nb)
3244  {
3245 -       return blocking_notifier_chain_register(&crypto_chain, nb);
3246 +       return srcu_notifier_chain_register(&crypto_chain, nb);
3247  }
3248  EXPORT_SYMBOL_GPL(crypto_register_notifier);
3249  
3250  int crypto_unregister_notifier(struct notifier_block *nb)
3251  {
3252 -       return blocking_notifier_chain_unregister(&crypto_chain, nb);
3253 +       return srcu_notifier_chain_unregister(&crypto_chain, nb);
3254  }
3255  EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
3256  
3257 diff --git a/crypto/api.c b/crypto/api.c
3258 index bbc147cb5dec..bc1a848f02ec 100644
3259 --- a/crypto/api.c
3260 +++ b/crypto/api.c
3261 @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list);
3262  DECLARE_RWSEM(crypto_alg_sem);
3263  EXPORT_SYMBOL_GPL(crypto_alg_sem);
3264  
3265 -BLOCKING_NOTIFIER_HEAD(crypto_chain);
3266 +SRCU_NOTIFIER_HEAD(crypto_chain);
3267  EXPORT_SYMBOL_GPL(crypto_chain);
3268  
3269  static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
3270 @@ -236,10 +236,10 @@ int crypto_probing_notify(unsigned long val, void *v)
3271  {
3272         int ok;
3273  
3274 -       ok = blocking_notifier_call_chain(&crypto_chain, val, v);
3275 +       ok = srcu_notifier_call_chain(&crypto_chain, val, v);
3276         if (ok == NOTIFY_DONE) {
3277                 request_module("cryptomgr");
3278 -               ok = blocking_notifier_call_chain(&crypto_chain, val, v);
3279 +               ok = srcu_notifier_call_chain(&crypto_chain, val, v);
3280         }
3281  
3282         return ok;
3283 diff --git a/crypto/internal.h b/crypto/internal.h
3284 index 7eefcdb00227..0ecc7f5a2f40 100644
3285 --- a/crypto/internal.h
3286 +++ b/crypto/internal.h
3287 @@ -47,7 +47,7 @@ struct crypto_larval {
3288  
3289  extern struct list_head crypto_alg_list;
3290  extern struct rw_semaphore crypto_alg_sem;
3291 -extern struct blocking_notifier_head crypto_chain;
3292 +extern struct srcu_notifier_head crypto_chain;
3293  
3294  #ifdef CONFIG_PROC_FS
3295  void __init crypto_init_proc(void);
3296 @@ -146,7 +146,7 @@ static inline int crypto_is_moribund(struct crypto_alg *alg)
3297  
3298  static inline void crypto_notify(unsigned long val, void *v)
3299  {
3300 -       blocking_notifier_call_chain(&crypto_chain, val, v);
3301 +       srcu_notifier_call_chain(&crypto_chain, val, v);
3302  }
3303  
3304  #endif /* _CRYPTO_INTERNAL_H */
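
The crypto notifier chain is converted from a blocking chain to an SRCU chain, so notification no longer has to take the chain's rw_semaphore, which is problematic on RT. A hedged sketch of the srcu_notifier API used above, with made-up chain and handler names:

#include <linux/notifier.h>

static SRCU_NOTIFIER_HEAD(demo_chain);          /* hypothetical */

static int demo_cb(struct notifier_block *nb, unsigned long val, void *data)
{
        return NOTIFY_OK;
}

static struct notifier_block demo_nb = { .notifier_call = demo_cb };

static void demo_notifier_usage(void)
{
        srcu_notifier_chain_register(&demo_chain, &demo_nb);
        srcu_notifier_call_chain(&demo_chain, 0, NULL); /* SRCU read side */
        srcu_notifier_chain_unregister(&demo_chain, &demo_nb);
}
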
3305 diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
3306 index 750fa824d42c..441edf51484a 100644
3307 --- a/drivers/acpi/acpica/acglobal.h
3308 +++ b/drivers/acpi/acpica/acglobal.h
3309 @@ -116,7 +116,7 @@ ACPI_GLOBAL(u8, acpi_gbl_global_lock_pending);
3310   * interrupt level
3311   */
3312  ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
3313 -ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock);    /* For ACPI H/W except GPE registers */
3314 +ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock);        /* For ACPI H/W except GPE registers */
3315  ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
3316  
3317  /* Mutex for _OSI support */
3318 diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
3319 index 3b7fb99362b6..696bf8e62afb 100644
3320 --- a/drivers/acpi/acpica/hwregs.c
3321 +++ b/drivers/acpi/acpica/hwregs.c
3322 @@ -363,14 +363,14 @@ acpi_status acpi_hw_clear_acpi_status(void)
3323                           ACPI_BITMASK_ALL_FIXED_STATUS,
3324                           ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
3325  
3326 -       lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
3327 +       raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
3328  
3329         /* Clear the fixed events in PM1 A/B */
3330  
3331         status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
3332                                         ACPI_BITMASK_ALL_FIXED_STATUS);
3333  
3334 -       acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
3335 +       raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
3336  
3337         if (ACPI_FAILURE(status)) {
3338                 goto exit;
3339 diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
3340 index 98c26ff39409..6e236f2ea791 100644
3341 --- a/drivers/acpi/acpica/hwxface.c
3342 +++ b/drivers/acpi/acpica/hwxface.c
3343 @@ -373,7 +373,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
3344                 return_ACPI_STATUS(AE_BAD_PARAMETER);
3345         }
3346  
3347 -       lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
3348 +       raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
3349  
3350         /*
3351          * At this point, we know that the parent register is one of the
3352 @@ -434,7 +434,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
3353  
3354  unlock_and_exit:
3355  
3356 -       acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
3357 +       raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
3358         return_ACPI_STATUS(status);
3359  }
3360  
3361 diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
3362 index 15073375bd00..357e7ca5a587 100644
3363 --- a/drivers/acpi/acpica/utmutex.c
3364 +++ b/drivers/acpi/acpica/utmutex.c
3365 @@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(void)
3366                 return_ACPI_STATUS (status);
3367         }
3368  
3369 -       status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
3370 +       status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
3371         if (ACPI_FAILURE (status)) {
3372                 return_ACPI_STATUS (status);
3373         }
3374 @@ -145,7 +145,7 @@ void acpi_ut_mutex_terminate(void)
3375         /* Delete the spinlocks */
3376  
3377         acpi_os_delete_lock(acpi_gbl_gpe_lock);
3378 -       acpi_os_delete_lock(acpi_gbl_hardware_lock);
3379 +       acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
3380         acpi_os_delete_lock(acpi_gbl_reference_count_lock);
3381  
3382         /* Delete the reader/writer lock */
3383 diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
3384 index 051b6158d1b7..7ad293bef6ed 100644
3385 --- a/drivers/ata/libata-sff.c
3386 +++ b/drivers/ata/libata-sff.c
3387 @@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
3388         unsigned long flags;
3389         unsigned int consumed;
3390  
3391 -       local_irq_save(flags);
3392 +       local_irq_save_nort(flags);
3393         consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
3394 -       local_irq_restore(flags);
3395 +       local_irq_restore_nort(flags);
3396  
3397         return consumed;
3398  }
3399 @@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3400                 unsigned long flags;
3401  
3402                 /* FIXME: use a bounce buffer */
3403 -               local_irq_save(flags);
3404 +               local_irq_save_nort(flags);
3405                 buf = kmap_atomic(page);
3406  
3407                 /* do the actual data transfer */
3408 @@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3409                                        do_write);
3410  
3411                 kunmap_atomic(buf);
3412 -               local_irq_restore(flags);
3413 +               local_irq_restore_nort(flags);
3414         } else {
3415                 buf = page_address(page);
3416                 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
3417 @@ -864,7 +864,7 @@ static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3418                 unsigned long flags;
3419  
3420                 /* FIXME: use bounce buffer */
3421 -               local_irq_save(flags);
3422 +               local_irq_save_nort(flags);
3423                 buf = kmap_atomic(page);
3424  
3425                 /* do the actual data transfer */
3426 @@ -872,7 +872,7 @@ static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3427                                                                 count, rw);
3428  
3429                 kunmap_atomic(buf);
3430 -               local_irq_restore(flags);
3431 +               local_irq_restore_nort(flags);
3432         } else {
3433                 buf = page_address(page);
3434                 consumed = ap->ops->sff_data_xfer(dev,  buf + offset,
3435 diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
3436 index 4b5cd3a7b2b6..fa8329ad79fd 100644
3437 --- a/drivers/block/zram/zcomp.c
3438 +++ b/drivers/block/zram/zcomp.c
3439 @@ -118,12 +118,19 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
3440  
3441  struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
3442  {
3443 -       return *get_cpu_ptr(comp->stream);
3444 +       struct zcomp_strm *zstrm;
3445 +
3446 +       zstrm = *this_cpu_ptr(comp->stream);
3447 +       spin_lock(&zstrm->zcomp_lock);
3448 +       return zstrm;
3449  }
3450  
3451  void zcomp_stream_put(struct zcomp *comp)
3452  {
3453 -       put_cpu_ptr(comp->stream);
3454 +       struct zcomp_strm *zstrm;
3455 +
3456 +       zstrm = *this_cpu_ptr(comp->stream);
3457 +       spin_unlock(&zstrm->zcomp_lock);
3458  }
3459  
3460  int zcomp_compress(struct zcomp_strm *zstrm,
3461 @@ -174,6 +181,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp,
3462                         pr_err("Can't allocate a compression stream\n");
3463                         return NOTIFY_BAD;
3464                 }
3465 +               spin_lock_init(&zstrm->zcomp_lock);
3466                 *per_cpu_ptr(comp->stream, cpu) = zstrm;
3467                 break;
3468         case CPU_DEAD:
3469 diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
3470 index 478cac2ed465..f7a6efdc3285 100644
3471 --- a/drivers/block/zram/zcomp.h
3472 +++ b/drivers/block/zram/zcomp.h
3473 @@ -14,6 +14,7 @@ struct zcomp_strm {
3474         /* compression/decompression buffer */
3475         void *buffer;
3476         struct crypto_comp *tfm;
3477 +       spinlock_t zcomp_lock;
3478  };
3479  
3480  /* dynamic per-device compression frontend */
3481 diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
3482 index d2ef51ca9cf4..05e749736560 100644
3483 --- a/drivers/block/zram/zram_drv.c
3484 +++ b/drivers/block/zram/zram_drv.c
3485 @@ -528,6 +528,8 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
3486                 goto out_error;
3487         }
3488  
3489 +       zram_meta_init_table_locks(meta, disksize);
3490 +
3491         return meta;
3492  
3493  out_error:
3494 @@ -575,28 +577,28 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
3495         struct zram_meta *meta = zram->meta;
3496         unsigned long handle;
3497         unsigned int size;
3498 +       struct zcomp_strm *zstrm;
3499  
3500 -       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
3501 +       zram_lock_table(&meta->table[index]);
3502         handle = meta->table[index].handle;
3503         size = zram_get_obj_size(meta, index);
3504  
3505         if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
3506 -               bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3507 +               zram_unlock_table(&meta->table[index]);
3508                 clear_page(mem);
3509                 return 0;
3510         }
3511  
3512 +       zstrm = zcomp_stream_get(zram->comp);
3513         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
3514         if (size == PAGE_SIZE) {
3515                 copy_page(mem, cmem);
3516         } else {
3517 -               struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
3518 -
3519                 ret = zcomp_decompress(zstrm, cmem, size, mem);
3520 -               zcomp_stream_put(zram->comp);
3521         }
3522         zs_unmap_object(meta->mem_pool, handle);
3523 -       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3524 +       zcomp_stream_put(zram->comp);
3525 +       zram_unlock_table(&meta->table[index]);
3526  
3527         /* Should NEVER happen. Return bio error if it does. */
3528         if (unlikely(ret)) {
3529 @@ -616,14 +618,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
3530         struct zram_meta *meta = zram->meta;
3531         page = bvec->bv_page;
3532  
3533 -       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
3534 +       zram_lock_table(&meta->table[index]);
3535         if (unlikely(!meta->table[index].handle) ||
3536                         zram_test_flag(meta, index, ZRAM_ZERO)) {
3537 -               bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3538 +               zram_unlock_table(&meta->table[index]);
3539                 handle_zero_page(bvec);
3540                 return 0;
3541         }
3542 -       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3543 +       zram_unlock_table(&meta->table[index]);
3544  
3545         if (is_partial_io(bvec))
3546                 /* Use  a temporary buffer to decompress the page */
3547 @@ -700,10 +702,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
3548                 if (user_mem)
3549                         kunmap_atomic(user_mem);
3550                 /* Free memory associated with this sector now. */
3551 -               bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
3552 +               zram_lock_table(&meta->table[index]);
3553                 zram_free_page(zram, index);
3554                 zram_set_flag(meta, index, ZRAM_ZERO);
3555 -               bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3556 +               zram_unlock_table(&meta->table[index]);
3557  
3558                 atomic64_inc(&zram->stats.zero_pages);
3559                 ret = 0;
3560 @@ -794,12 +796,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
3561          * Free memory associated with this sector
3562          * before overwriting unused sectors.
3563          */
3564 -       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
3565 +       zram_lock_table(&meta->table[index]);
3566         zram_free_page(zram, index);
3567  
3568         meta->table[index].handle = handle;
3569         zram_set_obj_size(meta, index, clen);
3570 -       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3571 +       zram_unlock_table(&meta->table[index]);
3572  
3573         /* Update stats */
3574         atomic64_add(clen, &zram->stats.compr_data_size);
3575 @@ -842,9 +844,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
3576         }
3577  
3578         while (n >= PAGE_SIZE) {
3579 -               bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
3580 +               zram_lock_table(&meta->table[index]);
3581                 zram_free_page(zram, index);
3582 -               bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3583 +               zram_unlock_table(&meta->table[index]);
3584                 atomic64_inc(&zram->stats.notify_free);
3585                 index++;
3586                 n -= PAGE_SIZE;
3587 @@ -973,9 +975,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
3588         zram = bdev->bd_disk->private_data;
3589         meta = zram->meta;
3590  
3591 -       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
3592 +       zram_lock_table(&meta->table[index]);
3593         zram_free_page(zram, index);
3594 -       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
3595 +       zram_unlock_table(&meta->table[index]);
3596         atomic64_inc(&zram->stats.notify_free);
3597  }
3598  
3599 diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
3600 index 74fcf10da374..fd4020c99b9e 100644
3601 --- a/drivers/block/zram/zram_drv.h
3602 +++ b/drivers/block/zram/zram_drv.h
3603 @@ -73,6 +73,9 @@ enum zram_pageflags {
3604  struct zram_table_entry {
3605         unsigned long handle;
3606         unsigned long value;
3607 +#ifdef CONFIG_PREEMPT_RT_BASE
3608 +       spinlock_t lock;
3609 +#endif
3610  };
3611  
3612  struct zram_stats {
3613 @@ -120,4 +123,42 @@ struct zram {
3614          */
3615         bool claim; /* Protected by bdev->bd_mutex */
3616  };
3617 +
3618 +#ifndef CONFIG_PREEMPT_RT_BASE
3619 +static inline void zram_lock_table(struct zram_table_entry *table)
3620 +{
3621 +       bit_spin_lock(ZRAM_ACCESS, &table->value);
3622 +}
3623 +
3624 +static inline void zram_unlock_table(struct zram_table_entry *table)
3625 +{
3626 +       bit_spin_unlock(ZRAM_ACCESS, &table->value);
3627 +}
3628 +
3629 +static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { }
3630 +#else /* CONFIG_PREEMPT_RT_BASE */
3631 +static inline void zram_lock_table(struct zram_table_entry *table)
3632 +{
3633 +       spin_lock(&table->lock);
3634 +       __set_bit(ZRAM_ACCESS, &table->value);
3635 +}
3636 +
3637 +static inline void zram_unlock_table(struct zram_table_entry *table)
3638 +{
3639 +       __clear_bit(ZRAM_ACCESS, &table->value);
3640 +       spin_unlock(&table->lock);
3641 +}
3642 +
3643 +static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
3644 +{
3645 +        size_t num_pages = disksize >> PAGE_SHIFT;
3646 +        size_t index;
3647 +
3648 +        for (index = 0; index < num_pages; index++) {
3649 +               spinlock_t *lock = &meta->table[index].lock;
3650 +               spin_lock_init(lock);
3651 +        }
3652 +}
3653 +#endif /* CONFIG_PREEMPT_RT_BASE */
3654 +
3655  #endif
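
The zram hunks above replace the per-entry bit_spin_lock, which implicitly disables preemption, with a spinlock_t per table entry on RT, and give each zcomp stream its own lock instead of relying on get_cpu_ptr(). The entry-lock idea in isolation (structure and bit number are illustrative only):

#include <linux/spinlock.h>
#include <linux/bitops.h>

struct demo_entry {
        unsigned long value;
        spinlock_t lock;                /* RT-only member in the real patch */
};

static void demo_entry_access(struct demo_entry *e)
{
        spin_lock(&e->lock);
        __set_bit(0, &e->value);        /* mirror the old lock bit for flag tests */
        /* read or update the entry */
        __clear_bit(0, &e->value);
        spin_unlock(&e->lock);
}
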
3656 diff --git a/drivers/char/random.c b/drivers/char/random.c
3657 index d6876d506220..0c60b1e54579 100644
3658 --- a/drivers/char/random.c
3659 +++ b/drivers/char/random.c
3660 @@ -1028,8 +1028,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
3661         } sample;
3662         long delta, delta2, delta3;
3663  
3664 -       preempt_disable();
3665 -
3666         sample.jiffies = jiffies;
3667         sample.cycles = random_get_entropy();
3668         sample.num = num;
3669 @@ -1070,7 +1068,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
3670                  */
3671                 credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
3672         }
3673 -       preempt_enable();
3674  }
3675  
3676  void add_input_randomness(unsigned int type, unsigned int code,
3677 @@ -1123,28 +1120,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
3678         return *(ptr + f->reg_idx++);
3679  }
3680  
3681 -void add_interrupt_randomness(int irq, int irq_flags)
3682 +void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
3683  {
3684         struct entropy_store    *r;
3685         struct fast_pool        *fast_pool = this_cpu_ptr(&irq_randomness);
3686 -       struct pt_regs          *regs = get_irq_regs();
3687         unsigned long           now = jiffies;
3688         cycles_t                cycles = random_get_entropy();
3689         __u32                   c_high, j_high;
3690 -       __u64                   ip;
3691         unsigned long           seed;
3692         int                     credit = 0;
3693  
3694         if (cycles == 0)
3695 -               cycles = get_reg(fast_pool, regs);
3696 +               cycles = get_reg(fast_pool, NULL);
3697         c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
3698         j_high = (sizeof(now) > 4) ? now >> 32 : 0;
3699         fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
3700         fast_pool->pool[1] ^= now ^ c_high;
3701 -       ip = regs ? instruction_pointer(regs) : _RET_IP_;
3702 +       if (!ip)
3703 +               ip = _RET_IP_;
3704         fast_pool->pool[2] ^= ip;
3705         fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
3706 -               get_reg(fast_pool, regs);
3707 +               get_reg(fast_pool, NULL);
3708  
3709         fast_mix(fast_pool);
3710         add_interrupt_bench(cycles);
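
add_interrupt_randomness() no longer reads get_irq_regs() itself: with forced-threaded interrupts on RT the registers of the interrupted context are not available at this point, so the caller captures the instruction pointer earlier and passes it in, and 0 makes the function fall back to _RET_IP_. A hypothetical call site illustrating the new third argument:

#include <linux/random.h>
#include <linux/types.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

static void demo_irq_entropy(int irq)           /* hypothetical */
{
        struct pt_regs *regs = get_irq_regs();
        u64 ip = regs ? instruction_pointer(regs) : 0;

        add_interrupt_randomness(irq, 0, ip);
}
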
3711 diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
3712 index 4da2af9694a2..5b6f57f500b8 100644
3713 --- a/drivers/clocksource/tcb_clksrc.c
3714 +++ b/drivers/clocksource/tcb_clksrc.c
3715 @@ -23,8 +23,7 @@
3716   *     this 32 bit free-running counter. the second channel is not used.
3717   *
3718   *   - The third channel may be used to provide a 16-bit clockevent
3719 - *     source, used in either periodic or oneshot mode.  This runs
3720 - *     at 32 KiHZ, and can handle delays of up to two seconds.
3721 + *     source, used in either periodic or oneshot mode.
3722   *
3723   * A boot clocksource and clockevent source are also currently needed,
3724   * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
3725 @@ -74,6 +73,8 @@ static struct clocksource clksrc = {
3726  struct tc_clkevt_device {
3727         struct clock_event_device       clkevt;
3728         struct clk                      *clk;
3729 +       bool                            clk_enabled;
3730 +       u32                             freq;
3731         void __iomem                    *regs;
3732  };
3733  
3734 @@ -82,15 +83,26 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
3735         return container_of(clkevt, struct tc_clkevt_device, clkevt);
3736  }
3737  
3738 -/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
3739 - * because using one of the divided clocks would usually mean the
3740 - * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
3741 - *
3742 - * A divided clock could be good for high resolution timers, since
3743 - * 30.5 usec resolution can seem "low".
3744 - */
3745  static u32 timer_clock;
3746  
3747 +static void tc_clk_disable(struct clock_event_device *d)
3748 +{
3749 +       struct tc_clkevt_device *tcd = to_tc_clkevt(d);
3750 +
3751 +       clk_disable(tcd->clk);
3752 +       tcd->clk_enabled = false;
3753 +}
3754 +
3755 +static void tc_clk_enable(struct clock_event_device *d)
3756 +{
3757 +       struct tc_clkevt_device *tcd = to_tc_clkevt(d);
3758 +
3759 +       if (tcd->clk_enabled)
3760 +               return;
3761 +       clk_enable(tcd->clk);
3762 +       tcd->clk_enabled = true;
3763 +}
3764 +
3765  static int tc_shutdown(struct clock_event_device *d)
3766  {
3767         struct tc_clkevt_device *tcd = to_tc_clkevt(d);
3768 @@ -98,8 +110,14 @@ static int tc_shutdown(struct clock_event_device *d)
3769  
3770         __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
3771         __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
3772 +       return 0;
3773 +}
3774 +
3775 +static int tc_shutdown_clk_off(struct clock_event_device *d)
3776 +{
3777 +       tc_shutdown(d);
3778         if (!clockevent_state_detached(d))
3779 -               clk_disable(tcd->clk);
3780 +               tc_clk_disable(d);
3781  
3782         return 0;
3783  }
3784 @@ -112,9 +130,9 @@ static int tc_set_oneshot(struct clock_event_device *d)
3785         if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
3786                 tc_shutdown(d);
3787  
3788 -       clk_enable(tcd->clk);
3789 +       tc_clk_enable(d);
3790  
3791 -       /* slow clock, count up to RC, then irq and stop */
3792 +       /* count up to RC, then irq and stop */
3793         __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
3794                      ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
3795         __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
3796 @@ -134,12 +152,12 @@ static int tc_set_periodic(struct clock_event_device *d)
3797         /* By not making the gentime core emulate periodic mode on top
3798          * of oneshot, we get lower overhead and improved accuracy.
3799          */
3800 -       clk_enable(tcd->clk);
3801 +       tc_clk_enable(d);
3802  
3803 -       /* slow clock, count up to RC, then irq and restart */
3804 +       /* count up to RC, then irq and restart */
3805         __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
3806                      regs + ATMEL_TC_REG(2, CMR));
3807 -       __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
3808 +       __raw_writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
3809  
3810         /* Enable clock and interrupts on RC compare */
3811         __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
3812 @@ -166,9 +184,13 @@ static struct tc_clkevt_device clkevt = {
3813                 .features               = CLOCK_EVT_FEAT_PERIODIC |
3814                                           CLOCK_EVT_FEAT_ONESHOT,
3815                 /* Should be lower than at91rm9200's system timer */
3816 +#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
3817                 .rating                 = 125,
3818 +#else
3819 +               .rating                 = 200,
3820 +#endif
3821                 .set_next_event         = tc_next_event,
3822 -               .set_state_shutdown     = tc_shutdown,
3823 +               .set_state_shutdown     = tc_shutdown_clk_off,
3824                 .set_state_periodic     = tc_set_periodic,
3825                 .set_state_oneshot      = tc_set_oneshot,
3826         },
3827 @@ -188,8 +210,9 @@ static irqreturn_t ch2_irq(int irq, void *handle)
3828         return IRQ_NONE;
3829  }
3830  
3831 -static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
3832 +static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
3833  {
3834 +       unsigned divisor = atmel_tc_divisors[divisor_idx];
3835         int ret;
3836         struct clk *t2_clk = tc->clk[2];
3837         int irq = tc->irq[2];
3838 @@ -210,7 +233,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
3839         clkevt.regs = tc->regs;
3840         clkevt.clk = t2_clk;
3841  
3842 -       timer_clock = clk32k_divisor_idx;
3843 +       timer_clock = divisor_idx;
3844 +       if (!divisor)
3845 +               clkevt.freq = 32768;
3846 +       else
3847 +               clkevt.freq = clk_get_rate(t2_clk) / divisor;
3848  
3849         clkevt.clkevt.cpumask = cpumask_of(0);
3850  
3851 @@ -221,7 +248,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
3852                 return ret;
3853         }
3854  
3855 -       clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
3856 +       clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
3857  
3858         return ret;
3859  }
3860 @@ -358,7 +385,11 @@ static int __init tcb_clksrc_init(void)
3861                 goto err_disable_t1;
3862  
3863         /* channel 2:  periodic and oneshot timer support */
3864 +#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
3865         ret = setup_clkevents(tc, clk32k_divisor_idx);
3866 +#else
3867 +       ret = setup_clkevents(tc, best_divisor_idx);
3868 +#endif
3869         if (ret)
3870                 goto err_unregister_clksrc;
3871  
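With the hunks above the TCB clockevent may run from a divided master clock instead of the fixed 32 KiHz slow clock; clkevt.freq then feeds both the periodic reload value and clockevents_config_and_register(). A worked example of the reload value, assuming HZ=100 (helper name is made up):

#include <linux/types.h>
#include <linux/param.h>

static u32 demo_rc_for_hz(u32 freq)             /* hypothetical helper */
{
        /* same rounding as the write to ATMEL_TC_REG(2, RC) above */
        return (freq + HZ / 2) / HZ;
}

/*
 * freq = 32768 Hz -> RC = 328   (about 30.5 us per timer tick)
 * freq = 1 MHz    -> RC = 10000 (1 us per tick, still below the
 *                                16-bit limit of 0xffff)
 */
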
3872 diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
3873 index 6555821bbdae..93288849b2bd 100644
3874 --- a/drivers/clocksource/timer-atmel-pit.c
3875 +++ b/drivers/clocksource/timer-atmel-pit.c
3876 @@ -46,6 +46,7 @@ struct pit_data {
3877         u32             cycle;
3878         u32             cnt;
3879         unsigned int    irq;
3880 +       bool            irq_requested;
3881         struct clk      *mck;
3882  };
3883  
3884 @@ -96,15 +97,29 @@ static int pit_clkevt_shutdown(struct clock_event_device *dev)
3885  
3886         /* disable irq, leaving the clocksource active */
3887         pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN);
3888 +       if (data->irq_requested) {
3889 +               free_irq(data->irq, data);
3890 +               data->irq_requested = false;
3891 +       }
3892         return 0;
3893  }
3894  
3895 +static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id);
3896  /*
3897   * Clockevent device:  interrupts every 1/HZ (== pit_cycles * MCK/16)
3898   */
3899  static int pit_clkevt_set_periodic(struct clock_event_device *dev)
3900  {
3901         struct pit_data *data = clkevt_to_pit_data(dev);
3902 +       int ret;
3903 +
3904 +       ret = request_irq(data->irq, at91sam926x_pit_interrupt,
3905 +                         IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
3906 +                         "at91_tick", data);
3907 +       if (ret)
3908 +               panic(pr_fmt("Unable to setup IRQ\n"));
3909 +
3910 +       data->irq_requested = true;
3911  
3912         /* update clocksource counter */
3913         data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
3914 @@ -230,15 +245,6 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
3915                 return ret;
3916         }
3917  
3918 -       /* Set up irq handler */
3919 -       ret = request_irq(data->irq, at91sam926x_pit_interrupt,
3920 -                         IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
3921 -                         "at91_tick", data);
3922 -       if (ret) {
3923 -               pr_err("Unable to setup IRQ\n");
3924 -               return ret;
3925 -       }
3926 -
3927         /* Set up and register clockevents */
3928         data->clkevt.name = "pit";
3929         data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC;
3930 diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
3931 index e90ab5b63a90..9e124087c55f 100644
3932 --- a/drivers/clocksource/timer-atmel-st.c
3933 +++ b/drivers/clocksource/timer-atmel-st.c
3934 @@ -115,18 +115,29 @@ static void clkdev32k_disable_and_flush_irq(void)
3935         last_crtr = read_CRTR();
3936  }
3937  
3938 +static int atmel_st_irq;
3939 +
3940  static int clkevt32k_shutdown(struct clock_event_device *evt)
3941  {
3942         clkdev32k_disable_and_flush_irq();
3943         irqmask = 0;
3944         regmap_write(regmap_st, AT91_ST_IER, irqmask);
3945 +       free_irq(atmel_st_irq, regmap_st);
3946         return 0;
3947  }
3948  
3949  static int clkevt32k_set_oneshot(struct clock_event_device *dev)
3950  {
3951 +       int ret;
3952 +
3953         clkdev32k_disable_and_flush_irq();
3954  
3955 +       ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt,
3956 +                         IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
3957 +                         "at91_tick", regmap_st);
3958 +       if (ret)
3959 +               panic(pr_fmt("Unable to setup IRQ\n"));
3960 +
3961         /*
3962          * ALM for oneshot irqs, set by next_event()
3963          * before 32 seconds have passed.
3964 @@ -139,8 +150,16 @@ static int clkevt32k_set_oneshot(struct clock_event_device *dev)
3965  
3966  static int clkevt32k_set_periodic(struct clock_event_device *dev)
3967  {
3968 +       int ret;
3969 +
3970         clkdev32k_disable_and_flush_irq();
3971  
3972 +       ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt,
3973 +                         IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
3974 +                         "at91_tick", regmap_st);
3975 +       if (ret)
3976 +               panic(pr_fmt("Unable to setup IRQ\n"));
3977 +
3978         /* PIT for periodic irqs; fixed rate of 1/HZ */
3979         irqmask = AT91_ST_PITS;
3980         regmap_write(regmap_st, AT91_ST_PIMR, timer_latch);
3981 @@ -198,7 +217,7 @@ static int __init atmel_st_timer_init(struct device_node *node)
3982  {
3983         struct clk *sclk;
3984         unsigned int sclk_rate, val;
3985 -       int irq, ret;
3986 +       int ret;
3987  
3988         regmap_st = syscon_node_to_regmap(node);
3989         if (IS_ERR(regmap_st)) {
3990 @@ -212,21 +231,12 @@ static int __init atmel_st_timer_init(struct device_node *node)
3991         regmap_read(regmap_st, AT91_ST_SR, &val);
3992  
3993         /* Get the interrupts property */
3994 -       irq  = irq_of_parse_and_map(node, 0);
3995 -       if (!irq) {
3996 +       atmel_st_irq  = irq_of_parse_and_map(node, 0);
3997 +       if (!atmel_st_irq) {
3998                 pr_err("Unable to get IRQ from DT\n");
3999                 return -EINVAL;
4000         }
4001  
4002 -       /* Make IRQs happen for the system timer */
4003 -       ret = request_irq(irq, at91rm9200_timer_interrupt,
4004 -                         IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
4005 -                         "at91_tick", regmap_st);
4006 -       if (ret) {
4007 -               pr_err("Unable to setup IRQ\n");
4008 -               return ret;
4009 -       }
4010 -
4011         sclk = of_clk_get(node, 0);
4012         if (IS_ERR(sclk)) {
4013                 pr_err("Unable to get slow clock\n");
4014 diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
4015 index a782ce87715c..19d265948526 100644
4016 --- a/drivers/connector/cn_proc.c
4017 +++ b/drivers/connector/cn_proc.c
4018 @@ -32,6 +32,7 @@
4019  #include <linux/pid_namespace.h>
4020  
4021  #include <linux/cn_proc.h>
4022 +#include <linux/locallock.h>
4023  
4024  /*
4025   * Size of a cn_msg followed by a proc_event structure.  Since the
4026 @@ -54,10 +55,11 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
4027  
4028  /* proc_event_counts is used as the sequence number of the netlink message */
4029  static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
4030 +static DEFINE_LOCAL_IRQ_LOCK(send_msg_lock);
4031  
4032  static inline void send_msg(struct cn_msg *msg)
4033  {
4034 -       preempt_disable();
4035 +       local_lock(send_msg_lock);
4036  
4037         msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
4038         ((struct proc_event *)msg->data)->cpu = smp_processor_id();
4039 @@ -70,7 +72,7 @@ static inline void send_msg(struct cn_msg *msg)
4040          */
4041         cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
4042  
4043 -       preempt_enable();
4044 +       local_unlock(send_msg_lock);
4045  }
4046  
4047  void proc_fork_connector(struct task_struct *task)
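
send_msg() trades preempt_disable() for a local lock: on mainline local_lock() roughly degenerates to the old preempt_disable(), while on RT it is a per-CPU sleeping lock, so cn_netlink_send() may block inside the section. A sketch with a hypothetical per-CPU sequence counter, using the locallock API this patch introduces:

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_LOCAL_IRQ_LOCK(demo_lock);        /* hypothetical */
static DEFINE_PER_CPU(u32, demo_seq);

static u32 demo_next_seq(void)
{
        u32 seq;

        local_lock(demo_lock);
        seq = __this_cpu_inc_return(demo_seq);  /* serialized per CPU */
        local_unlock(demo_lock);

        return seq;
}
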
4048 diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
4049 index adbd1de1cea5..1fac5074f2cf 100644
4050 --- a/drivers/cpufreq/Kconfig.x86
4051 +++ b/drivers/cpufreq/Kconfig.x86
4052 @@ -124,7 +124,7 @@ config X86_POWERNOW_K7_ACPI
4053  
4054  config X86_POWERNOW_K8
4055         tristate "AMD Opteron/Athlon64 PowerNow!"
4056 -       depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
4057 +       depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE
4058         help
4059           This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
4060           Support for K10 and newer processors is now in acpi-cpufreq.
4061 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
4062 index 0c400f852a76..97d5f6193751 100644
4063 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
4064 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
4065 @@ -1537,7 +1537,9 @@ execbuf_submit(struct i915_execbuffer_params *params,
4066         if (ret)
4067                 return ret;
4068  
4069 +#ifndef CONFIG_PREEMPT_RT_BASE
4070         trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
4071 +#endif
4072  
4073         i915_gem_execbuffer_move_to_active(vmas, params->request);
4074  
4075 diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
4076 index 1c237d02f30b..9e9b4404c0d7 100644
4077 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
4078 +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
4079 @@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4080         if (!mutex_is_locked(mutex))
4081                 return false;
4082  
4083 -#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
4084 +#if (defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)) && !defined(CONFIG_PREEMPT_RT_BASE)
4085         return mutex->owner == task;
4086  #else
4087         /* Since UP may be pre-empted, we cannot assume that we own the lock */
4088 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
4089 index 3fc286cd1157..252a1117b103 100644
4090 --- a/drivers/gpu/drm/i915/i915_irq.c
4091 +++ b/drivers/gpu/drm/i915/i915_irq.c
4092 @@ -812,6 +812,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
4093         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4094  
4095         /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
4096 +       preempt_disable_rt();
4097  
4098         /* Get optional system timestamp before query. */
4099         if (stime)
4100 @@ -863,6 +864,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
4101                 *etime = ktime_get();
4102  
4103         /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
4104 +       preempt_enable_rt();
4105  
4106         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4107  
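The preempt_disable_rt()/preempt_enable_rt() calls added above only do something on an RT kernel; on !RT the surrounding spin_lock_irqsave() already keeps the scanout sampling atomic. A plausible shape of the helpers, stated as an assumption rather than a quote of the -rt headers:

#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_rt()   preempt_disable()       /* RT: spinlocks no longer disable preemption */
# define preempt_enable_rt()    preempt_enable()
#else
# define preempt_disable_rt()   barrier()               /* !RT: irqs are already off here */
# define preempt_enable_rt()    barrier()
#endif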
4108 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
4109 index b9be8a6141d8..3162feddabe8 100644
4110 --- a/drivers/gpu/drm/i915/intel_display.c
4111 +++ b/drivers/gpu/drm/i915/intel_display.c
4112 @@ -12141,7 +12141,7 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
4113         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4114         struct intel_flip_work *work;
4115  
4116 -       WARN_ON(!in_interrupt());
4117 +       WARN_ON_NONRT(!in_interrupt());
4118  
4119         if (crtc == NULL)
4120                 return;
4121 diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
4122 index dbed12c484c9..5c540b78e8b5 100644
4123 --- a/drivers/gpu/drm/i915/intel_sprite.c
4124 +++ b/drivers/gpu/drm/i915/intel_sprite.c
4125 @@ -35,6 +35,7 @@
4126  #include <drm/drm_rect.h>
4127  #include <drm/drm_atomic.h>
4128  #include <drm/drm_plane_helper.h>
4129 +#include <linux/locallock.h>
4130  #include "intel_drv.h"
4131  #include "intel_frontbuffer.h"
4132  #include <drm/i915_drm.h>
4133 @@ -65,6 +66,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
4134                             1000 * adjusted_mode->crtc_htotal);
4135  }
4136  
4137 +static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock);
4138 +
4139  /**
4140   * intel_pipe_update_start() - start update of a set of display registers
4141   * @crtc: the crtc of which the registers are going to be updated
4142 @@ -95,7 +98,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
4143         min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
4144         max = vblank_start - 1;
4145  
4146 -       local_irq_disable();
4147 +       local_lock_irq(pipe_update_lock);
4148  
4149         if (min <= 0 || max <= 0)
4150                 return;
4151 @@ -125,11 +128,11 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
4152                         break;
4153                 }
4154  
4155 -               local_irq_enable();
4156 +               local_unlock_irq(pipe_update_lock);
4157  
4158                 timeout = schedule_timeout(timeout);
4159  
4160 -               local_irq_disable();
4161 +               local_lock_irq(pipe_update_lock);
4162         }
4163  
4164         finish_wait(wq, &wait);
4165 @@ -181,7 +184,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
4166                 crtc->base.state->event = NULL;
4167         }
4168  
4169 -       local_irq_enable();
4170 +       local_unlock_irq(pipe_update_lock);
4171  
4172         if (crtc->debug.start_vbl_count &&
4173             crtc->debug.start_vbl_count != end_vbl_count) {
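intel_pipe_update_start()/intel_pipe_update_end() show the irq-disabling flavour of the same idea: DEFINE_LOCAL_IRQ_LOCK() names the critical section and local_lock_irq()/local_unlock_irq() stand in for local_irq_disable()/local_irq_enable(). A condensed sketch of the wait loop (illustrative helpers, not the driver's code), to show that the lock, exactly like the irq-off region it replaces, must be dropped across scheduling:

#include <linux/locallock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

static DEFINE_LOCAL_IRQ_LOCK(update_lock);

static void wait_then_update(wait_queue_head_t *wq, bool (*ready)(void),
                             void (*write_regs)(void))
{
        DEFINE_WAIT(wait);

        local_lock_irq(update_lock);            /* irqs off on !RT, per-CPU rt-lock on RT */
        for (;;) {
                prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                if (ready())
                        break;
                local_unlock_irq(update_lock);  /* never schedule with the section held */
                schedule_timeout(msecs_to_jiffies(1));
                local_lock_irq(update_lock);
        }
        finish_wait(wq, &wait);

        write_regs();                           /* the actual register update stays protected */
        local_unlock_irq(update_lock);
}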
4174 diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
4175 index 192b2d3a79cb..d5372a207326 100644
4176 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
4177 +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
4178 @@ -23,7 +23,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4179         if (!mutex_is_locked(mutex))
4180                 return false;
4181  
4182 -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4183 +#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
4184         return mutex->owner == task;
4185  #else
4186         /* Since UP may be pre-empted, we cannot assume that we own the lock */
4187 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
4188 index cdb8cb568c15..b6d7fd964cbc 100644
4189 --- a/drivers/gpu/drm/radeon/radeon_display.c
4190 +++ b/drivers/gpu/drm/radeon/radeon_display.c
4191 @@ -1845,6 +1845,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
4192         struct radeon_device *rdev = dev->dev_private;
4193  
4194         /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
4195 +       preempt_disable_rt();
4196  
4197         /* Get optional system timestamp before query. */
4198         if (stime)
4199 @@ -1937,6 +1938,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
4200                 *etime = ktime_get();
4201  
4202         /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
4203 +       preempt_enable_rt();
4204  
4205         /* Decode into vertical and horizontal scanout position. */
4206         *vpos = position & 0x1fff;
4207 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
4208 index 0276d2ef06ee..8868045eabde 100644
4209 --- a/drivers/hv/vmbus_drv.c
4210 +++ b/drivers/hv/vmbus_drv.c
4211 @@ -761,6 +761,8 @@ static void vmbus_isr(void)
4212         void *page_addr;
4213         struct hv_message *msg;
4214         union hv_synic_event_flags *event;
4215 +       struct pt_regs *regs = get_irq_regs();
4216 +       u64 ip = regs ? instruction_pointer(regs) : 0;
4217         bool handled = false;
4218  
4219         page_addr = hv_context.synic_event_page[cpu];
4220 @@ -808,7 +810,7 @@ static void vmbus_isr(void)
4221                         tasklet_schedule(hv_context.msg_dpc[cpu]);
4222         }
4223  
4224 -       add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
4225 +       add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip);
4226  }
4227  
4228  
4229 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
4230 index 36f76e28a0bf..394f142f90c7 100644
4231 --- a/drivers/ide/alim15x3.c
4232 +++ b/drivers/ide/alim15x3.c
4233 @@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)
4234  
4235         isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
4236  
4237 -       local_irq_save(flags);
4238 +       local_irq_save_nort(flags);
4239  
4240         if (m5229_revision < 0xC2) {
4241                 /*
4242 @@ -325,7 +325,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)
4243         }
4244         pci_dev_put(north);
4245         pci_dev_put(isa_dev);
4246 -       local_irq_restore(flags);
4247 +       local_irq_restore_nort(flags);
4248         return 0;
4249  }
4250  
4251 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
4252 index 0ceae5cbd89a..c212e85d7f3e 100644
4253 --- a/drivers/ide/hpt366.c
4254 +++ b/drivers/ide/hpt366.c
4255 @@ -1236,7 +1236,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
4256  
4257         dma_old = inb(base + 2);
4258  
4259 -       local_irq_save(flags);
4260 +       local_irq_save_nort(flags);
4261  
4262         dma_new = dma_old;
4263         pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
4264 @@ -1247,7 +1247,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
4265         if (dma_new != dma_old)
4266                 outb(dma_new, base + 2);
4267  
4268 -       local_irq_restore(flags);
4269 +       local_irq_restore_nort(flags);
4270  
4271         printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx\n",
4272                          hwif->name, base, base + 7);
4273 diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
4274 index 19763977568c..4169433faab5 100644
4275 --- a/drivers/ide/ide-io-std.c
4276 +++ b/drivers/ide/ide-io-std.c
4277 @@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
4278                 unsigned long uninitialized_var(flags);
4279  
4280                 if ((io_32bit & 2) && !mmio) {
4281 -                       local_irq_save(flags);
4282 +                       local_irq_save_nort(flags);
4283                         ata_vlb_sync(io_ports->nsect_addr);
4284                 }
4285  
4286 @@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
4287                         insl(data_addr, buf, words);
4288  
4289                 if ((io_32bit & 2) && !mmio)
4290 -                       local_irq_restore(flags);
4291 +                       local_irq_restore_nort(flags);
4292  
4293                 if (((len + 1) & 3) < 2)
4294                         return;
4295 @@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
4296                 unsigned long uninitialized_var(flags);
4297  
4298                 if ((io_32bit & 2) && !mmio) {
4299 -                       local_irq_save(flags);
4300 +                       local_irq_save_nort(flags);
4301                         ata_vlb_sync(io_ports->nsect_addr);
4302                 }
4303  
4304 @@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
4305                         outsl(data_addr, buf, words);
4306  
4307                 if ((io_32bit & 2) && !mmio)
4308 -                       local_irq_restore(flags);
4309 +                       local_irq_restore_nort(flags);
4310  
4311                 if (((len + 1) & 3) < 2)
4312                         return;
4313 diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
4314 index 669ea1e45795..e12e43e62245 100644
4315 --- a/drivers/ide/ide-io.c
4316 +++ b/drivers/ide/ide-io.c
4317 @@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long data)
4318                 /* disable_irq_nosync ?? */
4319                 disable_irq(hwif->irq);
4320                 /* local CPU only, as if we were handling an interrupt */
4321 -               local_irq_disable();
4322 +               local_irq_disable_nort();
4323                 if (hwif->polling) {
4324                         startstop = handler(drive);
4325                 } else if (drive_is_ready(drive)) {
4326 diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
4327 index 376f2dc410c5..f014dd1b73dc 100644
4328 --- a/drivers/ide/ide-iops.c
4329 +++ b/drivers/ide/ide-iops.c
4330 @@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
4331                                 if ((stat & ATA_BUSY) == 0)
4332                                         break;
4333  
4334 -                               local_irq_restore(flags);
4335 +                               local_irq_restore_nort(flags);
4336                                 *rstat = stat;
4337                                 return -EBUSY;
4338                         }
4339                 }
4340 -               local_irq_restore(flags);
4341 +               local_irq_restore_nort(flags);
4342         }
4343         /*
4344          * Allow status to settle, then read it again.
4345 diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
4346 index 0b63facd1d87..4ceba37afc0c 100644
4347 --- a/drivers/ide/ide-probe.c
4348 +++ b/drivers/ide/ide-probe.c
4349 @@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
4350         int bswap = 1;
4351  
4352         /* local CPU only; some systems need this */
4353 -       local_irq_save(flags);
4354 +       local_irq_save_nort(flags);
4355         /* read 512 bytes of id info */
4356         hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
4357 -       local_irq_restore(flags);
4358 +       local_irq_restore_nort(flags);
4359  
4360         drive->dev_flags |= IDE_DFLAG_ID_READ;
4361  #ifdef DEBUG
4362 diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
4363 index a716693417a3..be0568c722d6 100644
4364 --- a/drivers/ide/ide-taskfile.c
4365 +++ b/drivers/ide/ide-taskfile.c
4366 @@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
4367  
4368                 page_is_high = PageHighMem(page);
4369                 if (page_is_high)
4370 -                       local_irq_save(flags);
4371 +                       local_irq_save_nort(flags);
4372  
4373                 buf = kmap_atomic(page) + offset;
4374  
4375 @@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
4376                 kunmap_atomic(buf);
4377  
4378                 if (page_is_high)
4379 -                       local_irq_restore(flags);
4380 +                       local_irq_restore_nort(flags);
4381  
4382                 len -= nr_bytes;
4383         }
4384 @@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
4385         }
4386  
4387         if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
4388 -               local_irq_disable();
4389 +               local_irq_disable_nort();
4390  
4391         ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
4392  
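All of the IDE hunks apply the same substitution: local_irq_save()/local_irq_restore() become their _nort ("not on RT") cousins, which still disable interrupts on a stock kernel but turn into near no-ops on RT, where this code only runs from threaded context and a long PIO loop with hard interrupts off would wreck latencies. A plausible shape of the helpers, offered as an assumption rather than a copy of the -rt irqflags header:

#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_disable_nort()       barrier()
# define local_irq_save_nort(flags)     local_save_flags(flags)         /* record, don't disable */
# define local_irq_restore_nort(flags)  ((void)(flags))
#else
# define local_irq_disable_nort()       local_irq_disable()
# define local_irq_save_nort(flags)     local_irq_save(flags)
# define local_irq_restore_nort(flags)  local_irq_restore(flags)
#endif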
4393 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
4394 index fddff403d5d2..cca1bb4fbfe3 100644
4395 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
4396 +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
4397 @@ -902,7 +902,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
4398  
4399         ipoib_dbg_mcast(priv, "restarting multicast task\n");
4400  
4401 -       local_irq_save(flags);
4402 +       local_irq_save_nort(flags);
4403         netif_addr_lock(dev);
4404         spin_lock(&priv->lock);
4405  
4406 @@ -984,7 +984,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
4407  
4408         spin_unlock(&priv->lock);
4409         netif_addr_unlock(dev);
4410 -       local_irq_restore(flags);
4411 +       local_irq_restore_nort(flags);
4412  
4413         /*
4414          * make sure the in-flight joins have finished before we attempt
4415 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
4416 index 4a2a9e370be7..e970d9afd179 100644
4417 --- a/drivers/input/gameport/gameport.c
4418 +++ b/drivers/input/gameport/gameport.c
4419 @@ -91,13 +91,13 @@ static int gameport_measure_speed(struct gameport *gameport)
4420         tx = ~0;
4421  
4422         for (i = 0; i < 50; i++) {
4423 -               local_irq_save(flags);
4424 +               local_irq_save_nort(flags);
4425                 t1 = ktime_get_ns();
4426                 for (t = 0; t < 50; t++)
4427                         gameport_read(gameport);
4428                 t2 = ktime_get_ns();
4429                 t3 = ktime_get_ns();
4430 -               local_irq_restore(flags);
4431 +               local_irq_restore_nort(flags);
4432                 udelay(i * 10);
4433                 t = (t2 - t1) - (t3 - t2);
4434                 if (t < tx)
4435 @@ -124,12 +124,12 @@ static int old_gameport_measure_speed(struct gameport *gameport)
4436         tx = 1 << 30;
4437  
4438         for(i = 0; i < 50; i++) {
4439 -               local_irq_save(flags);
4440 +               local_irq_save_nort(flags);
4441                 GET_TIME(t1);
4442                 for (t = 0; t < 50; t++) gameport_read(gameport);
4443                 GET_TIME(t2);
4444                 GET_TIME(t3);
4445 -               local_irq_restore(flags);
4446 +               local_irq_restore_nort(flags);
4447                 udelay(i * 10);
4448                 if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
4449         }
4450 @@ -148,11 +148,11 @@ static int old_gameport_measure_speed(struct gameport *gameport)
4451         tx = 1 << 30;
4452  
4453         for(i = 0; i < 50; i++) {
4454 -               local_irq_save(flags);
4455 +               local_irq_save_nort(flags);
4456                 t1 = rdtsc();
4457                 for (t = 0; t < 50; t++) gameport_read(gameport);
4458                 t2 = rdtsc();
4459 -               local_irq_restore(flags);
4460 +               local_irq_restore_nort(flags);
4461                 udelay(i * 10);
4462                 if (t2 - t1 < tx) tx = t2 - t1;
4463         }
4464 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
4465 index 11a13b5be73a..baaed0ac274b 100644
4466 --- a/drivers/iommu/amd_iommu.c
4467 +++ b/drivers/iommu/amd_iommu.c
4468 @@ -1923,10 +1923,10 @@ static int __attach_device(struct iommu_dev_data *dev_data,
4469         int ret;
4470  
4471         /*
4472 -        * Must be called with IRQs disabled. Warn here to detect early
4473 -        * when its not.
4474 +        * Must be called with IRQs disabled on a non-RT kernel. Warn here to
4475 +        * detect early when it's not.
4476          */
4477 -       WARN_ON(!irqs_disabled());
4478 +       WARN_ON_NONRT(!irqs_disabled());
4479  
4480         /* lock domain */
4481         spin_lock(&domain->lock);
4482 @@ -2094,10 +2094,10 @@ static void __detach_device(struct iommu_dev_data *dev_data)
4483         struct protection_domain *domain;
4484  
4485         /*
4486 -        * Must be called with IRQs disabled. Warn here to detect early
4487 -        * when its not.
4488 +        * Must be called with IRQs disabled on a non-RT kernel. Warn here to
4489 +        * detect early when it's not.
4490          */
4491 -       WARN_ON(!irqs_disabled());
4492 +       WARN_ON_NONRT(!irqs_disabled());
4493  
4494         if (WARN_ON(!dev_data->domain))
4495                 return;
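WARN_ON_NONRT() keeps the "IRQs must be disabled" sanity check for stock kernels while silencing it on RT, where these callers legitimately run with interrupts enabled. A plausible definition, assuming the usual -rt convention of keying the no-op on CONFIG_PREEMPT_RT_BASE (the same idea applies to the BUG_ON_NONRT() used in the dm-rq hunk below):

#ifdef CONFIG_PREEMPT_RT_BASE
# define WARN_ON_NONRT(condition)       do { } while (0)        /* RT: the condition is expected */
# define BUG_ON_NONRT(condition)        do { } while (0)
#else
# define WARN_ON_NONRT(condition)       WARN_ON(condition)
# define BUG_ON_NONRT(condition)        BUG_ON(condition)
#endif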
4496 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
4497 index d82637ab09fd..ebe41d30c093 100644
4498 --- a/drivers/iommu/intel-iommu.c
4499 +++ b/drivers/iommu/intel-iommu.c
4500 @@ -479,7 +479,7 @@ struct deferred_flush_data {
4501         struct deferred_flush_table *tables;
4502  };
4503  
4504 -DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
4505 +static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
4506  
4507  /* bitmap for indexing intel_iommus */
4508  static int g_num_of_iommus;
4509 @@ -3715,10 +3715,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
4510         struct intel_iommu *iommu;
4511         struct deferred_flush_entry *entry;
4512         struct deferred_flush_data *flush_data;
4513 -       unsigned int cpuid;
4514  
4515 -       cpuid = get_cpu();
4516 -       flush_data = per_cpu_ptr(&deferred_flush, cpuid);
4517 +       flush_data = raw_cpu_ptr(&deferred_flush);
4518  
4519         /* Flush all CPUs' entries to avoid deferring too much.  If
4520          * this becomes a bottleneck, can just flush us, and rely on
4521 @@ -3751,8 +3749,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
4522         }
4523         flush_data->size++;
4524         spin_unlock_irqrestore(&flush_data->lock, flags);
4525 -
4526 -       put_cpu();
4527  }
4528  
4529  static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
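The intel-iommu change drops get_cpu()/put_cpu() in favour of raw_cpu_ptr() because the per-CPU flush data already has its own spinlock; pinning the task with preemption disabled adds nothing for correctness, and on RT it would even be wrong, since the spinlock taken a few lines later is a sleeping lock there. A generic sketch of that conversion, with illustrative names:

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct flush_bucket {
        spinlock_t lock;                /* this lock, not preempt_disable(), is the protection */
        unsigned int size;
};

/* each bucket's lock is spin_lock_init()ed at driver init time (not shown) */
static DEFINE_PER_CPU(struct flush_bucket, flush_buckets);

static void bucket_add_entry(void)
{
        struct flush_bucket *b = raw_cpu_ptr(&flush_buckets);   /* may migrate afterwards, harmless */
        unsigned long flags;

        spin_lock_irqsave(&b->lock, flags);
        b->size++;
        spin_unlock_irqrestore(&b->lock, flags);
}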
4530 diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
4531 index e23001bfcfee..359d5d169ec0 100644
4532 --- a/drivers/iommu/iova.c
4533 +++ b/drivers/iommu/iova.c
4534 @@ -22,6 +22,7 @@
4535  #include <linux/slab.h>
4536  #include <linux/smp.h>
4537  #include <linux/bitops.h>
4538 +#include <linux/cpu.h>
4539  
4540  static bool iova_rcache_insert(struct iova_domain *iovad,
4541                                unsigned long pfn,
4542 @@ -420,10 +421,8 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
4543  
4544                 /* Try replenishing IOVAs by flushing rcache. */
4545                 flushed_rcache = true;
4546 -               preempt_disable();
4547                 for_each_online_cpu(cpu)
4548                         free_cpu_cached_iovas(cpu, iovad);
4549 -               preempt_enable();
4550                 goto retry;
4551         }
4552  
4553 @@ -751,7 +750,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
4554         bool can_insert = false;
4555         unsigned long flags;
4556  
4557 -       cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
4558 +       cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
4559         spin_lock_irqsave(&cpu_rcache->lock, flags);
4560  
4561         if (!iova_magazine_full(cpu_rcache->loaded)) {
4562 @@ -781,7 +780,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
4563                 iova_magazine_push(cpu_rcache->loaded, iova_pfn);
4564  
4565         spin_unlock_irqrestore(&cpu_rcache->lock, flags);
4566 -       put_cpu_ptr(rcache->cpu_rcaches);
4567  
4568         if (mag_to_free) {
4569                 iova_magazine_free_pfns(mag_to_free, iovad);
4570 @@ -815,7 +813,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
4571         bool has_pfn = false;
4572         unsigned long flags;
4573  
4574 -       cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
4575 +       cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
4576         spin_lock_irqsave(&cpu_rcache->lock, flags);
4577  
4578         if (!iova_magazine_empty(cpu_rcache->loaded)) {
4579 @@ -837,7 +835,6 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
4580                 iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
4581  
4582         spin_unlock_irqrestore(&cpu_rcache->lock, flags);
4583 -       put_cpu_ptr(rcache->cpu_rcaches);
4584  
4585         return iova_pfn;
4586  }
4587 diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
4588 index 3f9ddb9fafa7..09da5b6b44a1 100644
4589 --- a/drivers/leds/trigger/Kconfig
4590 +++ b/drivers/leds/trigger/Kconfig
4591 @@ -69,7 +69,7 @@ config LEDS_TRIGGER_BACKLIGHT
4592  
4593  config LEDS_TRIGGER_CPU
4594         bool "LED CPU Trigger"
4595 -       depends on LEDS_TRIGGERS
4596 +       depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE
4597         help
4598           This allows LEDs to be controlled by active CPUs. This shows
4599           the active CPUs across an array of LEDs so you can see which
4600 diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
4601 index 4d200883c505..98b64ed5cb81 100644
4602 --- a/drivers/md/bcache/Kconfig
4603 +++ b/drivers/md/bcache/Kconfig
4604 @@ -1,6 +1,7 @@
4605  
4606  config BCACHE
4607         tristate "Block device as cache"
4608 +       depends on !PREEMPT_RT_FULL
4609         ---help---
4610         Allows a block device to be used as cache for other devices; uses
4611         a btree for indexing and the layout is optimized for SSDs.
4612 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
4613 index 2c965424d383..2c8877f50626 100644
4614 --- a/drivers/md/dm-rq.c
4615 +++ b/drivers/md/dm-rq.c
4616 @@ -842,7 +842,7 @@ static void dm_old_request_fn(struct request_queue *q)
4617                 /* Establish tio->ti before queuing work (map_tio_request) */
4618                 tio->ti = ti;
4619                 kthread_queue_work(&md->kworker, &tio->work);
4620 -               BUG_ON(!irqs_disabled());
4621 +               BUG_ON_NONRT(!irqs_disabled());
4622         }
4623  }
4624  
4625 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
4626 index cce6057b9aca..fa2c4de32a64 100644
4627 --- a/drivers/md/raid5.c
4628 +++ b/drivers/md/raid5.c
4629 @@ -1928,8 +1928,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
4630         struct raid5_percpu *percpu;
4631         unsigned long cpu;
4632  
4633 -       cpu = get_cpu();
4634 +       cpu = get_cpu_light();
4635         percpu = per_cpu_ptr(conf->percpu, cpu);
4636 +       spin_lock(&percpu->lock);
4637         if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
4638                 ops_run_biofill(sh);
4639                 overlap_clear++;
4640 @@ -1985,7 +1986,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
4641                         if (test_and_clear_bit(R5_Overlap, &dev->flags))
4642                                 wake_up(&sh->raid_conf->wait_for_overlap);
4643                 }
4644 -       put_cpu();
4645 +       spin_unlock(&percpu->lock);
4646 +       put_cpu_light();
4647  }
4648  
4649  static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
4650 @@ -6391,6 +6393,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
4651                        __func__, cpu);
4652                 return -ENOMEM;
4653         }
4654 +       spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
4655         return 0;
4656  }
4657  
4658 @@ -6401,7 +6404,6 @@ static int raid5_alloc_percpu(struct r5conf *conf)
4659         conf->percpu = alloc_percpu(struct raid5_percpu);
4660         if (!conf->percpu)
4661                 return -ENOMEM;
4662 -
4663         err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
4664         if (!err) {
4665                 conf->scribble_disks = max(conf->raid_disks,
4666 diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
4667 index 57ec49f0839e..0739604990b7 100644
4668 --- a/drivers/md/raid5.h
4669 +++ b/drivers/md/raid5.h
4670 @@ -504,6 +504,7 @@ struct r5conf {
4671         int                     recovery_disabled;
4672         /* per cpu variables */
4673         struct raid5_percpu {
4674 +               spinlock_t      lock;           /* Protection for -RT */
4675                 struct page     *spare_page; /* Used when checking P/Q in raid6 */
4676                 struct flex_array *scribble;   /* space for constructing buffer
4677                                               * lists and performing address
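raid_run_ops() gets the complementary treatment: get_cpu() becomes get_cpu_light(), which (in the -rt tree, by assumption here) only disables migration, and the mutual exclusion that disabled preemption used to provide implicitly now comes from the per-CPU spinlock added to struct raid5_percpu and initialised in raid456_cpu_up_prepare(). The pattern in isolation, with illustrative names:

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct pcpu_scratch {
        spinlock_t lock;                /* added for -RT; spin_lock_init()ed at hotplug time */
        void *scribble;
};

static DEFINE_PER_CPU(struct pcpu_scratch, scratch_pcpu);

static void run_ops(void (*ops)(void *))
{
        int cpu = get_cpu_light();      /* migration off; preemption stays enabled on RT */
        struct pcpu_scratch *p = per_cpu_ptr(&scratch_pcpu, cpu);

        spin_lock(&p->lock);            /* explicit protection replaces implicit preempt-off */
        ops(p->scribble);
        spin_unlock(&p->lock);
        put_cpu_light();
}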
4678 diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
4679 index 64971baf11fa..215e91e36198 100644
4680 --- a/drivers/misc/Kconfig
4681 +++ b/drivers/misc/Kconfig
4682 @@ -54,6 +54,7 @@ config AD525X_DPOT_SPI
4683  config ATMEL_TCLIB
4684         bool "Atmel AT32/AT91 Timer/Counter Library"
4685         depends on (AVR32 || ARCH_AT91)
4686 +       default y if PREEMPT_RT_FULL
4687         help
4688           Select this if you want a library to allocate the Timer/Counter
4689           blocks found on many Atmel processors.  This facilitates using
4690 @@ -69,8 +70,7 @@ config ATMEL_TCB_CLKSRC
4691           are combined to make a single 32-bit timer.
4692  
4693           When GENERIC_CLOCKEVENTS is defined, the third timer channel
4694 -         may be used as a clock event device supporting oneshot mode
4695 -         (delays of up to two seconds) based on the 32 KiHz clock.
4696 +         may be used as a clock event device supporting oneshot mode.
4697  
4698  config ATMEL_TCB_CLKSRC_BLOCK
4699         int
4700 @@ -84,6 +84,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
4701           TC can be used for other purposes, such as PWM generation and
4702           interval timing.
4703  
4704 +config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
4705 +       bool "TC Block use 32 KiHz clock"
4706 +       depends on ATMEL_TCB_CLKSRC
4707 +       default y if !PREEMPT_RT_FULL
4708 +       help
4709 +         Select this to use 32 KiHz base clock rate as TC block clock
4710 +         source for clock events.
4711 +
4712 +
4713  config DUMMY_IRQ
4714         tristate "Dummy IRQ handler"
4715         default n
4716 diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
4717 index df990bb8c873..1a162709a85e 100644
4718 --- a/drivers/mmc/host/mmci.c
4719 +++ b/drivers/mmc/host/mmci.c
4720 @@ -1147,15 +1147,12 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
4721         struct sg_mapping_iter *sg_miter = &host->sg_miter;
4722         struct variant_data *variant = host->variant;
4723         void __iomem *base = host->base;
4724 -       unsigned long flags;
4725         u32 status;
4726  
4727         status = readl(base + MMCISTATUS);
4728  
4729         dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
4730  
4731 -       local_irq_save(flags);
4732 -
4733         do {
4734                 unsigned int remain, len;
4735                 char *buffer;
4736 @@ -1195,8 +1192,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
4737  
4738         sg_miter_stop(sg_miter);
4739  
4740 -       local_irq_restore(flags);
4741 -
4742         /*
4743          * If we have less than the fifo 'half-full' threshold to transfer,
4744          * trigger a PIO interrupt as soon as any data is available.
4745 diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
4746 index 9133e7926da5..63afb921ed40 100644
4747 --- a/drivers/net/ethernet/3com/3c59x.c
4748 +++ b/drivers/net/ethernet/3com/3c59x.c
4749 @@ -842,9 +842,9 @@ static void poll_vortex(struct net_device *dev)
4750  {
4751         struct vortex_private *vp = netdev_priv(dev);
4752         unsigned long flags;
4753 -       local_irq_save(flags);
4754 +       local_irq_save_nort(flags);
4755         (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
4756 -       local_irq_restore(flags);
4757 +       local_irq_restore_nort(flags);
4758  }
4759  #endif
4760  
4761 @@ -1910,12 +1910,12 @@ static void vortex_tx_timeout(struct net_device *dev)
4762                          * Block interrupts because vortex_interrupt does a bare spin_lock()
4763                          */
4764                         unsigned long flags;
4765 -                       local_irq_save(flags);
4766 +                       local_irq_save_nort(flags);
4767                         if (vp->full_bus_master_tx)
4768                                 boomerang_interrupt(dev->irq, dev);
4769                         else
4770                                 vortex_interrupt(dev->irq, dev);
4771 -                       local_irq_restore(flags);
4772 +                       local_irq_restore_nort(flags);
4773                 }
4774         }
4775  
4776 diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
4777 index da4c2d8a4173..1420dfb56bac 100644
4778 --- a/drivers/net/ethernet/realtek/8139too.c
4779 +++ b/drivers/net/ethernet/realtek/8139too.c
4780 @@ -2233,7 +2233,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
4781         struct rtl8139_private *tp = netdev_priv(dev);
4782         const int irq = tp->pci_dev->irq;
4783  
4784 -       disable_irq(irq);
4785 +       disable_irq_nosync(irq);
4786         rtl8139_interrupt(irq, dev);
4787         enable_irq(irq);
4788  }
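The tiny 8139too change matters because disable_irq() waits for any running handler to finish; with the forced interrupt threading RT uses, that wait can sleep, and the netpoll controller may be invoked from atomic context. disable_irq_nosync() only masks the line. A sketch of the resulting netpoll shape (my_interrupt() is a stand-in for the driver's handler):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t my_interrupt(int irq, void *dev_id);

static void my_poll_controller(struct net_device *dev)
{
        const int irq = dev->irq;

        disable_irq_nosync(irq);        /* mask only; don't wait for a (threaded) handler */
        my_interrupt(irq, dev);         /* poll the hardware ourselves */
        enable_irq(irq);
}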
4789 diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
4790 index bca6935a94db..d7a35ee34d03 100644
4791 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
4792 +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
4793 @@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
4794                         while (!ctx->done.done && msecs--)
4795                                 udelay(1000);
4796                 } else {
4797 -                       wait_event_interruptible(ctx->done.wait,
4798 +                       swait_event_interruptible(ctx->done.wait,
4799                                                  ctx->done.done);
4800                 }
4801                 break;
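The series replaces the waitqueue inside struct completion with a simple waitqueue, so the handful of drivers that reach into completion internals (here ctx->done.wait, and again in the USB gadget hunks below) have to switch from the wait_event_* helpers to their swait_* counterparts; ordinary wait_for_completion() users are untouched. Condensed, the converted wait looks like this (a sketch mirroring the hunks, not new driver code):

#include <linux/completion.h>
#include <linux/swait.h>

static int wait_ctx_done(struct completion *done)
{
        /* done->wait is a simple waitqueue once the series is applied */
        return swait_event_interruptible(done->wait, done->done);
}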
4802 diff --git a/drivers/pci/access.c b/drivers/pci/access.c
4803 index d11cdbb8fba3..223bbb9acb03 100644
4804 --- a/drivers/pci/access.c
4805 +++ b/drivers/pci/access.c
4806 @@ -672,7 +672,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
4807         WARN_ON(!dev->block_cfg_access);
4808  
4809         dev->block_cfg_access = 0;
4810 -       wake_up_all(&pci_cfg_wait);
4811 +       wake_up_all_locked(&pci_cfg_wait);
4812         raw_spin_unlock_irqrestore(&pci_lock, flags);
4813  }
4814  EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
4815 diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
4816 index 775c88303017..f8e9e1c2b2f6 100644
4817 --- a/drivers/pinctrl/qcom/pinctrl-msm.c
4818 +++ b/drivers/pinctrl/qcom/pinctrl-msm.c
4819 @@ -61,7 +61,7 @@ struct msm_pinctrl {
4820         struct notifier_block restart_nb;
4821         int irq;
4822  
4823 -       spinlock_t lock;
4824 +       raw_spinlock_t lock;
4825  
4826         DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
4827         DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
4828 @@ -153,14 +153,14 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
4829         if (WARN_ON(i == g->nfuncs))
4830                 return -EINVAL;
4831  
4832 -       spin_lock_irqsave(&pctrl->lock, flags);
4833 +       raw_spin_lock_irqsave(&pctrl->lock, flags);
4834  
4835         val = readl(pctrl->regs + g->ctl_reg);
4836         val &= ~mask;
4837         val |= i << g->mux_bit;
4838         writel(val, pctrl->regs + g->ctl_reg);
4839  
4840 -       spin_unlock_irqrestore(&pctrl->lock, flags);
4841 +       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4842  
4843         return 0;
4844  }
4845 @@ -323,14 +323,14 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
4846                         break;
4847                 case PIN_CONFIG_OUTPUT:
4848                         /* set output value */
4849 -                       spin_lock_irqsave(&pctrl->lock, flags);
4850 +                       raw_spin_lock_irqsave(&pctrl->lock, flags);
4851                         val = readl(pctrl->regs + g->io_reg);
4852                         if (arg)
4853                                 val |= BIT(g->out_bit);
4854                         else
4855                                 val &= ~BIT(g->out_bit);
4856                         writel(val, pctrl->regs + g->io_reg);
4857 -                       spin_unlock_irqrestore(&pctrl->lock, flags);
4858 +                       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4859  
4860                         /* enable output */
4861                         arg = 1;
4862 @@ -351,12 +351,12 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
4863                         return -EINVAL;
4864                 }
4865  
4866 -               spin_lock_irqsave(&pctrl->lock, flags);
4867 +               raw_spin_lock_irqsave(&pctrl->lock, flags);
4868                 val = readl(pctrl->regs + g->ctl_reg);
4869                 val &= ~(mask << bit);
4870                 val |= arg << bit;
4871                 writel(val, pctrl->regs + g->ctl_reg);
4872 -               spin_unlock_irqrestore(&pctrl->lock, flags);
4873 +               raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4874         }
4875  
4876         return 0;
4877 @@ -384,13 +384,13 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
4878  
4879         g = &pctrl->soc->groups[offset];
4880  
4881 -       spin_lock_irqsave(&pctrl->lock, flags);
4882 +       raw_spin_lock_irqsave(&pctrl->lock, flags);
4883  
4884         val = readl(pctrl->regs + g->ctl_reg);
4885         val &= ~BIT(g->oe_bit);
4886         writel(val, pctrl->regs + g->ctl_reg);
4887  
4888 -       spin_unlock_irqrestore(&pctrl->lock, flags);
4889 +       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4890  
4891         return 0;
4892  }
4893 @@ -404,7 +404,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
4894  
4895         g = &pctrl->soc->groups[offset];
4896  
4897 -       spin_lock_irqsave(&pctrl->lock, flags);
4898 +       raw_spin_lock_irqsave(&pctrl->lock, flags);
4899  
4900         val = readl(pctrl->regs + g->io_reg);
4901         if (value)
4902 @@ -417,7 +417,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
4903         val |= BIT(g->oe_bit);
4904         writel(val, pctrl->regs + g->ctl_reg);
4905  
4906 -       spin_unlock_irqrestore(&pctrl->lock, flags);
4907 +       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4908  
4909         return 0;
4910  }
4911 @@ -443,7 +443,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
4912  
4913         g = &pctrl->soc->groups[offset];
4914  
4915 -       spin_lock_irqsave(&pctrl->lock, flags);
4916 +       raw_spin_lock_irqsave(&pctrl->lock, flags);
4917  
4918         val = readl(pctrl->regs + g->io_reg);
4919         if (value)
4920 @@ -452,7 +452,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
4921                 val &= ~BIT(g->out_bit);
4922         writel(val, pctrl->regs + g->io_reg);
4923  
4924 -       spin_unlock_irqrestore(&pctrl->lock, flags);
4925 +       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4926  }
4927  
4928  #ifdef CONFIG_DEBUG_FS
4929 @@ -571,7 +571,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
4930  
4931         g = &pctrl->soc->groups[d->hwirq];
4932  
4933 -       spin_lock_irqsave(&pctrl->lock, flags);
4934 +       raw_spin_lock_irqsave(&pctrl->lock, flags);
4935  
4936         val = readl(pctrl->regs + g->intr_cfg_reg);
4937         val &= ~BIT(g->intr_enable_bit);
4938 @@ -579,7 +579,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
4939  
4940         clear_bit(d->hwirq, pctrl->enabled_irqs);
4941  
4942 -       spin_unlock_irqrestore(&pctrl->lock, flags);
4943 +       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4944  }
4945  
4946  static void msm_gpio_irq_unmask(struct irq_data *d)
4947 @@ -592,7 +592,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
4948  
4949         g = &pctrl->soc->groups[d->hwirq];
4950  
4951 -       spin_lock_irqsave(&pctrl->lock, flags);
4952 +       raw_spin_lock_irqsave(&pctrl->lock, flags);
4953  
4954         val = readl(pctrl->regs + g->intr_status_reg);
4955         val &= ~BIT(g->intr_status_bit);
4956 @@ -604,7 +604,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
4957  
4958         set_bit(d->hwirq, pctrl->enabled_irqs);
4959  
4960 -       spin_unlock_irqrestore(&pctrl->lock, flags);
4961 +       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4962  }
4963  
4964  static void msm_gpio_irq_ack(struct irq_data *d)
4965 @@ -617,7 +617,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
4966  
4967         g = &pctrl->soc->groups[d->hwirq];
4968  
4969 -       spin_lock_irqsave(&pctrl->lock, flags);
4970 +       raw_spin_lock_irqsave(&pctrl->lock, flags);
4971  
4972         val = readl(pctrl->regs + g->intr_status_reg);
4973         if (g->intr_ack_high)
4974 @@ -629,7 +629,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
4975         if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
4976                 msm_gpio_update_dual_edge_pos(pctrl, g, d);
4977  
4978 -       spin_unlock_irqrestore(&pctrl->lock, flags);
4979 +       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4980  }
4981  
4982  static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
4983 @@ -642,7 +642,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
4984  
4985         g = &pctrl->soc->groups[d->hwirq];
4986  
4987 -       spin_lock_irqsave(&pctrl->lock, flags);
4988 +       raw_spin_lock_irqsave(&pctrl->lock, flags);
4989  
4990         /*
4991          * For hw without possibility of detecting both edges
4992 @@ -716,7 +716,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
4993         if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
4994                 msm_gpio_update_dual_edge_pos(pctrl, g, d);
4995  
4996 -       spin_unlock_irqrestore(&pctrl->lock, flags);
4997 +       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
4998  
4999         if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
5000                 irq_set_handler_locked(d, handle_level_irq);
5001 @@ -732,11 +732,11 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
5002         struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
5003         unsigned long flags;
5004  
5005 -       spin_lock_irqsave(&pctrl->lock, flags);
5006 +       raw_spin_lock_irqsave(&pctrl->lock, flags);
5007  
5008         irq_set_irq_wake(pctrl->irq, on);
5009  
5010 -       spin_unlock_irqrestore(&pctrl->lock, flags);
5011 +       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
5012  
5013         return 0;
5014  }
5015 @@ -882,7 +882,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
5016         pctrl->soc = soc_data;
5017         pctrl->chip = msm_gpio_template;
5018  
5019 -       spin_lock_init(&pctrl->lock);
5020 +       raw_spin_lock_init(&pctrl->lock);
5021  
5022         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5023         pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
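The pinctrl-msm conversion is the standard raw-spinlock treatment for irq-chip drivers: msm_gpio_irq_mask/unmask/ack/set_type run in contexts that must not sleep even on RT, so the lock they share with the gpio and pinconf paths cannot become an rt_mutex-backed spinlock_t and is turned into a raw_spinlock_t throughout. The conversion pattern in miniature, with an illustrative structure:

#include <linux/spinlock.h>

struct my_pinctrl {
        raw_spinlock_t lock;            /* was spinlock_t; taken from irq-chip callbacks */
        u32 shadow;
};

static void my_gpio_irq_mask(struct my_pinctrl *pctrl, u32 bit)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&pctrl->lock, flags);     /* truly disables irqs, even on RT */
        pctrl->shadow &= ~bit;
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}

static void my_pinctrl_init(struct my_pinctrl *pctrl)
{
        raw_spin_lock_init(&pctrl->lock);
}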
5024 diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
5025 index 9bd41a35a78a..8e2d436c2e3f 100644
5026 --- a/drivers/scsi/fcoe/fcoe.c
5027 +++ b/drivers/scsi/fcoe/fcoe.c
5028 @@ -1455,11 +1455,11 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
5029  static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
5030  {
5031         struct fcoe_percpu_s *fps;
5032 -       int rc;
5033 +       int rc, cpu = get_cpu_light();
5034  
5035 -       fps = &get_cpu_var(fcoe_percpu);
5036 +       fps = &per_cpu(fcoe_percpu, cpu);
5037         rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
5038 -       put_cpu_var(fcoe_percpu);
5039 +       put_cpu_light();
5040  
5041         return rc;
5042  }
5043 @@ -1646,11 +1646,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
5044                 return 0;
5045         }
5046  
5047 -       stats = per_cpu_ptr(lport->stats, get_cpu());
5048 +       stats = per_cpu_ptr(lport->stats, get_cpu_light());
5049         stats->InvalidCRCCount++;
5050         if (stats->InvalidCRCCount < 5)
5051                 printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
5052 -       put_cpu();
5053 +       put_cpu_light();
5054         return -EINVAL;
5055  }
5056  
5057 @@ -1693,7 +1693,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
5058          */
5059         hp = (struct fcoe_hdr *) skb_network_header(skb);
5060  
5061 -       stats = per_cpu_ptr(lport->stats, get_cpu());
5062 +       stats = per_cpu_ptr(lport->stats, get_cpu_light());
5063         if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
5064                 if (stats->ErrorFrames < 5)
5065                         printk(KERN_WARNING "fcoe: FCoE version "
5066 @@ -1725,13 +1725,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
5067                 goto drop;
5068  
5069         if (!fcoe_filter_frames(lport, fp)) {
5070 -               put_cpu();
5071 +               put_cpu_light();
5072                 fc_exch_recv(lport, fp);
5073                 return;
5074         }
5075  drop:
5076         stats->ErrorFrames++;
5077 -       put_cpu();
5078 +       put_cpu_light();
5079         kfree_skb(skb);
5080  }
5081  
5082 diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
5083 index dcf36537a767..1a1f2e46452c 100644
5084 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
5085 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
5086 @@ -834,7 +834,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
5087  
5088         INIT_LIST_HEAD(&del_list);
5089  
5090 -       stats = per_cpu_ptr(fip->lp->stats, get_cpu());
5091 +       stats = per_cpu_ptr(fip->lp->stats, get_cpu_light());
5092  
5093         list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
5094                 deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
5095 @@ -870,7 +870,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
5096                                 sel_time = fcf->time;
5097                 }
5098         }
5099 -       put_cpu();
5100 +       put_cpu_light();
5101  
5102         list_for_each_entry_safe(fcf, next, &del_list, list) {
5103                 /* Removes fcf from current list */
5104 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
5105 index 16ca31ad5ec0..c3987347e762 100644
5106 --- a/drivers/scsi/libfc/fc_exch.c
5107 +++ b/drivers/scsi/libfc/fc_exch.c
5108 @@ -814,10 +814,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
5109         }
5110         memset(ep, 0, sizeof(*ep));
5111  
5112 -       cpu = get_cpu();
5113 +       cpu = get_cpu_light();
5114         pool = per_cpu_ptr(mp->pool, cpu);
5115         spin_lock_bh(&pool->lock);
5116 -       put_cpu();
5117 +       put_cpu_light();
5118  
5119         /* peek cache of free slot */
5120         if (pool->left != FC_XID_UNKNOWN) {
5121 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
5122 index 763f012fdeca..d0f61b595470 100644
5123 --- a/drivers/scsi/libsas/sas_ata.c
5124 +++ b/drivers/scsi/libsas/sas_ata.c
5125 @@ -190,7 +190,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
5126         /* TODO: audit callers to ensure they are ready for qc_issue to
5127          * unconditionally re-enable interrupts
5128          */
5129 -       local_irq_save(flags);
5130 +       local_irq_save_nort(flags);
5131         spin_unlock(ap->lock);
5132  
5133         /* If the device fell off, no sense in issuing commands */
5134 @@ -252,7 +252,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
5135  
5136   out:
5137         spin_lock(ap->lock);
5138 -       local_irq_restore(flags);
5139 +       local_irq_restore_nort(flags);
5140         return ret;
5141  }
5142  
5143 diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
5144 index edc48f3b8230..ee5c6f9dfb6f 100644
5145 --- a/drivers/scsi/qla2xxx/qla_inline.h
5146 +++ b/drivers/scsi/qla2xxx/qla_inline.h
5147 @@ -59,12 +59,12 @@ qla2x00_poll(struct rsp_que *rsp)
5148  {
5149         unsigned long flags;
5150         struct qla_hw_data *ha = rsp->hw;
5151 -       local_irq_save(flags);
5152 +       local_irq_save_nort(flags);
5153         if (IS_P3P_TYPE(ha))
5154                 qla82xx_poll(0, rsp);
5155         else
5156                 ha->isp_ops->intr_handler(0, rsp);
5157 -       local_irq_restore(flags);
5158 +       local_irq_restore_nort(flags);
5159  }
5160  
5161  static inline uint8_t *
5162 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
5163 index 068c4e47fac9..a2090f640397 100644
5164 --- a/drivers/scsi/qla2xxx/qla_isr.c
5165 +++ b/drivers/scsi/qla2xxx/qla_isr.c
5166 @@ -3125,7 +3125,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
5167                 * kref_put().
5168                 */
5169                 kref_get(&qentry->irq_notify.kref);
5170 +#ifdef CONFIG_PREEMPT_RT_BASE
5171 +               swork_queue(&qentry->irq_notify.swork);
5172 +#else
5173                 schedule_work(&qentry->irq_notify.work);
5174 +#endif
5175         }
5176  
5177         /*
5178 diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
5179 index 95f4c1bcdb4c..0be934799bff 100644
5180 --- a/drivers/thermal/x86_pkg_temp_thermal.c
5181 +++ b/drivers/thermal/x86_pkg_temp_thermal.c
5182 @@ -29,6 +29,7 @@
5183  #include <linux/pm.h>
5184  #include <linux/thermal.h>
5185  #include <linux/debugfs.h>
5186 +#include <linux/swork.h>
5187  #include <asm/cpu_device_id.h>
5188  #include <asm/mce.h>
5189  
5190 @@ -353,7 +354,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
5191         }
5192  }
5193  
5194 -static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
5195 +static void platform_thermal_notify_work(struct swork_event *event)
5196  {
5197         unsigned long flags;
5198         int cpu = smp_processor_id();
5199 @@ -370,7 +371,7 @@ static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
5200                         pkg_work_scheduled[phy_id]) {
5201                 disable_pkg_thres_interrupt();
5202                 spin_unlock_irqrestore(&pkg_work_lock, flags);
5203 -               return -EINVAL;
5204 +               return;
5205         }
5206         pkg_work_scheduled[phy_id] = 1;
5207         spin_unlock_irqrestore(&pkg_work_lock, flags);
5208 @@ -379,9 +380,48 @@ static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
5209         schedule_delayed_work_on(cpu,
5210                                 &per_cpu(pkg_temp_thermal_threshold_work, cpu),
5211                                 msecs_to_jiffies(notify_delay_ms));
5212 +}
5213 +
5214 +#ifdef CONFIG_PREEMPT_RT_FULL
5215 +static struct swork_event notify_work;
5216 +
5217 +static int thermal_notify_work_init(void)
5218 +{
5219 +       int err;
5220 +
5221 +       err = swork_get();
5222 +       if (err)
5223 +               return err;
5224 +
5225 +       INIT_SWORK(&notify_work, platform_thermal_notify_work);
5226         return 0;
5227  }
5228  
5229 +static void thermal_notify_work_cleanup(void)
5230 +{
5231 +       swork_put();
5232 +}
5233 +
5234 +static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
5235 +{
5236 +       swork_queue(&notify_work);
5237 +       return 0;
5238 +}
5239 +
5240 +#else  /* !CONFIG_PREEMPT_RT_FULL */
5241 +
5242 +static int thermal_notify_work_init(void) { return 0; }
5243 +
5244 +static void thermal_notify_work_cleanup(void) {  }
5245 +
5246 +static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
5247 +{
5248 +       platform_thermal_notify_work(NULL);
5249 +
5250 +       return 0;
5251 +}
5252 +#endif /* CONFIG_PREEMPT_RT_FULL */
5253 +
5254  static int find_siblings_cpu(int cpu)
5255  {
5256         int i;
5257 @@ -585,6 +625,9 @@ static int __init pkg_temp_thermal_init(void)
5258         if (!x86_match_cpu(pkg_temp_thermal_ids))
5259                 return -ENODEV;
5260  
5261 +       if (thermal_notify_work_init())
5262 +               return -ENODEV;
5263 +
5264         spin_lock_init(&pkg_work_lock);
5265         platform_thermal_package_notify =
5266                         pkg_temp_thermal_platform_thermal_notify;
5267 @@ -609,7 +652,7 @@ static int __init pkg_temp_thermal_init(void)
5268         kfree(pkg_work_scheduled);
5269         platform_thermal_package_notify = NULL;
5270         platform_thermal_package_rate_control = NULL;
5271 -
5272 +       thermal_notify_work_cleanup();
5273         return -ENODEV;
5274  }
5275  
5276 @@ -634,6 +677,7 @@ static void __exit pkg_temp_thermal_exit(void)
5277         mutex_unlock(&phy_dev_list_mutex);
5278         platform_thermal_package_notify = NULL;
5279         platform_thermal_package_rate_control = NULL;
5280 +       thermal_notify_work_cleanup();
5281         for_each_online_cpu(i)
5282                 cancel_delayed_work_sync(
5283                         &per_cpu(pkg_temp_thermal_threshold_work, i));
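The thermal rework is a complete example of the swork ("simple work") API the series uses when something queued from hard interrupt context must later run where it can sleep: swork_get()/swork_put() bracket the lifetime of the worker thread, INIT_SWORK() binds the handler, and swork_queue() is safe from the interrupt. Reduced to its skeleton (illustrative names):

#include <linux/swork.h>

static struct swork_event my_event;

static void my_deferred_work(struct swork_event *ev)
{
        /* runs in the swork kernel thread: process context, sleeping allowed */
}

static int my_driver_init(void)
{
        int err = swork_get();          /* take a reference on the worker thread */

        if (err)
                return err;
        INIT_SWORK(&my_event, my_deferred_work);
        return 0;
}

static void my_irq_notify(void)         /* callable from hard-irq context */
{
        swork_queue(&my_event);
}

static void my_driver_exit(void)
{
        swork_put();                    /* release the worker reference */
}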
5284 diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
5285 index e8819aa20415..dd7f9bf45d6c 100644
5286 --- a/drivers/tty/serial/8250/8250_core.c
5287 +++ b/drivers/tty/serial/8250/8250_core.c
5288 @@ -58,7 +58,16 @@ static struct uart_driver serial8250_reg;
5289  
5290  static unsigned int skip_txen_test; /* force skip of txen test at init time */
5291  
5292 -#define PASS_LIMIT     512
5293 +/*
5294 + * On -rt we can have more delays, and legitimately
5295 + * so - so don't drop work spuriously and spam the
5296 + * syslog:
5297 + */
5298 +#ifdef CONFIG_PREEMPT_RT_FULL
5299 +# define PASS_LIMIT    1000000
5300 +#else
5301 +# define PASS_LIMIT    512
5302 +#endif
5303  
5304  #include <asm/serial.h>
5305  /*
5306 diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
5307 index 080d5a59d0a7..eecc4f111473 100644
5308 --- a/drivers/tty/serial/8250/8250_port.c
5309 +++ b/drivers/tty/serial/8250/8250_port.c
5310 @@ -35,6 +35,7 @@
5311  #include <linux/nmi.h>
5312  #include <linux/mutex.h>
5313  #include <linux/slab.h>
5314 +#include <linux/kdb.h>
5315  #include <linux/uaccess.h>
5316  #include <linux/pm_runtime.h>
5317  #include <linux/timer.h>
5318 @@ -3144,9 +3145,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
5319  
5320         serial8250_rpm_get(up);
5321  
5322 -       if (port->sysrq)
5323 +       if (port->sysrq || oops_in_progress)
5324                 locked = 0;
5325 -       else if (oops_in_progress)
5326 +       else if (in_kdb_printk())
5327                 locked = spin_trylock_irqsave(&port->lock, flags);
5328         else
5329                 spin_lock_irqsave(&port->lock, flags);
5330 diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
5331 index e2c33b9528d8..53af53c43e8c 100644
5332 --- a/drivers/tty/serial/amba-pl011.c
5333 +++ b/drivers/tty/serial/amba-pl011.c
5334 @@ -2194,13 +2194,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
5335  
5336         clk_enable(uap->clk);
5337  
5338 -       local_irq_save(flags);
5339 +       /*
5340 +        * local_irq_save(flags);
5341 +        *
5342 +        * This local_irq_save() is nonsense. If we come in via sysrq
5343 +        * handling then interrupts are already disabled. Aside from
5344 +        * that, the port.sysrq check is racy on SMP regardless.
5345 +        */
5346         if (uap->port.sysrq)
5347                 locked = 0;
5348         else if (oops_in_progress)
5349 -               locked = spin_trylock(&uap->port.lock);
5350 +               locked = spin_trylock_irqsave(&uap->port.lock, flags);
5351         else
5352 -               spin_lock(&uap->port.lock);
5353 +               spin_lock_irqsave(&uap->port.lock, flags);
5354  
5355         /*
5356          *      First save the CR then disable the interrupts
5357 @@ -2224,8 +2230,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
5358                 pl011_write(old_cr, uap, REG_CR);
5359  
5360         if (locked)
5361 -               spin_unlock(&uap->port.lock);
5362 -       local_irq_restore(flags);
5363 +               spin_unlock_irqrestore(&uap->port.lock, flags);
5364  
5365         clk_disable(uap->clk);
5366  }
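The pl011 and omap-serial console paths end up with the same locking shape: instead of an unconditional local_irq_save() around the whole write, the port lock is taken with the irqsave variants, and when an oops is in progress only a trylock is attempted so a wedged lock cannot block the dying message. Condensed sketch (uart_port fields as used in the hunks; emit_chars() is a stand-in for the character loop):

#include <linux/serial_core.h>
#include <linux/spinlock.h>

static void console_write_locked(struct uart_port *port,
                                 void (*emit_chars)(struct uart_port *))
{
        unsigned long flags;
        int locked = 1;

        if (port->sysrq)
                locked = 0;             /* sysrq handling already owns the lock */
        else if (oops_in_progress)
                locked = spin_trylock_irqsave(&port->lock, flags);
        else
                spin_lock_irqsave(&port->lock, flags);

        emit_chars(port);

        if (locked)
                spin_unlock_irqrestore(&port->lock, flags);
}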
5367 diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
5368 index a2a529994ba5..0ee7c4c518df 100644
5369 --- a/drivers/tty/serial/omap-serial.c
5370 +++ b/drivers/tty/serial/omap-serial.c
5371 @@ -1257,13 +1257,10 @@ serial_omap_console_write(struct console *co, const char *s,
5372  
5373         pm_runtime_get_sync(up->dev);
5374  
5375 -       local_irq_save(flags);
5376 -       if (up->port.sysrq)
5377 -               locked = 0;
5378 -       else if (oops_in_progress)
5379 -               locked = spin_trylock(&up->port.lock);
5380 +       if (up->port.sysrq || oops_in_progress)
5381 +               locked = spin_trylock_irqsave(&up->port.lock, flags);
5382         else
5383 -               spin_lock(&up->port.lock);
5384 +               spin_lock_irqsave(&up->port.lock, flags);
5385  
5386         /*
5387          * First save the IER then disable the interrupts
5388 @@ -1292,8 +1289,7 @@ serial_omap_console_write(struct console *co, const char *s,
5389         pm_runtime_mark_last_busy(up->dev);
5390         pm_runtime_put_autosuspend(up->dev);
5391         if (locked)
5392 -               spin_unlock(&up->port.lock);
5393 -       local_irq_restore(flags);
5394 +               spin_unlock_irqrestore(&up->port.lock, flags);
5395  }
5396  
5397  static int __init
5398 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
5399 index 479e223f9cff..3418a54b4131 100644
5400 --- a/drivers/usb/core/hcd.c
5401 +++ b/drivers/usb/core/hcd.c
5402 @@ -1761,9 +1761,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
5403          * and no one may trigger the above deadlock situation when
5404          * running complete() in tasklet.
5405          */
5406 -       local_irq_save(flags);
5407 +       local_irq_save_nort(flags);
5408         urb->complete(urb);
5409 -       local_irq_restore(flags);
5410 +       local_irq_restore_nort(flags);
5411  
5412         usb_anchor_resume_wakeups(anchor);
5413         atomic_dec(&urb->use_count);
5414 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
5415 index 8d412d8b1f29..176491dd739e 100644
5416 --- a/drivers/usb/gadget/function/f_fs.c
5417 +++ b/drivers/usb/gadget/function/f_fs.c
5418 @@ -1593,7 +1593,7 @@ static void ffs_data_put(struct ffs_data *ffs)
5419                 pr_info("%s(): freeing\n", __func__);
5420                 ffs_data_clear(ffs);
5421                 BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
5422 -                      waitqueue_active(&ffs->ep0req_completion.wait));
5423 +                      swait_active(&ffs->ep0req_completion.wait));
5424                 kfree(ffs->dev_name);
5425                 kfree(ffs);
5426         }
5427 diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
5428 index 1468d8f085a3..6aae3ae25c18 100644
5429 --- a/drivers/usb/gadget/legacy/inode.c
5430 +++ b/drivers/usb/gadget/legacy/inode.c
5431 @@ -346,7 +346,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
5432         spin_unlock_irq (&epdata->dev->lock);
5433  
5434         if (likely (value == 0)) {
5435 -               value = wait_event_interruptible (done.wait, done.done);
5436 +               value = swait_event_interruptible (done.wait, done.done);
5437                 if (value != 0) {
5438                         spin_lock_irq (&epdata->dev->lock);
5439                         if (likely (epdata->ep != NULL)) {
5440 @@ -355,7 +355,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
5441                                 usb_ep_dequeue (epdata->ep, epdata->req);
5442                                 spin_unlock_irq (&epdata->dev->lock);
5443  
5444 -                               wait_event (done.wait, done.done);
5445 +                               swait_event (done.wait, done.done);
5446                                 if (epdata->status == -ECONNRESET)
5447                                         epdata->status = -EINTR;
5448                         } else {
5449 diff --git a/fs/aio.c b/fs/aio.c
5450 index 428484f2f841..2b02e2eb2158 100644
5451 --- a/fs/aio.c
5452 +++ b/fs/aio.c
5453 @@ -40,6 +40,7 @@
5454  #include <linux/ramfs.h>
5455  #include <linux/percpu-refcount.h>
5456  #include <linux/mount.h>
5457 +#include <linux/swork.h>
5458  
5459  #include <asm/kmap_types.h>
5460  #include <asm/uaccess.h>
5461 @@ -115,7 +116,7 @@ struct kioctx {
5462         struct page             **ring_pages;
5463         long                    nr_pages;
5464  
5465 -       struct work_struct      free_work;
5466 +       struct swork_event      free_work;
5467  
5468         /*
5469          * signals when all in-flight requests are done
5470 @@ -258,6 +259,7 @@ static int __init aio_setup(void)
5471                 .mount          = aio_mount,
5472                 .kill_sb        = kill_anon_super,
5473         };
5474 +       BUG_ON(swork_get());
5475         aio_mnt = kern_mount(&aio_fs);
5476         if (IS_ERR(aio_mnt))
5477                 panic("Failed to create aio fs mount.");
5478 @@ -581,9 +583,9 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
5479         return cancel(&kiocb->common);
5480  }
5481  
5482 -static void free_ioctx(struct work_struct *work)
5483 +static void free_ioctx(struct swork_event *sev)
5484  {
5485 -       struct kioctx *ctx = container_of(work, struct kioctx, free_work);
5486 +       struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
5487  
5488         pr_debug("freeing %p\n", ctx);
5489  
5490 @@ -602,8 +604,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
5491         if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
5492                 complete(&ctx->rq_wait->comp);
5493  
5494 -       INIT_WORK(&ctx->free_work, free_ioctx);
5495 -       schedule_work(&ctx->free_work);
5496 +       INIT_SWORK(&ctx->free_work, free_ioctx);
5497 +       swork_queue(&ctx->free_work);
5498  }
5499  
5500  /*
5501 @@ -611,9 +613,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
5502   * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
5503   * now it's safe to cancel any that need to be.
5504   */
5505 -static void free_ioctx_users(struct percpu_ref *ref)
5506 +static void free_ioctx_users_work(struct swork_event *sev)
5507  {
5508 -       struct kioctx *ctx = container_of(ref, struct kioctx, users);
5509 +       struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
5510         struct aio_kiocb *req;
5511  
5512         spin_lock_irq(&ctx->ctx_lock);
5513 @@ -632,6 +634,14 @@ static void free_ioctx_users(struct percpu_ref *ref)
5514         percpu_ref_put(&ctx->reqs);
5515  }
5516  
5517 +static void free_ioctx_users(struct percpu_ref *ref)
5518 +{
5519 +       struct kioctx *ctx = container_of(ref, struct kioctx, users);
5520 +
5521 +       INIT_SWORK(&ctx->free_work, free_ioctx_users_work);
5522 +       swork_queue(&ctx->free_work);
5523 +}
5524 +
5525  static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
5526  {
5527         unsigned i, new_nr;
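
The aio change hands the final freeing of a kioctx to swork, a simplified worker mechanism added elsewhere in this patch, instead of doing it from the percpu_ref release callback. The general shape, queueing a "free this later" item for a dedicated thread rather than freeing inside the notification callback, looks roughly like the userspace sketch below; the queue, defer_free() and worker() names are made up for the example:

/* Userspace analogue of deferring object teardown to a single worker
 * thread instead of freeing in the notification callback. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_item {
        struct work_item *next;
        int id;                         /* stand-in for the object to free */
};

static struct work_item *queue_head;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;
static int done;

static void defer_free(struct work_item *item)      /* "queue it" side */
{
        pthread_mutex_lock(&queue_lock);
        item->next = queue_head;
        queue_head = item;
        pthread_cond_signal(&queue_cond);
        pthread_mutex_unlock(&queue_lock);
}

static void *worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&queue_lock);
        while (!done || queue_head) {
                while (queue_head) {
                        struct work_item *item = queue_head;

                        queue_head = item->next;
                        pthread_mutex_unlock(&queue_lock);
                        printf("freeing item %d in worker context\n", item->id);
                        free(item);
                        pthread_mutex_lock(&queue_lock);
                }
                if (!done)
                        pthread_cond_wait(&queue_cond, &queue_lock);
        }
        pthread_mutex_unlock(&queue_lock);
        return NULL;
}

int main(void)
{
        pthread_t tid;
        int i;

        pthread_create(&tid, NULL, worker, NULL);
        for (i = 0; i < 3; i++) {
                struct work_item *item = malloc(sizeof(*item));

                item->id = i;
                defer_free(item);       /* the "release callback" only queues */
        }
        pthread_mutex_lock(&queue_lock);
        done = 1;
        pthread_cond_broadcast(&queue_cond);
        pthread_mutex_unlock(&queue_lock);
        pthread_join(tid, NULL);
        return 0;
}
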
5528 diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
5529 index a1fba4285277..3796769b4cd1 100644
5530 --- a/fs/autofs4/autofs_i.h
5531 +++ b/fs/autofs4/autofs_i.h
5532 @@ -31,6 +31,7 @@
5533  #include <linux/sched.h>
5534  #include <linux/mount.h>
5535  #include <linux/namei.h>
5536 +#include <linux/delay.h>
5537  #include <asm/current.h>
5538  #include <linux/uaccess.h>
5539  
5540 diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
5541 index d8e6d421c27f..2e689ab1306b 100644
5542 --- a/fs/autofs4/expire.c
5543 +++ b/fs/autofs4/expire.c
5544 @@ -148,7 +148,7 @@ static struct dentry *get_next_positive_dentry(struct dentry *prev,
5545                         parent = p->d_parent;
5546                         if (!spin_trylock(&parent->d_lock)) {
5547                                 spin_unlock(&p->d_lock);
5548 -                               cpu_relax();
5549 +                               cpu_chill();
5550                                 goto relock;
5551                         }
5552                         spin_unlock(&p->d_lock);
5553 diff --git a/fs/buffer.c b/fs/buffer.c
5554 index b205a629001d..5646afc022ba 100644
5555 --- a/fs/buffer.c
5556 +++ b/fs/buffer.c
5557 @@ -301,8 +301,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
5558          * decide that the page is now completely done.
5559          */
5560         first = page_buffers(page);
5561 -       local_irq_save(flags);
5562 -       bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
5563 +       flags = bh_uptodate_lock_irqsave(first);
5564         clear_buffer_async_read(bh);
5565         unlock_buffer(bh);
5566         tmp = bh;
5567 @@ -315,8 +314,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
5568                 }
5569                 tmp = tmp->b_this_page;
5570         } while (tmp != bh);
5571 -       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
5572 -       local_irq_restore(flags);
5573 +       bh_uptodate_unlock_irqrestore(first, flags);
5574  
5575         /*
5576          * If none of the buffers had errors and they are all
5577 @@ -328,9 +326,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
5578         return;
5579  
5580  still_busy:
5581 -       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
5582 -       local_irq_restore(flags);
5583 -       return;
5584 +       bh_uptodate_unlock_irqrestore(first, flags);
5585  }
5586  
5587  /*
5588 @@ -358,8 +354,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
5589         }
5590  
5591         first = page_buffers(page);
5592 -       local_irq_save(flags);
5593 -       bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
5594 +       flags = bh_uptodate_lock_irqsave(first);
5595  
5596         clear_buffer_async_write(bh);
5597         unlock_buffer(bh);
5598 @@ -371,15 +366,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
5599                 }
5600                 tmp = tmp->b_this_page;
5601         }
5602 -       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
5603 -       local_irq_restore(flags);
5604 +       bh_uptodate_unlock_irqrestore(first, flags);
5605         end_page_writeback(page);
5606         return;
5607  
5608  still_busy:
5609 -       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
5610 -       local_irq_restore(flags);
5611 -       return;
5612 +       bh_uptodate_unlock_irqrestore(first, flags);
5613  }
5614  EXPORT_SYMBOL(end_buffer_async_write);
5615  
5616 @@ -3383,6 +3375,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
5617         struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
5618         if (ret) {
5619                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
5620 +               buffer_head_init_locks(ret);
5621                 preempt_disable();
5622                 __this_cpu_inc(bh_accounting.nr);
5623                 recalc_bh_state();
5624 diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
5625 index a27fc8791551..791aecb7c1ac 100644
5626 --- a/fs/cifs/readdir.c
5627 +++ b/fs/cifs/readdir.c
5628 @@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
5629         struct inode *inode;
5630         struct super_block *sb = parent->d_sb;
5631         struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
5632 -       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
5633 +       DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
5634  
5635         cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
5636  
5637 diff --git a/fs/dcache.c b/fs/dcache.c
5638 index 4485a48f4091..691039a6a872 100644
5639 --- a/fs/dcache.c
5640 +++ b/fs/dcache.c
5641 @@ -19,6 +19,7 @@
5642  #include <linux/mm.h>
5643  #include <linux/fs.h>
5644  #include <linux/fsnotify.h>
5645 +#include <linux/delay.h>
5646  #include <linux/slab.h>
5647  #include <linux/init.h>
5648  #include <linux/hash.h>
5649 @@ -750,6 +751,8 @@ static inline bool fast_dput(struct dentry *dentry)
5650   */
5651  void dput(struct dentry *dentry)
5652  {
5653 +       struct dentry *parent;
5654 +
5655         if (unlikely(!dentry))
5656                 return;
5657  
5658 @@ -788,9 +791,18 @@ void dput(struct dentry *dentry)
5659         return;
5660  
5661  kill_it:
5662 -       dentry = dentry_kill(dentry);
5663 -       if (dentry) {
5664 -               cond_resched();
5665 +       parent = dentry_kill(dentry);
5666 +       if (parent) {
5667 +               int r;
5668 +
5669 +               if (parent == dentry) {
5670 +                       /* the task with the highest priority won't schedule */
5671 +                       r = cond_resched();
5672 +                       if (!r)
5673 +                               cpu_chill();
5674 +               } else {
5675 +                       dentry = parent;
5676 +               }
5677                 goto repeat;
5678         }
5679  }
5680 @@ -2324,7 +2336,7 @@ void d_delete(struct dentry * dentry)
5681         if (dentry->d_lockref.count == 1) {
5682                 if (!spin_trylock(&inode->i_lock)) {
5683                         spin_unlock(&dentry->d_lock);
5684 -                       cpu_relax();
5685 +                       cpu_chill();
5686                         goto again;
5687                 }
5688                 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
5689 @@ -2384,21 +2396,24 @@ static inline void end_dir_add(struct inode *dir, unsigned n)
5690  
5691  static void d_wait_lookup(struct dentry *dentry)
5692  {
5693 -       if (d_in_lookup(dentry)) {
5694 -               DECLARE_WAITQUEUE(wait, current);
5695 -               add_wait_queue(dentry->d_wait, &wait);
5696 -               do {
5697 -                       set_current_state(TASK_UNINTERRUPTIBLE);
5698 -                       spin_unlock(&dentry->d_lock);
5699 -                       schedule();
5700 -                       spin_lock(&dentry->d_lock);
5701 -               } while (d_in_lookup(dentry));
5702 -       }
5703 +       struct swait_queue __wait;
5704 +
5705 +       if (!d_in_lookup(dentry))
5706 +               return;
5707 +
5708 +       INIT_LIST_HEAD(&__wait.task_list);
5709 +       do {
5710 +               prepare_to_swait(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
5711 +               spin_unlock(&dentry->d_lock);
5712 +               schedule();
5713 +               spin_lock(&dentry->d_lock);
5714 +       } while (d_in_lookup(dentry));
5715 +       finish_swait(dentry->d_wait, &__wait);
5716  }
5717  
5718  struct dentry *d_alloc_parallel(struct dentry *parent,
5719                                 const struct qstr *name,
5720 -                               wait_queue_head_t *wq)
5721 +                               struct swait_queue_head *wq)
5722  {
5723         unsigned int hash = name->hash;
5724         struct hlist_bl_head *b = in_lookup_hash(parent, hash);
5725 @@ -2507,7 +2522,7 @@ void __d_lookup_done(struct dentry *dentry)
5726         hlist_bl_lock(b);
5727         dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
5728         __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
5729 -       wake_up_all(dentry->d_wait);
5730 +       swake_up_all(dentry->d_wait);
5731         dentry->d_wait = NULL;
5732         hlist_bl_unlock(b);
5733         INIT_HLIST_NODE(&dentry->d_u.d_alias);
5734 @@ -3604,6 +3619,11 @@ EXPORT_SYMBOL(d_genocide);
5735  
5736  void __init vfs_caches_init_early(void)
5737  {
5738 +       int i;
5739 +
5740 +       for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
5741 +               INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
5742 +
5743         dcache_init_early();
5744         inode_init_early();
5745  }
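
The open-coded swait loop in d_wait_lookup() above follows the usual prepare/wait/recheck/finish discipline: enqueue, drop the lock protecting the predicate, sleep, retake the lock, and loop until d_in_lookup() is false. The same discipline in plain POSIX terms, with the predicate only ever tested under the lock; the waiter/in_lookup names are illustrative:

/* Userspace version of the "enqueue, drop lock, sleep, relock, recheck"
 * wait loop used by d_wait_lookup(). */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t d_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t d_wait = PTHREAD_COND_INITIALIZER;
static int in_lookup = 1;                 /* stand-in for d_in_lookup() */

static void *waiter(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&d_lock);
        while (in_lookup)                 /* recheck after every wakeup */
                pthread_cond_wait(&d_wait, &d_lock);
        pthread_mutex_unlock(&d_lock);
        printf("lookup finished, waiter continues\n");
        return NULL;
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, waiter, NULL);
        sleep(1);                         /* pretend the lookup takes a while */
        pthread_mutex_lock(&d_lock);
        in_lookup = 0;                    /* what __d_lookup_done() models */
        pthread_cond_broadcast(&d_wait);  /* the swake_up_all() counterpart */
        pthread_mutex_unlock(&d_lock);
        pthread_join(tid, NULL);
        return 0;
}
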
5746 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
5747 index 10db91218933..42af0a06f657 100644
5748 --- a/fs/eventpoll.c
5749 +++ b/fs/eventpoll.c
5750 @@ -510,12 +510,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
5751   */
5752  static void ep_poll_safewake(wait_queue_head_t *wq)
5753  {
5754 -       int this_cpu = get_cpu();
5755 +       int this_cpu = get_cpu_light();
5756  
5757         ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
5758                        ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
5759  
5760 -       put_cpu();
5761 +       put_cpu_light();
5762  }
5763  
5764  static void ep_remove_wait_queue(struct eppoll_entry *pwq)
5765 diff --git a/fs/exec.c b/fs/exec.c
5766 index 67e86571685a..fe14cdd84016 100644
5767 --- a/fs/exec.c
5768 +++ b/fs/exec.c
5769 @@ -1017,12 +1017,14 @@ static int exec_mmap(struct mm_struct *mm)
5770                 }
5771         }
5772         task_lock(tsk);
5773 +       preempt_disable_rt();
5774         active_mm = tsk->active_mm;
5775         tsk->mm = mm;
5776         tsk->active_mm = mm;
5777         activate_mm(active_mm, mm);
5778         tsk->mm->vmacache_seqnum = 0;
5779         vmacache_flush(tsk);
5780 +       preempt_enable_rt();
5781         task_unlock(tsk);
5782         if (old_mm) {
5783                 up_read(&old_mm->mmap_sem);
5784 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
5785 index 642c57b8de7b..8494b9308333 100644
5786 --- a/fs/fuse/dir.c
5787 +++ b/fs/fuse/dir.c
5788 @@ -1191,7 +1191,7 @@ static int fuse_direntplus_link(struct file *file,
5789         struct inode *dir = d_inode(parent);
5790         struct fuse_conn *fc;
5791         struct inode *inode;
5792 -       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
5793 +       DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
5794  
5795         if (!o->nodeid) {
5796                 /*
5797 diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
5798 index 684996c8a3a4..6e18a06aaabe 100644
5799 --- a/fs/jbd2/checkpoint.c
5800 +++ b/fs/jbd2/checkpoint.c
5801 @@ -116,6 +116,8 @@ void __jbd2_log_wait_for_space(journal_t *journal)
5802         nblocks = jbd2_space_needed(journal);
5803         while (jbd2_log_space_left(journal) < nblocks) {
5804                 write_unlock(&journal->j_state_lock);
5805 +               if (current->plug)
5806 +                       io_schedule();
5807                 mutex_lock(&journal->j_checkpoint_mutex);
5808  
5809                 /*
5810 diff --git a/fs/locks.c b/fs/locks.c
5811 index 22c5b4aa4961..269c6a44449a 100644
5812 --- a/fs/locks.c
5813 +++ b/fs/locks.c
5814 @@ -935,7 +935,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
5815                         return -ENOMEM;
5816         }
5817  
5818 -       percpu_down_read_preempt_disable(&file_rwsem);
5819 +       percpu_down_read(&file_rwsem);
5820         spin_lock(&ctx->flc_lock);
5821         if (request->fl_flags & FL_ACCESS)
5822                 goto find_conflict;
5823 @@ -976,7 +976,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
5824  
5825  out:
5826         spin_unlock(&ctx->flc_lock);
5827 -       percpu_up_read_preempt_enable(&file_rwsem);
5828 +       percpu_up_read(&file_rwsem);
5829         if (new_fl)
5830                 locks_free_lock(new_fl);
5831         locks_dispose_list(&dispose);
5832 @@ -1013,7 +1013,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
5833                 new_fl2 = locks_alloc_lock();
5834         }
5835  
5836 -       percpu_down_read_preempt_disable(&file_rwsem);
5837 +       percpu_down_read(&file_rwsem);
5838         spin_lock(&ctx->flc_lock);
5839         /*
5840          * New lock request. Walk all POSIX locks and look for conflicts. If
5841 @@ -1185,7 +1185,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
5842         }
5843   out:
5844         spin_unlock(&ctx->flc_lock);
5845 -       percpu_up_read_preempt_enable(&file_rwsem);
5846 +       percpu_up_read(&file_rwsem);
5847         /*
5848          * Free any unused locks.
5849          */
5850 @@ -1460,7 +1460,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
5851                 return error;
5852         }
5853  
5854 -       percpu_down_read_preempt_disable(&file_rwsem);
5855 +       percpu_down_read(&file_rwsem);
5856         spin_lock(&ctx->flc_lock);
5857  
5858         time_out_leases(inode, &dispose);
5859 @@ -1512,13 +1512,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
5860         locks_insert_block(fl, new_fl);
5861         trace_break_lease_block(inode, new_fl);
5862         spin_unlock(&ctx->flc_lock);
5863 -       percpu_up_read_preempt_enable(&file_rwsem);
5864 +       percpu_up_read(&file_rwsem);
5865  
5866         locks_dispose_list(&dispose);
5867         error = wait_event_interruptible_timeout(new_fl->fl_wait,
5868                                                 !new_fl->fl_next, break_time);
5869  
5870 -       percpu_down_read_preempt_disable(&file_rwsem);
5871 +       percpu_down_read(&file_rwsem);
5872         spin_lock(&ctx->flc_lock);
5873         trace_break_lease_unblock(inode, new_fl);
5874         locks_delete_block(new_fl);
5875 @@ -1535,7 +1535,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
5876         }
5877  out:
5878         spin_unlock(&ctx->flc_lock);
5879 -       percpu_up_read_preempt_enable(&file_rwsem);
5880 +       percpu_up_read(&file_rwsem);
5881         locks_dispose_list(&dispose);
5882         locks_free_lock(new_fl);
5883         return error;
5884 @@ -1609,7 +1609,7 @@ int fcntl_getlease(struct file *filp)
5885  
5886         ctx = smp_load_acquire(&inode->i_flctx);
5887         if (ctx && !list_empty_careful(&ctx->flc_lease)) {
5888 -               percpu_down_read_preempt_disable(&file_rwsem);
5889 +               percpu_down_read(&file_rwsem);
5890                 spin_lock(&ctx->flc_lock);
5891                 time_out_leases(inode, &dispose);
5892                 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
5893 @@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp)
5894                         break;
5895                 }
5896                 spin_unlock(&ctx->flc_lock);
5897 -               percpu_up_read_preempt_enable(&file_rwsem);
5898 +               percpu_up_read(&file_rwsem);
5899  
5900                 locks_dispose_list(&dispose);
5901         }
5902 @@ -1694,7 +1694,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
5903                 return -EINVAL;
5904         }
5905  
5906 -       percpu_down_read_preempt_disable(&file_rwsem);
5907 +       percpu_down_read(&file_rwsem);
5908         spin_lock(&ctx->flc_lock);
5909         time_out_leases(inode, &dispose);
5910         error = check_conflicting_open(dentry, arg, lease->fl_flags);
5911 @@ -1765,7 +1765,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
5912                 lease->fl_lmops->lm_setup(lease, priv);
5913  out:
5914         spin_unlock(&ctx->flc_lock);
5915 -       percpu_up_read_preempt_enable(&file_rwsem);
5916 +       percpu_up_read(&file_rwsem);
5917         locks_dispose_list(&dispose);
5918         if (is_deleg)
5919                 inode_unlock(inode);
5920 @@ -1788,7 +1788,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
5921                 return error;
5922         }
5923  
5924 -       percpu_down_read_preempt_disable(&file_rwsem);
5925 +       percpu_down_read(&file_rwsem);
5926         spin_lock(&ctx->flc_lock);
5927         list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
5928                 if (fl->fl_file == filp &&
5929 @@ -1801,7 +1801,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
5930         if (victim)
5931                 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
5932         spin_unlock(&ctx->flc_lock);
5933 -       percpu_up_read_preempt_enable(&file_rwsem);
5934 +       percpu_up_read(&file_rwsem);
5935         locks_dispose_list(&dispose);
5936         return error;
5937  }
5938 @@ -2532,13 +2532,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
5939         if (list_empty(&ctx->flc_lease))
5940                 return;
5941  
5942 -       percpu_down_read_preempt_disable(&file_rwsem);
5943 +       percpu_down_read(&file_rwsem);
5944         spin_lock(&ctx->flc_lock);
5945         list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
5946                 if (filp == fl->fl_file)
5947                         lease_modify(fl, F_UNLCK, &dispose);
5948         spin_unlock(&ctx->flc_lock);
5949 -       percpu_up_read_preempt_enable(&file_rwsem);
5950 +       percpu_up_read(&file_rwsem);
5951  
5952         locks_dispose_list(&dispose);
5953  }
5954 diff --git a/fs/namei.c b/fs/namei.c
5955 index 5b4eed221530..9c8dd3c83a80 100644
5956 --- a/fs/namei.c
5957 +++ b/fs/namei.c
5958 @@ -1629,7 +1629,7 @@ static struct dentry *lookup_slow(const struct qstr *name,
5959  {
5960         struct dentry *dentry = ERR_PTR(-ENOENT), *old;
5961         struct inode *inode = dir->d_inode;
5962 -       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
5963 +       DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
5964  
5965         inode_lock_shared(inode);
5966         /* Don't go there if it's already dead */
5967 @@ -3086,7 +3086,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
5968         struct dentry *dentry;
5969         int error, create_error = 0;
5970         umode_t mode = op->mode;
5971 -       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
5972 +       DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
5973  
5974         if (unlikely(IS_DEADDIR(dir_inode)))
5975                 return -ENOENT;
5976 diff --git a/fs/namespace.c b/fs/namespace.c
5977 index 7cea503ae06d..cb15f5397991 100644
5978 --- a/fs/namespace.c
5979 +++ b/fs/namespace.c
5980 @@ -14,6 +14,7 @@
5981  #include <linux/mnt_namespace.h>
5982  #include <linux/user_namespace.h>
5983  #include <linux/namei.h>
5984 +#include <linux/delay.h>
5985  #include <linux/security.h>
5986  #include <linux/idr.h>
5987  #include <linux/init.h>                /* init_rootfs */
5988 @@ -356,8 +357,11 @@ int __mnt_want_write(struct vfsmount *m)
5989          * incremented count after it has set MNT_WRITE_HOLD.
5990          */
5991         smp_mb();
5992 -       while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
5993 -               cpu_relax();
5994 +       while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
5995 +               preempt_enable();
5996 +               cpu_chill();
5997 +               preempt_disable();
5998 +       }
5999         /*
6000          * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
6001          * be set to match its requirements. So we must not load that until
6002 diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
6003 index dff600ae0d74..d726d2e09353 100644
6004 --- a/fs/nfs/delegation.c
6005 +++ b/fs/nfs/delegation.c
6006 @@ -150,11 +150,11 @@ static int nfs_delegation_claim_opens(struct inode *inode,
6007                 sp = state->owner;
6008                 /* Block nfs4_proc_unlck */
6009                 mutex_lock(&sp->so_delegreturn_mutex);
6010 -               seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
6011 +               seq = read_seqbegin(&sp->so_reclaim_seqlock);
6012                 err = nfs4_open_delegation_recall(ctx, state, stateid, type);
6013                 if (!err)
6014                         err = nfs_delegation_claim_locks(ctx, state, stateid);
6015 -               if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
6016 +               if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq))
6017                         err = -EAGAIN;
6018                 mutex_unlock(&sp->so_delegreturn_mutex);
6019                 put_nfs_open_context(ctx);
6020 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
6021 index 53e02b8bd9bd..a66e7d77cfbb 100644
6022 --- a/fs/nfs/dir.c
6023 +++ b/fs/nfs/dir.c
6024 @@ -485,7 +485,7 @@ static
6025  void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
6026  {
6027         struct qstr filename = QSTR_INIT(entry->name, entry->len);
6028 -       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
6029 +       DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
6030         struct dentry *dentry;
6031         struct dentry *alias;
6032         struct inode *dir = d_inode(parent);
6033 @@ -1487,7 +1487,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
6034                     struct file *file, unsigned open_flags,
6035                     umode_t mode, int *opened)
6036  {
6037 -       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
6038 +       DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
6039         struct nfs_open_context *ctx;
6040         struct dentry *res;
6041         struct iattr attr = { .ia_valid = ATTR_OPEN };
6042 @@ -1802,7 +1802,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
6043  
6044         trace_nfs_rmdir_enter(dir, dentry);
6045         if (d_really_is_positive(dentry)) {
6046 +#ifdef CONFIG_PREEMPT_RT_BASE
6047 +               down(&NFS_I(d_inode(dentry))->rmdir_sem);
6048 +#else
6049                 down_write(&NFS_I(d_inode(dentry))->rmdir_sem);
6050 +#endif
6051                 error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
6052                 /* Ensure the VFS deletes this inode */
6053                 switch (error) {
6054 @@ -1812,7 +1816,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
6055                 case -ENOENT:
6056                         nfs_dentry_handle_enoent(dentry);
6057                 }
6058 +#ifdef CONFIG_PREEMPT_RT_BASE
6059 +               up(&NFS_I(d_inode(dentry))->rmdir_sem);
6060 +#else
6061                 up_write(&NFS_I(d_inode(dentry))->rmdir_sem);
6062 +#endif
6063         } else
6064                 error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
6065         trace_nfs_rmdir_exit(dir, dentry, error);
6066 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
6067 index bf4ec5ecc97e..36cd5fc9192c 100644
6068 --- a/fs/nfs/inode.c
6069 +++ b/fs/nfs/inode.c
6070 @@ -1957,7 +1957,11 @@ static void init_once(void *foo)
6071         nfsi->nrequests = 0;
6072         nfsi->commit_info.ncommit = 0;
6073         atomic_set(&nfsi->commit_info.rpcs_out, 0);
6074 +#ifdef CONFIG_PREEMPT_RT_BASE
6075 +       sema_init(&nfsi->rmdir_sem, 1);
6076 +#else
6077         init_rwsem(&nfsi->rmdir_sem);
6078 +#endif
6079         nfs4_init_once(nfsi);
6080  }
6081  
6082 diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
6083 index 1452177c822d..f43b01d54c59 100644
6084 --- a/fs/nfs/nfs4_fs.h
6085 +++ b/fs/nfs/nfs4_fs.h
6086 @@ -111,7 +111,7 @@ struct nfs4_state_owner {
6087         unsigned long        so_flags;
6088         struct list_head     so_states;
6089         struct nfs_seqid_counter so_seqid;
6090 -       seqcount_t           so_reclaim_seqcount;
6091 +       seqlock_t            so_reclaim_seqlock;
6092         struct mutex         so_delegreturn_mutex;
6093  };
6094  
6095 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
6096 index 78ff8b63d5f7..3573653fd5cc 100644
6097 --- a/fs/nfs/nfs4proc.c
6098 +++ b/fs/nfs/nfs4proc.c
6099 @@ -2698,7 +2698,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
6100         unsigned int seq;
6101         int ret;
6102  
6103 -       seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
6104 +       seq = raw_seqcount_begin(&sp->so_reclaim_seqlock.seqcount);
6105  
6106         ret = _nfs4_proc_open(opendata);
6107         if (ret != 0)
6108 @@ -2736,7 +2736,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
6109         ctx->state = state;
6110         if (d_inode(dentry) == state->inode) {
6111                 nfs_inode_attach_open_context(ctx);
6112 -               if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
6113 +               if (read_seqretry(&sp->so_reclaim_seqlock, seq))
6114                         nfs4_schedule_stateid_recovery(server, state);
6115         }
6116  out:
6117 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
6118 index 0959c9661662..dabd834d7686 100644
6119 --- a/fs/nfs/nfs4state.c
6120 +++ b/fs/nfs/nfs4state.c
6121 @@ -488,7 +488,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
6122         nfs4_init_seqid_counter(&sp->so_seqid);
6123         atomic_set(&sp->so_count, 1);
6124         INIT_LIST_HEAD(&sp->so_lru);
6125 -       seqcount_init(&sp->so_reclaim_seqcount);
6126 +       seqlock_init(&sp->so_reclaim_seqlock);
6127         mutex_init(&sp->so_delegreturn_mutex);
6128         return sp;
6129  }
6130 @@ -1497,8 +1497,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
6131          * recovering after a network partition or a reboot from a
6132          * server that doesn't support a grace period.
6133          */
6134 +#ifdef CONFIG_PREEMPT_RT_FULL
6135 +       write_seqlock(&sp->so_reclaim_seqlock);
6136 +#else
6137 +       write_seqcount_begin(&sp->so_reclaim_seqlock.seqcount);
6138 +#endif
6139         spin_lock(&sp->so_lock);
6140 -       raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
6141  restart:
6142         list_for_each_entry(state, &sp->so_states, open_states) {
6143                 if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
6144 @@ -1567,14 +1571,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
6145                 spin_lock(&sp->so_lock);
6146                 goto restart;
6147         }
6148 -       raw_write_seqcount_end(&sp->so_reclaim_seqcount);
6149         spin_unlock(&sp->so_lock);
6150 +#ifdef CONFIG_PREEMPT_RT_FULL
6151 +       write_sequnlock(&sp->so_reclaim_seqlock);
6152 +#else
6153 +       write_seqcount_end(&sp->so_reclaim_seqlock.seqcount);
6154 +#endif
6155         return 0;
6156  out_err:
6157         nfs4_put_open_state(state);
6158 -       spin_lock(&sp->so_lock);
6159 -       raw_write_seqcount_end(&sp->so_reclaim_seqcount);
6160 -       spin_unlock(&sp->so_lock);
6161 +#ifdef CONFIG_PREEMPT_RT_FULL
6162 +       write_sequnlock(&sp->so_reclaim_seqlock);
6163 +#else
6164 +       write_seqcount_end(&sp->so_reclaim_seqlock.seqcount);
6165 +#endif
6166         return status;
6167  }
6168  
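
The NFSv4 hunks above turn so_reclaim_seqcount into a full seqlock so that on RT the writer side takes the embedded lock (write_seqlock) while non-RT builds keep the bare seqcount writer; readers keep the usual snapshot-and-retry semantics either way. A compact, single-threaded model of just that read-retry discipline, with a sequence count that is odd while a write is in flight; the struct and function names are invented for the example:

/* Minimal model of seqlock read/retry semantics: a reader snapshots the
 * sequence count, copies the data, and retries if a writer ran meanwhile. */
#include <stdio.h>

struct seq_demo {
        unsigned seq;                     /* even: stable, odd: write in progress */
        int a, b;                         /* protected data */
};

static unsigned read_begin(const struct seq_demo *s)
{
        return s->seq;
}

static int read_retry(const struct seq_demo *s, unsigned start)
{
        return (start & 1) || s->seq != start;
}

static void write_update(struct seq_demo *s, int a, int b)
{
        s->seq++;                         /* odd: readers will retry */
        s->a = a;
        s->b = b;
        s->seq++;                         /* even again: snapshot is stable */
}

int main(void)
{
        struct seq_demo s = { .seq = 0, .a = 1, .b = 1 };
        unsigned start;
        int a, b;

        do {
                start = read_begin(&s);
                a = s.a;
                b = s.b;
                if (start == 0)           /* simulate one concurrent writer */
                        write_update(&s, 2, 2);
        } while (read_retry(&s, start));

        printf("consistent snapshot: a=%d b=%d\n", a, b);
        return 0;
}
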
6169 diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
6170 index 191aa577dd1f..58990c8f52e0 100644
6171 --- a/fs/nfs/unlink.c
6172 +++ b/fs/nfs/unlink.c
6173 @@ -12,7 +12,7 @@
6174  #include <linux/sunrpc/clnt.h>
6175  #include <linux/nfs_fs.h>
6176  #include <linux/sched.h>
6177 -#include <linux/wait.h>
6178 +#include <linux/swait.h>
6179  #include <linux/namei.h>
6180  #include <linux/fsnotify.h>
6181  
6182 @@ -51,6 +51,29 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
6183                 rpc_restart_call_prepare(task);
6184  }
6185  
6186 +#ifdef CONFIG_PREEMPT_RT_BASE
6187 +static void nfs_down_anon(struct semaphore *sema)
6188 +{
6189 +       down(sema);
6190 +}
6191 +
6192 +static void nfs_up_anon(struct semaphore *sema)
6193 +{
6194 +       up(sema);
6195 +}
6196 +
6197 +#else
6198 +static void nfs_down_anon(struct rw_semaphore *rwsem)
6199 +{
6200 +       down_read_non_owner(rwsem);
6201 +}
6202 +
6203 +static void nfs_up_anon(struct rw_semaphore *rwsem)
6204 +{
6205 +       up_read_non_owner(rwsem);
6206 +}
6207 +#endif
6208 +
6209  /**
6210   * nfs_async_unlink_release - Release the sillydelete data.
6211   * @task: rpc_task of the sillydelete
6212 @@ -64,7 +87,7 @@ static void nfs_async_unlink_release(void *calldata)
6213         struct dentry *dentry = data->dentry;
6214         struct super_block *sb = dentry->d_sb;
6215  
6216 -       up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
6217 +       nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
6218         d_lookup_done(dentry);
6219         nfs_free_unlinkdata(data);
6220         dput(dentry);
6221 @@ -117,10 +140,10 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
6222         struct inode *dir = d_inode(dentry->d_parent);
6223         struct dentry *alias;
6224  
6225 -       down_read_non_owner(&NFS_I(dir)->rmdir_sem);
6226 +       nfs_down_anon(&NFS_I(dir)->rmdir_sem);
6227         alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq);
6228         if (IS_ERR(alias)) {
6229 -               up_read_non_owner(&NFS_I(dir)->rmdir_sem);
6230 +               nfs_up_anon(&NFS_I(dir)->rmdir_sem);
6231                 return 0;
6232         }
6233         if (!d_in_lookup(alias)) {
6234 @@ -142,7 +165,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
6235                         ret = 0;
6236                 spin_unlock(&alias->d_lock);
6237                 dput(alias);
6238 -               up_read_non_owner(&NFS_I(dir)->rmdir_sem);
6239 +               nfs_up_anon(&NFS_I(dir)->rmdir_sem);
6240                 /*
6241                  * If we'd displaced old cached devname, free it.  At that
6242                  * point dentry is definitely not a root, so we won't need
6243 @@ -182,7 +205,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name)
6244                 goto out_free_name;
6245         }
6246         data->res.dir_attr = &data->dir_attr;
6247 -       init_waitqueue_head(&data->wq);
6248 +       init_swait_queue_head(&data->wq);
6249  
6250         status = -EBUSY;
6251         spin_lock(&dentry->d_lock);
6252 diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
6253 index fe251f187ff8..e89da4fb14c2 100644
6254 --- a/fs/ntfs/aops.c
6255 +++ b/fs/ntfs/aops.c
6256 @@ -92,13 +92,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
6257                         ofs = 0;
6258                         if (file_ofs < init_size)
6259                                 ofs = init_size - file_ofs;
6260 -                       local_irq_save(flags);
6261 +                       local_irq_save_nort(flags);
6262                         kaddr = kmap_atomic(page);
6263                         memset(kaddr + bh_offset(bh) + ofs, 0,
6264                                         bh->b_size - ofs);
6265                         flush_dcache_page(page);
6266                         kunmap_atomic(kaddr);
6267 -                       local_irq_restore(flags);
6268 +                       local_irq_restore_nort(flags);
6269                 }
6270         } else {
6271                 clear_buffer_uptodate(bh);
6272 @@ -107,8 +107,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
6273                                 "0x%llx.", (unsigned long long)bh->b_blocknr);
6274         }
6275         first = page_buffers(page);
6276 -       local_irq_save(flags);
6277 -       bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
6278 +       flags = bh_uptodate_lock_irqsave(first);
6279         clear_buffer_async_read(bh);
6280         unlock_buffer(bh);
6281         tmp = bh;
6282 @@ -123,8 +122,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
6283                 }
6284                 tmp = tmp->b_this_page;
6285         } while (tmp != bh);
6286 -       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
6287 -       local_irq_restore(flags);
6288 +       bh_uptodate_unlock_irqrestore(first, flags);
6289         /*
6290          * If none of the buffers had errors then we can set the page uptodate,
6291          * but we first have to perform the post read mst fixups, if the
6292 @@ -145,13 +143,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
6293                 recs = PAGE_SIZE / rec_size;
6294                 /* Should have been verified before we got here... */
6295                 BUG_ON(!recs);
6296 -               local_irq_save(flags);
6297 +               local_irq_save_nort(flags);
6298                 kaddr = kmap_atomic(page);
6299                 for (i = 0; i < recs; i++)
6300                         post_read_mst_fixup((NTFS_RECORD*)(kaddr +
6301                                         i * rec_size), rec_size);
6302                 kunmap_atomic(kaddr);
6303 -               local_irq_restore(flags);
6304 +               local_irq_restore_nort(flags);
6305                 flush_dcache_page(page);
6306                 if (likely(page_uptodate && !PageError(page)))
6307                         SetPageUptodate(page);
6308 @@ -159,9 +157,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
6309         unlock_page(page);
6310         return;
6311  still_busy:
6312 -       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
6313 -       local_irq_restore(flags);
6314 -       return;
6315 +       bh_uptodate_unlock_irqrestore(first, flags);
6316  }
6317  
6318  /**
6319 diff --git a/fs/proc/base.c b/fs/proc/base.c
6320 index ca651ac00660..41d9dc789285 100644
6321 --- a/fs/proc/base.c
6322 +++ b/fs/proc/base.c
6323 @@ -1834,7 +1834,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
6324  
6325         child = d_hash_and_lookup(dir, &qname);
6326         if (!child) {
6327 -               DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
6328 +               DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
6329                 child = d_alloc_parallel(dir, &qname, &wq);
6330                 if (IS_ERR(child))
6331                         goto end_instantiate;
6332 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
6333 index d4e37acd4821..000cea46434a 100644
6334 --- a/fs/proc/proc_sysctl.c
6335 +++ b/fs/proc/proc_sysctl.c
6336 @@ -632,7 +632,7 @@ static bool proc_sys_fill_cache(struct file *file,
6337  
6338         child = d_lookup(dir, &qname);
6339         if (!child) {
6340 -               DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
6341 +               DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
6342                 child = d_alloc_parallel(dir, &qname, &wq);
6343                 if (IS_ERR(child))
6344                         return false;
6345 diff --git a/fs/timerfd.c b/fs/timerfd.c
6346 index 9ae4abb4110b..8644b67c48fd 100644
6347 --- a/fs/timerfd.c
6348 +++ b/fs/timerfd.c
6349 @@ -460,7 +460,10 @@ static int do_timerfd_settime(int ufd, int flags,
6350                                 break;
6351                 }
6352                 spin_unlock_irq(&ctx->wqh.lock);
6353 -               cpu_relax();
6354 +               if (isalarm(ctx))
6355 +                       hrtimer_wait_for_timer(&ctx->t.alarm.timer);
6356 +               else
6357 +                       hrtimer_wait_for_timer(&ctx->t.tmr);
6358         }
6359  
6360         /*
6361 diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
6362 index e861a24f06f2..b5c97d3059c7 100644
6363 --- a/include/acpi/platform/aclinux.h
6364 +++ b/include/acpi/platform/aclinux.h
6365 @@ -133,6 +133,7 @@
6366  
6367  #define acpi_cache_t                        struct kmem_cache
6368  #define acpi_spinlock                       spinlock_t *
6369 +#define acpi_raw_spinlock              raw_spinlock_t *
6370  #define acpi_cpu_flags                      unsigned long
6371  
6372  /* Use native linux version of acpi_os_allocate_zeroed */
6373 @@ -151,6 +152,20 @@
6374  #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
6375  #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
6376  
6377 +#define acpi_os_create_raw_lock(__handle)                      \
6378 +({                                                             \
6379 +        raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock));   \
6380 +                                                               \
6381 +        if (lock) {                                            \
6382 +               *(__handle) = lock;                             \
6383 +               raw_spin_lock_init(*(__handle));                \
6384 +        }                                                      \
6385 +        lock ? AE_OK : AE_NO_MEMORY;                           \
6386 + })
6387 +
6388 +#define acpi_os_delete_raw_lock(__handle)      kfree(__handle)
6389 +
6390 +
6391  /*
6392   * OSL interfaces used by debugger/disassembler
6393   */
6394 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
6395 index 6f96247226a4..fa53a21263c2 100644
6396 --- a/include/asm-generic/bug.h
6397 +++ b/include/asm-generic/bug.h
6398 @@ -215,6 +215,20 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
6399  # define WARN_ON_SMP(x)                        ({0;})
6400  #endif
6401  
6402 +#ifdef CONFIG_PREEMPT_RT_BASE
6403 +# define BUG_ON_RT(c)                  BUG_ON(c)
6404 +# define BUG_ON_NONRT(c)               do { } while (0)
6405 +# define WARN_ON_RT(condition)         WARN_ON(condition)
6406 +# define WARN_ON_NONRT(condition)      do { } while (0)
6407 +# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
6408 +#else
6409 +# define BUG_ON_RT(c)                  do { } while (0)
6410 +# define BUG_ON_NONRT(c)               BUG_ON(c)
6411 +# define WARN_ON_RT(condition)         do { } while (0)
6412 +# define WARN_ON_NONRT(condition)      WARN_ON(condition)
6413 +# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
6414 +#endif
6415 +
6416  #endif /* __ASSEMBLY__ */
6417  
6418  #endif
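
The BUG_ON_RT/BUG_ON_NONRT and WARN_ON_*RT helpers above let one call site assert a condition only on RT kernels, or only on non-RT kernels, without an #ifdef CONFIG_PREEMPT_RT_BASE around every check. The same trick in a tiny standalone form, with assert-based macro names invented for the example:

/* Config-gated assertions: one call site, the build configuration decides
 * which checks are compiled in.  Build with -DTOY_RT to flip the set. */
#include <assert.h>
#include <stdio.h>

#ifdef TOY_RT
# define ASSERT_RT(c)     assert(c)
# define ASSERT_NONRT(c)  do { } while (0)
#else
# define ASSERT_RT(c)     do { } while (0)
# define ASSERT_NONRT(c)  assert(c)
#endif

int main(void)
{
        int irqs_disabled = 0;

        /* On a non-TOY_RT build the first check is active and the second
         * compiles away; a TOY_RT build flips that, mirroring the
         * WARN_ON_NONRT()/WARN_ON_RT() pairing above. */
        ASSERT_NONRT(irqs_disabled == 0);
        ASSERT_RT(irqs_disabled == 0);
        printf("checks passed for this configuration\n");
        return 0;
}
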
6419 diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
6420 index 535ab2e13d2e..cfc246899473 100644
6421 --- a/include/linux/blk-mq.h
6422 +++ b/include/linux/blk-mq.h
6423 @@ -209,7 +209,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
6424         return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
6425  }
6426  
6427 -
6428 +void __blk_mq_complete_request_remote_work(struct work_struct *work);
6429  int blk_mq_request_started(struct request *rq);
6430  void blk_mq_start_request(struct request *rq);
6431  void blk_mq_end_request(struct request *rq, int error);
6432 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
6433 index f6a816129856..ec7a4676f8a8 100644
6434 --- a/include/linux/blkdev.h
6435 +++ b/include/linux/blkdev.h
6436 @@ -89,6 +89,7 @@ struct request {
6437         struct list_head queuelist;
6438         union {
6439                 struct call_single_data csd;
6440 +               struct work_struct work;
6441                 u64 fifo_time;
6442         };
6443  
6444 @@ -467,7 +468,7 @@ struct request_queue {
6445         struct throtl_data *td;
6446  #endif
6447         struct rcu_head         rcu_head;
6448 -       wait_queue_head_t       mq_freeze_wq;
6449 +       struct swait_queue_head mq_freeze_wq;
6450         struct percpu_ref       q_usage_counter;
6451         struct list_head        all_q_node;
6452  
6453 diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
6454 index 8fdcb783197d..d07dbeec7bc1 100644
6455 --- a/include/linux/bottom_half.h
6456 +++ b/include/linux/bottom_half.h
6457 @@ -3,6 +3,39 @@
6458  
6459  #include <linux/preempt.h>
6460  
6461 +#ifdef CONFIG_PREEMPT_RT_FULL
6462 +
6463 +extern void __local_bh_disable(void);
6464 +extern void _local_bh_enable(void);
6465 +extern void __local_bh_enable(void);
6466 +
6467 +static inline void local_bh_disable(void)
6468 +{
6469 +       __local_bh_disable();
6470 +}
6471 +
6472 +static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
6473 +{
6474 +       __local_bh_disable();
6475 +}
6476 +
6477 +static inline void local_bh_enable(void)
6478 +{
6479 +       __local_bh_enable();
6480 +}
6481 +
6482 +static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
6483 +{
6484 +       __local_bh_enable();
6485 +}
6486 +
6487 +static inline void local_bh_enable_ip(unsigned long ip)
6488 +{
6489 +       __local_bh_enable();
6490 +}
6491 +
6492 +#else
6493 +
6494  #ifdef CONFIG_TRACE_IRQFLAGS
6495  extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
6496  #else
6497 @@ -30,5 +63,6 @@ static inline void local_bh_enable(void)
6498  {
6499         __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
6500  }
6501 +#endif
6502  
6503  #endif /* _LINUX_BH_H */
6504 diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
6505 index ebbacd14d450..be5e87f6360a 100644
6506 --- a/include/linux/buffer_head.h
6507 +++ b/include/linux/buffer_head.h
6508 @@ -75,8 +75,50 @@ struct buffer_head {
6509         struct address_space *b_assoc_map;      /* mapping this buffer is
6510                                                    associated with */
6511         atomic_t b_count;               /* users using this buffer_head */
6512 +#ifdef CONFIG_PREEMPT_RT_BASE
6513 +       spinlock_t b_uptodate_lock;
6514 +#if IS_ENABLED(CONFIG_JBD2)
6515 +       spinlock_t b_state_lock;
6516 +       spinlock_t b_journal_head_lock;
6517 +#endif
6518 +#endif
6519  };
6520  
6521 +static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
6522 +{
6523 +       unsigned long flags;
6524 +
6525 +#ifndef CONFIG_PREEMPT_RT_BASE
6526 +       local_irq_save(flags);
6527 +       bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
6528 +#else
6529 +       spin_lock_irqsave(&bh->b_uptodate_lock, flags);
6530 +#endif
6531 +       return flags;
6532 +}
6533 +
6534 +static inline void
6535 +bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
6536 +{
6537 +#ifndef CONFIG_PREEMPT_RT_BASE
6538 +       bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
6539 +       local_irq_restore(flags);
6540 +#else
6541 +       spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
6542 +#endif
6543 +}
6544 +
6545 +static inline void buffer_head_init_locks(struct buffer_head *bh)
6546 +{
6547 +#ifdef CONFIG_PREEMPT_RT_BASE
6548 +       spin_lock_init(&bh->b_uptodate_lock);
6549 +#if IS_ENABLED(CONFIG_JBD2)
6550 +       spin_lock_init(&bh->b_state_lock);
6551 +       spin_lock_init(&bh->b_journal_head_lock);
6552 +#endif
6553 +#endif
6554 +}
6555 +
6556  /*
6557   * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
6558   * and buffer_foo() functions.
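
The bh_uptodate_lock_irqsave()/bh_uptodate_unlock_irqrestore() helpers introduced in this hunk hide the old "disable interrupts plus bit spinlock" sequence behind one pair of calls, so the fs/buffer.c and fs/ntfs/aops.c call sites stay identical while RT builds substitute a per-buffer spinlock. A small standalone illustration of that wrapping idea, with the lock flavours chosen arbitrarily for the example:

/* Hiding the choice of locking primitive behind a helper pair, so call
 * sites stay config-agnostic.  Build with -DTOY_RT to switch the
 * underlying primitive. */
#include <pthread.h>
#include <stdio.h>

struct buffer {
#ifdef TOY_RT
        pthread_mutex_t state_lock;       /* "sleeping lock" flavour */
#else
        pthread_spinlock_t state_lock;    /* "spinning lock" flavour */
#endif
        int uptodate;
};

static void buffer_init(struct buffer *b)
{
#ifdef TOY_RT
        pthread_mutex_init(&b->state_lock, NULL);
#else
        pthread_spin_init(&b->state_lock, PTHREAD_PROCESS_PRIVATE);
#endif
        b->uptodate = 0;
}

static void buffer_state_lock(struct buffer *b)
{
#ifdef TOY_RT
        pthread_mutex_lock(&b->state_lock);
#else
        pthread_spin_lock(&b->state_lock);
#endif
}

static void buffer_state_unlock(struct buffer *b)
{
#ifdef TOY_RT
        pthread_mutex_unlock(&b->state_lock);
#else
        pthread_spin_unlock(&b->state_lock);
#endif
}

int main(void)
{
        struct buffer b;

        buffer_init(&b);
        buffer_state_lock(&b);            /* call site is config-agnostic */
        b.uptodate = 1;
        buffer_state_unlock(&b);
        printf("uptodate=%d\n", b.uptodate);
        return 0;
}

The kernel version additionally folds the saved interrupt flags into the same helper pair, which is why the earlier fs/buffer.c hunks could drop their explicit local_irq_save()/restore() calls.
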
6559 diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
6560 index 5b17de62c962..56027cc01a56 100644
6561 --- a/include/linux/cgroup-defs.h
6562 +++ b/include/linux/cgroup-defs.h
6563 @@ -16,6 +16,7 @@
6564  #include <linux/percpu-refcount.h>
6565  #include <linux/percpu-rwsem.h>
6566  #include <linux/workqueue.h>
6567 +#include <linux/swork.h>
6568  
6569  #ifdef CONFIG_CGROUPS
6570  
6571 @@ -137,6 +138,7 @@ struct cgroup_subsys_state {
6572         /* percpu_ref killing and RCU release */
6573         struct rcu_head rcu_head;
6574         struct work_struct destroy_work;
6575 +       struct swork_event destroy_swork;
6576  };
6577  
6578  /*
6579 diff --git a/include/linux/completion.h b/include/linux/completion.h
6580 index 5d5aaae3af43..3bca1590e29f 100644
6581 --- a/include/linux/completion.h
6582 +++ b/include/linux/completion.h
6583 @@ -7,8 +7,7 @@
6584   * Atomic wait-for-completion handler data structures.
6585   * See kernel/sched/completion.c for details.
6586   */
6587 -
6588 -#include <linux/wait.h>
6589 +#include <linux/swait.h>
6590  
6591  /*
6592   * struct completion - structure used to maintain state for a "completion"
6593 @@ -24,11 +23,11 @@
6594   */
6595  struct completion {
6596         unsigned int done;
6597 -       wait_queue_head_t wait;
6598 +       struct swait_queue_head wait;
6599  };
6600  
6601  #define COMPLETION_INITIALIZER(work) \
6602 -       { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
6603 +       { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
6604  
6605  #define COMPLETION_INITIALIZER_ONSTACK(work) \
6606         ({ init_completion(&work); work; })
6607 @@ -73,7 +72,7 @@ struct completion {
6608  static inline void init_completion(struct completion *x)
6609  {
6610         x->done = 0;
6611 -       init_waitqueue_head(&x->wait);
6612 +       init_swait_queue_head(&x->wait);
6613  }
6614  
6615  /**
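
With this hunk a struct completion sleeps on a simple wait queue (swait) rather than a full wait queue, which is why the earlier usb gadget hunks switched their wait_event() calls on done.wait to swait_event(). Stripped of the kernel details, a completion is a counter plus a wait queue: complete() bumps the counter and wakes a waiter, wait_for_completion() sleeps until the counter is non-zero. A userspace sketch of exactly that, reusing the kernel names purely for readability:

/* A userspace "completion": done counter plus a wait queue. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct completion {
        unsigned int done;
        pthread_mutex_t lock;
        pthread_cond_t wait;
};

static void init_completion(struct completion *x)
{
        x->done = 0;
        pthread_mutex_init(&x->lock, NULL);
        pthread_cond_init(&x->wait, NULL);
}

static void wait_for_completion(struct completion *x)
{
        pthread_mutex_lock(&x->lock);
        while (!x->done)
                pthread_cond_wait(&x->wait, &x->lock);
        x->done--;
        pthread_mutex_unlock(&x->lock);
}

static void complete(struct completion *x)
{
        pthread_mutex_lock(&x->lock);
        x->done++;
        pthread_cond_signal(&x->wait);
        pthread_mutex_unlock(&x->lock);
}

static struct completion request_done;

static void *worker(void *arg)
{
        (void)arg;
        sleep(1);                          /* pretend to do the I/O */
        complete(&request_done);
        return NULL;
}

int main(void)
{
        pthread_t tid;

        init_completion(&request_done);
        pthread_create(&tid, NULL, worker, NULL);
        wait_for_completion(&request_done);
        printf("request completed\n");
        pthread_join(tid, NULL);
        return 0;
}
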
6616 diff --git a/include/linux/cpu.h b/include/linux/cpu.h
6617 index e571128ad99a..5e52d28c20c1 100644
6618 --- a/include/linux/cpu.h
6619 +++ b/include/linux/cpu.h
6620 @@ -182,6 +182,8 @@ extern void get_online_cpus(void);
6621  extern void put_online_cpus(void);
6622  extern void cpu_hotplug_disable(void);
6623  extern void cpu_hotplug_enable(void);
6624 +extern void pin_current_cpu(void);
6625 +extern void unpin_current_cpu(void);
6626  #define hotcpu_notifier(fn, pri)       cpu_notifier(fn, pri)
6627  #define __hotcpu_notifier(fn, pri)     __cpu_notifier(fn, pri)
6628  #define register_hotcpu_notifier(nb)   register_cpu_notifier(nb)
6629 @@ -199,6 +201,8 @@ static inline void cpu_hotplug_done(void) {}
6630  #define put_online_cpus()      do { } while (0)
6631  #define cpu_hotplug_disable()  do { } while (0)
6632  #define cpu_hotplug_enable()   do { } while (0)
6633 +static inline void pin_current_cpu(void) { }
6634 +static inline void unpin_current_cpu(void) { }
6635  #define hotcpu_notifier(fn, pri)       do { (void)(fn); } while (0)
6636  #define __hotcpu_notifier(fn, pri)     do { (void)(fn); } while (0)
6637  /* These aren't inline functions due to a GCC bug. */
6638 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
6639 index 5beed7b30561..61cab7ef458e 100644
6640 --- a/include/linux/dcache.h
6641 +++ b/include/linux/dcache.h
6642 @@ -11,6 +11,7 @@
6643  #include <linux/rcupdate.h>
6644  #include <linux/lockref.h>
6645  #include <linux/stringhash.h>
6646 +#include <linux/wait.h>
6647  
6648  struct path;
6649  struct vfsmount;
6650 @@ -100,7 +101,7 @@ struct dentry {
6651  
6652         union {
6653                 struct list_head d_lru;         /* LRU list */
6654 -               wait_queue_head_t *d_wait;      /* in-lookup ones only */
6655 +               struct swait_queue_head *d_wait;        /* in-lookup ones only */
6656         };
6657         struct list_head d_child;       /* child of parent list */
6658         struct list_head d_subdirs;     /* our children */
6659 @@ -230,7 +231,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op
6660  extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
6661  extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
6662  extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
6663 -                                       wait_queue_head_t *);
6664 +                                       struct swait_queue_head *);
6665  extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
6666  extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
6667  extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
6668 diff --git a/include/linux/delay.h b/include/linux/delay.h
6669 index a6ecb34cf547..37caab306336 100644
6670 --- a/include/linux/delay.h
6671 +++ b/include/linux/delay.h
6672 @@ -52,4 +52,10 @@ static inline void ssleep(unsigned int seconds)
6673         msleep(seconds * 1000);
6674  }
6675  
6676 +#ifdef CONFIG_PREEMPT_RT_FULL
6677 +extern void cpu_chill(void);
6678 +#else
6679 +# define cpu_chill()   cpu_relax()
6680 +#endif
6681 +
6682  #endif /* defined(_LINUX_DELAY_H) */
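
cpu_chill() is what the trylock-retry loops earlier in this patch (autofs expire, d_delete, __mnt_want_write, ...) call instead of cpu_relax(): on non-RT builds it stays the cheap busy-wait hint, while the RT implementation, defined elsewhere in this patch, sleeps briefly so a lower-priority lock holder can actually run. A userspace illustration of the two retry strategies; the 1 ms sleep and the sched_yield() stand-in are arbitrary choices for the example, not what the kernel does:

/* Retry loop around a trylock: busy-wait style hint vs. a short sleep
 * that lets the lock holder make progress.  Build with -DTOY_RT to use
 * the sleeping variant. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <time.h>

#ifdef TOY_RT
static void toy_cpu_chill(void)
{
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 };  /* ~1 ms */

        nanosleep(&ts, NULL);             /* give the lock holder CPU time */
}
#else
static void toy_cpu_chill(void)
{
        sched_yield();                    /* stand-in for a cpu_relax()-style hint */
}
#endif

static pthread_mutex_t target = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
        int tries = 0;

        while (pthread_mutex_trylock(&target) != 0) {
                toy_cpu_chill();
                tries++;
        }
        pthread_mutex_unlock(&target);
        printf("acquired after %d retries\n", tries);
        return 0;
}
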
6683 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
6684 index bb3f3297062a..a117a33ef72c 100644
6685 --- a/include/linux/highmem.h
6686 +++ b/include/linux/highmem.h
6687 @@ -7,6 +7,7 @@
6688  #include <linux/mm.h>
6689  #include <linux/uaccess.h>
6690  #include <linux/hardirq.h>
6691 +#include <linux/sched.h>
6692  
6693  #include <asm/cacheflush.h>
6694  
6695 @@ -65,7 +66,7 @@ static inline void kunmap(struct page *page)
6696  
6697  static inline void *kmap_atomic(struct page *page)
6698  {
6699 -       preempt_disable();
6700 +       preempt_disable_nort();
6701         pagefault_disable();
6702         return page_address(page);
6703  }
6704 @@ -74,7 +75,7 @@ static inline void *kmap_atomic(struct page *page)
6705  static inline void __kunmap_atomic(void *addr)
6706  {
6707         pagefault_enable();
6708 -       preempt_enable();
6709 +       preempt_enable_nort();
6710  }
6711  
6712  #define kmap_atomic_pfn(pfn)   kmap_atomic(pfn_to_page(pfn))
6713 @@ -86,32 +87,51 @@ static inline void __kunmap_atomic(void *addr)
6714  
6715  #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
6716  
6717 +#ifndef CONFIG_PREEMPT_RT_FULL
6718  DECLARE_PER_CPU(int, __kmap_atomic_idx);
6719 +#endif
6720  
6721  static inline int kmap_atomic_idx_push(void)
6722  {
6723 +#ifndef CONFIG_PREEMPT_RT_FULL
6724         int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
6725  
6726 -#ifdef CONFIG_DEBUG_HIGHMEM
6727 +# ifdef CONFIG_DEBUG_HIGHMEM
6728         WARN_ON_ONCE(in_irq() && !irqs_disabled());
6729         BUG_ON(idx >= KM_TYPE_NR);
6730 -#endif
6731 +# endif
6732         return idx;
6733 +#else
6734 +       current->kmap_idx++;
6735 +       BUG_ON(current->kmap_idx > KM_TYPE_NR);
6736 +       return current->kmap_idx - 1;
6737 +#endif
6738  }
6739  
6740  static inline int kmap_atomic_idx(void)
6741  {
6742 +#ifndef CONFIG_PREEMPT_RT_FULL
6743         return __this_cpu_read(__kmap_atomic_idx) - 1;
6744 +#else
6745 +       return current->kmap_idx - 1;
6746 +#endif
6747  }
6748  
6749  static inline void kmap_atomic_idx_pop(void)
6750  {
6751 -#ifdef CONFIG_DEBUG_HIGHMEM
6752 +#ifndef CONFIG_PREEMPT_RT_FULL
6753 +# ifdef CONFIG_DEBUG_HIGHMEM
6754         int idx = __this_cpu_dec_return(__kmap_atomic_idx);
6755  
6756         BUG_ON(idx < 0);
6757 -#else
6758 +# else
6759         __this_cpu_dec(__kmap_atomic_idx);
6760 +# endif
6761 +#else
6762 +       current->kmap_idx--;
6763 +# ifdef CONFIG_DEBUG_HIGHMEM
6764 +       BUG_ON(current->kmap_idx < 0);
6765 +# endif
6766  #endif
6767  }
6768  
6769 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
6770 index 5e00f80b1535..a34e10b55cde 100644
6771 --- a/include/linux/hrtimer.h
6772 +++ b/include/linux/hrtimer.h
6773 @@ -87,6 +87,9 @@ enum hrtimer_restart {
6774   * @function:  timer expiry callback function
6775   * @base:      pointer to the timer base (per cpu and per clock)
6776   * @state:     state information (See bit values above)
6777 + * @cb_entry:  list entry to defer timers from hardirq context
6778 + * @irqsafe:   timer can run in hardirq context
6779 + * @praecox:   timer expiry time if expired at the time of programming
6780   * @is_rel:    Set if the timer was armed relative
6781   * @start_pid:  timer statistics field to store the pid of the task which
6782   *             started the timer
6783 @@ -103,6 +106,11 @@ struct hrtimer {
6784         enum hrtimer_restart            (*function)(struct hrtimer *);
6785         struct hrtimer_clock_base       *base;
6786         u8                              state;
6787 +       struct list_head                cb_entry;
6788 +       int                             irqsafe;
6789 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
6790 +       ktime_t                         praecox;
6791 +#endif
6792         u8                              is_rel;
6793  #ifdef CONFIG_TIMER_STATS
6794         int                             start_pid;
6795 @@ -123,11 +131,7 @@ struct hrtimer_sleeper {
6796         struct task_struct *task;
6797  };
6798  
6799 -#ifdef CONFIG_64BIT
6800  # define HRTIMER_CLOCK_BASE_ALIGN      64
6801 -#else
6802 -# define HRTIMER_CLOCK_BASE_ALIGN      32
6803 -#endif
6804  
6805  /**
6806   * struct hrtimer_clock_base - the timer base for a specific clock
6807 @@ -136,6 +140,7 @@ struct hrtimer_sleeper {
6808   *                     timer to a base on another cpu.
6809   * @clockid:           clock id for per_cpu support
6810   * @active:            red black tree root node for the active timers
6811 + * @expired:           list head for deferred timers.
6812   * @get_time:          function to retrieve the current time of the clock
6813   * @offset:            offset of this clock to the monotonic base
6814   */
6815 @@ -144,6 +149,7 @@ struct hrtimer_clock_base {
6816         int                     index;
6817         clockid_t               clockid;
6818         struct timerqueue_head  active;
6819 +       struct list_head        expired;
6820         ktime_t                 (*get_time)(void);
6821         ktime_t                 offset;
6822  } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
6823 @@ -187,6 +193,7 @@ struct hrtimer_cpu_base {
6824         raw_spinlock_t                  lock;
6825         seqcount_t                      seq;
6826         struct hrtimer                  *running;
6827 +       struct hrtimer                  *running_soft;
6828         unsigned int                    cpu;
6829         unsigned int                    active_bases;
6830         unsigned int                    clock_was_set_seq;
6831 @@ -203,6 +210,9 @@ struct hrtimer_cpu_base {
6832         unsigned int                    nr_hangs;
6833         unsigned int                    max_hang_time;
6834  #endif
6835 +#ifdef CONFIG_PREEMPT_RT_BASE
6836 +       wait_queue_head_t               wait;
6837 +#endif
6838         struct hrtimer_clock_base       clock_base[HRTIMER_MAX_CLOCK_BASES];
6839  } ____cacheline_aligned;
6840  
6841 @@ -412,6 +422,13 @@ static inline void hrtimer_restart(struct hrtimer *timer)
6842         hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
6843  }
6844  
6845 +/* Softirq preemption could deadlock timer removal */
6846 +#ifdef CONFIG_PREEMPT_RT_BASE
6847 +  extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
6848 +#else
6849 +# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
6850 +#endif
6851 +
6852  /* Query timers: */
6853  extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
6854  
6855 @@ -436,9 +453,15 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
6856   * Helper function to check, whether the timer is running the callback
6857   * function
6858   */
6859 -static inline int hrtimer_callback_running(struct hrtimer *timer)
6860 +static inline int hrtimer_callback_running(const struct hrtimer *timer)
6861  {
6862 -       return timer->base->cpu_base->running == timer;
6863 +       if (timer->base->cpu_base->running == timer)
6864 +               return 1;
6865 +#ifdef CONFIG_PREEMPT_RT_BASE
6866 +       if (timer->base->cpu_base->running_soft == timer)
6867 +               return 1;
6868 +#endif
6869 +       return 0;
6870  }
6871  
6872  /* Forward a hrtimer so it expires after now: */
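A minimal usage sketch of the RT hrtimer additions above: on PREEMPT_RT a callback may run in the softirq thread, so code that needs a synchronous cancel retries hrtimer_try_to_cancel() and blocks in hrtimer_wait_for_timer() instead of spinning. The helper name my_cancel_sync is illustrative; the loop mirrors the usual hrtimer_cancel() pattern.

/* Illustrative helper, assuming the RT semantics added above. */
static void my_cancel_sync(struct hrtimer *timer)
{
	for (;;) {
		/* >= 0: timer was inactive (0) or successfully dequeued (1) */
		if (hrtimer_try_to_cancel(timer) >= 0)
			return;
		/* callback in progress: sleep (RT) or cpu_relax() (!RT) */
		hrtimer_wait_for_timer(timer);
	}
}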
6873 diff --git a/include/linux/idr.h b/include/linux/idr.h
6874 index 083d61e92706..5899796f50cb 100644
6875 --- a/include/linux/idr.h
6876 +++ b/include/linux/idr.h
6877 @@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp);
6878   * Each idr_preload() should be matched with an invocation of this
6879   * function.  See idr_preload() for details.
6880   */
6881 +#ifdef CONFIG_PREEMPT_RT_FULL
6882 +void idr_preload_end(void);
6883 +#else
6884  static inline void idr_preload_end(void)
6885  {
6886         preempt_enable();
6887  }
6888 +#endif
6889  
6890  /**
6891   * idr_find - return pointer for given id
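A usage sketch for the idr change above: callers keep the usual preload idiom, only the implementation of idr_preload_end() differs on RT. The names my_store, my_idr, my_lock and obj are placeholders.

static DEFINE_SPINLOCK(my_lock);

static int my_store(struct idr *my_idr, void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);
	id = idr_alloc(my_idr, obj, 1, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();

	return id;
}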
6892 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
6893 index 325f649d77ff..8af70bcc799b 100644
6894 --- a/include/linux/init_task.h
6895 +++ b/include/linux/init_task.h
6896 @@ -150,6 +150,12 @@ extern struct task_group root_task_group;
6897  # define INIT_PERF_EVENTS(tsk)
6898  #endif
6899  
6900 +#ifdef CONFIG_PREEMPT_RT_BASE
6901 +# define INIT_TIMER_LIST               .posix_timer_list = NULL,
6902 +#else
6903 +# define INIT_TIMER_LIST
6904 +#endif
6905 +
6906  #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
6907  # define INIT_VTIME(tsk)                                               \
6908         .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount),      \
6909 @@ -250,6 +256,7 @@ extern struct task_group root_task_group;
6910         .cpu_timers     = INIT_CPU_TIMERS(tsk.cpu_timers),              \
6911         .pi_lock        = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),        \
6912         .timer_slack_ns = 50000, /* 50 usec default slack */            \
6913 +       INIT_TIMER_LIST                                                 \
6914         .pids = {                                                       \
6915                 [PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),            \
6916                 [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),           \
6917 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
6918 index 72f0721f75e7..480972ae47d3 100644
6919 --- a/include/linux/interrupt.h
6920 +++ b/include/linux/interrupt.h
6921 @@ -14,6 +14,7 @@
6922  #include <linux/hrtimer.h>
6923  #include <linux/kref.h>
6924  #include <linux/workqueue.h>
6925 +#include <linux/swork.h>
6926  
6927  #include <linux/atomic.h>
6928  #include <asm/ptrace.h>
6929 @@ -61,6 +62,7 @@
6930   *                interrupt handler after suspending interrupts. For system
6931   *                wakeup devices users need to implement wakeup detection in
6932   *                their interrupt handlers.
6933 + * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
6934   */
6935  #define IRQF_SHARED            0x00000080
6936  #define IRQF_PROBE_SHARED      0x00000100
6937 @@ -74,6 +76,7 @@
6938  #define IRQF_NO_THREAD         0x00010000
6939  #define IRQF_EARLY_RESUME      0x00020000
6940  #define IRQF_COND_SUSPEND      0x00040000
6941 +#define IRQF_NO_SOFTIRQ_CALL   0x00080000
6942  
6943  #define IRQF_TIMER             (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
6944  
6945 @@ -196,7 +199,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
6946  #ifdef CONFIG_LOCKDEP
6947  # define local_irq_enable_in_hardirq() do { } while (0)
6948  #else
6949 -# define local_irq_enable_in_hardirq() local_irq_enable()
6950 +# define local_irq_enable_in_hardirq() local_irq_enable_nort()
6951  #endif
6952  
6953  extern void disable_irq_nosync(unsigned int irq);
6954 @@ -216,6 +219,7 @@ extern void resume_device_irqs(void);
6955   * struct irq_affinity_notify - context for notification of IRQ affinity changes
6956   * @irq:               Interrupt to which notification applies
6957   * @kref:              Reference count, for internal use
6958 + * @swork:             Swork item, for internal use
6959   * @work:              Work item, for internal use
6960   * @notify:            Function to be called on change.  This will be
6961   *                     called in process context.
6962 @@ -227,7 +231,11 @@ extern void resume_device_irqs(void);
6963  struct irq_affinity_notify {
6964         unsigned int irq;
6965         struct kref kref;
6966 +#ifdef CONFIG_PREEMPT_RT_BASE
6967 +       struct swork_event swork;
6968 +#else
6969         struct work_struct work;
6970 +#endif
6971         void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
6972         void (*release)(struct kref *ref);
6973  };
6974 @@ -406,9 +414,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
6975                                  bool state);
6976  
6977  #ifdef CONFIG_IRQ_FORCED_THREADING
6978 +# ifndef CONFIG_PREEMPT_RT_BASE
6979  extern bool force_irqthreads;
6980 +# else
6981 +#  define force_irqthreads     (true)
6982 +# endif
6983  #else
6984 -#define force_irqthreads       (0)
6985 +#define force_irqthreads       (false)
6986  #endif
6987  
6988  #ifndef __ARCH_SET_SOFTIRQ_PENDING
6989 @@ -465,9 +477,10 @@ struct softirq_action
6990         void    (*action)(struct softirq_action *);
6991  };
6992  
6993 +#ifndef CONFIG_PREEMPT_RT_FULL
6994  asmlinkage void do_softirq(void);
6995  asmlinkage void __do_softirq(void);
6996 -
6997 +static inline void thread_do_softirq(void) { do_softirq(); }
6998  #ifdef __ARCH_HAS_DO_SOFTIRQ
6999  void do_softirq_own_stack(void);
7000  #else
7001 @@ -476,13 +489,25 @@ static inline void do_softirq_own_stack(void)
7002         __do_softirq();
7003  }
7004  #endif
7005 +#else
7006 +extern void thread_do_softirq(void);
7007 +#endif
7008  
7009  extern void open_softirq(int nr, void (*action)(struct softirq_action *));
7010  extern void softirq_init(void);
7011  extern void __raise_softirq_irqoff(unsigned int nr);
7012 +#ifdef CONFIG_PREEMPT_RT_FULL
7013 +extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
7014 +#else
7015 +static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
7016 +{
7017 +       __raise_softirq_irqoff(nr);
7018 +}
7019 +#endif
7020  
7021  extern void raise_softirq_irqoff(unsigned int nr);
7022  extern void raise_softirq(unsigned int nr);
7023 +extern void softirq_check_pending_idle(void);
7024  
7025  DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
7026  
7027 @@ -504,8 +529,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
7028       to be executed on some cpu at least once after this.
7029     * If the tasklet is already scheduled, but its execution is still not
7030       started, it will be executed only once.
7031 -   * If this tasklet is already running on another CPU (or schedule is called
7032 -     from tasklet itself), it is rescheduled for later.
7033 +   * If this tasklet is already running on another CPU, it is rescheduled
7034 +     for later.
7035 +   * Schedule must not be called from the tasklet itself (a lockup occurs)
7036     * Tasklet is strictly serialized wrt itself, but not
7037       wrt another tasklets. If client needs some intertask synchronization,
7038       he makes it with spinlocks.
7039 @@ -530,27 +556,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
7040  enum
7041  {
7042         TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
7043 -       TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
7044 +       TASKLET_STATE_RUN,      /* Tasklet is running (SMP only) */
7045 +       TASKLET_STATE_PENDING   /* Tasklet is pending */
7046  };
7047  
7048 -#ifdef CONFIG_SMP
7049 +#define TASKLET_STATEF_SCHED   (1 << TASKLET_STATE_SCHED)
7050 +#define TASKLET_STATEF_RUN     (1 << TASKLET_STATE_RUN)
7051 +#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
7052 +
7053 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
7054  static inline int tasklet_trylock(struct tasklet_struct *t)
7055  {
7056         return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
7057  }
7058  
7059 +static inline int tasklet_tryunlock(struct tasklet_struct *t)
7060 +{
7061 +       return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
7062 +}
7063 +
7064  static inline void tasklet_unlock(struct tasklet_struct *t)
7065  {
7066         smp_mb__before_atomic();
7067         clear_bit(TASKLET_STATE_RUN, &(t)->state);
7068  }
7069  
7070 -static inline void tasklet_unlock_wait(struct tasklet_struct *t)
7071 -{
7072 -       while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
7073 -}
7074 +extern void tasklet_unlock_wait(struct tasklet_struct *t);
7075 +
7076  #else
7077  #define tasklet_trylock(t) 1
7078 +#define tasklet_tryunlock(t)   1
7079  #define tasklet_unlock_wait(t) do { } while (0)
7080  #define tasklet_unlock(t) do { } while (0)
7081  #endif
7082 @@ -599,12 +634,7 @@ static inline void tasklet_disable(struct tasklet_struct *t)
7083         smp_mb();
7084  }
7085  
7086 -static inline void tasklet_enable(struct tasklet_struct *t)
7087 -{
7088 -       smp_mb__before_atomic();
7089 -       atomic_dec(&t->count);
7090 -}
7091 -
7092 +extern void tasklet_enable(struct tasklet_struct *t);
7093  extern void tasklet_kill(struct tasklet_struct *t);
7094  extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
7095  extern void tasklet_init(struct tasklet_struct *t,
7096 @@ -635,6 +665,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
7097         tasklet_kill(&ttimer->tasklet);
7098  }
7099  
7100 +#ifdef CONFIG_PREEMPT_RT_FULL
7101 +extern void softirq_early_init(void);
7102 +#else
7103 +static inline void softirq_early_init(void) { }
7104 +#endif
7105 +
7106  /*
7107   * Autoprobing for irqs:
7108   *
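A sketch of how the new IRQF_NO_SOFTIRQ_CALL flag is requested; the irq number, handler and "my-dev" cookie are illustrative. The flag tells the (forced-)threaded handler path not to process pending softirqs on return.

static irqreturn_t my_handler(int irq, void *dev_id)
{
	/* device-specific handling would go here */
	return IRQ_HANDLED;
}

static int my_setup_irq(unsigned int irq, void *dev)
{
	return request_irq(irq, my_handler,
			   IRQF_SHARED | IRQF_NO_SOFTIRQ_CALL, "my-dev", dev);
}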
7109 diff --git a/include/linux/irq.h b/include/linux/irq.h
7110 index 39e3254e5769..8ebac94fbb9f 100644
7111 --- a/include/linux/irq.h
7112 +++ b/include/linux/irq.h
7113 @@ -72,6 +72,7 @@ enum irqchip_irq_state;
7114   * IRQ_IS_POLLED               - Always polled by another interrupt. Exclude
7115   *                               it from the spurious interrupt detection
7116   *                               mechanism and from core side polling.
7117 + * IRQ_NO_SOFTIRQ_CALL         - No softirq processing in the irq thread context (RT)
7118   * IRQ_DISABLE_UNLAZY          - Disable lazy irq disable
7119   */
7120  enum {
7121 @@ -99,13 +100,14 @@ enum {
7122         IRQ_PER_CPU_DEVID       = (1 << 17),
7123         IRQ_IS_POLLED           = (1 << 18),
7124         IRQ_DISABLE_UNLAZY      = (1 << 19),
7125 +       IRQ_NO_SOFTIRQ_CALL     = (1 << 20),
7126  };
7127  
7128  #define IRQF_MODIFY_MASK       \
7129         (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
7130          IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
7131          IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
7132 -        IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
7133 +        IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)
7134  
7135  #define IRQ_NO_BALANCING_MASK  (IRQ_PER_CPU | IRQ_NO_BALANCING)
7136  
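The matching per-descriptor status bit can be set from setup code so the flow handler skips softirq processing for one interrupt; a minimal sketch with a placeholder irq number:

static void my_mark_irq_no_softirq(unsigned int irq)
{
	/* IRQ_NO_SOFTIRQ_CALL is part of IRQF_MODIFY_MASK, so this sticks */
	irq_set_status_flags(irq, IRQ_NO_SOFTIRQ_CALL);
}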
7137 diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
7138 index 47b9ebd4a74f..2543aab05daa 100644
7139 --- a/include/linux/irq_work.h
7140 +++ b/include/linux/irq_work.h
7141 @@ -16,6 +16,7 @@
7142  #define IRQ_WORK_BUSY          2UL
7143  #define IRQ_WORK_FLAGS         3UL
7144  #define IRQ_WORK_LAZY          4UL /* Doesn't want IPI, wait for tick */
7145 +#define IRQ_WORK_HARD_IRQ      8UL /* Run hard IRQ context, even on RT */
7146  
7147  struct irq_work {
7148         unsigned long flags;
7149 @@ -51,4 +52,10 @@ static inline bool irq_work_needs_cpu(void) { return false; }
7150  static inline void irq_work_run(void) { }
7151  #endif
7152  
7153 +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
7154 +void irq_work_tick_soft(void);
7155 +#else
7156 +static inline void irq_work_tick_soft(void) { }
7157 +#endif
7158 +
7159  #endif /* _LINUX_IRQ_WORK_H */
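A sketch of the new IRQ_WORK_HARD_IRQ flag: work marked this way keeps running from hard interrupt context on RT instead of being deferred to the softirq tick. The work item and callback names are illustrative.

static void my_work_fn(struct irq_work *work)
{
	/* runs in hard interrupt context, keep it short */
}

static struct irq_work my_work = {
	.flags = IRQ_WORK_HARD_IRQ,
	.func  = my_work_fn,
};

static void my_kick(void)
{
	irq_work_queue(&my_work);
}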
7160 diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
7161 index c9be57931b58..eeeb540971ae 100644
7162 --- a/include/linux/irqdesc.h
7163 +++ b/include/linux/irqdesc.h
7164 @@ -66,6 +66,7 @@ struct irq_desc {
7165         unsigned int            irqs_unhandled;
7166         atomic_t                threads_handled;
7167         int                     threads_handled_last;
7168 +       u64                     random_ip;
7169         raw_spinlock_t          lock;
7170         struct cpumask          *percpu_enabled;
7171         const struct cpumask    *percpu_affinity;
7172 diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
7173 index 5dd1272d1ab2..9b77034f7c5e 100644
7174 --- a/include/linux/irqflags.h
7175 +++ b/include/linux/irqflags.h
7176 @@ -25,8 +25,6 @@
7177  # define trace_softirqs_enabled(p)     ((p)->softirqs_enabled)
7178  # define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
7179  # define trace_hardirq_exit()  do { current->hardirq_context--; } while (0)
7180 -# define lockdep_softirq_enter()       do { current->softirq_context++; } while (0)
7181 -# define lockdep_softirq_exit()        do { current->softirq_context--; } while (0)
7182  # define INIT_TRACE_IRQFLAGS   .softirqs_enabled = 1,
7183  #else
7184  # define trace_hardirqs_on()           do { } while (0)
7185 @@ -39,9 +37,15 @@
7186  # define trace_softirqs_enabled(p)     0
7187  # define trace_hardirq_enter()         do { } while (0)
7188  # define trace_hardirq_exit()          do { } while (0)
7189 +# define INIT_TRACE_IRQFLAGS
7190 +#endif
7191 +
7192 +#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
7193 +# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
7194 +# define lockdep_softirq_exit()         do { current->softirq_context--; } while (0)
7195 +#else
7196  # define lockdep_softirq_enter()       do { } while (0)
7197  # define lockdep_softirq_exit()                do { } while (0)
7198 -# define INIT_TRACE_IRQFLAGS
7199  #endif
7200  
7201  #if defined(CONFIG_IRQSOFF_TRACER) || \
7202 @@ -148,4 +152,23 @@
7203  
7204  #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
7205  
7206 +/*
7207 + * local_irq* variants depending on RT/!RT
7208 + */
7209 +#ifdef CONFIG_PREEMPT_RT_FULL
7210 +# define local_irq_disable_nort()      do { } while (0)
7211 +# define local_irq_enable_nort()       do { } while (0)
7212 +# define local_irq_save_nort(flags)    local_save_flags(flags)
7213 +# define local_irq_restore_nort(flags) (void)(flags)
7214 +# define local_irq_disable_rt()                local_irq_disable()
7215 +# define local_irq_enable_rt()         local_irq_enable()
7216 +#else
7217 +# define local_irq_disable_nort()      local_irq_disable()
7218 +# define local_irq_enable_nort()       local_irq_enable()
7219 +# define local_irq_save_nort(flags)    local_irq_save(flags)
7220 +# define local_irq_restore_nort(flags) local_irq_restore(flags)
7221 +# define local_irq_disable_rt()                do { } while (0)
7222 +# define local_irq_enable_rt()         do { } while (0)
7223 +#endif
7224 +
7225  #endif
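A sketch of the _nort() pattern introduced above: a real local_irq_save() on !RT, only a flags save on RT where the data is protected by a sleeping lock instead. The function and the state it touches are illustrative.

static void my_touch_state(void)
{
	unsigned long flags;

	local_irq_save_nort(flags);
	/* ... short section that needs IRQs off only on !RT ... */
	local_irq_restore_nort(flags);
}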
7226 diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
7227 index dfaa1f4dcb0c..d57dd06544a1 100644
7228 --- a/include/linux/jbd2.h
7229 +++ b/include/linux/jbd2.h
7230 @@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
7231  
7232  static inline void jbd_lock_bh_state(struct buffer_head *bh)
7233  {
7234 +#ifndef CONFIG_PREEMPT_RT_BASE
7235         bit_spin_lock(BH_State, &bh->b_state);
7236 +#else
7237 +       spin_lock(&bh->b_state_lock);
7238 +#endif
7239  }
7240  
7241  static inline int jbd_trylock_bh_state(struct buffer_head *bh)
7242  {
7243 +#ifndef CONFIG_PREEMPT_RT_BASE
7244         return bit_spin_trylock(BH_State, &bh->b_state);
7245 +#else
7246 +       return spin_trylock(&bh->b_state_lock);
7247 +#endif
7248  }
7249  
7250  static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
7251  {
7252 +#ifndef CONFIG_PREEMPT_RT_BASE
7253         return bit_spin_is_locked(BH_State, &bh->b_state);
7254 +#else
7255 +       return spin_is_locked(&bh->b_state_lock);
7256 +#endif
7257  }
7258  
7259  static inline void jbd_unlock_bh_state(struct buffer_head *bh)
7260  {
7261 +#ifndef CONFIG_PREEMPT_RT_BASE
7262         bit_spin_unlock(BH_State, &bh->b_state);
7263 +#else
7264 +       spin_unlock(&bh->b_state_lock);
7265 +#endif
7266  }
7267  
7268  static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
7269  {
7270 +#ifndef CONFIG_PREEMPT_RT_BASE
7271         bit_spin_lock(BH_JournalHead, &bh->b_state);
7272 +#else
7273 +       spin_lock(&bh->b_journal_head_lock);
7274 +#endif
7275  }
7276  
7277  static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
7278  {
7279 +#ifndef CONFIG_PREEMPT_RT_BASE
7280         bit_spin_unlock(BH_JournalHead, &bh->b_state);
7281 +#else
7282 +       spin_unlock(&bh->b_journal_head_lock);
7283 +#endif
7284  }
7285  
7286  #define J_ASSERT(assert)       BUG_ON(!(assert))
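Callers keep the same jbd locking calls; only the backing primitive changes on RT (bit spinlock vs. bh->b_state_lock). A small illustrative helper:

static void my_update_jh_state(struct buffer_head *bh)
{
	jbd_lock_bh_state(bh);
	/* ... inspect or modify journal head state under the lock ... */
	jbd_unlock_bh_state(bh);
}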
7287 diff --git a/include/linux/kdb.h b/include/linux/kdb.h
7288 index 410decacff8f..0861bebfc188 100644
7289 --- a/include/linux/kdb.h
7290 +++ b/include/linux/kdb.h
7291 @@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
7292  extern __printf(1, 2) int kdb_printf(const char *, ...);
7293  typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
7294  
7295 +#define in_kdb_printk()        (kdb_trap_printk)
7296  extern void kdb_init(int level);
7297  
7298  /* Access to kdb specific polling devices */
7299 @@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
7300  extern int kdb_unregister(char *);
7301  #else /* ! CONFIG_KGDB_KDB */
7302  static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
7303 +#define in_kdb_printk() (0)
7304  static inline void kdb_init(int level) {}
7305  static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
7306                                char *help, short minlen) { return 0; }
7307 diff --git a/include/linux/kernel.h b/include/linux/kernel.h
7308 index bc6ed52a39b9..7894d55e4998 100644
7309 --- a/include/linux/kernel.h
7310 +++ b/include/linux/kernel.h
7311 @@ -194,6 +194,9 @@ extern int _cond_resched(void);
7312   */
7313  # define might_sleep() \
7314         do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
7315 +
7316 +# define might_sleep_no_state_check() \
7317 +       do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
7318  # define sched_annotate_sleep()        (current->task_state_change = 0)
7319  #else
7320    static inline void ___might_sleep(const char *file, int line,
7321 @@ -201,6 +204,7 @@ extern int _cond_resched(void);
7322    static inline void __might_sleep(const char *file, int line,
7323                                    int preempt_offset) { }
7324  # define might_sleep() do { might_resched(); } while (0)
7325 +# define might_sleep_no_state_check() do { might_resched(); } while (0)
7326  # define sched_annotate_sleep() do { } while (0)
7327  #endif
7328  
7329 @@ -488,6 +492,7 @@ extern enum system_states {
7330         SYSTEM_HALT,
7331         SYSTEM_POWER_OFF,
7332         SYSTEM_RESTART,
7333 +       SYSTEM_SUSPEND,
7334  } system_state;
7335  
7336  #define TAINT_PROPRIETARY_MODULE       0
7337 diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
7338 index cb483305e1f5..4e5062316bb6 100644
7339 --- a/include/linux/list_bl.h
7340 +++ b/include/linux/list_bl.h
7341 @@ -2,6 +2,7 @@
7342  #define _LINUX_LIST_BL_H
7343  
7344  #include <linux/list.h>
7345 +#include <linux/spinlock.h>
7346  #include <linux/bit_spinlock.h>
7347  
7348  /*
7349 @@ -32,13 +33,24 @@
7350  
7351  struct hlist_bl_head {
7352         struct hlist_bl_node *first;
7353 +#ifdef CONFIG_PREEMPT_RT_BASE
7354 +       raw_spinlock_t lock;
7355 +#endif
7356  };
7357  
7358  struct hlist_bl_node {
7359         struct hlist_bl_node *next, **pprev;
7360  };
7361 -#define INIT_HLIST_BL_HEAD(ptr) \
7362 -       ((ptr)->first = NULL)
7363 +
7364 +#ifdef CONFIG_PREEMPT_RT_BASE
7365 +#define INIT_HLIST_BL_HEAD(h)          \
7366 +do {                                   \
7367 +       (h)->first = NULL;              \
7368 +       raw_spin_lock_init(&(h)->lock); \
7369 +} while (0)
7370 +#else
7371 +#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL
7372 +#endif
7373  
7374  static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
7375  {
7376 @@ -118,12 +130,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
7377  
7378  static inline void hlist_bl_lock(struct hlist_bl_head *b)
7379  {
7380 +#ifndef CONFIG_PREEMPT_RT_BASE
7381         bit_spin_lock(0, (unsigned long *)b);
7382 +#else
7383 +       raw_spin_lock(&b->lock);
7384 +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
7385 +       __set_bit(0, (unsigned long *)b);
7386 +#endif
7387 +#endif
7388  }
7389  
7390  static inline void hlist_bl_unlock(struct hlist_bl_head *b)
7391  {
7392 +#ifndef CONFIG_PREEMPT_RT_BASE
7393         __bit_spin_unlock(0, (unsigned long *)b);
7394 +#else
7395 +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
7396 +       __clear_bit(0, (unsigned long *)b);
7397 +#endif
7398 +       raw_spin_unlock(&b->lock);
7399 +#endif
7400  }
7401  
7402  static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
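A sketch of bucket access with the modified hlist_bl locking: the same hlist_bl_lock()/hlist_bl_unlock() calls take either the bit lock (!RT) or the per-bucket raw spinlock added above (RT). The helper name is illustrative.

static struct hlist_bl_node *my_peek_bucket(struct hlist_bl_head *b)
{
	struct hlist_bl_node *first;

	hlist_bl_lock(b);
	first = hlist_bl_first(b);
	hlist_bl_unlock(b);

	return first;
}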
7403 diff --git a/include/linux/locallock.h b/include/linux/locallock.h
7404 new file mode 100644
7405 index 000000000000..845c77f1a5ca
7406 --- /dev/null
7407 +++ b/include/linux/locallock.h
7408 @@ -0,0 +1,278 @@
7409 +#ifndef _LINUX_LOCALLOCK_H
7410 +#define _LINUX_LOCALLOCK_H
7411 +
7412 +#include <linux/percpu.h>
7413 +#include <linux/spinlock.h>
7414 +
7415 +#ifdef CONFIG_PREEMPT_RT_BASE
7416 +
7417 +#ifdef CONFIG_DEBUG_SPINLOCK
7418 +# define LL_WARN(cond) WARN_ON(cond)
7419 +#else
7420 +# define LL_WARN(cond) do { } while (0)
7421 +#endif
7422 +
7423 +/*
7424 + * per cpu lock based substitute for local_irq_*()
7425 + */
7426 +struct local_irq_lock {
7427 +       spinlock_t              lock;
7428 +       struct task_struct      *owner;
7429 +       int                     nestcnt;
7430 +       unsigned long           flags;
7431 +};
7432 +
7433 +#define DEFINE_LOCAL_IRQ_LOCK(lvar)                                    \
7434 +       DEFINE_PER_CPU(struct local_irq_lock, lvar) = {                 \
7435 +               .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
7436 +
7437 +#define DECLARE_LOCAL_IRQ_LOCK(lvar)                                   \
7438 +       DECLARE_PER_CPU(struct local_irq_lock, lvar)
7439 +
7440 +#define local_irq_lock_init(lvar)                                      \
7441 +       do {                                                            \
7442 +               int __cpu;                                              \
7443 +               for_each_possible_cpu(__cpu)                            \
7444 +                       spin_lock_init(&per_cpu(lvar, __cpu).lock);     \
7445 +       } while (0)
7446 +
7447 +/*
7448 + * spin_lock|trylock|unlock_local flavour that does not migrate disable
7449 + * used for __local_lock|trylock|unlock where get_local_var/put_local_var
7450 + * already takes care of the migrate_disable/enable
7451 + * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
7452 + */
7453 +#ifdef CONFIG_PREEMPT_RT_FULL
7454 +# define spin_lock_local(lock)                 rt_spin_lock__no_mg(lock)
7455 +# define spin_trylock_local(lock)              rt_spin_trylock__no_mg(lock)
7456 +# define spin_unlock_local(lock)               rt_spin_unlock__no_mg(lock)
7457 +#else
7458 +# define spin_lock_local(lock)                 spin_lock(lock)
7459 +# define spin_trylock_local(lock)              spin_trylock(lock)
7460 +# define spin_unlock_local(lock)               spin_unlock(lock)
7461 +#endif
7462 +
7463 +static inline void __local_lock(struct local_irq_lock *lv)
7464 +{
7465 +       if (lv->owner != current) {
7466 +               spin_lock_local(&lv->lock);
7467 +               LL_WARN(lv->owner);
7468 +               LL_WARN(lv->nestcnt);
7469 +               lv->owner = current;
7470 +       }
7471 +       lv->nestcnt++;
7472 +}
7473 +
7474 +#define local_lock(lvar)                                       \
7475 +       do { __local_lock(&get_local_var(lvar)); } while (0)
7476 +
7477 +#define local_lock_on(lvar, cpu)                               \
7478 +       do { __local_lock(&per_cpu(lvar, cpu)); } while (0)
7479 +
7480 +static inline int __local_trylock(struct local_irq_lock *lv)
7481 +{
7482 +       if (lv->owner != current && spin_trylock_local(&lv->lock)) {
7483 +               LL_WARN(lv->owner);
7484 +               LL_WARN(lv->nestcnt);
7485 +               lv->owner = current;
7486 +               lv->nestcnt = 1;
7487 +               return 1;
7488 +       }
7489 +       return 0;
7490 +}
7491 +
7492 +#define local_trylock(lvar)                                            \
7493 +       ({                                                              \
7494 +               int __locked;                                           \
7495 +               __locked = __local_trylock(&get_local_var(lvar));       \
7496 +               if (!__locked)                                          \
7497 +                       put_local_var(lvar);                            \
7498 +               __locked;                                               \
7499 +       })
7500 +
7501 +static inline void __local_unlock(struct local_irq_lock *lv)
7502 +{
7503 +       LL_WARN(lv->nestcnt == 0);
7504 +       LL_WARN(lv->owner != current);
7505 +       if (--lv->nestcnt)
7506 +               return;
7507 +
7508 +       lv->owner = NULL;
7509 +       spin_unlock_local(&lv->lock);
7510 +}
7511 +
7512 +#define local_unlock(lvar)                                     \
7513 +       do {                                                    \
7514 +               __local_unlock(this_cpu_ptr(&lvar));            \
7515 +               put_local_var(lvar);                            \
7516 +       } while (0)
7517 +
7518 +#define local_unlock_on(lvar, cpu)                       \
7519 +       do { __local_unlock(&per_cpu(lvar, cpu)); } while (0)
7520 +
7521 +static inline void __local_lock_irq(struct local_irq_lock *lv)
7522 +{
7523 +       spin_lock_irqsave(&lv->lock, lv->flags);
7524 +       LL_WARN(lv->owner);
7525 +       LL_WARN(lv->nestcnt);
7526 +       lv->owner = current;
7527 +       lv->nestcnt = 1;
7528 +}
7529 +
7530 +#define local_lock_irq(lvar)                                           \
7531 +       do { __local_lock_irq(&get_local_var(lvar)); } while (0)
7532 +
7533 +#define local_lock_irq_on(lvar, cpu)                                   \
7534 +       do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
7535 +
7536 +static inline void __local_unlock_irq(struct local_irq_lock *lv)
7537 +{
7538 +       LL_WARN(!lv->nestcnt);
7539 +       LL_WARN(lv->owner != current);
7540 +       lv->owner = NULL;
7541 +       lv->nestcnt = 0;
7542 +       spin_unlock_irq(&lv->lock);
7543 +}
7544 +
7545 +#define local_unlock_irq(lvar)                                         \
7546 +       do {                                                            \
7547 +               __local_unlock_irq(this_cpu_ptr(&lvar));                \
7548 +               put_local_var(lvar);                                    \
7549 +       } while (0)
7550 +
7551 +#define local_unlock_irq_on(lvar, cpu)                                 \
7552 +       do {                                                            \
7553 +               __local_unlock_irq(&per_cpu(lvar, cpu));                \
7554 +       } while (0)
7555 +
7556 +static inline int __local_lock_irqsave(struct local_irq_lock *lv)
7557 +{
7558 +       if (lv->owner != current) {
7559 +               __local_lock_irq(lv);
7560 +               return 0;
7561 +       } else {
7562 +               lv->nestcnt++;
7563 +               return 1;
7564 +       }
7565 +}
7566 +
7567 +#define local_lock_irqsave(lvar, _flags)                               \
7568 +       do {                                                            \
7569 +               if (__local_lock_irqsave(&get_local_var(lvar)))         \
7570 +                       put_local_var(lvar);                            \
7571 +               _flags = __this_cpu_read(lvar.flags);                   \
7572 +       } while (0)
7573 +
7574 +#define local_lock_irqsave_on(lvar, _flags, cpu)                       \
7575 +       do {                                                            \
7576 +               __local_lock_irqsave(&per_cpu(lvar, cpu));              \
7577 +               _flags = per_cpu(lvar, cpu).flags;                      \
7578 +       } while (0)
7579 +
7580 +static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
7581 +                                           unsigned long flags)
7582 +{
7583 +       LL_WARN(!lv->nestcnt);
7584 +       LL_WARN(lv->owner != current);
7585 +       if (--lv->nestcnt)
7586 +               return 0;
7587 +
7588 +       lv->owner = NULL;
7589 +       spin_unlock_irqrestore(&lv->lock, lv->flags);
7590 +       return 1;
7591 +}
7592 +
7593 +#define local_unlock_irqrestore(lvar, flags)                           \
7594 +       do {                                                            \
7595 +               if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \
7596 +                       put_local_var(lvar);                            \
7597 +       } while (0)
7598 +
7599 +#define local_unlock_irqrestore_on(lvar, flags, cpu)                   \
7600 +       do {                                                            \
7601 +               __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags);  \
7602 +       } while (0)
7603 +
7604 +#define local_spin_trylock_irq(lvar, lock)                             \
7605 +       ({                                                              \
7606 +               int __locked;                                           \
7607 +               local_lock_irq(lvar);                                   \
7608 +               __locked = spin_trylock(lock);                          \
7609 +               if (!__locked)                                          \
7610 +                       local_unlock_irq(lvar);                         \
7611 +               __locked;                                               \
7612 +       })
7613 +
7614 +#define local_spin_lock_irq(lvar, lock)                                        \
7615 +       do {                                                            \
7616 +               local_lock_irq(lvar);                                   \
7617 +               spin_lock(lock);                                        \
7618 +       } while (0)
7619 +
7620 +#define local_spin_unlock_irq(lvar, lock)                              \
7621 +       do {                                                            \
7622 +               spin_unlock(lock);                                      \
7623 +               local_unlock_irq(lvar);                                 \
7624 +       } while (0)
7625 +
7626 +#define local_spin_lock_irqsave(lvar, lock, flags)                     \
7627 +       do {                                                            \
7628 +               local_lock_irqsave(lvar, flags);                        \
7629 +               spin_lock(lock);                                        \
7630 +       } while (0)
7631 +
7632 +#define local_spin_unlock_irqrestore(lvar, lock, flags)                        \
7633 +       do {                                                            \
7634 +               spin_unlock(lock);                                      \
7635 +               local_unlock_irqrestore(lvar, flags);                   \
7636 +       } while (0)
7637 +
7638 +#define get_locked_var(lvar, var)                                      \
7639 +       (*({                                                            \
7640 +               local_lock(lvar);                                       \
7641 +               this_cpu_ptr(&var);                                     \
7642 +       }))
7643 +
7644 +#define put_locked_var(lvar, var)      local_unlock(lvar);
7645 +
7646 +#define local_lock_cpu(lvar)                                           \
7647 +       ({                                                              \
7648 +               local_lock(lvar);                                       \
7649 +               smp_processor_id();                                     \
7650 +       })
7651 +
7652 +#define local_unlock_cpu(lvar)                 local_unlock(lvar)
7653 +
7654 +#else /* PREEMPT_RT_BASE */
7655 +
7656 +#define DEFINE_LOCAL_IRQ_LOCK(lvar)            __typeof__(const int) lvar
7657 +#define DECLARE_LOCAL_IRQ_LOCK(lvar)           extern __typeof__(const int) lvar
7658 +
7659 +static inline void local_irq_lock_init(int lvar) { }
7660 +
7661 +#define local_lock(lvar)                       preempt_disable()
7662 +#define local_unlock(lvar)                     preempt_enable()
7663 +#define local_lock_irq(lvar)                   local_irq_disable()
7664 +#define local_lock_irq_on(lvar, cpu)           local_irq_disable()
7665 +#define local_unlock_irq(lvar)                 local_irq_enable()
7666 +#define local_unlock_irq_on(lvar, cpu)         local_irq_enable()
7667 +#define local_lock_irqsave(lvar, flags)                local_irq_save(flags)
7668 +#define local_unlock_irqrestore(lvar, flags)   local_irq_restore(flags)
7669 +
7670 +#define local_spin_trylock_irq(lvar, lock)     spin_trylock_irq(lock)
7671 +#define local_spin_lock_irq(lvar, lock)                spin_lock_irq(lock)
7672 +#define local_spin_unlock_irq(lvar, lock)      spin_unlock_irq(lock)
7673 +#define local_spin_lock_irqsave(lvar, lock, flags)     \
7674 +       spin_lock_irqsave(lock, flags)
7675 +#define local_spin_unlock_irqrestore(lvar, lock, flags)        \
7676 +       spin_unlock_irqrestore(lock, flags)
7677 +
7678 +#define get_locked_var(lvar, var)              get_cpu_var(var)
7679 +#define put_locked_var(lvar, var)              put_cpu_var(var)
7680 +
7681 +#define local_lock_cpu(lvar)                   get_cpu()
7682 +#define local_unlock_cpu(lvar)                 put_cpu()
7683 +
7684 +#endif
7685 +
7686 +#endif
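A sketch of the intended locallock use (lock and per-CPU variable names are placeholders); the same pattern appears below for x_tables' xt_write_lock. On !RT this compiles down to local_irq_save()/restore(); on RT it takes a per-CPU sleeping spinlock with migration disabled.

static DEFINE_LOCAL_IRQ_LOCK(my_percpu_lock);
static DEFINE_PER_CPU(int, my_percpu_count);

static void my_account(void)
{
	unsigned long flags;

	local_lock_irqsave(my_percpu_lock, flags);
	__this_cpu_inc(my_percpu_count);
	local_unlock_irqrestore(my_percpu_lock, flags);
}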
7687 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
7688 index 08d947fc4c59..705fb564a605 100644
7689 --- a/include/linux/mm_types.h
7690 +++ b/include/linux/mm_types.h
7691 @@ -11,6 +11,7 @@
7692  #include <linux/completion.h>
7693  #include <linux/cpumask.h>
7694  #include <linux/uprobes.h>
7695 +#include <linux/rcupdate.h>
7696  #include <linux/page-flags-layout.h>
7697  #include <linux/workqueue.h>
7698  #include <asm/page.h>
7699 @@ -509,6 +510,9 @@ struct mm_struct {
7700         bool tlb_flush_pending;
7701  #endif
7702         struct uprobes_state uprobes_state;
7703 +#ifdef CONFIG_PREEMPT_RT_BASE
7704 +       struct rcu_head delayed_drop;
7705 +#endif
7706  #ifdef CONFIG_X86_INTEL_MPX
7707         /* address of the bounds directory */
7708         void __user *bd_addr;
7709 diff --git a/include/linux/module.h b/include/linux/module.h
7710 index 0c3207d26ac0..5944baaa3f28 100644
7711 --- a/include/linux/module.h
7712 +++ b/include/linux/module.h
7713 @@ -496,6 +496,7 @@ static inline int module_is_live(struct module *mod)
7714  struct module *__module_text_address(unsigned long addr);
7715  struct module *__module_address(unsigned long addr);
7716  bool is_module_address(unsigned long addr);
7717 +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
7718  bool is_module_percpu_address(unsigned long addr);
7719  bool is_module_text_address(unsigned long addr);
7720  
7721 @@ -663,6 +664,11 @@ static inline bool is_module_percpu_address(unsigned long addr)
7722         return false;
7723  }
7724  
7725 +static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
7726 +{
7727 +       return false;
7728 +}
7729 +
7730  static inline bool is_module_text_address(unsigned long addr)
7731  {
7732         return false;
7733 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
7734 index 2cb7531e7d7a..b3fdfc820216 100644
7735 --- a/include/linux/mutex.h
7736 +++ b/include/linux/mutex.h
7737 @@ -19,6 +19,17 @@
7738  #include <asm/processor.h>
7739  #include <linux/osq_lock.h>
7740  
7741 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
7742 +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
7743 +       , .dep_map = { .name = #lockname }
7744 +#else
7745 +# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
7746 +#endif
7747 +
7748 +#ifdef CONFIG_PREEMPT_RT_FULL
7749 +# include <linux/mutex_rt.h>
7750 +#else
7751 +
7752  /*
7753   * Simple, straightforward mutexes with strict semantics:
7754   *
7755 @@ -99,13 +110,6 @@ do {                                                        \
7756  static inline void mutex_destroy(struct mutex *lock) {}
7757  #endif
7758  
7759 -#ifdef CONFIG_DEBUG_LOCK_ALLOC
7760 -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
7761 -               , .dep_map = { .name = #lockname }
7762 -#else
7763 -# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
7764 -#endif
7765 -
7766  #define __MUTEX_INITIALIZER(lockname) \
7767                 { .count = ATOMIC_INIT(1) \
7768                 , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
7769 @@ -173,6 +177,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
7770  extern int mutex_trylock(struct mutex *lock);
7771  extern void mutex_unlock(struct mutex *lock);
7772  
7773 +#endif /* !PREEMPT_RT_FULL */
7774 +
7775  extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
7776  
7777  #endif /* __LINUX_MUTEX_H */
7778 diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
7779 new file mode 100644
7780 index 000000000000..e0284edec655
7781 --- /dev/null
7782 +++ b/include/linux/mutex_rt.h
7783 @@ -0,0 +1,89 @@
7784 +#ifndef __LINUX_MUTEX_RT_H
7785 +#define __LINUX_MUTEX_RT_H
7786 +
7787 +#ifndef __LINUX_MUTEX_H
7788 +#error "Please include mutex.h"
7789 +#endif
7790 +
7791 +#include <linux/rtmutex.h>
7792 +
7793 +/* FIXME: Just for __lockfunc */
7794 +#include <linux/spinlock.h>
7795 +
7796 +struct mutex {
7797 +       struct rt_mutex         lock;
7798 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
7799 +       struct lockdep_map      dep_map;
7800 +#endif
7801 +};
7802 +
7803 +#define __MUTEX_INITIALIZER(mutexname)                                 \
7804 +       {                                                               \
7805 +               .lock = __RT_MUTEX_INITIALIZER(mutexname.lock)          \
7806 +               __DEP_MAP_MUTEX_INITIALIZER(mutexname)                  \
7807 +       }
7808 +
7809 +#define DEFINE_MUTEX(mutexname)                                                \
7810 +       struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
7811 +
7812 +extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
7813 +extern void __lockfunc _mutex_lock(struct mutex *lock);
7814 +extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
7815 +extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
7816 +extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
7817 +extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
7818 +extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
7819 +extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
7820 +extern int __lockfunc _mutex_trylock(struct mutex *lock);
7821 +extern void __lockfunc _mutex_unlock(struct mutex *lock);
7822 +
7823 +#define mutex_is_locked(l)             rt_mutex_is_locked(&(l)->lock)
7824 +#define mutex_lock(l)                  _mutex_lock(l)
7825 +#define mutex_lock_interruptible(l)    _mutex_lock_interruptible(l)
7826 +#define mutex_lock_killable(l)         _mutex_lock_killable(l)
7827 +#define mutex_trylock(l)               _mutex_trylock(l)
7828 +#define mutex_unlock(l)                        _mutex_unlock(l)
7829 +
7830 +#ifdef CONFIG_DEBUG_MUTEXES
7831 +#define mutex_destroy(l)               rt_mutex_destroy(&(l)->lock)
7832 +#else
7833 +static inline void mutex_destroy(struct mutex *lock) {}
7834 +#endif
7835 +
7836 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
7837 +# define mutex_lock_nested(l, s)       _mutex_lock_nested(l, s)
7838 +# define mutex_lock_interruptible_nested(l, s) \
7839 +                                       _mutex_lock_interruptible_nested(l, s)
7840 +# define mutex_lock_killable_nested(l, s) \
7841 +                                       _mutex_lock_killable_nested(l, s)
7842 +
7843 +# define mutex_lock_nest_lock(lock, nest_lock)                         \
7844 +do {                                                                   \
7845 +       typecheck(struct lockdep_map *, &(nest_lock)->dep_map);         \
7846 +       _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);             \
7847 +} while (0)
7848 +
7849 +#else
7850 +# define mutex_lock_nested(l, s)       _mutex_lock(l)
7851 +# define mutex_lock_interruptible_nested(l, s) \
7852 +                                       _mutex_lock_interruptible(l)
7853 +# define mutex_lock_killable_nested(l, s) \
7854 +                                       _mutex_lock_killable(l)
7855 +# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
7856 +#endif
7857 +
7858 +# define mutex_init(mutex)                             \
7859 +do {                                                   \
7860 +       static struct lock_class_key __key;             \
7861 +                                                       \
7862 +       rt_mutex_init(&(mutex)->lock);                  \
7863 +       __mutex_do_init((mutex), #mutex, &__key);       \
7864 +} while (0)
7865 +
7866 +# define __mutex_init(mutex, name, key)                        \
7867 +do {                                                   \
7868 +       rt_mutex_init(&(mutex)->lock);                  \
7869 +       __mutex_do_init((mutex), name, key);            \
7870 +} while (0)
7871 +
7872 +#endif
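Callers are unaffected by the substitution above: the standard mutex API keeps working, it is just backed by an rtmutex on PREEMPT_RT_FULL. The lock and data in this sketch are illustrative.

static DEFINE_MUTEX(my_cfg_lock);
static int my_cfg_value;

static void my_set_cfg(int val)
{
	mutex_lock(&my_cfg_lock);
	my_cfg_value = val;
	mutex_unlock(&my_cfg_lock);
}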
7873 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
7874 index bb9b102c15cd..a5b12b8ad196 100644
7875 --- a/include/linux/netdevice.h
7876 +++ b/include/linux/netdevice.h
7877 @@ -396,7 +396,19 @@ typedef enum rx_handler_result rx_handler_result_t;
7878  typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
7879  
7880  void __napi_schedule(struct napi_struct *n);
7881 +
7882 +/*
7883 + * When PREEMPT_RT_FULL is defined, all device interrupt handlers
7884 + * run as threads, and they can also be preempted (without PREEMPT_RT
7885 + * interrupt threads can not be preempted). Which means that calling
7886 + * __napi_schedule_irqoff() from an interrupt handler can be preempted
7887 + * and can corrupt the napi->poll_list.
7888 + */
7889 +#ifdef CONFIG_PREEMPT_RT_FULL
7890 +#define __napi_schedule_irqoff(n) __napi_schedule(n)
7891 +#else
7892  void __napi_schedule_irqoff(struct napi_struct *n);
7893 +#endif
7894  
7895  static inline bool napi_disable_pending(struct napi_struct *n)
7896  {
7897 @@ -2463,14 +2475,53 @@ void netdev_freemem(struct net_device *dev);
7898  void synchronize_net(void);
7899  int init_dummy_netdev(struct net_device *dev);
7900  
7901 -DECLARE_PER_CPU(int, xmit_recursion);
7902  #define XMIT_RECURSION_LIMIT   10
7903 +#ifdef CONFIG_PREEMPT_RT_FULL
7904 +static inline int dev_recursion_level(void)
7905 +{
7906 +       return current->xmit_recursion;
7907 +}
7908 +
7909 +static inline int xmit_rec_read(void)
7910 +{
7911 +       return current->xmit_recursion;
7912 +}
7913 +
7914 +static inline void xmit_rec_inc(void)
7915 +{
7916 +       current->xmit_recursion++;
7917 +}
7918 +
7919 +static inline void xmit_rec_dec(void)
7920 +{
7921 +       current->xmit_recursion--;
7922 +}
7923 +
7924 +#else
7925 +
7926 +DECLARE_PER_CPU(int, xmit_recursion);
7927  
7928  static inline int dev_recursion_level(void)
7929  {
7930         return this_cpu_read(xmit_recursion);
7931  }
7932  
7933 +static inline int xmit_rec_read(void)
7934 +{
7935 +       return __this_cpu_read(xmit_recursion);
7936 +}
7937 +
7938 +static inline void xmit_rec_inc(void)
7939 +{
7940 +       __this_cpu_inc(xmit_recursion);
7941 +}
7942 +
7943 +static inline void xmit_rec_dec(void)
7944 +{
7945 +       __this_cpu_dec(xmit_recursion);
7946 +}
7947 +#endif
7948 +
7949  struct net_device *dev_get_by_index(struct net *net, int ifindex);
7950  struct net_device *__dev_get_by_index(struct net *net, int ifindex);
7951  struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
7952 @@ -2855,6 +2906,7 @@ struct softnet_data {
7953         unsigned int            dropped;
7954         struct sk_buff_head     input_pkt_queue;
7955         struct napi_struct      backlog;
7956 +       struct sk_buff_head     tofree_queue;
7957  
7958  };
7959  
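A simplified sketch of the xmit recursion guard added above (not the actual net/core/dev.c code): the same xmit_rec_*() calls count per task on RT and per CPU otherwise. The helper name and the drop policy shown are illustrative.

static netdev_tx_t my_guarded_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_tx_t ret;

	if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;	/* drop rather than recurse further */
	}

	xmit_rec_inc();
	ret = dev->netdev_ops->ndo_start_xmit(skb, dev);
	xmit_rec_dec();

	return ret;
}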
7960 diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
7961 index 2ad1a2b289b5..b4d10155af54 100644
7962 --- a/include/linux/netfilter/x_tables.h
7963 +++ b/include/linux/netfilter/x_tables.h
7964 @@ -4,6 +4,7 @@
7965  
7966  #include <linux/netdevice.h>
7967  #include <linux/static_key.h>
7968 +#include <linux/locallock.h>
7969  #include <uapi/linux/netfilter/x_tables.h>
7970  
7971  /* Test a struct->invflags and a boolean for inequality */
7972 @@ -300,6 +301,8 @@ void xt_free_table_info(struct xt_table_info *info);
7973   */
7974  DECLARE_PER_CPU(seqcount_t, xt_recseq);
7975  
7976 +DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
7977 +
7978  /* xt_tee_enabled - true if x_tables needs to handle reentrancy
7979   *
7980   * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
7981 @@ -320,6 +323,9 @@ static inline unsigned int xt_write_recseq_begin(void)
7982  {
7983         unsigned int addend;
7984  
7985 +       /* RT protection */
7986 +       local_lock(xt_write_lock);
7987 +
7988         /*
7989          * Low order bit of sequence is set if we already
7990          * called xt_write_recseq_begin().
7991 @@ -350,6 +356,7 @@ static inline void xt_write_recseq_end(unsigned int addend)
7992         /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
7993         smp_wmb();
7994         __this_cpu_add(xt_recseq.sequence, addend);
7995 +       local_unlock(xt_write_lock);
7996  }
7997  
7998  /*
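A usage sketch modelled on the ip(6)tables traversal path: the begin/end pair now also takes the xt_write_lock local lock, so the counter-update section stays serialized per CPU on RT as well. The surrounding function is illustrative.

static void my_walk_rules(void)
{
	unsigned int addend;

	local_bh_disable();
	addend = xt_write_recseq_begin();
	/* ... match packets and update rule counters ... */
	xt_write_recseq_end(addend);
	local_bh_enable();
}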
7999 diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
8000 index 810124b33327..d54ca43d571f 100644
8001 --- a/include/linux/nfs_fs.h
8002 +++ b/include/linux/nfs_fs.h
8003 @@ -165,7 +165,11 @@ struct nfs_inode {
8004  
8005         /* Readers: in-flight sillydelete RPC calls */
8006         /* Writers: rmdir */
8007 +#ifdef CONFIG_PREEMPT_RT_BASE
8008 +       struct semaphore        rmdir_sem;
8009 +#else
8010         struct rw_semaphore     rmdir_sem;
8011 +#endif
8012  
8013  #if IS_ENABLED(CONFIG_NFS_V4)
8014         struct nfs4_cached_acl  *nfs4_acl;
8015 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
8016 index beb1e10f446e..ebaf2e7bfe29 100644
8017 --- a/include/linux/nfs_xdr.h
8018 +++ b/include/linux/nfs_xdr.h
8019 @@ -1490,7 +1490,7 @@ struct nfs_unlinkdata {
8020         struct nfs_removeargs args;
8021         struct nfs_removeres res;
8022         struct dentry *dentry;
8023 -       wait_queue_head_t wq;
8024 +       struct swait_queue_head wq;
8025         struct rpc_cred *cred;
8026         struct nfs_fattr dir_attr;
8027         long timeout;
8028 diff --git a/include/linux/notifier.h b/include/linux/notifier.h
8029 index 4149868de4e6..babe5b9bcb91 100644
8030 --- a/include/linux/notifier.h
8031 +++ b/include/linux/notifier.h
8032 @@ -6,7 +6,7 @@
8033   *
8034   *                             Alan Cox <Alan.Cox@linux.org>
8035   */
8036
8036 - 
8037 +
8038  #ifndef _LINUX_NOTIFIER_H
8039  #define _LINUX_NOTIFIER_H
8040  #include <linux/errno.h>
8041 @@ -42,9 +42,7 @@
8042   * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
8043   * As compensation, srcu_notifier_chain_unregister() is rather expensive.
8044   * SRCU notifier chains should be used when the chain will be called very
8045 - * often but notifier_blocks will seldom be removed.  Also, SRCU notifier
8046 - * chains are slightly more difficult to use because they require special
8047 - * runtime initialization.
8048 + * often but notifier_blocks will seldom be removed.
8049   */
8050  
8051  struct notifier_block;
8052 @@ -90,7 +88,7 @@ struct srcu_notifier_head {
8053                 (name)->head = NULL;            \
8054         } while (0)
8055  
8056 -/* srcu_notifier_heads must be initialized and cleaned up dynamically */
8057 +/* srcu_notifier_heads must be cleaned up dynamically */
8058  extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
8059  #define srcu_cleanup_notifier_head(name)       \
8060                 cleanup_srcu_struct(&(name)->srcu);
8061 @@ -103,7 +101,13 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
8062                 .head = NULL }
8063  #define RAW_NOTIFIER_INIT(name)        {                               \
8064                 .head = NULL }
8065 -/* srcu_notifier_heads cannot be initialized statically */
8066 +
8067 +#define SRCU_NOTIFIER_INIT(name, pcpu)                         \
8068 +       {                                                       \
8069 +               .mutex = __MUTEX_INITIALIZER(name.mutex),       \
8070 +               .head = NULL,                                   \
8071 +               .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu),    \
8072 +       }
8073  
8074  #define ATOMIC_NOTIFIER_HEAD(name)                             \
8075         struct atomic_notifier_head name =                      \
8076 @@ -115,6 +119,18 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
8077         struct raw_notifier_head name =                         \
8078                 RAW_NOTIFIER_INIT(name)
8079  
8080 +#define _SRCU_NOTIFIER_HEAD(name, mod)                         \
8081 +       static DEFINE_PER_CPU(struct srcu_struct_array,         \
8082 +                       name##_head_srcu_array);                \
8083 +       mod struct srcu_notifier_head name =                    \
8084 +                       SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
8085 +
8086 +#define SRCU_NOTIFIER_HEAD(name)                               \
8087 +       _SRCU_NOTIFIER_HEAD(name, )
8088 +
8089 +#define SRCU_NOTIFIER_HEAD_STATIC(name)                                \
8090 +       _SRCU_NOTIFIER_HEAD(name, static)
8091 +
8092  #ifdef __KERNEL__
8093  
8094  extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
8095 @@ -184,12 +200,12 @@ static inline int notifier_to_errno(int ret)
8096  
8097  /*
8098   *     Declared notifiers so far. I can imagine quite a few more chains
8099 - *     over time (eg laptop power reset chains, reboot chain (to clean 
8100 + *     over time (eg laptop power reset chains, reboot chain (to clean
8101   *     device units up), device [un]mount chain, module load/unload chain,
8102 - *     low memory chain, screenblank chain (for plug in modular screenblankers) 
8103 + *     low memory chain, screenblank chain (for plug in modular screenblankers)
8104   *     VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
8105   */
8106
8106 - 
8107 +
8108  /* CPU notfiers are defined in include/linux/cpu.h. */
8109  
8110  /* netdevice notifiers are defined in include/linux/netdevice.h */
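A sketch of a statically defined SRCU notifier chain using the new macros above; the chain, callback and event value are illustrative.

static int my_event_cb(struct notifier_block *nb, unsigned long event, void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_event_cb,
};

SRCU_NOTIFIER_HEAD_STATIC(my_chain);

static void my_use_chain(void)
{
	srcu_notifier_chain_register(&my_chain, &my_nb);
	srcu_notifier_call_chain(&my_chain, 0, NULL);
	srcu_notifier_chain_unregister(&my_chain, &my_nb);
}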
8111 diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
8112 index 5b2e6159b744..ea940f451606 100644
8113 --- a/include/linux/percpu-rwsem.h
8114 +++ b/include/linux/percpu-rwsem.h
8115 @@ -4,7 +4,7 @@
8116  #include <linux/atomic.h>
8117  #include <linux/rwsem.h>
8118  #include <linux/percpu.h>
8119 -#include <linux/wait.h>
8120 +#include <linux/swait.h>
8121  #include <linux/rcu_sync.h>
8122  #include <linux/lockdep.h>
8123  
8124 @@ -12,7 +12,7 @@ struct percpu_rw_semaphore {
8125         struct rcu_sync         rss;
8126         unsigned int __percpu   *read_count;
8127         struct rw_semaphore     rw_sem;
8128 -       wait_queue_head_t       writer;
8129 +       struct swait_queue_head writer;
8130         int                     readers_block;
8131  };
8132  
8133 @@ -22,13 +22,13 @@ static struct percpu_rw_semaphore name = {                          \
8134         .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),        \
8135         .read_count = &__percpu_rwsem_rc_##name,                        \
8136         .rw_sem = __RWSEM_INITIALIZER(name.rw_sem),                     \
8137 -       .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer),           \
8138 +       .writer = __SWAIT_QUEUE_HEAD_INITIALIZER(name.writer),          \
8139  }
8140  
8141  extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
8142  extern void __percpu_up_read(struct percpu_rw_semaphore *);
8143  
8144 -static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
8145 +static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
8146  {
8147         might_sleep();
8148  
8149 @@ -46,16 +46,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
8150         __this_cpu_inc(*sem->read_count);
8151         if (unlikely(!rcu_sync_is_idle(&sem->rss)))
8152                 __percpu_down_read(sem, false); /* Unconditional memory barrier */
8153 -       barrier();
8154         /*
8155 -        * The barrier() prevents the compiler from
8156 +        * The preempt_enable() prevents the compiler from
8157          * bleeding the critical section out.
8158          */
8159 -}
8160 -
8161 -static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
8162 -{
8163 -       percpu_down_read_preempt_disable(sem);
8164         preempt_enable();
8165  }
8166  
8167 @@ -82,13 +76,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
8168         return ret;
8169  }
8170  
8171 -static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
8172 +static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
8173  {
8174 -       /*
8175 -        * The barrier() prevents the compiler from
8176 -        * bleeding the critical section out.
8177 -        */
8178 -       barrier();
8179 +       preempt_disable();
8180         /*
8181          * Same as in percpu_down_read().
8182          */
8183 @@ -101,12 +91,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem
8184         rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
8185  }
8186  
8187 -static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
8188 -{
8189 -       preempt_disable();
8190 -       percpu_up_read_preempt_enable(sem);
8191 -}
8192 -
8193  extern void percpu_down_write(struct percpu_rw_semaphore *);
8194  extern void percpu_up_write(struct percpu_rw_semaphore *);
8195  
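Reader-side sketch: with the preempt-disabled variants removed above, readers use the plain (possibly sleeping) calls. The semaphore name is illustrative.

static DEFINE_STATIC_PERCPU_RWSEM(my_rwsem);

static void my_reader(void)
{
	percpu_down_read(&my_rwsem);
	/* ... read-side section, may sleep ... */
	percpu_up_read(&my_rwsem);
}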
8196 diff --git a/include/linux/percpu.h b/include/linux/percpu.h
8197 index 56939d3f6e53..b988bf40ad3e 100644
8198 --- a/include/linux/percpu.h
8199 +++ b/include/linux/percpu.h
8200 @@ -18,6 +18,35 @@
8201  #define PERCPU_MODULE_RESERVE          0
8202  #endif
8203  
8204 +#ifdef CONFIG_PREEMPT_RT_FULL
8205 +
8206 +#define get_local_var(var) (*({        \
8207 +       migrate_disable();      \
8208 +       this_cpu_ptr(&var);     }))
8209 +
8210 +#define put_local_var(var) do {        \
8211 +       (void)&(var);           \
8212 +       migrate_enable();       \
8213 +} while (0)
8214 +
8215 +# define get_local_ptr(var) ({ \
8216 +       migrate_disable();      \
8217 +       this_cpu_ptr(var);      })
8218 +
8219 +# define put_local_ptr(var) do {       \
8220 +       (void)(var);                    \
8221 +       migrate_enable();               \
8222 +} while (0)
8223 +
8224 +#else
8225 +
8226 +#define get_local_var(var)     get_cpu_var(var)
8227 +#define put_local_var(var)     put_cpu_var(var)
8228 +#define get_local_ptr(var)     get_cpu_ptr(var)
8229 +#define put_local_ptr(var)     put_cpu_ptr(var)
8230 +
8231 +#endif
8232 +
8233  /* minimum unit size, also is the maximum supported allocation size */
8234  #define PCPU_MIN_UNIT_SIZE             PFN_ALIGN(32 << 10)
8235  
8236 @@ -110,6 +139,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
8237  #endif
8238  
8239  extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
8240 +extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
8241  extern bool is_kernel_percpu_address(unsigned long addr);
8242  
8243  #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
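
A minimal sketch (illustration only, not part of the patch) of the get_local_var()/put_local_var() helpers added above, which keep per-CPU access correct on RT by disabling only migration; the per-CPU counter name is made up for the example:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, ex_hits);

static void ex_count_hit(void)
{
        /* !RT: behaves like get_cpu_var() (preemption disabled);
         * RT:  migrate_disable() only, so the section stays preemptible. */
        get_local_var(ex_hits)++;
        put_local_var(ex_hits);
}
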
8244 diff --git a/include/linux/pid.h b/include/linux/pid.h
8245 index 23705a53abba..2cc64b779f03 100644
8246 --- a/include/linux/pid.h
8247 +++ b/include/linux/pid.h
8248 @@ -2,6 +2,7 @@
8249  #define _LINUX_PID_H
8250  
8251  #include <linux/rcupdate.h>
8252 +#include <linux/atomic.h>
8253  
8254  enum pid_type
8255  {
8256 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
8257 index 75e4e30677f1..1cfb1cb72354 100644
8258 --- a/include/linux/preempt.h
8259 +++ b/include/linux/preempt.h
8260 @@ -50,7 +50,11 @@
8261  #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
8262  #define NMI_OFFSET     (1UL << NMI_SHIFT)
8263  
8264 -#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
8265 +#ifndef CONFIG_PREEMPT_RT_FULL
8266 +# define SOFTIRQ_DISABLE_OFFSET                (2 * SOFTIRQ_OFFSET)
8267 +#else
8268 +# define SOFTIRQ_DISABLE_OFFSET                (0)
8269 +#endif
8270  
8271  /* We use the MSB mostly because its available */
8272  #define PREEMPT_NEED_RESCHED   0x80000000
8273 @@ -59,9 +63,15 @@
8274  #include <asm/preempt.h>
8275  
8276  #define hardirq_count()        (preempt_count() & HARDIRQ_MASK)
8277 -#define softirq_count()        (preempt_count() & SOFTIRQ_MASK)
8278  #define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
8279                                  | NMI_MASK))
8280 +#ifndef CONFIG_PREEMPT_RT_FULL
8281 +# define softirq_count()       (preempt_count() & SOFTIRQ_MASK)
8282 +# define in_serving_softirq()  (softirq_count() & SOFTIRQ_OFFSET)
8283 +#else
8284 +# define softirq_count()       (0UL)
8285 +extern int in_serving_softirq(void);
8286 +#endif
8287  
8288  /*
8289   * Are we doing bottom half or hardware interrupt processing?
8290 @@ -72,7 +82,6 @@
8291  #define in_irq()               (hardirq_count())
8292  #define in_softirq()           (softirq_count())
8293  #define in_interrupt()         (irq_count())
8294 -#define in_serving_softirq()   (softirq_count() & SOFTIRQ_OFFSET)
8295  
8296  /*
8297   * Are we in NMI context?
8298 @@ -91,7 +100,11 @@
8299  /*
8300   * The preempt_count offset after spin_lock()
8301   */
8302 +#if !defined(CONFIG_PREEMPT_RT_FULL)
8303  #define PREEMPT_LOCK_OFFSET    PREEMPT_DISABLE_OFFSET
8304 +#else
8305 +#define PREEMPT_LOCK_OFFSET    0
8306 +#endif
8307  
8308  /*
8309   * The preempt_count offset needed for things like:
8310 @@ -140,6 +153,20 @@ extern void preempt_count_sub(int val);
8311  #define preempt_count_inc() preempt_count_add(1)
8312  #define preempt_count_dec() preempt_count_sub(1)
8313  
8314 +#ifdef CONFIG_PREEMPT_LAZY
8315 +#define add_preempt_lazy_count(val)    do { preempt_lazy_count() += (val); } while (0)
8316 +#define sub_preempt_lazy_count(val)    do { preempt_lazy_count() -= (val); } while (0)
8317 +#define inc_preempt_lazy_count()       add_preempt_lazy_count(1)
8318 +#define dec_preempt_lazy_count()       sub_preempt_lazy_count(1)
8319 +#define preempt_lazy_count()           (current_thread_info()->preempt_lazy_count)
8320 +#else
8321 +#define add_preempt_lazy_count(val)    do { } while (0)
8322 +#define sub_preempt_lazy_count(val)    do { } while (0)
8323 +#define inc_preempt_lazy_count()       do { } while (0)
8324 +#define dec_preempt_lazy_count()       do { } while (0)
8325 +#define preempt_lazy_count()           (0)
8326 +#endif
8327 +
8328  #ifdef CONFIG_PREEMPT_COUNT
8329  
8330  #define preempt_disable() \
8331 @@ -148,13 +175,25 @@ do { \
8332         barrier(); \
8333  } while (0)
8334  
8335 +#define preempt_lazy_disable() \
8336 +do { \
8337 +       inc_preempt_lazy_count(); \
8338 +       barrier(); \
8339 +} while (0)
8340 +
8341  #define sched_preempt_enable_no_resched() \
8342  do { \
8343         barrier(); \
8344         preempt_count_dec(); \
8345  } while (0)
8346  
8347 -#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
8348 +#ifdef CONFIG_PREEMPT_RT_BASE
8349 +# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
8350 +# define preempt_check_resched_rt() preempt_check_resched()
8351 +#else
8352 +# define preempt_enable_no_resched() preempt_enable()
8353 +# define preempt_check_resched_rt() barrier();
8354 +#endif
8355  
8356  #define preemptible()  (preempt_count() == 0 && !irqs_disabled())
8357  
8358 @@ -179,6 +218,13 @@ do { \
8359                 __preempt_schedule(); \
8360  } while (0)
8361  
8362 +#define preempt_lazy_enable() \
8363 +do { \
8364 +       dec_preempt_lazy_count(); \
8365 +       barrier(); \
8366 +       preempt_check_resched(); \
8367 +} while (0)
8368 +
8369  #else /* !CONFIG_PREEMPT */
8370  #define preempt_enable() \
8371  do { \
8372 @@ -224,6 +270,7 @@ do { \
8373  #define preempt_disable_notrace()              barrier()
8374  #define preempt_enable_no_resched_notrace()    barrier()
8375  #define preempt_enable_notrace()               barrier()
8376 +#define preempt_check_resched_rt()             barrier()
8377  #define preemptible()                          0
8378  
8379  #endif /* CONFIG_PREEMPT_COUNT */
8380 @@ -244,10 +291,31 @@ do { \
8381  } while (0)
8382  #define preempt_fold_need_resched() \
8383  do { \
8384 -       if (tif_need_resched()) \
8385 +       if (tif_need_resched_now()) \
8386                 set_preempt_need_resched(); \
8387  } while (0)
8388  
8389 +#ifdef CONFIG_PREEMPT_RT_FULL
8390 +# define preempt_disable_rt()          preempt_disable()
8391 +# define preempt_enable_rt()           preempt_enable()
8392 +# define preempt_disable_nort()                barrier()
8393 +# define preempt_enable_nort()         barrier()
8394 +# ifdef CONFIG_SMP
8395 +   extern void migrate_disable(void);
8396 +   extern void migrate_enable(void);
8397 +# else /* CONFIG_SMP */
8398 +#  define migrate_disable()            barrier()
8399 +#  define migrate_enable()             barrier()
8400 +# endif /* CONFIG_SMP */
8401 +#else
8402 +# define preempt_disable_rt()          barrier()
8403 +# define preempt_enable_rt()           barrier()
8404 +# define preempt_disable_nort()                preempt_disable()
8405 +# define preempt_enable_nort()         preempt_enable()
8406 +# define migrate_disable()             preempt_disable()
8407 +# define migrate_enable()              preempt_enable()
8408 +#endif
8409 +
8410  #ifdef CONFIG_PREEMPT_NOTIFIERS
8411  
8412  struct preempt_notifier;
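
A minimal sketch (illustration only, not part of the patch) of the migrate_disable()/migrate_enable() pair declared above: the task is pinned to its current CPU but, on RT, remains preemptible; the array and function names are made up for the example:

#include <linux/preempt.h>
#include <linux/smp.h>

static unsigned long ex_per_cpu_count[NR_CPUS];

static void ex_touch_local_count(void)
{
        int cpu;

        migrate_disable();              /* RT: pin to this CPU, stay preemptible */
        cpu = smp_processor_id();       /* stable until migrate_enable() */
        ex_per_cpu_count[cpu]++;
        migrate_enable();               /* !RT: the pair maps to preempt_disable/enable() */
}
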
8413 diff --git a/include/linux/printk.h b/include/linux/printk.h
8414 index eac1af8502bb..37e647af0b0b 100644
8415 --- a/include/linux/printk.h
8416 +++ b/include/linux/printk.h
8417 @@ -126,9 +126,11 @@ struct va_format {
8418  #ifdef CONFIG_EARLY_PRINTK
8419  extern asmlinkage __printf(1, 2)
8420  void early_printk(const char *fmt, ...);
8421 +extern void printk_kill(void);
8422  #else
8423  static inline __printf(1, 2) __cold
8424  void early_printk(const char *s, ...) { }
8425 +static inline void printk_kill(void) { }
8426  #endif
8427  
8428  #ifdef CONFIG_PRINTK_NMI
8429 diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
8430 index af3581b8a451..277295039c8f 100644
8431 --- a/include/linux/radix-tree.h
8432 +++ b/include/linux/radix-tree.h
8433 @@ -292,6 +292,8 @@ unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
8434  int radix_tree_preload(gfp_t gfp_mask);
8435  int radix_tree_maybe_preload(gfp_t gfp_mask);
8436  int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
8437 +void radix_tree_preload_end(void);
8438 +
8439  void radix_tree_init(void);
8440  void *radix_tree_tag_set(struct radix_tree_root *root,
8441                         unsigned long index, unsigned int tag);
8442 @@ -314,11 +316,6 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
8443  int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
8444  unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
8445  
8446 -static inline void radix_tree_preload_end(void)
8447 -{
8448 -       preempt_enable();
8449 -}
8450 -
8451  /**
8452   * struct radix_tree_iter - radix tree iterator state
8453   *
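
The hunk above only uninlines radix_tree_preload_end(), so the RT build can do more than a bare preempt_enable() there; the calling convention is unchanged. A minimal sketch (illustration only, not part of the patch) of the usual preload pattern, with the tree, lock and item made up for the example:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(ex_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(ex_tree_lock);

static int ex_store(unsigned long index, void *item)
{
        int err;

        err = radix_tree_preload(GFP_KERNEL);   /* may sleep, preloads per-CPU nodes */
        if (err)
                return err;

        spin_lock(&ex_tree_lock);
        err = radix_tree_insert(&ex_tree, index, item);
        spin_unlock(&ex_tree_lock);

        radix_tree_preload_end();               /* ends the preload section */
        return err;
}
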
8454 diff --git a/include/linux/random.h b/include/linux/random.h
8455 index 7bd2403e4fef..b2df7148a42b 100644
8456 --- a/include/linux/random.h
8457 +++ b/include/linux/random.h
8458 @@ -31,7 +31,7 @@ static inline void add_latent_entropy(void) {}
8459  
8460  extern void add_input_randomness(unsigned int type, unsigned int code,
8461                                  unsigned int value) __latent_entropy;
8462 -extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
8463 +extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy;
8464  
8465  extern void get_random_bytes(void *buf, int nbytes);
8466  extern int add_random_ready_callback(struct random_ready_callback *rdy);
8467 diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
8468 index e585018498d5..25c64474fc27 100644
8469 --- a/include/linux/rbtree.h
8470 +++ b/include/linux/rbtree.h
8471 @@ -31,7 +31,7 @@
8472  
8473  #include <linux/kernel.h>
8474  #include <linux/stddef.h>
8475 -#include <linux/rcupdate.h>
8476 +#include <linux/rcu_assign_pointer.h>
8477  
8478  struct rb_node {
8479         unsigned long  __rb_parent_color;
8480 diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
8481 index d076183e49be..36bfb4dd57ae 100644
8482 --- a/include/linux/rbtree_augmented.h
8483 +++ b/include/linux/rbtree_augmented.h
8484 @@ -26,6 +26,7 @@
8485  
8486  #include <linux/compiler.h>
8487  #include <linux/rbtree.h>
8488 +#include <linux/rcupdate.h>
8489  
8490  /*
8491   * Please note - only struct rb_augment_callbacks and the prototypes for
8492 diff --git a/include/linux/rcu_assign_pointer.h b/include/linux/rcu_assign_pointer.h
8493 new file mode 100644
8494 index 000000000000..7066962a4379
8495 --- /dev/null
8496 +++ b/include/linux/rcu_assign_pointer.h
8497 @@ -0,0 +1,54 @@
8498 +#ifndef __LINUX_RCU_ASSIGN_POINTER_H__
8499 +#define __LINUX_RCU_ASSIGN_POINTER_H__
8500 +#include <linux/compiler.h>
8501 +#include <asm/barrier.h>
8502 +
8503 +/**
8504 + * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
8505 + * @v: The value to statically initialize with.
8506 + */
8507 +#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
8508 +
8509 +/**
8510 + * rcu_assign_pointer() - assign to RCU-protected pointer
8511 + * @p: pointer to assign to
8512 + * @v: value to assign (publish)
8513 + *
8514 + * Assigns the specified value to the specified RCU-protected
8515 + * pointer, ensuring that any concurrent RCU readers will see
8516 + * any prior initialization.
8517 + *
8518 + * Inserts memory barriers on architectures that require them
8519 + * (which is most of them), and also prevents the compiler from
8520 + * reordering the code that initializes the structure after the pointer
8521 + * assignment.  More importantly, this call documents which pointers
8522 + * will be dereferenced by RCU read-side code.
8523 + *
8524 + * In some special cases, you may use RCU_INIT_POINTER() instead
8525 + * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
8526 + * to the fact that it does not constrain either the CPU or the compiler.
8527 + * That said, using RCU_INIT_POINTER() when you should have used
8528 + * rcu_assign_pointer() is a very bad thing that results in
8529 + * impossible-to-diagnose memory corruption.  So please be careful.
8530 + * See the RCU_INIT_POINTER() comment header for details.
8531 + *
8532 + * Note that rcu_assign_pointer() evaluates each of its arguments only
8533 + * once, appearances notwithstanding.  One of the "extra" evaluations
8534 + * is in typeof() and the other visible only to sparse (__CHECKER__),
8535 + * neither of which actually execute the argument.  As with most cpp
8536 + * macros, this execute-arguments-only-once property is important, so
8537 + * please be careful when making changes to rcu_assign_pointer() and the
8538 + * other macros that it invokes.
8539 + */
8540 +#define rcu_assign_pointer(p, v)                                             \
8541 +({                                                                           \
8542 +       uintptr_t _r_a_p__v = (uintptr_t)(v);                                 \
8543 +                                                                             \
8544 +       if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL)        \
8545 +               WRITE_ONCE((p), (typeof(p))(_r_a_p__v));                      \
8546 +       else                                                                  \
8547 +               smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
8548 +       _r_a_p__v;                                                            \
8549 +})
8550 +
8551 +#endif
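
A minimal publish/read sketch (illustration only, not part of the patch) for rcu_assign_pointer() as documented in the new header, paired with rcu_dereference() on the reader side; struct ex_cfg and the globals are made up for the example:

#include <linux/rcupdate.h>

struct ex_cfg { int a, b; };
static struct ex_cfg __rcu *ex_cfg_p;

static void ex_publish(struct ex_cfg *newc)
{
        newc->a = 1;                            /* fully initialize ... */
        newc->b = 2;
        rcu_assign_pointer(ex_cfg_p, newc);     /* ... then publish with release semantics */
}

static int ex_read_a(void)
{
        struct ex_cfg *c;
        int a = 0;

        rcu_read_lock();
        c = rcu_dereference(ex_cfg_p);          /* pairs with the assignment above */
        if (c)
                a = c->a;
        rcu_read_unlock();
        return a;
}
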
8552 diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
8553 index 01f71e1d2e94..30cc001d0d5a 100644
8554 --- a/include/linux/rcupdate.h
8555 +++ b/include/linux/rcupdate.h
8556 @@ -46,6 +46,7 @@
8557  #include <linux/compiler.h>
8558  #include <linux/ktime.h>
8559  #include <linux/irqflags.h>
8560 +#include <linux/rcu_assign_pointer.h>
8561  
8562  #include <asm/barrier.h>
8563  
8564 @@ -178,6 +179,9 @@ void call_rcu(struct rcu_head *head,
8565  
8566  #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
8567  
8568 +#ifdef CONFIG_PREEMPT_RT_FULL
8569 +#define call_rcu_bh    call_rcu
8570 +#else
8571  /**
8572   * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
8573   * @head: structure to be used for queueing the RCU updates.
8574 @@ -201,6 +205,7 @@ void call_rcu(struct rcu_head *head,
8575   */
8576  void call_rcu_bh(struct rcu_head *head,
8577                  rcu_callback_t func);
8578 +#endif
8579  
8580  /**
8581   * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
8582 @@ -301,6 +306,11 @@ void synchronize_rcu(void);
8583   * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
8584   */
8585  #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
8586 +#ifndef CONFIG_PREEMPT_RT_FULL
8587 +#define sched_rcu_preempt_depth()      rcu_preempt_depth()
8588 +#else
8589 +static inline int sched_rcu_preempt_depth(void) { return 0; }
8590 +#endif
8591  
8592  #else /* #ifdef CONFIG_PREEMPT_RCU */
8593  
8594 @@ -326,6 +336,8 @@ static inline int rcu_preempt_depth(void)
8595         return 0;
8596  }
8597  
8598 +#define sched_rcu_preempt_depth()      rcu_preempt_depth()
8599 +
8600  #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
8601  
8602  /* Internal to kernel */
8603 @@ -505,7 +517,14 @@ extern struct lockdep_map rcu_callback_map;
8604  int debug_lockdep_rcu_enabled(void);
8605  
8606  int rcu_read_lock_held(void);
8607 +#ifdef CONFIG_PREEMPT_RT_FULL
8608 +static inline int rcu_read_lock_bh_held(void)
8609 +{
8610 +       return rcu_read_lock_held();
8611 +}
8612 +#else
8613  int rcu_read_lock_bh_held(void);
8614 +#endif
8615  
8616  /**
8617   * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
8618 @@ -626,54 +645,6 @@ static inline void rcu_preempt_sleep_check(void)
8619  })
8620  
8621  /**
8622 - * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
8623 - * @v: The value to statically initialize with.
8624 - */
8625 -#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
8626 -
8627 -/**
8628 - * rcu_assign_pointer() - assign to RCU-protected pointer
8629 - * @p: pointer to assign to
8630 - * @v: value to assign (publish)
8631 - *
8632 - * Assigns the specified value to the specified RCU-protected
8633 - * pointer, ensuring that any concurrent RCU readers will see
8634 - * any prior initialization.
8635 - *
8636 - * Inserts memory barriers on architectures that require them
8637 - * (which is most of them), and also prevents the compiler from
8638 - * reordering the code that initializes the structure after the pointer
8639 - * assignment.  More importantly, this call documents which pointers
8640 - * will be dereferenced by RCU read-side code.
8641 - *
8642 - * In some special cases, you may use RCU_INIT_POINTER() instead
8643 - * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
8644 - * to the fact that it does not constrain either the CPU or the compiler.
8645 - * That said, using RCU_INIT_POINTER() when you should have used
8646 - * rcu_assign_pointer() is a very bad thing that results in
8647 - * impossible-to-diagnose memory corruption.  So please be careful.
8648 - * See the RCU_INIT_POINTER() comment header for details.
8649 - *
8650 - * Note that rcu_assign_pointer() evaluates each of its arguments only
8651 - * once, appearances notwithstanding.  One of the "extra" evaluations
8652 - * is in typeof() and the other visible only to sparse (__CHECKER__),
8653 - * neither of which actually execute the argument.  As with most cpp
8654 - * macros, this execute-arguments-only-once property is important, so
8655 - * please be careful when making changes to rcu_assign_pointer() and the
8656 - * other macros that it invokes.
8657 - */
8658 -#define rcu_assign_pointer(p, v)                                             \
8659 -({                                                                           \
8660 -       uintptr_t _r_a_p__v = (uintptr_t)(v);                                 \
8661 -                                                                             \
8662 -       if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL)        \
8663 -               WRITE_ONCE((p), (typeof(p))(_r_a_p__v));                      \
8664 -       else                                                                  \
8665 -               smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
8666 -       _r_a_p__v;                                                            \
8667 -})
8668 -
8669 -/**
8670   * rcu_access_pointer() - fetch RCU pointer with no dereferencing
8671   * @p: The pointer to read
8672   *
8673 @@ -951,10 +922,14 @@ static inline void rcu_read_unlock(void)
8674  static inline void rcu_read_lock_bh(void)
8675  {
8676         local_bh_disable();
8677 +#ifdef CONFIG_PREEMPT_RT_FULL
8678 +       rcu_read_lock();
8679 +#else
8680         __acquire(RCU_BH);
8681         rcu_lock_acquire(&rcu_bh_lock_map);
8682         RCU_LOCKDEP_WARN(!rcu_is_watching(),
8683                          "rcu_read_lock_bh() used illegally while idle");
8684 +#endif
8685  }
8686  
8687  /*
8688 @@ -964,10 +939,14 @@ static inline void rcu_read_lock_bh(void)
8689   */
8690  static inline void rcu_read_unlock_bh(void)
8691  {
8692 +#ifdef CONFIG_PREEMPT_RT_FULL
8693 +       rcu_read_unlock();
8694 +#else
8695         RCU_LOCKDEP_WARN(!rcu_is_watching(),
8696                          "rcu_read_unlock_bh() used illegally while idle");
8697         rcu_lock_release(&rcu_bh_lock_map);
8698         __release(RCU_BH);
8699 +#endif
8700         local_bh_enable();
8701  }
8702  
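
A minimal sketch (illustration only, not part of the patch) of a bh-flavoured RCU user, which this hunk keeps working on RT by folding rcu_read_lock_bh()/call_rcu_bh() into the normal RCU flavour; the list and structure are made up for the example:

#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct ex_entry {
        struct list_head node;
        struct rcu_head rcu;
        int key;
};

static LIST_HEAD(ex_list);

static bool ex_lookup(int key)
{
        struct ex_entry *e;
        bool found = false;

        rcu_read_lock_bh();                     /* RT: rcu_read_lock() under local_bh_disable() */
        list_for_each_entry_rcu(e, &ex_list, node) {
                if (e->key == key) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock_bh();
        return found;
}

static void ex_free_entry(struct rcu_head *head)
{
        kfree(container_of(head, struct ex_entry, rcu));
}

static void ex_remove(struct ex_entry *e)
{
        list_del_rcu(&e->node);                 /* caller serializes updaters */
        call_rcu_bh(&e->rcu, ex_free_entry);    /* RT: alias of call_rcu() */
}
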
8703 diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
8704 index 63a4e4cf40a5..08ab12df2863 100644
8705 --- a/include/linux/rcutree.h
8706 +++ b/include/linux/rcutree.h
8707 @@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu)
8708         rcu_note_context_switch();
8709  }
8710  
8711 +#ifdef CONFIG_PREEMPT_RT_FULL
8712 +# define synchronize_rcu_bh    synchronize_rcu
8713 +#else
8714  void synchronize_rcu_bh(void);
8715 +#endif
8716  void synchronize_sched_expedited(void);
8717  void synchronize_rcu_expedited(void);
8718  
8719 @@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_expedited(void)
8720  }
8721  
8722  void rcu_barrier(void);
8723 +#ifdef CONFIG_PREEMPT_RT_FULL
8724 +# define rcu_barrier_bh                rcu_barrier
8725 +#else
8726  void rcu_barrier_bh(void);
8727 +#endif
8728  void rcu_barrier_sched(void);
8729  unsigned long get_state_synchronize_rcu(void);
8730  void cond_synchronize_rcu(unsigned long oldstate);
8731 @@ -82,17 +90,14 @@ void cond_synchronize_sched(unsigned long oldstate);
8732  extern unsigned long rcutorture_testseq;
8733  extern unsigned long rcutorture_vernum;
8734  unsigned long rcu_batches_started(void);
8735 -unsigned long rcu_batches_started_bh(void);
8736  unsigned long rcu_batches_started_sched(void);
8737  unsigned long rcu_batches_completed(void);
8738 -unsigned long rcu_batches_completed_bh(void);
8739  unsigned long rcu_batches_completed_sched(void);
8740  unsigned long rcu_exp_batches_completed(void);
8741  unsigned long rcu_exp_batches_completed_sched(void);
8742  void show_rcu_gp_kthreads(void);
8743  
8744  void rcu_force_quiescent_state(void);
8745 -void rcu_bh_force_quiescent_state(void);
8746  void rcu_sched_force_quiescent_state(void);
8747  
8748  void rcu_idle_enter(void);
8749 @@ -109,6 +114,16 @@ extern int rcu_scheduler_active __read_mostly;
8750  
8751  bool rcu_is_watching(void);
8752  
8753 +#ifndef CONFIG_PREEMPT_RT_FULL
8754 +void rcu_bh_force_quiescent_state(void);
8755 +unsigned long rcu_batches_started_bh(void);
8756 +unsigned long rcu_batches_completed_bh(void);
8757 +#else
8758 +# define rcu_bh_force_quiescent_state  rcu_force_quiescent_state
8759 +# define rcu_batches_completed_bh      rcu_batches_completed
8760 +# define rcu_batches_started_bh                rcu_batches_completed
8761 +#endif
8762 +
8763  void rcu_all_qs(void);
8764  
8765  /* RCUtree hotplug events */
8766 diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
8767 index 1abba5ce2a2f..30211c627511 100644
8768 --- a/include/linux/rtmutex.h
8769 +++ b/include/linux/rtmutex.h
8770 @@ -13,11 +13,15 @@
8771  #define __LINUX_RT_MUTEX_H
8772  
8773  #include <linux/linkage.h>
8774 +#include <linux/spinlock_types_raw.h>
8775  #include <linux/rbtree.h>
8776 -#include <linux/spinlock_types.h>
8777  
8778  extern int max_lock_depth; /* for sysctl */
8779  
8780 +#ifdef CONFIG_DEBUG_MUTEXES
8781 +#include <linux/debug_locks.h>
8782 +#endif
8783 +
8784  /**
8785   * The rt_mutex structure
8786   *
8787 @@ -31,8 +35,8 @@ struct rt_mutex {
8788         struct rb_root          waiters;
8789         struct rb_node          *waiters_leftmost;
8790         struct task_struct      *owner;
8791 -#ifdef CONFIG_DEBUG_RT_MUTEXES
8792         int                     save_state;
8793 +#ifdef CONFIG_DEBUG_RT_MUTEXES
8794         const char              *name, *file;
8795         int                     line;
8796         void                    *magic;
8797 @@ -55,22 +59,33 @@ struct hrtimer_sleeper;
8798  # define rt_mutex_debug_check_no_locks_held(task)      do { } while (0)
8799  #endif
8800  
8801 +# define rt_mutex_init(mutex)                                  \
8802 +       do {                                                    \
8803 +               raw_spin_lock_init(&(mutex)->wait_lock);        \
8804 +               __rt_mutex_init(mutex, #mutex);                 \
8805 +       } while (0)
8806 +
8807  #ifdef CONFIG_DEBUG_RT_MUTEXES
8808  # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
8809         , .name = #mutexname, .file = __FILE__, .line = __LINE__
8810 -# define rt_mutex_init(mutex)                  __rt_mutex_init(mutex, __func__)
8811   extern void rt_mutex_debug_task_free(struct task_struct *tsk);
8812  #else
8813  # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
8814 -# define rt_mutex_init(mutex)                  __rt_mutex_init(mutex, NULL)
8815  # define rt_mutex_debug_task_free(t)                   do { } while (0)
8816  #endif
8817  
8818 -#define __RT_MUTEX_INITIALIZER(mutexname) \
8819 -       { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
8820 +#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
8821 +        .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
8822         , .waiters = RB_ROOT \
8823         , .owner = NULL \
8824 -       __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
8825 +       __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
8826 +
8827 +#define __RT_MUTEX_INITIALIZER(mutexname) \
8828 +       { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
8829 +
8830 +#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
8831 +       { __RT_MUTEX_INITIALIZER_PLAIN(mutexname)    \
8832 +       , .save_state = 1 }
8833  
8834  #define DEFINE_RT_MUTEX(mutexname) \
8835         struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
8836 @@ -91,6 +106,7 @@ extern void rt_mutex_destroy(struct rt_mutex *lock);
8837  
8838  extern void rt_mutex_lock(struct rt_mutex *lock);
8839  extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
8840 +extern int rt_mutex_lock_killable(struct rt_mutex *lock);
8841  extern int rt_mutex_timed_lock(struct rt_mutex *lock,
8842                                struct hrtimer_sleeper *timeout);
8843  
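
A minimal sketch (illustration only, not part of the patch) of the rt_mutex API with the rt_mutex_lock_killable() entry point added above; the lock and critical section are made up for the example:

#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(ex_lock);
static int ex_shared;

static int ex_update(int v)
{
        if (rt_mutex_lock_killable(&ex_lock))   /* interrupted by a fatal signal */
                return -EINTR;
        ex_shared = v;
        rt_mutex_unlock(&ex_lock);
        return 0;
}
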
8844 diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
8845 new file mode 100644
8846 index 000000000000..49ed2d45d3be
8847 --- /dev/null
8848 +++ b/include/linux/rwlock_rt.h
8849 @@ -0,0 +1,99 @@
8850 +#ifndef __LINUX_RWLOCK_RT_H
8851 +#define __LINUX_RWLOCK_RT_H
8852 +
8853 +#ifndef __LINUX_SPINLOCK_H
8854 +#error Do not include directly. Use spinlock.h
8855 +#endif
8856 +
8857 +#define rwlock_init(rwl)                               \
8858 +do {                                                   \
8859 +       static struct lock_class_key __key;             \
8860 +                                                       \
8861 +       rt_mutex_init(&(rwl)->lock);                    \
8862 +       __rt_rwlock_init(rwl, #rwl, &__key);            \
8863 +} while (0)
8864 +
8865 +extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
8866 +extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
8867 +extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
8868 +extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
8869 +extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
8870 +extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
8871 +extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
8872 +extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
8873 +extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
8874 +extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
8875 +
8876 +#define read_trylock(lock)     __cond_lock(lock, rt_read_trylock(lock))
8877 +#define write_trylock(lock)    __cond_lock(lock, rt_write_trylock(lock))
8878 +
8879 +#define write_trylock_irqsave(lock, flags)     \
8880 +       __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
8881 +
8882 +#define read_lock_irqsave(lock, flags)                 \
8883 +       do {                                            \
8884 +               typecheck(unsigned long, flags);        \
8885 +               flags = rt_read_lock_irqsave(lock);     \
8886 +       } while (0)
8887 +
8888 +#define write_lock_irqsave(lock, flags)                        \
8889 +       do {                                            \
8890 +               typecheck(unsigned long, flags);        \
8891 +               flags = rt_write_lock_irqsave(lock);    \
8892 +       } while (0)
8893 +
8894 +#define read_lock(lock)                rt_read_lock(lock)
8895 +
8896 +#define read_lock_bh(lock)                             \
8897 +       do {                                            \
8898 +               local_bh_disable();                     \
8899 +               rt_read_lock(lock);                     \
8900 +       } while (0)
8901 +
8902 +#define read_lock_irq(lock)    read_lock(lock)
8903 +
8904 +#define write_lock(lock)       rt_write_lock(lock)
8905 +
8906 +#define write_lock_bh(lock)                            \
8907 +       do {                                            \
8908 +               local_bh_disable();                     \
8909 +               rt_write_lock(lock);                    \
8910 +       } while (0)
8911 +
8912 +#define write_lock_irq(lock)   write_lock(lock)
8913 +
8914 +#define read_unlock(lock)      rt_read_unlock(lock)
8915 +
8916 +#define read_unlock_bh(lock)                           \
8917 +       do {                                            \
8918 +               rt_read_unlock(lock);                   \
8919 +               local_bh_enable();                      \
8920 +       } while (0)
8921 +
8922 +#define read_unlock_irq(lock)  read_unlock(lock)
8923 +
8924 +#define write_unlock(lock)     rt_write_unlock(lock)
8925 +
8926 +#define write_unlock_bh(lock)                          \
8927 +       do {                                            \
8928 +               rt_write_unlock(lock);                  \
8929 +               local_bh_enable();                      \
8930 +       } while (0)
8931 +
8932 +#define write_unlock_irq(lock) write_unlock(lock)
8933 +
8934 +#define read_unlock_irqrestore(lock, flags)            \
8935 +       do {                                            \
8936 +               typecheck(unsigned long, flags);        \
8937 +               (void) flags;                           \
8938 +               rt_read_unlock(lock);                   \
8939 +       } while (0)
8940 +
8941 +#define write_unlock_irqrestore(lock, flags) \
8942 +       do {                                            \
8943 +               typecheck(unsigned long, flags);        \
8944 +               (void) flags;                           \
8945 +               rt_write_unlock(lock);                  \
8946 +       } while (0)
8947 +
8948 +#endif
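
Callers keep the stock rwlock API; on RT the macros above route it to the rtmutex-based implementation. A minimal sketch (illustration only, not part of the patch), with the lock and data names made up for the example:

#include <linux/spinlock.h>     /* pulls in rwlock_rt.h on RT builds */

static DEFINE_RWLOCK(ex_rwlock);
static int ex_state;

static int ex_get_state(void)
{
        int v;

        read_lock(&ex_rwlock);          /* RT: rt_read_lock(), a sleeping lock */
        v = ex_state;
        read_unlock(&ex_rwlock);
        return v;
}

static void ex_set_state(int v)
{
        write_lock(&ex_rwlock);         /* RT: rt_write_lock() */
        ex_state = v;
        write_unlock(&ex_rwlock);
}
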
8949 diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
8950 index cc0072e93e36..5317cd957292 100644
8951 --- a/include/linux/rwlock_types.h
8952 +++ b/include/linux/rwlock_types.h
8953 @@ -1,6 +1,10 @@
8954  #ifndef __LINUX_RWLOCK_TYPES_H
8955  #define __LINUX_RWLOCK_TYPES_H
8956  
8957 +#if !defined(__LINUX_SPINLOCK_TYPES_H)
8958 +# error "Do not include directly, include spinlock_types.h"
8959 +#endif
8960 +
8961  /*
8962   * include/linux/rwlock_types.h - generic rwlock type definitions
8963   *                               and initializers
8964 diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
8965 new file mode 100644
8966 index 000000000000..51b28d775fe1
8967 --- /dev/null
8968 +++ b/include/linux/rwlock_types_rt.h
8969 @@ -0,0 +1,33 @@
8970 +#ifndef __LINUX_RWLOCK_TYPES_RT_H
8971 +#define __LINUX_RWLOCK_TYPES_RT_H
8972 +
8973 +#ifndef __LINUX_SPINLOCK_TYPES_H
8974 +#error "Do not include directly. Include spinlock_types.h instead"
8975 +#endif
8976 +
8977 +/*
8978 + * rwlocks - rtmutex which allows single reader recursion
8979 + */
8980 +typedef struct {
8981 +       struct rt_mutex         lock;
8982 +       int                     read_depth;
8983 +       unsigned int            break_lock;
8984 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
8985 +       struct lockdep_map      dep_map;
8986 +#endif
8987 +} rwlock_t;
8988 +
8989 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
8990 +# define RW_DEP_MAP_INIT(lockname)     .dep_map = { .name = #lockname }
8991 +#else
8992 +# define RW_DEP_MAP_INIT(lockname)
8993 +#endif
8994 +
8995 +#define __RW_LOCK_UNLOCKED(name) \
8996 +       { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
8997 +         RW_DEP_MAP_INIT(name) }
8998 +
8999 +#define DEFINE_RWLOCK(name) \
9000 +       rwlock_t name = __RW_LOCK_UNLOCKED(name)
9001 +
9002 +#endif
9003 diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
9004 index dd1d14250340..8e1f44ff1f2f 100644
9005 --- a/include/linux/rwsem.h
9006 +++ b/include/linux/rwsem.h
9007 @@ -19,6 +19,10 @@
9008  #include <linux/osq_lock.h>
9009  #endif
9010  
9011 +#ifdef CONFIG_PREEMPT_RT_FULL
9012 +#include <linux/rwsem_rt.h>
9013 +#else /* PREEMPT_RT_FULL */
9014 +
9015  struct rw_semaphore;
9016  
9017  #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
9018 @@ -184,4 +188,6 @@ extern void up_read_non_owner(struct rw_semaphore *sem);
9019  # define up_read_non_owner(sem)                        up_read(sem)
9020  #endif
9021  
9022 +#endif /* !PREEMPT_RT_FULL */
9023 +
9024  #endif /* _LINUX_RWSEM_H */
9025 diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
9026 new file mode 100644
9027 index 000000000000..e26bd95a57c3
9028 --- /dev/null
9029 +++ b/include/linux/rwsem_rt.h
9030 @@ -0,0 +1,167 @@
9031 +#ifndef _LINUX_RWSEM_RT_H
9032 +#define _LINUX_RWSEM_RT_H
9033 +
9034 +#ifndef _LINUX_RWSEM_H
9035 +#error "Include rwsem.h"
9036 +#endif
9037 +
9038 +/*
9039 + * RW-semaphores are a spinlock plus a reader-depth count.
9040 + *
9041 + * Note that the semantics are different from the usual

9042 + * Linux rw-sems: in PREEMPT_RT mode we do not allow
9043 + * multiple readers to hold the lock at once; we only allow
9044 + * a read-lock owner to read-lock recursively. This is
9045 + * better for latency, makes the implementation inherently
9046 + * fair and makes it simpler as well.
9047 + */
9048 +
9049 +#include <linux/rtmutex.h>
9050 +
9051 +struct rw_semaphore {
9052 +       struct rt_mutex         lock;
9053 +       int                     read_depth;
9054 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
9055 +       struct lockdep_map      dep_map;
9056 +#endif
9057 +};
9058 +
9059 +#define __RWSEM_INITIALIZER(name) \
9060 +       { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
9061 +         RW_DEP_MAP_INIT(name) }
9062 +
9063 +#define DECLARE_RWSEM(lockname) \
9064 +       struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
9065 +
9066 +extern void  __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
9067 +                                    struct lock_class_key *key);
9068 +
9069 +#define __rt_init_rwsem(sem, name, key)                        \
9070 +       do {                                            \
9071 +               rt_mutex_init(&(sem)->lock);            \
9072 +               __rt_rwsem_init((sem), (name), (key));\
9073 +       } while (0)
9074 +
9075 +#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
9076 +
9077 +# define rt_init_rwsem(sem)                            \
9078 +do {                                                   \
9079 +       static struct lock_class_key __key;             \
9080 +                                                       \
9081 +       __rt_init_rwsem((sem), #sem, &__key);           \
9082 +} while (0)
9083 +
9084 +extern void rt_down_write(struct rw_semaphore *rwsem);
9085 +extern int  rt_down_write_killable(struct rw_semaphore *rwsem);
9086 +extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
9087 +extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
9088 +extern int  rt_down_write_killable_nested(struct rw_semaphore *rwsem,
9089 +                                         int subclass);
9090 +extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
9091 +                                     struct lockdep_map *nest);
9092 +extern void rt__down_read(struct rw_semaphore *rwsem);
9093 +extern void rt_down_read(struct rw_semaphore *rwsem);
9094 +extern int  rt_down_write_trylock(struct rw_semaphore *rwsem);
9095 +extern int  rt__down_read_trylock(struct rw_semaphore *rwsem);
9096 +extern int  rt_down_read_trylock(struct rw_semaphore *rwsem);
9097 +extern void __rt_up_read(struct rw_semaphore *rwsem);
9098 +extern void rt_up_read(struct rw_semaphore *rwsem);
9099 +extern void rt_up_write(struct rw_semaphore *rwsem);
9100 +extern void rt_downgrade_write(struct rw_semaphore *rwsem);
9101 +
9102 +#define init_rwsem(sem)                rt_init_rwsem(sem)
9103 +#define rwsem_is_locked(s)     rt_mutex_is_locked(&(s)->lock)
9104 +
9105 +static inline int rwsem_is_contended(struct rw_semaphore *sem)
9106 +{
9107 +       /* rt_mutex_has_waiters() */
9108 +       return !RB_EMPTY_ROOT(&sem->lock.waiters);
9109 +}
9110 +
9111 +static inline void __down_read(struct rw_semaphore *sem)
9112 +{
9113 +       rt__down_read(sem);
9114 +}
9115 +
9116 +static inline void down_read(struct rw_semaphore *sem)
9117 +{
9118 +       rt_down_read(sem);
9119 +}
9120 +
9121 +static inline int __down_read_trylock(struct rw_semaphore *sem)
9122 +{
9123 +       return rt__down_read_trylock(sem);
9124 +}
9125 +
9126 +static inline int down_read_trylock(struct rw_semaphore *sem)
9127 +{
9128 +       return rt_down_read_trylock(sem);
9129 +}
9130 +
9131 +static inline void down_write(struct rw_semaphore *sem)
9132 +{
9133 +       rt_down_write(sem);
9134 +}
9135 +
9136 +static inline int down_write_killable(struct rw_semaphore *sem)
9137 +{
9138 +       return rt_down_write_killable(sem);
9139 +}
9140 +
9141 +static inline int down_write_trylock(struct rw_semaphore *sem)
9142 +{
9143 +       return rt_down_write_trylock(sem);
9144 +}
9145 +
9146 +static inline void __up_read(struct rw_semaphore *sem)
9147 +{
9148 +       __rt_up_read(sem);
9149 +}
9150 +
9151 +static inline void up_read(struct rw_semaphore *sem)
9152 +{
9153 +       rt_up_read(sem);
9154 +}
9155 +
9156 +static inline void up_write(struct rw_semaphore *sem)
9157 +{
9158 +       rt_up_write(sem);
9159 +}
9160 +
9161 +static inline void downgrade_write(struct rw_semaphore *sem)
9162 +{
9163 +       rt_downgrade_write(sem);
9164 +}
9165 +
9166 +static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
9167 +{
9168 +       return rt_down_read_nested(sem, subclass);
9169 +}
9170 +
9171 +static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
9172 +{
9173 +       rt_down_write_nested(sem, subclass);
9174 +}
9175 +
9176 +static inline int down_write_killable_nested(struct rw_semaphore *sem,
9177 +                                            int subclass)
9178 +{
9179 +       return rt_down_write_killable_nested(sem, subclass);
9180 +}
9181 +
9182 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
9183 +static inline void down_write_nest_lock(struct rw_semaphore *sem,
9184 +               struct rw_semaphore *nest_lock)
9185 +{
9186 +       rt_down_write_nested_lock(sem, &nest_lock->dep_map);
9187 +}
9188 +
9189 +#else
9190 +
9191 +static inline void down_write_nest_lock(struct rw_semaphore *sem,
9192 +               struct rw_semaphore *nest_lock)
9193 +{
9194 +       rt_down_write_nested_lock(sem, NULL);
9195 +}
9196 +#endif
9197 +#endif
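
As with rwlocks, rw-semaphore callers are unchanged; the inlines above forward to the rtmutex-backed rt_down_*/rt_up_* functions, with only one reader at a time on RT. A minimal sketch (illustration only, not part of the patch), names made up for the example:

#include <linux/rwsem.h>

static DECLARE_RWSEM(ex_sem);
static int ex_table[16];

static int ex_read_slot(int i)
{
        int v;

        down_read(&ex_sem);     /* RT: single reader, recursion by the owner only */
        v = ex_table[i];
        up_read(&ex_sem);
        return v;
}

static void ex_write_slot(int i, int v)
{
        down_write(&ex_sem);
        ex_table[i] = v;
        up_write(&ex_sem);
}
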
9198 diff --git a/include/linux/sched.h b/include/linux/sched.h
9199 index 75d9a57e212e..8cb7df0f56e3 100644
9200 --- a/include/linux/sched.h
9201 +++ b/include/linux/sched.h
9202 @@ -26,6 +26,7 @@ struct sched_param {
9203  #include <linux/nodemask.h>
9204  #include <linux/mm_types.h>
9205  #include <linux/preempt.h>
9206 +#include <asm/kmap_types.h>
9207  
9208  #include <asm/page.h>
9209  #include <asm/ptrace.h>
9210 @@ -243,10 +244,7 @@ extern char ___assert_task_state[1 - 2*!!(
9211                                  TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
9212                                  __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
9213  
9214 -#define task_is_traced(task)   ((task->state & __TASK_TRACED) != 0)
9215  #define task_is_stopped(task)  ((task->state & __TASK_STOPPED) != 0)
9216 -#define task_is_stopped_or_traced(task)        \
9217 -                       ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
9218  #define task_contributes_to_load(task) \
9219                                 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
9220                                  (task->flags & PF_FROZEN) == 0 && \
9221 @@ -312,6 +310,11 @@ extern char ___assert_task_state[1 - 2*!!(
9222  
9223  #endif
9224  
9225 +#define __set_current_state_no_track(state_value)      \
9226 +       do { current->state = (state_value); } while (0)
9227 +#define set_current_state_no_track(state_value)                \
9228 +       set_mb(current->state, (state_value))
9229 +
9230  /* Task command name length */
9231  #define TASK_COMM_LEN 16
9232  
9233 @@ -1013,8 +1016,18 @@ struct wake_q_head {
9234         struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
9235  
9236  extern void wake_q_add(struct wake_q_head *head,
9237 -                      struct task_struct *task);
9238 -extern void wake_up_q(struct wake_q_head *head);
9239 +                             struct task_struct *task);
9240 +extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
9241 +
9242 +static inline void wake_up_q(struct wake_q_head *head)
9243 +{
9244 +       __wake_up_q(head, false);
9245 +}
9246 +
9247 +static inline void wake_up_q_sleeper(struct wake_q_head *head)
9248 +{
9249 +       __wake_up_q(head, true);
9250 +}
9251  
9252  /*
9253   * sched-domains (multiprocessor balancing) declarations:
9254 @@ -1481,6 +1494,7 @@ struct task_struct {
9255         struct thread_info thread_info;
9256  #endif
9257         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
9258 +       volatile long saved_state; /* saved state for "spinlock sleepers" */
9259         void *stack;
9260         atomic_t usage;
9261         unsigned int flags;     /* per process flags, defined below */
9262 @@ -1520,6 +1534,12 @@ struct task_struct {
9263  #endif
9264  
9265         unsigned int policy;
9266 +#ifdef CONFIG_PREEMPT_RT_FULL
9267 +       int migrate_disable;
9268 +# ifdef CONFIG_SCHED_DEBUG
9269 +       int migrate_disable_atomic;
9270 +# endif
9271 +#endif
9272         int nr_cpus_allowed;
9273         cpumask_t cpus_allowed;
9274  
9275 @@ -1654,6 +1674,9 @@ struct task_struct {
9276  
9277         struct task_cputime cputime_expires;
9278         struct list_head cpu_timers[3];
9279 +#ifdef CONFIG_PREEMPT_RT_BASE
9280 +       struct task_struct *posix_timer_list;
9281 +#endif
9282  
9283  /* process credentials */
9284         const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
9285 @@ -1685,10 +1708,15 @@ struct task_struct {
9286  /* signal handlers */
9287         struct signal_struct *signal;
9288         struct sighand_struct *sighand;
9289 +       struct sigqueue *sigqueue_cache;
9290  
9291         sigset_t blocked, real_blocked;
9292         sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
9293         struct sigpending pending;
9294 +#ifdef CONFIG_PREEMPT_RT_FULL
9295 +       /* TODO: move me into ->restart_block ? */
9296 +       struct siginfo forced_info;
9297 +#endif
9298  
9299         unsigned long sas_ss_sp;
9300         size_t sas_ss_size;
9301 @@ -1917,6 +1945,12 @@ struct task_struct {
9302         /* bitmask and counter of trace recursion */
9303         unsigned long trace_recursion;
9304  #endif /* CONFIG_TRACING */
9305 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
9306 +       u64 preempt_timestamp_hist;
9307 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
9308 +       long timer_offset;
9309 +#endif
9310 +#endif
9311  #ifdef CONFIG_KCOV
9312         /* Coverage collection mode enabled for this task (0 if disabled). */
9313         enum kcov_mode kcov_mode;
9314 @@ -1942,9 +1976,23 @@ struct task_struct {
9315         unsigned int    sequential_io;
9316         unsigned int    sequential_io_avg;
9317  #endif
9318 +#ifdef CONFIG_PREEMPT_RT_BASE
9319 +       struct rcu_head put_rcu;
9320 +       int softirq_nestcnt;
9321 +       unsigned int softirqs_raised;
9322 +#endif
9323 +#ifdef CONFIG_PREEMPT_RT_FULL
9324 +# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
9325 +       int kmap_idx;
9326 +       pte_t kmap_pte[KM_TYPE_NR];
9327 +# endif
9328 +#endif
9329  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
9330         unsigned long   task_state_change;
9331  #endif
9332 +#ifdef CONFIG_PREEMPT_RT_FULL
9333 +       int xmit_recursion;
9334 +#endif
9335         int pagefault_disabled;
9336  #ifdef CONFIG_MMU
9337         struct task_struct *oom_reaper_list;
9338 @@ -1984,14 +2032,6 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
9339  }
9340  #endif
9341  
9342 -/* Future-safe accessor for struct task_struct's cpus_allowed. */
9343 -#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
9344 -
9345 -static inline int tsk_nr_cpus_allowed(struct task_struct *p)
9346 -{
9347 -       return p->nr_cpus_allowed;
9348 -}
9349 -
9350  #define TNF_MIGRATED   0x01
9351  #define TNF_NO_GROUP   0x02
9352  #define TNF_SHARED     0x04
9353 @@ -2207,6 +2247,15 @@ extern struct pid *cad_pid;
9354  extern void free_task(struct task_struct *tsk);
9355  #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
9356  
9357 +#ifdef CONFIG_PREEMPT_RT_BASE
9358 +extern void __put_task_struct_cb(struct rcu_head *rhp);
9359 +
9360 +static inline void put_task_struct(struct task_struct *t)
9361 +{
9362 +       if (atomic_dec_and_test(&t->usage))
9363 +               call_rcu(&t->put_rcu, __put_task_struct_cb);
9364 +}
9365 +#else
9366  extern void __put_task_struct(struct task_struct *t);
9367  
9368  static inline void put_task_struct(struct task_struct *t)
9369 @@ -2214,6 +2263,7 @@ static inline void put_task_struct(struct task_struct *t)
9370         if (atomic_dec_and_test(&t->usage))
9371                 __put_task_struct(t);
9372  }
9373 +#endif
9374  
9375  struct task_struct *task_rcu_dereference(struct task_struct **ptask);
9376  struct task_struct *try_get_task_struct(struct task_struct **ptask);
9377 @@ -2255,6 +2305,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
9378  /*
9379   * Per process flags
9380   */
9381 +#define PF_IN_SOFTIRQ  0x00000001      /* Task is serving softirq */
9382  #define PF_EXITING     0x00000004      /* getting shut down */
9383  #define PF_EXITPIDONE  0x00000008      /* pi exit done on shut down */
9384  #define PF_VCPU                0x00000010      /* I'm a virtual CPU */
9385 @@ -2423,6 +2474,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
9386  
9387  extern int set_cpus_allowed_ptr(struct task_struct *p,
9388                                 const struct cpumask *new_mask);
9389 +int migrate_me(void);
9390 +void tell_sched_cpu_down_begin(int cpu);
9391 +void tell_sched_cpu_down_done(int cpu);
9392 +
9393  #else
9394  static inline void do_set_cpus_allowed(struct task_struct *p,
9395                                       const struct cpumask *new_mask)
9396 @@ -2435,6 +2490,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
9397                 return -EINVAL;
9398         return 0;
9399  }
9400 +static inline int migrate_me(void) { return 0; }
9401 +static inline void tell_sched_cpu_down_begin(int cpu) { }
9402 +static inline void tell_sched_cpu_down_done(int cpu) { }
9403  #endif
9404  
9405  #ifdef CONFIG_NO_HZ_COMMON
9406 @@ -2673,6 +2731,7 @@ extern void xtime_update(unsigned long ticks);
9407  
9408  extern int wake_up_state(struct task_struct *tsk, unsigned int state);
9409  extern int wake_up_process(struct task_struct *tsk);
9410 +extern int wake_up_lock_sleeper(struct task_struct * tsk);
9411  extern void wake_up_new_task(struct task_struct *tsk);
9412  #ifdef CONFIG_SMP
9413   extern void kick_process(struct task_struct *tsk);
9414 @@ -2881,6 +2940,17 @@ static inline void mmdrop(struct mm_struct *mm)
9415                 __mmdrop(mm);
9416  }
9417  
9418 +#ifdef CONFIG_PREEMPT_RT_BASE
9419 +extern void __mmdrop_delayed(struct rcu_head *rhp);
9420 +static inline void mmdrop_delayed(struct mm_struct *mm)
9421 +{
9422 +       if (atomic_dec_and_test(&mm->mm_count))
9423 +               call_rcu(&mm->delayed_drop, __mmdrop_delayed);
9424 +}
9425 +#else
9426 +# define mmdrop_delayed(mm)    mmdrop(mm)
9427 +#endif
9428 +
9429  static inline void mmdrop_async_fn(struct work_struct *work)
9430  {
9431         struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
9432 @@ -3273,6 +3343,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
9433         return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
9434  }
9435  
9436 +#ifdef CONFIG_PREEMPT_LAZY
9437 +static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
9438 +{
9439 +       set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
9440 +}
9441 +
9442 +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
9443 +{
9444 +       clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
9445 +}
9446 +
9447 +static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
9448 +{
9449 +       return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
9450 +}
9451 +
9452 +static inline int need_resched_lazy(void)
9453 +{
9454 +       return test_thread_flag(TIF_NEED_RESCHED_LAZY);
9455 +}
9456 +
9457 +static inline int need_resched_now(void)
9458 +{
9459 +       return test_thread_flag(TIF_NEED_RESCHED);
9460 +}
9461 +
9462 +#else
9463 +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
9464 +static inline int need_resched_lazy(void) { return 0; }
9465 +
9466 +static inline int need_resched_now(void)
9467 +{
9468 +       return test_thread_flag(TIF_NEED_RESCHED);
9469 +}
9470 +
9471 +#endif
9472 +
9473  static inline int restart_syscall(void)
9474  {
9475         set_tsk_thread_flag(current, TIF_SIGPENDING);
9476 @@ -3304,6 +3411,51 @@ static inline int signal_pending_state(long state, struct task_struct *p)
9477         return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
9478  }
9479  
9480 +static inline bool __task_is_stopped_or_traced(struct task_struct *task)
9481 +{
9482 +       if (task->state & (__TASK_STOPPED | __TASK_TRACED))
9483 +               return true;
9484 +#ifdef CONFIG_PREEMPT_RT_FULL
9485 +       if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
9486 +               return true;
9487 +#endif
9488 +       return false;
9489 +}
9490 +
9491 +static inline bool task_is_stopped_or_traced(struct task_struct *task)
9492 +{
9493 +       bool traced_stopped;
9494 +
9495 +#ifdef CONFIG_PREEMPT_RT_FULL
9496 +       unsigned long flags;
9497 +
9498 +       raw_spin_lock_irqsave(&task->pi_lock, flags);
9499 +       traced_stopped = __task_is_stopped_or_traced(task);
9500 +       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
9501 +#else
9502 +       traced_stopped = __task_is_stopped_or_traced(task);
9503 +#endif
9504 +       return traced_stopped;
9505 +}
9506 +
9507 +static inline bool task_is_traced(struct task_struct *task)
9508 +{
9509 +       bool traced = false;
9510 +
9511 +       if (task->state & __TASK_TRACED)
9512 +               return true;
9513 +#ifdef CONFIG_PREEMPT_RT_FULL
9514 +       /* in case the task is sleeping on tasklist_lock */
9515 +       raw_spin_lock_irq(&task->pi_lock);
9516 +       if (task->state & __TASK_TRACED)
9517 +               traced = true;
9518 +       else if (task->saved_state & __TASK_TRACED)
9519 +               traced = true;
9520 +       raw_spin_unlock_irq(&task->pi_lock);
9521 +#endif
9522 +       return traced;
9523 +}
9524 +
9525  /*
9526   * cond_resched() and cond_resched_lock(): latency reduction via
9527   * explicit rescheduling in places that are safe. The return
9528 @@ -3329,12 +3481,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
9529         __cond_resched_lock(lock);                              \
9530  })
9531  
9532 +#ifndef CONFIG_PREEMPT_RT_FULL
9533  extern int __cond_resched_softirq(void);
9534  
9535  #define cond_resched_softirq() ({                                      \
9536         ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);     \
9537         __cond_resched_softirq();                                       \
9538  })
9539 +#else
9540 +# define cond_resched_softirq()                cond_resched()
9541 +#endif
9542  
9543  static inline void cond_resched_rcu(void)
9544  {
9545 @@ -3509,6 +3665,31 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
9546  
9547  #endif /* CONFIG_SMP */
9548  
9549 +static inline int __migrate_disabled(struct task_struct *p)
9550 +{
9551 +#ifdef CONFIG_PREEMPT_RT_FULL
9552 +       return p->migrate_disable;
9553 +#else
9554 +       return 0;
9555 +#endif
9556 +}
9557 +
9558 +/* Future-safe accessor for struct task_struct's cpus_allowed. */
9559 +static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
9560 +{
9561 +       if (__migrate_disabled(p))
9562 +               return cpumask_of(task_cpu(p));
9563 +
9564 +       return &p->cpus_allowed;
9565 +}
9566 +
9567 +static inline int tsk_nr_cpus_allowed(struct task_struct *p)
9568 +{
9569 +       if (__migrate_disabled(p))
9570 +               return 1;
9571 +       return p->nr_cpus_allowed;
9572 +}
9573 +
9574  extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
9575  extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
9576  
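
A minimal sketch (illustration only, not part of the patch) of the reworked tsk_cpus_allowed() accessor above, which reports a single-CPU mask while a task has migration disabled; the helper name is made up for the example:

#include <linux/sched.h>
#include <linux/cpumask.h>

static bool ex_task_may_run_on(struct task_struct *p, int cpu)
{
        /* While p has migration disabled (RT), this sees only task_cpu(p). */
        return cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
}
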
9577 diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
9578 index ead97654c4e9..3d7223ffdd3b 100644
9579 --- a/include/linux/seqlock.h
9580 +++ b/include/linux/seqlock.h
9581 @@ -220,20 +220,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
9582         return __read_seqcount_retry(s, start);
9583  }
9584  
9585 -
9586 -
9587 -static inline void raw_write_seqcount_begin(seqcount_t *s)
9588 +static inline void __raw_write_seqcount_begin(seqcount_t *s)
9589  {
9590         s->sequence++;
9591         smp_wmb();
9592  }
9593  
9594 -static inline void raw_write_seqcount_end(seqcount_t *s)
9595 +static inline void raw_write_seqcount_begin(seqcount_t *s)
9596 +{
9597 +       preempt_disable_rt();
9598 +       __raw_write_seqcount_begin(s);
9599 +}
9600 +
9601 +static inline void __raw_write_seqcount_end(seqcount_t *s)
9602  {
9603         smp_wmb();
9604         s->sequence++;
9605  }
9606  
9607 +static inline void raw_write_seqcount_end(seqcount_t *s)
9608 +{
9609 +       __raw_write_seqcount_end(s);
9610 +       preempt_enable_rt();
9611 +}
9612 +
9613  /**
9614   * raw_write_seqcount_barrier - do a seq write barrier
9615   * @s: pointer to seqcount_t
9616 @@ -428,10 +438,32 @@ typedef struct {
9617  /*
9618   * Read side functions for starting and finalizing a read side section.
9619   */
9620 +#ifndef CONFIG_PREEMPT_RT_FULL
9621  static inline unsigned read_seqbegin(const seqlock_t *sl)
9622  {
9623         return read_seqcount_begin(&sl->seqcount);
9624  }
9625 +#else
9626 +/*
9627 + * Starvation safe read side for RT
9628 + */
9629 +static inline unsigned read_seqbegin(seqlock_t *sl)
9630 +{
9631 +       unsigned ret;
9632 +
9633 +repeat:
9634 +       ret = ACCESS_ONCE(sl->seqcount.sequence);
9635 +       if (unlikely(ret & 1)) {
9636 +               /*
9637 +                * Take the lock and let the writer proceed (i.e. evtl
9638 +                * boost it), otherwise we could loop here forever.
9639 +                */
9640 +               spin_unlock_wait(&sl->lock);
9641 +               goto repeat;
9642 +       }
9643 +       return ret;
9644 +}
9645 +#endif
9646  
9647  static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
9648  {
9649 @@ -446,36 +478,45 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
9650  static inline void write_seqlock(seqlock_t *sl)
9651  {
9652         spin_lock(&sl->lock);
9653 -       write_seqcount_begin(&sl->seqcount);
9654 +       __raw_write_seqcount_begin(&sl->seqcount);
9655 +}
9656 +
9657 +static inline int try_write_seqlock(seqlock_t *sl)
9658 +{
9659 +       if (spin_trylock(&sl->lock)) {
9660 +               __raw_write_seqcount_begin(&sl->seqcount);
9661 +               return 1;
9662 +       }
9663 +       return 0;
9664  }
9665  
9666  static inline void write_sequnlock(seqlock_t *sl)
9667  {
9668 -       write_seqcount_end(&sl->seqcount);
9669 +       __raw_write_seqcount_end(&sl->seqcount);
9670         spin_unlock(&sl->lock);
9671  }
9672  
9673  static inline void write_seqlock_bh(seqlock_t *sl)
9674  {
9675         spin_lock_bh(&sl->lock);
9676 -       write_seqcount_begin(&sl->seqcount);
9677 +       __raw_write_seqcount_begin(&sl->seqcount);
9678  }
9679  
9680  static inline void write_sequnlock_bh(seqlock_t *sl)
9681  {
9682 -       write_seqcount_end(&sl->seqcount);
9683 +       __raw_write_seqcount_end(&sl->seqcount);
9684         spin_unlock_bh(&sl->lock);
9685  }
9686  
9687  static inline void write_seqlock_irq(seqlock_t *sl)
9688  {
9689         spin_lock_irq(&sl->lock);
9690 -       write_seqcount_begin(&sl->seqcount);
9691 +       __raw_write_seqcount_begin(&sl->seqcount);
9692  }
9693  
9694  static inline void write_sequnlock_irq(seqlock_t *sl)
9695  {
9696 -       write_seqcount_end(&sl->seqcount);
9697 +       __raw_write_seqcount_end(&sl->seqcount);
9698         spin_unlock_irq(&sl->lock);
9699  }
9700  
9701 @@ -484,7 +525,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
9702         unsigned long flags;
9703  
9704         spin_lock_irqsave(&sl->lock, flags);
9705 -       write_seqcount_begin(&sl->seqcount);
9706 +       __raw_write_seqcount_begin(&sl->seqcount);
9707         return flags;
9708  }
9709  
9710 @@ -494,7 +535,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
9711  static inline void
9712  write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
9713  {
9714 -       write_seqcount_end(&sl->seqcount);
9715 +       __raw_write_seqcount_end(&sl->seqcount);
9716         spin_unlock_irqrestore(&sl->lock, flags);
9717  }
9718  
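The seqlock.h hunk above splits the raw seqcount write helpers so the seqlock_t wrappers below can rely on their spinlock for RT protection, while the bare raw_write_seqcount_begin()/end() pair now pins the writer with preempt_disable_rt(); on PREEMPT_RT_FULL the reader additionally blocks on the lock so a preempted writer gets boosted instead of being spun on. The calling convention is unchanged. A minimal sketch, with hypothetical names (example_lock, example_a, example_b), of the reader/writer pattern these helpers serve:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(example_lock);
static u64 example_a, example_b;

static void example_update(u64 a, u64 b)
{
	write_seqlock(&example_lock);	/* spinlock serializes writers, sequence goes odd */
	example_a = a;
	example_b = b;
	write_sequnlock(&example_lock);	/* sequence goes even again */
}

static u64 example_sum(void)
{
	unsigned int seq;
	u64 a, b;

	do {
		seq = read_seqbegin(&example_lock);	/* on RT: waits on the lock if a writer is active */
		a = example_a;
		b = example_b;
	} while (read_seqretry(&example_lock, seq));	/* retry if a writer interfered */

	return a + b;
}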
9719 diff --git a/include/linux/signal.h b/include/linux/signal.h
9720 index b63f63eaa39c..295540fdfc72 100644
9721 --- a/include/linux/signal.h
9722 +++ b/include/linux/signal.h
9723 @@ -233,6 +233,7 @@ static inline void init_sigpending(struct sigpending *sig)
9724  }
9725  
9726  extern void flush_sigqueue(struct sigpending *queue);
9727 +extern void flush_task_sigqueue(struct task_struct *tsk);
9728  
9729  /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
9730  static inline int valid_signal(unsigned long sig)
9731 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
9732 index 32810f279f8e..0db6e31161f6 100644
9733 --- a/include/linux/skbuff.h
9734 +++ b/include/linux/skbuff.h
9735 @@ -284,6 +284,7 @@ struct sk_buff_head {
9736  
9737         __u32           qlen;
9738         spinlock_t      lock;
9739 +       raw_spinlock_t  raw_lock;
9740  };
9741  
9742  struct sk_buff;
9743 @@ -1573,6 +1574,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
9744         __skb_queue_head_init(list);
9745  }
9746  
9747 +static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
9748 +{
9749 +       raw_spin_lock_init(&list->raw_lock);
9750 +       __skb_queue_head_init(list);
9751 +}
9752 +
9753  static inline void skb_queue_head_init_class(struct sk_buff_head *list,
9754                 struct lock_class_key *class)
9755  {
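The sk_buff_head gains a parallel raw_lock plus skb_queue_head_init_raw() so individual queues can opt into a raw, truly IRQ-disabling lock where their users must not sleep even on RT. A hedged sketch of the intended pattern, with made-up names (example_q, example_q_setup, example_q_add); callers take ->raw_lock themselves and use the unlocked __skb_queue_* helpers:

#include <linux/skbuff.h>

static struct sk_buff_head example_q;

static void example_q_setup(void)
{
	skb_queue_head_init_raw(&example_q);		/* initializes ->raw_lock, not ->lock */
}

static void example_q_add(struct sk_buff *skb)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_q.raw_lock, flags);
	__skb_queue_tail(&example_q, skb);		/* unlocked helper, caller holds raw_lock */
	raw_spin_unlock_irqrestore(&example_q.raw_lock, flags);
}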
9756 diff --git a/include/linux/smp.h b/include/linux/smp.h
9757 index 8e0cb7a0f836..b16ca967ad80 100644
9758 --- a/include/linux/smp.h
9759 +++ b/include/linux/smp.h
9760 @@ -185,6 +185,9 @@ static inline void smp_init(void) { }
9761  #define get_cpu()              ({ preempt_disable(); smp_processor_id(); })
9762  #define put_cpu()              preempt_enable()
9763  
9764 +#define get_cpu_light()                ({ migrate_disable(); smp_processor_id(); })
9765 +#define put_cpu_light()                migrate_enable()
9766 +
9767  /*
9768   * Callback to arch code if there's nosmp or maxcpus=0 on the
9769   * boot command line:
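get_cpu_light()/put_cpu_light() return a stable CPU number by disabling only migration, so unlike get_cpu() the section stays preemptible and may take sleeping locks on RT. A hedged sketch; struct example_dev and its fields are invented for illustration:

#include <linux/smp.h>
#include <linux/mutex.h>

struct example_dev {
	struct mutex lock;
	int last_cpu;
};

static void example_record_cpu(struct example_dev *dev)
{
	int cpu = get_cpu_light();	/* migration off, preemption still on */

	mutex_lock(&dev->lock);		/* sleeping here would be a bug under get_cpu() */
	dev->last_cpu = cpu;
	mutex_unlock(&dev->lock);

	put_cpu_light();
}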
9770 diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
9771 index 47dd0cebd204..02928fa5499d 100644
9772 --- a/include/linux/spinlock.h
9773 +++ b/include/linux/spinlock.h
9774 @@ -271,7 +271,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
9775  #define raw_spin_can_lock(lock)        (!raw_spin_is_locked(lock))
9776  
9777  /* Include rwlock functions */
9778 -#include <linux/rwlock.h>
9779 +#ifdef CONFIG_PREEMPT_RT_FULL
9780 +# include <linux/rwlock_rt.h>
9781 +#else
9782 +# include <linux/rwlock.h>
9783 +#endif
9784  
9785  /*
9786   * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
9787 @@ -282,6 +286,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
9788  # include <linux/spinlock_api_up.h>
9789  #endif
9790  
9791 +#ifdef CONFIG_PREEMPT_RT_FULL
9792 +# include <linux/spinlock_rt.h>
9793 +#else /* PREEMPT_RT_FULL */
9794 +
9795  /*
9796   * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
9797   */
9798 @@ -347,6 +355,12 @@ static __always_inline void spin_unlock(spinlock_t *lock)
9799         raw_spin_unlock(&lock->rlock);
9800  }
9801  
9802 +static __always_inline int spin_unlock_no_deboost(spinlock_t *lock)
9803 +{
9804 +       raw_spin_unlock(&lock->rlock);
9805 +       return 0;
9806 +}
9807 +
9808  static __always_inline void spin_unlock_bh(spinlock_t *lock)
9809  {
9810         raw_spin_unlock_bh(&lock->rlock);
9811 @@ -416,4 +430,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
9812  #define atomic_dec_and_lock(atomic, lock) \
9813                 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
9814  
9815 +#endif /* !PREEMPT_RT_FULL */
9816 +
9817  #endif /* __LINUX_SPINLOCK_H */
9818 diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
9819 index 5344268e6e62..043263f30e81 100644
9820 --- a/include/linux/spinlock_api_smp.h
9821 +++ b/include/linux/spinlock_api_smp.h
9822 @@ -189,6 +189,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
9823         return 0;
9824  }
9825  
9826 -#include <linux/rwlock_api_smp.h>
9827 +#ifndef CONFIG_PREEMPT_RT_FULL
9828 +# include <linux/rwlock_api_smp.h>
9829 +#endif
9830  
9831  #endif /* __LINUX_SPINLOCK_API_SMP_H */
9832 diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
9833 new file mode 100644
9834 index 000000000000..3534cff3dd08
9835 --- /dev/null
9836 +++ b/include/linux/spinlock_rt.h
9837 @@ -0,0 +1,164 @@
9838 +#ifndef __LINUX_SPINLOCK_RT_H
9839 +#define __LINUX_SPINLOCK_RT_H
9840 +
9841 +#ifndef __LINUX_SPINLOCK_H
9842 +#error Do not include directly. Use spinlock.h
9843 +#endif
9844 +
9845 +#include <linux/bug.h>
9846 +
9847 +extern void
9848 +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
9849 +
9850 +#define spin_lock_init(slock)                          \
9851 +do {                                                   \
9852 +       static struct lock_class_key __key;             \
9853 +                                                       \
9854 +       rt_mutex_init(&(slock)->lock);                  \
9855 +       __rt_spin_lock_init(slock, #slock, &__key);     \
9856 +} while (0)
9857 +
9858 +void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
9859 +void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
9860 +int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);
9861 +
9862 +extern void __lockfunc rt_spin_lock(spinlock_t *lock);
9863 +extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
9864 +extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
9865 +extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
9866 +extern int __lockfunc rt_spin_unlock_no_deboost(spinlock_t *lock);
9867 +extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
9868 +extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
9869 +extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
9870 +extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
9871 +extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
9872 +
9873 +/*
9874 + * lockdep-less calls, for derived types like rwlock:
9875 + * (for trylock they can use rt_mutex_trylock() directly.
9876 + */
9877 +extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
9878 +extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
9879 +extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
9880 +
9881 +#define spin_lock(lock)                        rt_spin_lock(lock)
9882 +
9883 +#define spin_lock_bh(lock)                     \
9884 +       do {                                    \
9885 +               local_bh_disable();             \
9886 +               rt_spin_lock(lock);             \
9887 +       } while (0)
9888 +
9889 +#define spin_lock_irq(lock)            spin_lock(lock)
9890 +
9891 +#define spin_do_trylock(lock)          __cond_lock(lock, rt_spin_trylock(lock))
9892 +
9893 +#define spin_trylock(lock)                     \
9894 +({                                             \
9895 +       int __locked;                           \
9896 +       __locked = spin_do_trylock(lock);       \
9897 +       __locked;                               \
9898 +})
9899 +
9900 +#ifdef CONFIG_LOCKDEP
9901 +# define spin_lock_nested(lock, subclass)              \
9902 +       do {                                            \
9903 +               rt_spin_lock_nested(lock, subclass);    \
9904 +       } while (0)
9905 +
9906 +#define spin_lock_bh_nested(lock, subclass)            \
9907 +       do {                                            \
9908 +               local_bh_disable();                     \
9909 +               rt_spin_lock_nested(lock, subclass);    \
9910 +       } while (0)
9911 +
9912 +# define spin_lock_irqsave_nested(lock, flags, subclass) \
9913 +       do {                                             \
9914 +               typecheck(unsigned long, flags);         \
9915 +               flags = 0;                               \
9916 +               rt_spin_lock_nested(lock, subclass);     \
9917 +       } while (0)
9918 +#else
9919 +# define spin_lock_nested(lock, subclass)      spin_lock(lock)
9920 +# define spin_lock_bh_nested(lock, subclass)   spin_lock_bh(lock)
9921 +
9922 +# define spin_lock_irqsave_nested(lock, flags, subclass) \
9923 +       do {                                             \
9924 +               typecheck(unsigned long, flags);         \
9925 +               flags = 0;                               \
9926 +               spin_lock(lock);                         \
9927 +       } while (0)
9928 +#endif
9929 +
9930 +#define spin_lock_irqsave(lock, flags)                  \
9931 +       do {                                             \
9932 +               typecheck(unsigned long, flags);         \
9933 +               flags = 0;                               \
9934 +               spin_lock(lock);                         \
9935 +       } while (0)
9936 +
9937 +static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
9938 +{
9939 +       unsigned long flags = 0;
9940 +#ifdef CONFIG_TRACE_IRQFLAGS
9941 +       flags = rt_spin_lock_trace_flags(lock);
9942 +#else
9943 +       spin_lock(lock); /* lock_local */
9944 +#endif
9945 +       return flags;
9946 +}
9947 +
9948 +/* FIXME: we need rt_spin_lock_nest_lock */
9949 +#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
9950 +
9951 +#define spin_unlock(lock)                      rt_spin_unlock(lock)
9952 +#define spin_unlock_no_deboost(lock)           rt_spin_unlock_no_deboost(lock)
9953 +
9954 +#define spin_unlock_bh(lock)                           \
9955 +       do {                                            \
9956 +               rt_spin_unlock(lock);                   \
9957 +               local_bh_enable();                      \
9958 +       } while (0)
9959 +
9960 +#define spin_unlock_irq(lock)          spin_unlock(lock)
9961 +
9962 +#define spin_unlock_irqrestore(lock, flags)            \
9963 +       do {                                            \
9964 +               typecheck(unsigned long, flags);        \
9965 +               (void) flags;                           \
9966 +               spin_unlock(lock);                      \
9967 +       } while (0)
9968 +
9969 +#define spin_trylock_bh(lock)  __cond_lock(lock, rt_spin_trylock_bh(lock))
9970 +#define spin_trylock_irq(lock) spin_trylock(lock)
9971 +
9972 +#define spin_trylock_irqsave(lock, flags)      \
9973 +       rt_spin_trylock_irqsave(lock, &(flags))
9974 +
9975 +#define spin_unlock_wait(lock)         rt_spin_unlock_wait(lock)
9976 +
9977 +#ifdef CONFIG_GENERIC_LOCKBREAK
9978 +# define spin_is_contended(lock)       ((lock)->break_lock)
9979 +#else
9980 +# define spin_is_contended(lock)       (((void)(lock), 0))
9981 +#endif
9982 +
9983 +static inline int spin_can_lock(spinlock_t *lock)
9984 +{
9985 +       return !rt_mutex_is_locked(&lock->lock);
9986 +}
9987 +
9988 +static inline int spin_is_locked(spinlock_t *lock)
9989 +{
9990 +       return rt_mutex_is_locked(&lock->lock);
9991 +}
9992 +
9993 +static inline void assert_spin_locked(spinlock_t *lock)
9994 +{
9995 +       BUG_ON(!spin_is_locked(lock));
9996 +}
9997 +
9998 +#define atomic_dec_and_lock(atomic, lock) \
9999 +       atomic_dec_and_spin_lock(atomic, lock)
10000 +
10001 +#endif
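With this header selected, spinlock_t on PREEMPT_RT_FULL is an rt_mutex underneath: spin_lock() may sleep, spin_lock_irqsave() leaves interrupts enabled and merely zeroes flags, and spin_lock_bh() only wraps the sleeping lock in local_bh_disable(). Existing callers compile unchanged, as in this minimal sketch (example_lock and example_state are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_state;

static void example_set_state(unsigned long val)
{
	unsigned long flags;

	/* !RT: IRQs off + spinning; RT: rt_mutex, IRQs stay on, flags == 0 */
	spin_lock_irqsave(&example_lock, flags);
	example_state = val;
	spin_unlock_irqrestore(&example_lock, flags);
}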
10002 diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
10003 index 73548eb13a5d..10bac715ea96 100644
10004 --- a/include/linux/spinlock_types.h
10005 +++ b/include/linux/spinlock_types.h
10006 @@ -9,80 +9,15 @@
10007   * Released under the General Public License (GPL).
10008   */
10009  
10010 -#if defined(CONFIG_SMP)
10011 -# include <asm/spinlock_types.h>
10012 +#include <linux/spinlock_types_raw.h>
10013 +
10014 +#ifndef CONFIG_PREEMPT_RT_FULL
10015 +# include <linux/spinlock_types_nort.h>
10016 +# include <linux/rwlock_types.h>
10017  #else
10018 -# include <linux/spinlock_types_up.h>
10019 +# include <linux/rtmutex.h>
10020 +# include <linux/spinlock_types_rt.h>
10021 +# include <linux/rwlock_types_rt.h>
10022  #endif
10023  
10024 -#include <linux/lockdep.h>
10025 -
10026 -typedef struct raw_spinlock {
10027 -       arch_spinlock_t raw_lock;
10028 -#ifdef CONFIG_GENERIC_LOCKBREAK
10029 -       unsigned int break_lock;
10030 -#endif
10031 -#ifdef CONFIG_DEBUG_SPINLOCK
10032 -       unsigned int magic, owner_cpu;
10033 -       void *owner;
10034 -#endif
10035 -#ifdef CONFIG_DEBUG_LOCK_ALLOC
10036 -       struct lockdep_map dep_map;
10037 -#endif
10038 -} raw_spinlock_t;
10039 -
10040 -#define SPINLOCK_MAGIC         0xdead4ead
10041 -
10042 -#define SPINLOCK_OWNER_INIT    ((void *)-1L)
10043 -
10044 -#ifdef CONFIG_DEBUG_LOCK_ALLOC
10045 -# define SPIN_DEP_MAP_INIT(lockname)   .dep_map = { .name = #lockname }
10046 -#else
10047 -# define SPIN_DEP_MAP_INIT(lockname)
10048 -#endif
10049 -
10050 -#ifdef CONFIG_DEBUG_SPINLOCK
10051 -# define SPIN_DEBUG_INIT(lockname)             \
10052 -       .magic = SPINLOCK_MAGIC,                \
10053 -       .owner_cpu = -1,                        \
10054 -       .owner = SPINLOCK_OWNER_INIT,
10055 -#else
10056 -# define SPIN_DEBUG_INIT(lockname)
10057 -#endif
10058 -
10059 -#define __RAW_SPIN_LOCK_INITIALIZER(lockname)  \
10060 -       {                                       \
10061 -       .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,  \
10062 -       SPIN_DEBUG_INIT(lockname)               \
10063 -       SPIN_DEP_MAP_INIT(lockname) }
10064 -
10065 -#define __RAW_SPIN_LOCK_UNLOCKED(lockname)     \
10066 -       (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
10067 -
10068 -#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
10069 -
10070 -typedef struct spinlock {
10071 -       union {
10072 -               struct raw_spinlock rlock;
10073 -
10074 -#ifdef CONFIG_DEBUG_LOCK_ALLOC
10075 -# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
10076 -               struct {
10077 -                       u8 __padding[LOCK_PADSIZE];
10078 -                       struct lockdep_map dep_map;
10079 -               };
10080 -#endif
10081 -       };
10082 -} spinlock_t;
10083 -
10084 -#define __SPIN_LOCK_INITIALIZER(lockname) \
10085 -       { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
10086 -
10087 -#define __SPIN_LOCK_UNLOCKED(lockname) \
10088 -       (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
10089 -
10090 -#define DEFINE_SPINLOCK(x)     spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
10091 -
10092 -#include <linux/rwlock_types.h>
10093 -
10094  #endif /* __LINUX_SPINLOCK_TYPES_H */
10095 diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
10096 new file mode 100644
10097 index 000000000000..f1dac1fb1d6a
10098 --- /dev/null
10099 +++ b/include/linux/spinlock_types_nort.h
10100 @@ -0,0 +1,33 @@
10101 +#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
10102 +#define __LINUX_SPINLOCK_TYPES_NORT_H
10103 +
10104 +#ifndef __LINUX_SPINLOCK_TYPES_H
10105 +#error "Do not include directly. Include spinlock_types.h instead"
10106 +#endif
10107 +
10108 +/*
10109 + * The non RT version maps spinlocks to raw_spinlocks
10110 + */
10111 +typedef struct spinlock {
10112 +       union {
10113 +               struct raw_spinlock rlock;
10114 +
10115 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
10116 +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
10117 +               struct {
10118 +                       u8 __padding[LOCK_PADSIZE];
10119 +                       struct lockdep_map dep_map;
10120 +               };
10121 +#endif
10122 +       };
10123 +} spinlock_t;
10124 +
10125 +#define __SPIN_LOCK_INITIALIZER(lockname) \
10126 +       { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
10127 +
10128 +#define __SPIN_LOCK_UNLOCKED(lockname) \
10129 +       (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
10130 +
10131 +#define DEFINE_SPINLOCK(x)     spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
10132 +
10133 +#endif
10134 diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
10135 new file mode 100644
10136 index 000000000000..edffc4d53fc9
10137 --- /dev/null
10138 +++ b/include/linux/spinlock_types_raw.h
10139 @@ -0,0 +1,56 @@
10140 +#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
10141 +#define __LINUX_SPINLOCK_TYPES_RAW_H
10142 +
10143 +#if defined(CONFIG_SMP)
10144 +# include <asm/spinlock_types.h>
10145 +#else
10146 +# include <linux/spinlock_types_up.h>
10147 +#endif
10148 +
10149 +#include <linux/lockdep.h>
10150 +
10151 +typedef struct raw_spinlock {
10152 +       arch_spinlock_t raw_lock;
10153 +#ifdef CONFIG_GENERIC_LOCKBREAK
10154 +       unsigned int break_lock;
10155 +#endif
10156 +#ifdef CONFIG_DEBUG_SPINLOCK
10157 +       unsigned int magic, owner_cpu;
10158 +       void *owner;
10159 +#endif
10160 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
10161 +       struct lockdep_map dep_map;
10162 +#endif
10163 +} raw_spinlock_t;
10164 +
10165 +#define SPINLOCK_MAGIC         0xdead4ead
10166 +
10167 +#define SPINLOCK_OWNER_INIT    ((void *)-1L)
10168 +
10169 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
10170 +# define SPIN_DEP_MAP_INIT(lockname)   .dep_map = { .name = #lockname }
10171 +#else
10172 +# define SPIN_DEP_MAP_INIT(lockname)
10173 +#endif
10174 +
10175 +#ifdef CONFIG_DEBUG_SPINLOCK
10176 +# define SPIN_DEBUG_INIT(lockname)             \
10177 +       .magic = SPINLOCK_MAGIC,                \
10178 +       .owner_cpu = -1,                        \
10179 +       .owner = SPINLOCK_OWNER_INIT,
10180 +#else
10181 +# define SPIN_DEBUG_INIT(lockname)
10182 +#endif
10183 +
10184 +#define __RAW_SPIN_LOCK_INITIALIZER(lockname)  \
10185 +       {                                       \
10186 +       .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,  \
10187 +       SPIN_DEBUG_INIT(lockname)               \
10188 +       SPIN_DEP_MAP_INIT(lockname) }
10189 +
10190 +#define __RAW_SPIN_LOCK_UNLOCKED(lockname)     \
10191 +       (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
10192 +
10193 +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
10194 +
10195 +#endif
10196 diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
10197 new file mode 100644
10198 index 000000000000..3e3d8c5f7a9a
10199 --- /dev/null
10200 +++ b/include/linux/spinlock_types_rt.h
10201 @@ -0,0 +1,48 @@
10202 +#ifndef __LINUX_SPINLOCK_TYPES_RT_H
10203 +#define __LINUX_SPINLOCK_TYPES_RT_H
10204 +
10205 +#ifndef __LINUX_SPINLOCK_TYPES_H
10206 +#error "Do not include directly. Include spinlock_types.h instead"
10207 +#endif
10208 +
10209 +#include <linux/cache.h>
10210 +
10211 +/*
10212 + * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
10213 + */
10214 +typedef struct spinlock {
10215 +       struct rt_mutex         lock;
10216 +       unsigned int            break_lock;
10217 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
10218 +       struct lockdep_map      dep_map;
10219 +#endif
10220 +} spinlock_t;
10221 +
10222 +#ifdef CONFIG_DEBUG_RT_MUTEXES
10223 +# define __RT_SPIN_INITIALIZER(name) \
10224 +       { \
10225 +       .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
10226 +       .save_state = 1, \
10227 +       .file = __FILE__, \
10228 +       .line = __LINE__ , \
10229 +       }
10230 +#else
10231 +# define __RT_SPIN_INITIALIZER(name) \
10232 +       {                                                               \
10233 +       .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),          \
10234 +       .save_state = 1, \
10235 +       }
10236 +#endif
10237 +
10238 +/*
10239 +.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
10240 +*/
10241 +
10242 +#define __SPIN_LOCK_UNLOCKED(name)                     \
10243 +       { .lock = __RT_SPIN_INITIALIZER(name.lock),             \
10244 +         SPIN_DEP_MAP_INIT(name) }
10245 +
10246 +#define DEFINE_SPINLOCK(name) \
10247 +       spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
10248 +
10249 +#endif
10250 diff --git a/include/linux/srcu.h b/include/linux/srcu.h
10251 index dc8eb63c6568..e793d3a257da 100644
10252 --- a/include/linux/srcu.h
10253 +++ b/include/linux/srcu.h
10254 @@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct *sp);
10255  
10256  void process_srcu(struct work_struct *work);
10257  
10258 -#define __SRCU_STRUCT_INIT(name)                                       \
10259 +#define __SRCU_STRUCT_INIT(name, pcpu_name)                            \
10260         {                                                               \
10261                 .completed = -300,                                      \
10262 -               .per_cpu_ref = &name##_srcu_array,                      \
10263 +               .per_cpu_ref = &pcpu_name,                              \
10264                 .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock),    \
10265                 .running = false,                                       \
10266                 .batch_queue = RCU_BATCH_INIT(name.batch_queue),        \
10267 @@ -119,7 +119,7 @@ void process_srcu(struct work_struct *work);
10268   */
10269  #define __DEFINE_SRCU(name, is_static)                                 \
10270         static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
10271 -       is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
10272 +       is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array)
10273  #define DEFINE_SRCU(name)              __DEFINE_SRCU(name, /* not static */)
10274  #define DEFINE_STATIC_SRCU(name)       __DEFINE_SRCU(name, static)
10275  
10276 diff --git a/include/linux/suspend.h b/include/linux/suspend.h
10277 index d9718378a8be..e81e6dc7dcb1 100644
10278 --- a/include/linux/suspend.h
10279 +++ b/include/linux/suspend.h
10280 @@ -193,6 +193,12 @@ struct platform_freeze_ops {
10281         void (*end)(void);
10282  };
10283  
10284 +#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
10285 +extern bool pm_in_action;
10286 +#else
10287 +# define pm_in_action false
10288 +#endif
10289 +
10290  #ifdef CONFIG_SUSPEND
10291  /**
10292   * suspend_set_ops - set platform dependent suspend operations
10293 diff --git a/include/linux/swait.h b/include/linux/swait.h
10294 index c1f9c62a8a50..83f004a72320 100644
10295 --- a/include/linux/swait.h
10296 +++ b/include/linux/swait.h
10297 @@ -87,6 +87,7 @@ static inline int swait_active(struct swait_queue_head *q)
10298  extern void swake_up(struct swait_queue_head *q);
10299  extern void swake_up_all(struct swait_queue_head *q);
10300  extern void swake_up_locked(struct swait_queue_head *q);
10301 +extern void swake_up_all_locked(struct swait_queue_head *q);
10302  
10303  extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
10304  extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
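swake_up_all_locked() complements the existing swake_up helpers for callers that already hold the queue's raw lock, as the _locked suffix suggests. A hypothetical sketch of such a caller (example_wq, example_done and example_complete_all are made up):

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(example_wq);
static bool example_done;

static void example_complete_all(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_wq.lock, flags);
	example_done = true;
	swake_up_all_locked(&example_wq);	/* caller already holds ->lock */
	raw_spin_unlock_irqrestore(&example_wq.lock, flags);
}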
10305 diff --git a/include/linux/swap.h b/include/linux/swap.h
10306 index 55ff5593c193..52bf5477dc92 100644
10307 --- a/include/linux/swap.h
10308 +++ b/include/linux/swap.h
10309 @@ -11,6 +11,7 @@
10310  #include <linux/fs.h>
10311  #include <linux/atomic.h>
10312  #include <linux/page-flags.h>
10313 +#include <linux/locallock.h>
10314  #include <asm/page.h>
10315  
10316  struct notifier_block;
10317 @@ -247,7 +248,8 @@ struct swap_info_struct {
10318  void *workingset_eviction(struct address_space *mapping, struct page *page);
10319  bool workingset_refault(void *shadow);
10320  void workingset_activation(struct page *page);
10321 -extern struct list_lru workingset_shadow_nodes;
10322 +extern struct list_lru __workingset_shadow_nodes;
10323 +DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
10324  
10325  static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
10326  {
10327 @@ -292,6 +294,7 @@ extern unsigned long nr_free_pagecache_pages(void);
10328  
10329  
10330  /* linux/mm/swap.c */
10331 +DECLARE_LOCAL_IRQ_LOCK(swapvec_lock);
10332  extern void lru_cache_add(struct page *);
10333  extern void lru_cache_add_anon(struct page *page);
10334  extern void lru_cache_add_file(struct page *page);
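The two DECLARE_LOCAL_IRQ_LOCK() lines rely on the local-lock primitives that other hunks of this patch add in include/linux/locallock.h: on !RT they reduce to local_irq_*() around per-CPU data, on RT they become per-CPU sleeping locks. A hedged sketch of the pattern, assuming that API and using invented names (example_pcp_lock, example_pcp_count):

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_LOCAL_IRQ_LOCK(example_pcp_lock);
static DEFINE_PER_CPU(unsigned long, example_pcp_count);

static void example_pcp_inc(void)
{
	unsigned long flags;

	local_lock_irqsave(example_pcp_lock, flags);	/* irqsave on !RT, per-CPU lock on RT */
	this_cpu_inc(example_pcp_count);
	local_unlock_irqrestore(example_pcp_lock, flags);
}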
10335 diff --git a/include/linux/swork.h b/include/linux/swork.h
10336 new file mode 100644
10337 index 000000000000..f175fa9a6016
10338 --- /dev/null
10339 +++ b/include/linux/swork.h
10340 @@ -0,0 +1,24 @@
10341 +#ifndef _LINUX_SWORK_H
10342 +#define _LINUX_SWORK_H
10343 +
10344 +#include <linux/list.h>
10345 +
10346 +struct swork_event {
10347 +       struct list_head item;
10348 +       unsigned long flags;
10349 +       void (*func)(struct swork_event *);
10350 +};
10351 +
10352 +static inline void INIT_SWORK(struct swork_event *event,
10353 +                             void (*func)(struct swork_event *))
10354 +{
10355 +       event->flags = 0;
10356 +       event->func = func;
10357 +}
10358 +
10359 +bool swork_queue(struct swork_event *sev);
10360 +
10361 +int swork_get(void);
10362 +void swork_put(void);
10363 +
10364 +#endif /* _LINUX_SWORK_H */
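swork is a minimal deferred-work facility: events are handed to a single kernel worker thread, with swork_get()/swork_put() refcounting that thread; the kernel/cgroup.c hunk later in this patch uses it for css release work. A usage sketch with invented names (example_event, example_event_fn, example_swork_init/exit):

#include <linux/swork.h>
#include <linux/printk.h>

static struct swork_event example_event;

static void example_event_fn(struct swork_event *sev)
{
	pr_info("example swork event handled\n");
}

static int example_swork_init(void)
{
	int err = swork_get();		/* refcounted: starts the worker thread on first use */

	if (err)
		return err;

	INIT_SWORK(&example_event, example_event_fn);
	swork_queue(&example_event);
	return 0;
}

static void example_swork_exit(void)
{
	swork_put();			/* stops the worker once the last user is gone */
}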
10365 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
10366 index 2873baf5372a..eb1a108f17ca 100644
10367 --- a/include/linux/thread_info.h
10368 +++ b/include/linux/thread_info.h
10369 @@ -107,7 +107,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
10370  #define test_thread_flag(flag) \
10371         test_ti_thread_flag(current_thread_info(), flag)
10372  
10373 -#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
10374 +#ifdef CONFIG_PREEMPT_LAZY
10375 +#define tif_need_resched()     (test_thread_flag(TIF_NEED_RESCHED) || \
10376 +                                test_thread_flag(TIF_NEED_RESCHED_LAZY))
10377 +#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED))
10378 +#define tif_need_resched_lazy()        (test_thread_flag(TIF_NEED_RESCHED_LAZY))
10379 +
10380 +#else
10381 +#define tif_need_resched()     test_thread_flag(TIF_NEED_RESCHED)
10382 +#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED)
10383 +#define tif_need_resched_lazy()        0
10384 +#endif
10385  
10386  #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
10387  static inline int arch_within_stack_frames(const void * const stack,
10388 diff --git a/include/linux/timer.h b/include/linux/timer.h
10389 index 51d601f192d4..83cea629efe1 100644
10390 --- a/include/linux/timer.h
10391 +++ b/include/linux/timer.h
10392 @@ -241,7 +241,7 @@ extern void add_timer(struct timer_list *timer);
10393  
10394  extern int try_to_del_timer_sync(struct timer_list *timer);
10395  
10396 -#ifdef CONFIG_SMP
10397 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
10398    extern int del_timer_sync(struct timer_list *timer);
10399  #else
10400  # define del_timer_sync(t)             del_timer(t)
10401 diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
10402 index be007610ceb0..15154b13a53b 100644
10403 --- a/include/linux/trace_events.h
10404 +++ b/include/linux/trace_events.h
10405 @@ -56,6 +56,9 @@ struct trace_entry {
10406         unsigned char           flags;
10407         unsigned char           preempt_count;
10408         int                     pid;
10409 +       unsigned short          migrate_disable;
10410 +       unsigned short          padding;
10411 +       unsigned char           preempt_lazy_count;
10412  };
10413  
10414  #define TRACE_EVENT_TYPE_MAX                                           \
10415 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
10416 index f30c187ed785..83bf0f798426 100644
10417 --- a/include/linux/uaccess.h
10418 +++ b/include/linux/uaccess.h
10419 @@ -24,6 +24,7 @@ static __always_inline void pagefault_disabled_dec(void)
10420   */
10421  static inline void pagefault_disable(void)
10422  {
10423 +       migrate_disable();
10424         pagefault_disabled_inc();
10425         /*
10426          * make sure to have issued the store before a pagefault
10427 @@ -40,6 +41,7 @@ static inline void pagefault_enable(void)
10428          */
10429         barrier();
10430         pagefault_disabled_dec();
10431 +       migrate_enable();
10432  }
10433  
10434  /*
10435 diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
10436 index 4a29c75b146e..0a294e950df8 100644
10437 --- a/include/linux/uprobes.h
10438 +++ b/include/linux/uprobes.h
10439 @@ -27,6 +27,7 @@
10440  #include <linux/errno.h>
10441  #include <linux/rbtree.h>
10442  #include <linux/types.h>
10443 +#include <linux/wait.h>
10444  
10445  struct vm_area_struct;
10446  struct mm_struct;
10447 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
10448 index 613771909b6e..e28c5a43229d 100644
10449 --- a/include/linux/vmstat.h
10450 +++ b/include/linux/vmstat.h
10451 @@ -33,7 +33,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
10452   */
10453  static inline void __count_vm_event(enum vm_event_item item)
10454  {
10455 +       preempt_disable_rt();
10456         raw_cpu_inc(vm_event_states.event[item]);
10457 +       preempt_enable_rt();
10458  }
10459  
10460  static inline void count_vm_event(enum vm_event_item item)
10461 @@ -43,7 +45,9 @@ static inline void count_vm_event(enum vm_event_item item)
10462  
10463  static inline void __count_vm_events(enum vm_event_item item, long delta)
10464  {
10465 +       preempt_disable_rt();
10466         raw_cpu_add(vm_event_states.event[item], delta);
10467 +       preempt_enable_rt();
10468  }
10469  
10470  static inline void count_vm_events(enum vm_event_item item, long delta)
10471 diff --git a/include/linux/wait.h b/include/linux/wait.h
10472 index 2408e8d5c05c..db50d6609195 100644
10473 --- a/include/linux/wait.h
10474 +++ b/include/linux/wait.h
10475 @@ -8,6 +8,7 @@
10476  #include <linux/spinlock.h>
10477  #include <asm/current.h>
10478  #include <uapi/linux/wait.h>
10479 +#include <linux/atomic.h>
10480  
10481  typedef struct __wait_queue wait_queue_t;
10482  typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
10483 diff --git a/include/net/dst.h b/include/net/dst.h
10484 index 6835d224d47b..55a5a9698f14 100644
10485 --- a/include/net/dst.h
10486 +++ b/include/net/dst.h
10487 @@ -446,7 +446,7 @@ static inline void dst_confirm(struct dst_entry *dst)
10488  static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
10489                                    struct sk_buff *skb)
10490  {
10491 -       const struct hh_cache *hh;
10492 +       struct hh_cache *hh;
10493  
10494         if (dst->pending_confirm) {
10495                 unsigned long now = jiffies;
10496 diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
10497 index 231e121cc7d9..d125222b979d 100644
10498 --- a/include/net/gen_stats.h
10499 +++ b/include/net/gen_stats.h
10500 @@ -5,6 +5,7 @@
10501  #include <linux/socket.h>
10502  #include <linux/rtnetlink.h>
10503  #include <linux/pkt_sched.h>
10504 +#include <net/net_seq_lock.h>
10505  
10506  struct gnet_stats_basic_cpu {
10507         struct gnet_stats_basic_packed bstats;
10508 @@ -33,11 +34,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
10509                                  spinlock_t *lock, struct gnet_dump *d,
10510                                  int padattr);
10511  
10512 -int gnet_stats_copy_basic(const seqcount_t *running,
10513 +int gnet_stats_copy_basic(net_seqlock_t *running,
10514                           struct gnet_dump *d,
10515                           struct gnet_stats_basic_cpu __percpu *cpu,
10516                           struct gnet_stats_basic_packed *b);
10517 -void __gnet_stats_copy_basic(const seqcount_t *running,
10518 +void __gnet_stats_copy_basic(net_seqlock_t *running,
10519                              struct gnet_stats_basic_packed *bstats,
10520                              struct gnet_stats_basic_cpu __percpu *cpu,
10521                              struct gnet_stats_basic_packed *b);
10522 @@ -55,14 +56,14 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
10523                       struct gnet_stats_basic_cpu __percpu *cpu_bstats,
10524                       struct gnet_stats_rate_est64 *rate_est,
10525                       spinlock_t *stats_lock,
10526 -                     seqcount_t *running, struct nlattr *opt);
10527 +                     net_seqlock_t *running, struct nlattr *opt);
10528  void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
10529                         struct gnet_stats_rate_est64 *rate_est);
10530  int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
10531                           struct gnet_stats_basic_cpu __percpu *cpu_bstats,
10532                           struct gnet_stats_rate_est64 *rate_est,
10533                           spinlock_t *stats_lock,
10534 -                         seqcount_t *running, struct nlattr *opt);
10535 +                         net_seqlock_t *running, struct nlattr *opt);
10536  bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
10537                           const struct gnet_stats_rate_est64 *rate_est);
10538  #endif
10539 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
10540 index 8b683841e574..bf656008f6e7 100644
10541 --- a/include/net/neighbour.h
10542 +++ b/include/net/neighbour.h
10543 @@ -446,7 +446,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
10544  }
10545  #endif
10546  
10547 -static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
10548 +static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
10549  {
10550         unsigned int seq;
10551         int hh_len;
10552 @@ -501,7 +501,7 @@ struct neighbour_cb {
10553  
10554  #define NEIGH_CB(skb)  ((struct neighbour_cb *)(skb)->cb)
10555  
10556 -static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
10557 +static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
10558                                      const struct net_device *dev)
10559  {
10560         unsigned int seq;
10561 diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h
10562 new file mode 100644
10563 index 000000000000..a7034298a82a
10564 --- /dev/null
10565 +++ b/include/net/net_seq_lock.h
10566 @@ -0,0 +1,15 @@
10567 +#ifndef __NET_NET_SEQ_LOCK_H__
10568 +#define __NET_NET_SEQ_LOCK_H__
10569 +
10570 +#ifdef CONFIG_PREEMPT_RT_BASE
10571 +# define net_seqlock_t                 seqlock_t
10572 +# define net_seq_begin(__r)            read_seqbegin(__r)
10573 +# define net_seq_retry(__r, __s)       read_seqretry(__r, __s)
10574 +
10575 +#else
10576 +# define net_seqlock_t                 seqcount_t
10577 +# define net_seq_begin(__r)            read_seqcount_begin(__r)
10578 +# define net_seq_retry(__r, __s)       read_seqcount_retry(__r, __s)
10579 +#endif
10580 +
10581 +#endif
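net_seqlock_t hides whether a qdisc's ->running member is a plain seqcount_t (!RT) or a full seqlock_t (RT, see the sch_generic.h hunk below), so readers can be written once against net_seq_begin()/net_seq_retry(). A hedged sketch of such a reader; example_read_bytes is invented and only mirrors the shape of the gen_stats copy helpers:

#include <net/net_seq_lock.h>
#include <net/sch_generic.h>

static u64 example_read_bytes(struct Qdisc *qdisc,
			      const struct gnet_stats_basic_packed *b)
{
	net_seqlock_t *running = &qdisc->running;
	unsigned int seq;
	u64 bytes;

	do {
		seq = net_seq_begin(running);	/* seqcount or seqlock, depending on config */
		bytes = b->bytes;
	} while (net_seq_retry(running, seq));

	return bytes;
}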
10582 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
10583 index 7adf4386ac8f..d3fd5c357268 100644
10584 --- a/include/net/netns/ipv4.h
10585 +++ b/include/net/netns/ipv4.h
10586 @@ -69,6 +69,7 @@ struct netns_ipv4 {
10587  
10588         int sysctl_icmp_echo_ignore_all;
10589         int sysctl_icmp_echo_ignore_broadcasts;
10590 +       int sysctl_icmp_echo_sysrq;
10591         int sysctl_icmp_ignore_bogus_error_responses;
10592         int sysctl_icmp_ratelimit;
10593         int sysctl_icmp_ratemask;
10594 diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
10595 index e6aa0a249672..b57736f2a8a3 100644
10596 --- a/include/net/sch_generic.h
10597 +++ b/include/net/sch_generic.h
10598 @@ -10,6 +10,7 @@
10599  #include <linux/dynamic_queue_limits.h>
10600  #include <net/gen_stats.h>
10601  #include <net/rtnetlink.h>
10602 +#include <net/net_seq_lock.h>
10603  
10604  struct Qdisc_ops;
10605  struct qdisc_walker;
10606 @@ -86,7 +87,7 @@ struct Qdisc {
10607         struct sk_buff          *gso_skb ____cacheline_aligned_in_smp;
10608         struct qdisc_skb_head   q;
10609         struct gnet_stats_basic_packed bstats;
10610 -       seqcount_t              running;
10611 +       net_seqlock_t           running;
10612         struct gnet_stats_queue qstats;
10613         unsigned long           state;
10614         struct Qdisc            *next_sched;
10615 @@ -98,13 +99,22 @@ struct Qdisc {
10616         spinlock_t              busylock ____cacheline_aligned_in_smp;
10617  };
10618  
10619 -static inline bool qdisc_is_running(const struct Qdisc *qdisc)
10620 +static inline bool qdisc_is_running(struct Qdisc *qdisc)
10621  {
10622 +#ifdef CONFIG_PREEMPT_RT_BASE
10623 +       return spin_is_locked(&qdisc->running.lock) ? true : false;
10624 +#else
10625         return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
10626 +#endif
10627  }
10628  
10629  static inline bool qdisc_run_begin(struct Qdisc *qdisc)
10630  {
10631 +#ifdef CONFIG_PREEMPT_RT_BASE
10632 +       if (try_write_seqlock(&qdisc->running))
10633 +               return true;
10634 +       return false;
10635 +#else
10636         if (qdisc_is_running(qdisc))
10637                 return false;
10638         /* Variant of write_seqcount_begin() telling lockdep a trylock
10639 @@ -113,11 +123,16 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
10640         raw_write_seqcount_begin(&qdisc->running);
10641         seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
10642         return true;
10643 +#endif
10644  }
10645  
10646  static inline void qdisc_run_end(struct Qdisc *qdisc)
10647  {
10648 +#ifdef CONFIG_PREEMPT_RT_BASE
10649 +       write_sequnlock(&qdisc->running);
10650 +#else
10651         write_seqcount_end(&qdisc->running);
10652 +#endif
10653  }
10654  
10655  static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
10656 @@ -308,7 +323,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
10657         return qdisc_lock(root);
10658  }
10659  
10660 -static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
10661 +static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
10662  {
10663         struct Qdisc *root = qdisc_root_sleeping(qdisc);
10664  
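qdisc_run_begin()/qdisc_run_end() keep their trylock semantics in both configurations: on RT the new try_write_seqlock() takes the ->running seqlock, otherwise the raw seqcount dance is used, so only one task runs a given qdisc at a time. The caller pattern is unchanged; a short hypothetical sketch:

#include <net/sch_generic.h>

static void example_run_qdisc(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {	/* only one "runner" at a time */
		/* ... dequeue skbs and hand them to the driver ... */
		qdisc_run_end(q);
	}
}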
10665 diff --git a/include/trace/events/hist.h b/include/trace/events/hist.h
10666 new file mode 100644
10667 index 000000000000..f7710de1b1f3
10668 --- /dev/null
10669 +++ b/include/trace/events/hist.h
10670 @@ -0,0 +1,73 @@
10671 +#undef TRACE_SYSTEM
10672 +#define TRACE_SYSTEM hist
10673 +
10674 +#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
10675 +#define _TRACE_HIST_H
10676 +
10677 +#include "latency_hist.h"
10678 +#include <linux/tracepoint.h>
10679 +
10680 +#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
10681 +#define trace_preemptirqsoff_hist(a, b)
10682 +#define trace_preemptirqsoff_hist_rcuidle(a, b)
10683 +#else
10684 +TRACE_EVENT(preemptirqsoff_hist,
10685 +
10686 +       TP_PROTO(int reason, int starthist),
10687 +
10688 +       TP_ARGS(reason, starthist),
10689 +
10690 +       TP_STRUCT__entry(
10691 +               __field(int,    reason)
10692 +               __field(int,    starthist)
10693 +       ),
10694 +
10695 +       TP_fast_assign(
10696 +               __entry->reason         = reason;
10697 +               __entry->starthist      = starthist;
10698 +       ),
10699 +
10700 +       TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
10701 +                 __entry->starthist ? "start" : "stop")
10702 +);
10703 +#endif
10704 +
10705 +#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
10706 +#define trace_hrtimer_interrupt(a, b, c, d)
10707 +#else
10708 +TRACE_EVENT(hrtimer_interrupt,
10709 +
10710 +       TP_PROTO(int cpu, long long offset, struct task_struct *curr,
10711 +               struct task_struct *task),
10712 +
10713 +       TP_ARGS(cpu, offset, curr, task),
10714 +
10715 +       TP_STRUCT__entry(
10716 +               __field(int,            cpu)
10717 +               __field(long long,      offset)
10718 +               __array(char,           ccomm,  TASK_COMM_LEN)
10719 +               __field(int,            cprio)
10720 +               __array(char,           tcomm,  TASK_COMM_LEN)
10721 +               __field(int,            tprio)
10722 +       ),
10723 +
10724 +       TP_fast_assign(
10725 +               __entry->cpu    = cpu;
10726 +               __entry->offset = offset;
10727 +               memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
10728 +               __entry->cprio  = curr->prio;
10729 +               memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>",
10730 +                       task != NULL ? TASK_COMM_LEN : 7);
10731 +               __entry->tprio  = task != NULL ? task->prio : -1;
10732 +       ),
10733 +
10734 +       TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
10735 +               __entry->cpu, __entry->offset, __entry->ccomm,
10736 +               __entry->cprio, __entry->tcomm, __entry->tprio)
10737 +);
10738 +#endif
10739 +
10740 +#endif /* _TRACE_HIST_H */
10741 +
10742 +/* This part must be outside protection */
10743 +#include <trace/define_trace.h>
10744 diff --git a/include/trace/events/latency_hist.h b/include/trace/events/latency_hist.h
10745 new file mode 100644
10746 index 000000000000..d3f2fbd560b1
10747 --- /dev/null
10748 +++ b/include/trace/events/latency_hist.h
10749 @@ -0,0 +1,29 @@
10750 +#ifndef _LATENCY_HIST_H
10751 +#define _LATENCY_HIST_H
10752 +
10753 +enum hist_action {
10754 +       IRQS_ON,
10755 +       PREEMPT_ON,
10756 +       TRACE_STOP,
10757 +       IRQS_OFF,
10758 +       PREEMPT_OFF,
10759 +       TRACE_START,
10760 +};
10761 +
10762 +static char *actions[] = {
10763 +       "IRQS_ON",
10764 +       "PREEMPT_ON",
10765 +       "TRACE_STOP",
10766 +       "IRQS_OFF",
10767 +       "PREEMPT_OFF",
10768 +       "TRACE_START",
10769 +};
10770 +
10771 +static inline char *getaction(int action)
10772 +{
10773 +       if (action >= 0 && action < sizeof(actions)/sizeof(actions[0]))
10774 +               return actions[action];
10775 +       return "unknown";
10776 +}
10777 +
10778 +#endif /* _LATENCY_HIST_H */
10779 diff --git a/init/Kconfig b/init/Kconfig
10780 index 34407f15e6d3..2ce33a32e65d 100644
10781 --- a/init/Kconfig
10782 +++ b/init/Kconfig
10783 @@ -506,7 +506,7 @@ config TINY_RCU
10784  
10785  config RCU_EXPERT
10786         bool "Make expert-level adjustments to RCU configuration"
10787 -       default n
10788 +       default y if PREEMPT_RT_FULL
10789         help
10790           This option needs to be enabled if you wish to make
10791           expert-level adjustments to RCU configuration.  By default,
10792 @@ -623,7 +623,7 @@ config RCU_FANOUT_LEAF
10793  
10794  config RCU_FAST_NO_HZ
10795         bool "Accelerate last non-dyntick-idle CPU's grace periods"
10796 -       depends on NO_HZ_COMMON && SMP && RCU_EXPERT
10797 +       depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL
10798         default n
10799         help
10800           This option permits CPUs to enter dynticks-idle state even if
10801 @@ -650,7 +650,7 @@ config TREE_RCU_TRACE
10802  config RCU_BOOST
10803         bool "Enable RCU priority boosting"
10804         depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
10805 -       default n
10806 +       default y if PREEMPT_RT_FULL
10807         help
10808           This option boosts the priority of preempted RCU readers that
10809           block the current preemptible RCU grace period for too long.
10810 @@ -781,19 +781,6 @@ config RCU_NOCB_CPU_ALL
10811  
10812  endchoice
10813  
10814 -config RCU_EXPEDITE_BOOT
10815 -       bool
10816 -       default n
10817 -       help
10818 -         This option enables expedited grace periods at boot time,
10819 -         as if rcu_expedite_gp() had been invoked early in boot.
10820 -         The corresponding rcu_unexpedite_gp() is invoked from
10821 -         rcu_end_inkernel_boot(), which is intended to be invoked
10822 -         at the end of the kernel-only boot sequence, just before
10823 -         init is exec'ed.
10824 -
10825 -         Accept the default if unsure.
10826 -
10827  endmenu # "RCU Subsystem"
10828  
10829  config BUILD_BIN2C
10830 @@ -1064,6 +1051,7 @@ config CFS_BANDWIDTH
10831  config RT_GROUP_SCHED
10832         bool "Group scheduling for SCHED_RR/FIFO"
10833         depends on CGROUP_SCHED
10834 +       depends on !PREEMPT_RT_FULL
10835         default n
10836         help
10837           This feature lets you explicitly allocate real CPU bandwidth
10838 @@ -1772,6 +1760,7 @@ choice
10839  
10840  config SLAB
10841         bool "SLAB"
10842 +       depends on !PREEMPT_RT_FULL
10843         select HAVE_HARDENED_USERCOPY_ALLOCATOR
10844         help
10845           The regular slab allocator that is established and known to work
10846 @@ -1792,6 +1781,7 @@ config SLUB
10847  config SLOB
10848         depends on EXPERT
10849         bool "SLOB (Simple Allocator)"
10850 +       depends on !PREEMPT_RT_FULL
10851         help
10852            SLOB replaces the stock allocator with a drastically simpler
10853            allocator. SLOB is generally more space efficient but
10854 @@ -1810,7 +1800,7 @@ config SLAB_FREELIST_RANDOM
10855  
10856  config SLUB_CPU_PARTIAL
10857         default y
10858 -       depends on SLUB && SMP
10859 +       depends on SLUB && SMP && !PREEMPT_RT_FULL
10860         bool "SLUB per cpu partial cache"
10861         help
10862           Per cpu partial caches accelerate object allocation and freeing
10863 diff --git a/init/Makefile b/init/Makefile
10864 index c4fb45525d08..821190dfaa75 100644
10865 --- a/init/Makefile
10866 +++ b/init/Makefile
10867 @@ -35,4 +35,4 @@ $(obj)/version.o: include/generated/compile.h
10868  include/generated/compile.h: FORCE
10869         @$($(quiet)chk_compile.h)
10870         $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
10871 -       "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
10872 +       "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
10873 diff --git a/init/main.c b/init/main.c
10874 index 2858be732f6d..3c97c3c91d88 100644
10875 --- a/init/main.c
10876 +++ b/init/main.c
10877 @@ -507,6 +507,7 @@ asmlinkage __visible void __init start_kernel(void)
10878         setup_command_line(command_line);
10879         setup_nr_cpu_ids();
10880         setup_per_cpu_areas();
10881 +       softirq_early_init();
10882         boot_cpu_state_init();
10883         smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
10884  
10885 diff --git a/ipc/sem.c b/ipc/sem.c
10886 index 10b94bc59d4a..b8360eaacc7a 100644
10887 --- a/ipc/sem.c
10888 +++ b/ipc/sem.c
10889 @@ -712,6 +712,13 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
10890  static void wake_up_sem_queue_prepare(struct list_head *pt,
10891                                 struct sem_queue *q, int error)
10892  {
10893 +#ifdef CONFIG_PREEMPT_RT_BASE
10894 +       struct task_struct *p = q->sleeper;
10895 +       get_task_struct(p);
10896 +       q->status = error;
10897 +       wake_up_process(p);
10898 +       put_task_struct(p);
10899 +#else
10900         if (list_empty(pt)) {
10901                 /*
10902                  * Hold preempt off so that we don't get preempted and have the
10903 @@ -723,6 +730,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
10904         q->pid = error;
10905  
10906         list_add_tail(&q->list, pt);
10907 +#endif
10908  }
10909  
10910  /**
10911 @@ -736,6 +744,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
10912   */
10913  static void wake_up_sem_queue_do(struct list_head *pt)
10914  {
10915 +#ifndef CONFIG_PREEMPT_RT_BASE
10916         struct sem_queue *q, *t;
10917         int did_something;
10918  
10919 @@ -748,6 +757,7 @@ static void wake_up_sem_queue_do(struct list_head *pt)
10920         }
10921         if (did_something)
10922                 preempt_enable();
10923 +#endif
10924  }
10925  
10926  static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
10927 diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
10928 index ebdb0043203a..b9e6aa7e5aa6 100644
10929 --- a/kernel/Kconfig.locks
10930 +++ b/kernel/Kconfig.locks
10931 @@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW
10932  
10933  config MUTEX_SPIN_ON_OWNER
10934         def_bool y
10935 -       depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
10936 +       depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
10937  
10938  config RWSEM_SPIN_ON_OWNER
10939         def_bool y
10940 -       depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
10941 +       depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
10942  
10943  config LOCK_SPIN_ON_OWNER
10944         def_bool y
10945 diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
10946 index 3f9c97419f02..11dbe26a8279 100644
10947 --- a/kernel/Kconfig.preempt
10948 +++ b/kernel/Kconfig.preempt
10949 @@ -1,3 +1,16 @@
10950 +config PREEMPT
10951 +       bool
10952 +       select PREEMPT_COUNT
10953 +
10954 +config PREEMPT_RT_BASE
10955 +       bool
10956 +       select PREEMPT
10957 +
10958 +config HAVE_PREEMPT_LAZY
10959 +       bool
10960 +
10961 +config PREEMPT_LAZY
10962 +       def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
10963  
10964  choice
10965         prompt "Preemption Model"
10966 @@ -33,9 +46,9 @@ config PREEMPT_VOLUNTARY
10967  
10968           Select this if you are building a kernel for a desktop system.
10969  
10970 -config PREEMPT
10971 +config PREEMPT__LL
10972         bool "Preemptible Kernel (Low-Latency Desktop)"
10973 -       select PREEMPT_COUNT
10974 +       select PREEMPT
10975         select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
10976         help
10977           This option reduces the latency of the kernel by making
10978 @@ -52,6 +65,22 @@ config PREEMPT
10979           embedded system with latency requirements in the milliseconds
10980           range.
10981  
10982 +config PREEMPT_RTB
10983 +       bool "Preemptible Kernel (Basic RT)"
10984 +       select PREEMPT_RT_BASE
10985 +       help
10986 +         This option is basically the same as (Low-Latency Desktop) but
10987 +         enables changes which are preliminary for the fully preemptible
10988 +         RT kernel.
10989 +
10990 +config PREEMPT_RT_FULL
10991 +       bool "Fully Preemptible Kernel (RT)"
10992 +       depends on IRQ_FORCED_THREADING
10993 +       select PREEMPT_RT_BASE
10994 +       select PREEMPT_RCU
10995 +       help
10996 +         All and everything: the fully preemptible real-time (RT) kernel.
10997 +
10998  endchoice
10999  
11000  config PREEMPT_COUNT
11001 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
11002 index 4e2f3de0e40b..6401eb5fe140 100644
11003 --- a/kernel/cgroup.c
11004 +++ b/kernel/cgroup.c
11005 @@ -5040,10 +5040,10 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
11006         queue_work(cgroup_destroy_wq, &css->destroy_work);
11007  }
11008  
11009 -static void css_release_work_fn(struct work_struct *work)
11010 +static void css_release_work_fn(struct swork_event *sev)
11011  {
11012         struct cgroup_subsys_state *css =
11013 -               container_of(work, struct cgroup_subsys_state, destroy_work);
11014 +               container_of(sev, struct cgroup_subsys_state, destroy_swork);
11015         struct cgroup_subsys *ss = css->ss;
11016         struct cgroup *cgrp = css->cgroup;
11017  
11018 @@ -5086,8 +5086,8 @@ static void css_release(struct percpu_ref *ref)
11019         struct cgroup_subsys_state *css =
11020                 container_of(ref, struct cgroup_subsys_state, refcnt);
11021  
11022 -       INIT_WORK(&css->destroy_work, css_release_work_fn);
11023 -       queue_work(cgroup_destroy_wq, &css->destroy_work);
11024 +       INIT_SWORK(&css->destroy_swork, css_release_work_fn);
11025 +       swork_queue(&css->destroy_swork);
11026  }
11027  
11028  static void init_and_link_css(struct cgroup_subsys_state *css,
11029 @@ -5739,6 +5739,7 @@ static int __init cgroup_wq_init(void)
11030          */
11031         cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
11032         BUG_ON(!cgroup_destroy_wq);
11033 +       BUG_ON(swork_get());
11034  
11035         /*
11036          * Used to destroy pidlists and separate to serve as flush domain.
11037 diff --git a/kernel/cpu.c b/kernel/cpu.c
11038 index 217fd2e7f435..69444f1bc924 100644
11039 --- a/kernel/cpu.c
11040 +++ b/kernel/cpu.c
11041 @@ -239,6 +239,289 @@ static struct {
11042  #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
11043  #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
11044  
11045 +/**
11046 + * hotplug_pcp - per cpu hotplug descriptor
11047 + * @unplug:    set when pin_current_cpu() needs to sync tasks
11048 + * @sync_tsk:  the task that waits for tasks to finish pinned sections
11049 + * @refcount:  counter of tasks in pinned sections
11050 + * @grab_lock: set when the tasks entering pinned sections should wait
11051 + * @synced:    notifier for @sync_tsk to tell cpu_down it's finished
11052 + * @mutex:     the mutex to make tasks wait (used when @grab_lock is true)
11053 + * @mutex_init:        zero if the mutex hasn't been initialized yet.
11054 + *
11055 + * Although @unplug and @sync_tsk may point to the same task, the @unplug
11056 + * is used as a flag and still exists after @sync_tsk has exited and
11057 + * @sync_tsk set to NULL.
11058 + */
11059 +struct hotplug_pcp {
11060 +       struct task_struct *unplug;
11061 +       struct task_struct *sync_tsk;
11062 +       int refcount;
11063 +       int grab_lock;
11064 +       struct completion synced;
11065 +       struct completion unplug_wait;
11066 +#ifdef CONFIG_PREEMPT_RT_FULL
11067 +       /*
11068 +        * Note, on PREEMPT_RT, the hotplug lock must save the state of
11069 +        * the task, otherwise the mutex will cause the task to fail
11070 +        * to sleep when required. (Because it's called from migrate_disable())
11071 +        *
11072 +        * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
11073 +        * state.
11074 +        */
11075 +       spinlock_t lock;
11076 +#else
11077 +       struct mutex mutex;
11078 +#endif
11079 +       int mutex_init;
11080 +};
11081 +
11082 +#ifdef CONFIG_PREEMPT_RT_FULL
11083 +# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock)
11084 +# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock)
11085 +#else
11086 +# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
11087 +# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
11088 +#endif
11089 +
11090 +static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
11091 +
11092 +/**
11093 + * pin_current_cpu - Prevent the current cpu from being unplugged
11094 + *
11095 + * Lightweight version of get_online_cpus() to prevent cpu from being
11096 + * unplugged when code runs in a migration disabled region.
11097 + *
11098 + * Must be called with preemption disabled (preempt_count = 1)!
11099 + */
11100 +void pin_current_cpu(void)
11101 +{
11102 +       struct hotplug_pcp *hp;
11103 +       int force = 0;
11104 +
11105 +retry:
11106 +       hp = this_cpu_ptr(&hotplug_pcp);
11107 +
11108 +       if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
11109 +           hp->unplug == current) {
11110 +               hp->refcount++;
11111 +               return;
11112 +       }
11113 +       if (hp->grab_lock) {
11114 +               preempt_enable();
11115 +               hotplug_lock(hp);
11116 +               hotplug_unlock(hp);
11117 +       } else {
11118 +               preempt_enable();
11119 +               /*
11120 +                * Try to push this task off of this CPU.
11121 +                */
11122 +               if (!migrate_me()) {
11123 +                       preempt_disable();
11124 +                       hp = this_cpu_ptr(&hotplug_pcp);
11125 +                       if (!hp->grab_lock) {
11126 +                               /*
11127 +                                * Just let it continue, it's already
11128 +                                * pinned or about to sleep.
11129 +                                */
11130 +                               force = 1;
11131 +                               goto retry;
11132 +                       }
11133 +                       preempt_enable();
11134 +               }
11135 +       }
11136 +       preempt_disable();
11137 +       goto retry;
11138 +}
11139 +
11140 +/**
11141 + * unpin_current_cpu - Allow unplug of current cpu
11142 + *
11143 + * Must be called with preemption or interrupts disabled!
11144 + */
11145 +void unpin_current_cpu(void)
11146 +{
11147 +       struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);
11148 +
11149 +       WARN_ON(hp->refcount <= 0);
11150 +
11151 +       /* This is safe. sync_unplug_thread is pinned to this cpu */
11152 +       if (!--hp->refcount && hp->unplug && hp->unplug != current)
11153 +               wake_up_process(hp->unplug);
11154 +}
11155 +
11156 +static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
11157 +{
11158 +       set_current_state(TASK_UNINTERRUPTIBLE);
11159 +       while (hp->refcount) {
11160 +               schedule_preempt_disabled();
11161 +               set_current_state(TASK_UNINTERRUPTIBLE);
11162 +       }
11163 +}
11164 +
11165 +static int sync_unplug_thread(void *data)
11166 +{
11167 +       struct hotplug_pcp *hp = data;
11168 +
11169 +       wait_for_completion(&hp->unplug_wait);
11170 +       preempt_disable();
11171 +       hp->unplug = current;
11172 +       wait_for_pinned_cpus(hp);
11173 +
11174 +       /*
11175 +        * This thread will synchronize the cpu_down() with threads
11176 +        * that have pinned the CPU. When the pinned CPU count reaches
11177 +        * zero, we inform the cpu_down code to continue to the next step.
11178 +        */
11179 +       set_current_state(TASK_UNINTERRUPTIBLE);
11180 +       preempt_enable();
11181 +       complete(&hp->synced);
11182 +
11183 +       /*
11184 +        * If everything succeeds, the next step will need tasks to wait
11185 +        * until the CPU is offline before continuing. To do this, grab_lock
11186 +        * is set and tasks going into pin_current_cpu() will block on the
11187 +        * mutex. But we still need to wait for those that are already in
11188 +        * pinned CPU sections. If cpu_down() fails, kthread_should_stop()
11189 +        * will kick this thread out.
11190 +        */
11191 +       while (!hp->grab_lock && !kthread_should_stop()) {
11192 +               schedule();
11193 +               set_current_state(TASK_UNINTERRUPTIBLE);
11194 +       }
11195 +
11196 +       /* Make sure grab_lock is seen before we see a stale completion */
11197 +       smp_mb();
11198 +
11199 +       /*
11200 +        * Now just before cpu_down() enters stop machine, we need to make
11201 +        * sure all tasks that are in pinned CPU sections are out, and new
11202 +        * tasks will now grab the lock, keeping them from entering pinned
11203 +        * CPU sections.
11204 +        */
11205 +       if (!kthread_should_stop()) {
11206 +               preempt_disable();
11207 +               wait_for_pinned_cpus(hp);
11208 +               preempt_enable();
11209 +               complete(&hp->synced);
11210 +       }
11211 +
11212 +       set_current_state(TASK_UNINTERRUPTIBLE);
11213 +       while (!kthread_should_stop()) {
11214 +               schedule();
11215 +               set_current_state(TASK_UNINTERRUPTIBLE);
11216 +       }
11217 +       set_current_state(TASK_RUNNING);
11218 +
11219 +       /*
11220 +        * Force this thread off this CPU as it's going down and
11221 +        * we don't want any more work on this CPU.
11222 +        */
11223 +       current->flags &= ~PF_NO_SETAFFINITY;
11224 +       set_cpus_allowed_ptr(current, cpu_present_mask);
11225 +       migrate_me();
11226 +       return 0;
11227 +}
11228 +
11229 +static void __cpu_unplug_sync(struct hotplug_pcp *hp)
11230 +{
11231 +       wake_up_process(hp->sync_tsk);
11232 +       wait_for_completion(&hp->synced);
11233 +}
11234 +
11235 +static void __cpu_unplug_wait(unsigned int cpu)
11236 +{
11237 +       struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
11238 +
11239 +       complete(&hp->unplug_wait);
11240 +       wait_for_completion(&hp->synced);
11241 +}
11242 +
11243 +/*
11244 + * Start the sync_unplug_thread on the target cpu and wait for it to
11245 + * complete.
11246 + */
11247 +static int cpu_unplug_begin(unsigned int cpu)
11248 +{
11249 +       struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
11250 +       int err;
11251 +
11252 +       /* Protected by cpu_hotplug.lock */
11253 +       if (!hp->mutex_init) {
11254 +#ifdef CONFIG_PREEMPT_RT_FULL
11255 +               spin_lock_init(&hp->lock);
11256 +#else
11257 +               mutex_init(&hp->mutex);
11258 +#endif
11259 +               hp->mutex_init = 1;
11260 +       }
11261 +
11262 +       /* Inform the scheduler to migrate tasks off this CPU */
11263 +       tell_sched_cpu_down_begin(cpu);
11264 +
11265 +       init_completion(&hp->synced);
11266 +       init_completion(&hp->unplug_wait);
11267 +
11268 +       hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
11269 +       if (IS_ERR(hp->sync_tsk)) {
11270 +               err = PTR_ERR(hp->sync_tsk);
11271 +               hp->sync_tsk = NULL;
11272 +               return err;
11273 +       }
11274 +       kthread_bind(hp->sync_tsk, cpu);
11275 +
11276 +       /*
11277 +        * Wait for tasks to get out of the pinned sections;
11278 +        * it's still OK if new tasks enter. Some CPU notifiers will
11279 +        * wait for tasks that are going to enter these sections and
11280 +        * we must not have them block.
11281 +        */
11282 +       wake_up_process(hp->sync_tsk);
11283 +       return 0;
11284 +}
11285 +
11286 +static void cpu_unplug_sync(unsigned int cpu)
11287 +{
11288 +       struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
11289 +
11290 +       init_completion(&hp->synced);
11291 +       /* The completion needs to be initialized before setting grab_lock */
11292 +       smp_wmb();
11293 +
11294 +       /* Grab the mutex before setting grab_lock */
11295 +       hotplug_lock(hp);
11296 +       hp->grab_lock = 1;
11297 +
11298 +       /*
11299 +        * The CPU notifiers have been completed.
11300 +        * Wait for tasks to get out of pinned CPU sections and have new
11301 +        * tasks block until the CPU is completely down.
11302 +        */
11303 +       __cpu_unplug_sync(hp);
11304 +
11305 +       /* All done with the sync thread */
11306 +       kthread_stop(hp->sync_tsk);
11307 +       hp->sync_tsk = NULL;
11308 +}
11309 +
11310 +static void cpu_unplug_done(unsigned int cpu)
11311 +{
11312 +       struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
11313 +
11314 +       hp->unplug = NULL;
11315 +       /* Let all tasks know cpu unplug is finished before cleaning up */
11316 +       smp_wmb();
11317 +
11318 +       if (hp->sync_tsk)
11319 +               kthread_stop(hp->sync_tsk);
11320 +
11321 +       if (hp->grab_lock) {
11322 +               hotplug_unlock(hp);
11323 +               /* protected by cpu_hotplug.lock */
11324 +               hp->grab_lock = 0;
11325 +       }
11326 +       tell_sched_cpu_down_done(cpu);
11327 +}
11328  
11329  void get_online_cpus(void)
11330  {
11331 @@ -789,10 +1072,14 @@ static int takedown_cpu(unsigned int cpu)
11332         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
11333         int err;
11334  
11335 +       __cpu_unplug_wait(cpu);
11336         /* Park the smpboot threads */
11337         kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
11338         smpboot_park_threads(cpu);
11339  
11340 +       /* Notifiers are done. Don't let any more tasks pin this CPU. */
11341 +       cpu_unplug_sync(cpu);
11342 +
11343         /*
11344          * Prevent irq alloc/free while the dying cpu reorganizes the
11345          * interrupt affinities.
11346 @@ -877,6 +1164,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
11347         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
11348         int prev_state, ret = 0;
11349         bool hasdied = false;
11350 +       int mycpu;
11351 +       cpumask_var_t cpumask;
11352 +       cpumask_var_t cpumask_org;
11353  
11354         if (num_online_cpus() == 1)
11355                 return -EBUSY;
11356 @@ -884,7 +1174,34 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
11357         if (!cpu_present(cpu))
11358                 return -EINVAL;
11359  
11360 +       /* Move the downtaker off the unplug cpu */
11361 +       if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
11362 +               return -ENOMEM;
11363 +       if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL))  {
11364 +               free_cpumask_var(cpumask);
11365 +               return -ENOMEM;
11366 +       }
11367 +
11368 +       cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
11369 +       cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
11370 +       set_cpus_allowed_ptr(current, cpumask);
11371 +       free_cpumask_var(cpumask);
11372 +       migrate_disable();
11373 +       mycpu = smp_processor_id();
11374 +       if (mycpu == cpu) {
11375 +               printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
11376 +               migrate_enable();
11377 +               ret = -EBUSY;
11378 +               goto restore_cpus;
11379 +       }
11380 +
11381 +       migrate_enable();
11382         cpu_hotplug_begin();
11383 +       ret = cpu_unplug_begin(cpu);
11384 +       if (ret) {
11385 +               printk("cpu_unplug_begin(%d) failed\n", cpu);
11386 +               goto out_cancel;
11387 +       }
11388  
11389         cpuhp_tasks_frozen = tasks_frozen;
11390  
11391 @@ -923,10 +1240,15 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
11392  
11393         hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
11394  out:
11395 +       cpu_unplug_done(cpu);
11396 +out_cancel:
11397         cpu_hotplug_done();
11398         /* This post dead nonsense must die */
11399         if (!ret && hasdied)
11400                 cpu_notify_nofail(CPU_POST_DEAD, cpu);
11401 +restore_cpus:
11402 +       set_cpus_allowed_ptr(current, cpumask_org);
11403 +       free_cpumask_var(cpumask_org);
11404         return ret;
11405  }
11406  
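
The new kernel/cpu.c code above gives migrate-disabled tasks a lightweight way to hold off cpu_down(). As the hotplug_pcp comment notes, pin_current_cpu() is called from migrate_disable(); a rough sketch of that caller pattern, based only on the requirements stated in the comments (not copied from the scheduler hunks):

    /* entering a migration-disabled region */
    preempt_disable();        /* pin_current_cpu() requires preempt_count == 1   */
    pin_current_cpu();        /* takes a ref that blocks a concurrent cpu_down() */
    preempt_enable();         /* pin only blocks unplug; staying on this CPU is
                                 migrate_disable()'s own accounting              */

    /* ... per-CPU work ... */

    /* leaving the region */
    preempt_disable();
    unpin_current_cpu();      /* last ref wakes the sync_unplug/<cpu> thread     */
    preempt_enable();

If cpu_down() is already waiting (hp->grab_lock set), pin_current_cpu() instead blocks on hp->lock/hp->mutex or pushes the task to another CPU via migrate_me(), exactly as in the retry loop above.
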
11407 diff --git a/kernel/cpuset.c b/kernel/cpuset.c
11408 index 29f815d2ef7e..341b17f24f95 100644
11409 --- a/kernel/cpuset.c
11410 +++ b/kernel/cpuset.c
11411 @@ -284,7 +284,7 @@ static struct cpuset top_cpuset = {
11412   */
11413  
11414  static DEFINE_MUTEX(cpuset_mutex);
11415 -static DEFINE_SPINLOCK(callback_lock);
11416 +static DEFINE_RAW_SPINLOCK(callback_lock);
11417  
11418  static struct workqueue_struct *cpuset_migrate_mm_wq;
11419  
11420 @@ -907,9 +907,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
11421                         continue;
11422                 rcu_read_unlock();
11423  
11424 -               spin_lock_irq(&callback_lock);
11425 +               raw_spin_lock_irq(&callback_lock);
11426                 cpumask_copy(cp->effective_cpus, new_cpus);
11427 -               spin_unlock_irq(&callback_lock);
11428 +               raw_spin_unlock_irq(&callback_lock);
11429  
11430                 WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
11431                         !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
11432 @@ -974,9 +974,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
11433         if (retval < 0)
11434                 return retval;
11435  
11436 -       spin_lock_irq(&callback_lock);
11437 +       raw_spin_lock_irq(&callback_lock);
11438         cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
11439 -       spin_unlock_irq(&callback_lock);
11440 +       raw_spin_unlock_irq(&callback_lock);
11441  
11442         /* use trialcs->cpus_allowed as a temp variable */
11443         update_cpumasks_hier(cs, trialcs->cpus_allowed);
11444 @@ -1176,9 +1176,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
11445                         continue;
11446                 rcu_read_unlock();
11447  
11448 -               spin_lock_irq(&callback_lock);
11449 +               raw_spin_lock_irq(&callback_lock);
11450                 cp->effective_mems = *new_mems;
11451 -               spin_unlock_irq(&callback_lock);
11452 +               raw_spin_unlock_irq(&callback_lock);
11453  
11454                 WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
11455                         !nodes_equal(cp->mems_allowed, cp->effective_mems));
11456 @@ -1246,9 +1246,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
11457         if (retval < 0)
11458                 goto done;
11459  
11460 -       spin_lock_irq(&callback_lock);
11461 +       raw_spin_lock_irq(&callback_lock);
11462         cs->mems_allowed = trialcs->mems_allowed;
11463 -       spin_unlock_irq(&callback_lock);
11464 +       raw_spin_unlock_irq(&callback_lock);
11465  
11466         /* use trialcs->mems_allowed as a temp variable */
11467         update_nodemasks_hier(cs, &trialcs->mems_allowed);
11468 @@ -1339,9 +1339,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
11469         spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
11470                         || (is_spread_page(cs) != is_spread_page(trialcs)));
11471  
11472 -       spin_lock_irq(&callback_lock);
11473 +       raw_spin_lock_irq(&callback_lock);
11474         cs->flags = trialcs->flags;
11475 -       spin_unlock_irq(&callback_lock);
11476 +       raw_spin_unlock_irq(&callback_lock);
11477  
11478         if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
11479                 rebuild_sched_domains_locked();
11480 @@ -1756,7 +1756,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
11481         cpuset_filetype_t type = seq_cft(sf)->private;
11482         int ret = 0;
11483  
11484 -       spin_lock_irq(&callback_lock);
11485 +       raw_spin_lock_irq(&callback_lock);
11486  
11487         switch (type) {
11488         case FILE_CPULIST:
11489 @@ -1775,7 +1775,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
11490                 ret = -EINVAL;
11491         }
11492  
11493 -       spin_unlock_irq(&callback_lock);
11494 +       raw_spin_unlock_irq(&callback_lock);
11495         return ret;
11496  }
11497  
11498 @@ -1989,12 +1989,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
11499  
11500         cpuset_inc();
11501  
11502 -       spin_lock_irq(&callback_lock);
11503 +       raw_spin_lock_irq(&callback_lock);
11504         if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
11505                 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
11506                 cs->effective_mems = parent->effective_mems;
11507         }
11508 -       spin_unlock_irq(&callback_lock);
11509 +       raw_spin_unlock_irq(&callback_lock);
11510  
11511         if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
11512                 goto out_unlock;
11513 @@ -2021,12 +2021,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
11514         }
11515         rcu_read_unlock();
11516  
11517 -       spin_lock_irq(&callback_lock);
11518 +       raw_spin_lock_irq(&callback_lock);
11519         cs->mems_allowed = parent->mems_allowed;
11520         cs->effective_mems = parent->mems_allowed;
11521         cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
11522         cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
11523 -       spin_unlock_irq(&callback_lock);
11524 +       raw_spin_unlock_irq(&callback_lock);
11525  out_unlock:
11526         mutex_unlock(&cpuset_mutex);
11527         return 0;
11528 @@ -2065,7 +2065,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
11529  static void cpuset_bind(struct cgroup_subsys_state *root_css)
11530  {
11531         mutex_lock(&cpuset_mutex);
11532 -       spin_lock_irq(&callback_lock);
11533 +       raw_spin_lock_irq(&callback_lock);
11534  
11535         if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
11536                 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
11537 @@ -2076,7 +2076,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
11538                 top_cpuset.mems_allowed = top_cpuset.effective_mems;
11539         }
11540  
11541 -       spin_unlock_irq(&callback_lock);
11542 +       raw_spin_unlock_irq(&callback_lock);
11543         mutex_unlock(&cpuset_mutex);
11544  }
11545  
11546 @@ -2177,12 +2177,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
11547  {
11548         bool is_empty;
11549  
11550 -       spin_lock_irq(&callback_lock);
11551 +       raw_spin_lock_irq(&callback_lock);
11552         cpumask_copy(cs->cpus_allowed, new_cpus);
11553         cpumask_copy(cs->effective_cpus, new_cpus);
11554         cs->mems_allowed = *new_mems;
11555         cs->effective_mems = *new_mems;
11556 -       spin_unlock_irq(&callback_lock);
11557 +       raw_spin_unlock_irq(&callback_lock);
11558  
11559         /*
11560          * Don't call update_tasks_cpumask() if the cpuset becomes empty,
11561 @@ -2219,10 +2219,10 @@ hotplug_update_tasks(struct cpuset *cs,
11562         if (nodes_empty(*new_mems))
11563                 *new_mems = parent_cs(cs)->effective_mems;
11564  
11565 -       spin_lock_irq(&callback_lock);
11566 +       raw_spin_lock_irq(&callback_lock);
11567         cpumask_copy(cs->effective_cpus, new_cpus);
11568         cs->effective_mems = *new_mems;
11569 -       spin_unlock_irq(&callback_lock);
11570 +       raw_spin_unlock_irq(&callback_lock);
11571  
11572         if (cpus_updated)
11573                 update_tasks_cpumask(cs);
11574 @@ -2308,21 +2308,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
11575  
11576         /* synchronize cpus_allowed to cpu_active_mask */
11577         if (cpus_updated) {
11578 -               spin_lock_irq(&callback_lock);
11579 +               raw_spin_lock_irq(&callback_lock);
11580                 if (!on_dfl)
11581                         cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
11582                 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
11583 -               spin_unlock_irq(&callback_lock);
11584 +               raw_spin_unlock_irq(&callback_lock);
11585                 /* we don't mess with cpumasks of tasks in top_cpuset */
11586         }
11587  
11588         /* synchronize mems_allowed to N_MEMORY */
11589         if (mems_updated) {
11590 -               spin_lock_irq(&callback_lock);
11591 +               raw_spin_lock_irq(&callback_lock);
11592                 if (!on_dfl)
11593                         top_cpuset.mems_allowed = new_mems;
11594                 top_cpuset.effective_mems = new_mems;
11595 -               spin_unlock_irq(&callback_lock);
11596 +               raw_spin_unlock_irq(&callback_lock);
11597                 update_tasks_nodemask(&top_cpuset);
11598         }
11599  
11600 @@ -2420,11 +2420,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
11601  {
11602         unsigned long flags;
11603  
11604 -       spin_lock_irqsave(&callback_lock, flags);
11605 +       raw_spin_lock_irqsave(&callback_lock, flags);
11606         rcu_read_lock();
11607         guarantee_online_cpus(task_cs(tsk), pmask);
11608         rcu_read_unlock();
11609 -       spin_unlock_irqrestore(&callback_lock, flags);
11610 +       raw_spin_unlock_irqrestore(&callback_lock, flags);
11611  }
11612  
11613  void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
11614 @@ -2472,11 +2472,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
11615         nodemask_t mask;
11616         unsigned long flags;
11617  
11618 -       spin_lock_irqsave(&callback_lock, flags);
11619 +       raw_spin_lock_irqsave(&callback_lock, flags);
11620         rcu_read_lock();
11621         guarantee_online_mems(task_cs(tsk), &mask);
11622         rcu_read_unlock();
11623 -       spin_unlock_irqrestore(&callback_lock, flags);
11624 +       raw_spin_unlock_irqrestore(&callback_lock, flags);
11625  
11626         return mask;
11627  }
11628 @@ -2568,14 +2568,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
11629                 return true;
11630  
11631         /* Not hardwall and node outside mems_allowed: scan up cpusets */
11632 -       spin_lock_irqsave(&callback_lock, flags);
11633 +       raw_spin_lock_irqsave(&callback_lock, flags);
11634  
11635         rcu_read_lock();
11636         cs = nearest_hardwall_ancestor(task_cs(current));
11637         allowed = node_isset(node, cs->mems_allowed);
11638         rcu_read_unlock();
11639  
11640 -       spin_unlock_irqrestore(&callback_lock, flags);
11641 +       raw_spin_unlock_irqrestore(&callback_lock, flags);
11642         return allowed;
11643  }
11644  
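
The cpuset.c conversion above turns callback_lock into a raw_spinlock_t. On PREEMPT_RT a plain spinlock_t becomes a sleeping rtmutex, which is presumably why a lock that is taken from non-sleepable paths (e.g. cpuset_cpus_allowed() with scheduler locks held) has to remain a real spinning lock. A small sketch of the distinction (illustrative code, not from the patch):

    static DEFINE_RAW_SPINLOCK(hard_lock);   /* always spins, usable in atomic context */
    static DEFINE_SPINLOCK(soft_lock);       /* rtmutex-backed on RT, may sleep        */

    static void example(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&hard_lock, flags);
            /* genuinely atomic: no sleeping locks may be taken in here on RT */
            raw_spin_unlock_irqrestore(&hard_lock, flags);

            spin_lock(&soft_lock);
            /* on RT this section is preemptible and must not nest inside a
             * raw lock or an irq/preempt-disabled region */
            spin_unlock(&soft_lock);
    }
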
11645 diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
11646 index fc1ef736253c..83c666537a7a 100644
11647 --- a/kernel/debug/kdb/kdb_io.c
11648 +++ b/kernel/debug/kdb/kdb_io.c
11649 @@ -554,7 +554,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
11650         int linecount;
11651         int colcount;
11652         int logging, saved_loglevel = 0;
11653 -       int saved_trap_printk;
11654         int got_printf_lock = 0;
11655         int retlen = 0;
11656         int fnd, len;
11657 @@ -565,8 +564,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
11658         unsigned long uninitialized_var(flags);
11659  
11660         preempt_disable();
11661 -       saved_trap_printk = kdb_trap_printk;
11662 -       kdb_trap_printk = 0;
11663  
11664         /* Serialize kdb_printf if multiple cpus try to write at once.
11665          * But if any cpu goes recursive in kdb, just print the output,
11666 @@ -855,7 +852,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
11667         } else {
11668                 __release(kdb_printf_lock);
11669         }
11670 -       kdb_trap_printk = saved_trap_printk;
11671         preempt_enable();
11672         return retlen;
11673  }
11674 @@ -865,9 +861,11 @@ int kdb_printf(const char *fmt, ...)
11675         va_list ap;
11676         int r;
11677  
11678 +       kdb_trap_printk++;
11679         va_start(ap, fmt);
11680         r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
11681         va_end(ap);
11682 +       kdb_trap_printk--;
11683  
11684         return r;
11685  }
11686 diff --git a/kernel/events/core.c b/kernel/events/core.c
11687 index 4b3323151a2f..e89a1a4d59cd 100644
11688 --- a/kernel/events/core.c
11689 +++ b/kernel/events/core.c
11690 @@ -1050,6 +1050,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
11691         raw_spin_lock_init(&cpuctx->hrtimer_lock);
11692         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
11693         timer->function = perf_mux_hrtimer_handler;
11694 +       timer->irqsafe = 1;
11695  }
11696  
11697  static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
11698 @@ -8363,6 +8364,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
11699  
11700         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
11701         hwc->hrtimer.function = perf_swevent_hrtimer;
11702 +       hwc->hrtimer.irqsafe = 1;
11703  
11704         /*
11705          * Since hrtimers have a fixed rate, we can do a static freq->period
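
Both perf hrtimers above are flagged irqsafe. The irqsafe field is introduced by the hrtimer changes elsewhere in this series: on RT, unflagged hrtimer callbacks are deferred to the softirq thread, while irqsafe = 1 keeps a callback in hard interrupt context. A hedged usage sketch with an invented timer (only the field name comes from the hunks above):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer sample_timer;

    static enum hrtimer_restart sample_timer_fn(struct hrtimer *t)
    {
            /* Runs in hard irq context even on RT: keep it short and take
             * only raw locks here. */
            return HRTIMER_NORESTART;
    }

    static void sample_timer_start(void)
    {
            hrtimer_init(&sample_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            sample_timer.function = sample_timer_fn;
            sample_timer.irqsafe = 1;       /* RT-patch field, as set above */
            hrtimer_start(&sample_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
    }
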
11706 diff --git a/kernel/exit.c b/kernel/exit.c
11707 index 3076f3089919..fb2ebcf3ca7c 100644
11708 --- a/kernel/exit.c
11709 +++ b/kernel/exit.c
11710 @@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk)
11711          * Do this under ->siglock, we can race with another thread
11712          * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
11713          */
11714 -       flush_sigqueue(&tsk->pending);
11715 +       flush_task_sigqueue(tsk);
11716         tsk->sighand = NULL;
11717         spin_unlock(&sighand->siglock);
11718  
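
__exit_signal() now calls flush_task_sigqueue() instead of flushing only the pending queue. The helper itself lives in kernel/signal.c elsewhere in this series; a sketch of its assumed shape, based on the per-task sigqueue_cache pointer that the fork.c hunk below initializes:

    static void flush_task_sigqueue(struct task_struct *tsk)
    {
            struct sigqueue *q = tsk->sigqueue_cache;

            flush_sigqueue(&tsk->pending);

            /* RT keeps one cached sigqueue per task; release it as well. */
            if (q)
                    kmem_cache_free(sigqueue_cachep, q);
    }
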
11719 diff --git a/kernel/fork.c b/kernel/fork.c
11720 index ba8a01564985..47784f8aed37 100644
11721 --- a/kernel/fork.c
11722 +++ b/kernel/fork.c
11723 @@ -76,6 +76,7 @@
11724  #include <linux/compiler.h>
11725  #include <linux/sysctl.h>
11726  #include <linux/kcov.h>
11727 +#include <linux/kprobes.h>
11728  
11729  #include <asm/pgtable.h>
11730  #include <asm/pgalloc.h>
11731 @@ -376,13 +377,24 @@ static inline void put_signal_struct(struct signal_struct *sig)
11732         if (atomic_dec_and_test(&sig->sigcnt))
11733                 free_signal_struct(sig);
11734  }
11735 -
11736 +#ifdef CONFIG_PREEMPT_RT_BASE
11737 +static
11738 +#endif
11739  void __put_task_struct(struct task_struct *tsk)
11740  {
11741         WARN_ON(!tsk->exit_state);
11742         WARN_ON(atomic_read(&tsk->usage));
11743         WARN_ON(tsk == current);
11744  
11745 +       /*
11746 +        * Remove function-return probe instances associated with this
11747 +        * task and put them back on the free list.
11748 +        */
11749 +       kprobe_flush_task(tsk);
11750 +
11751 +       /* Task is done with its stack. */
11752 +       put_task_stack(tsk);
11753 +
11754         cgroup_free(tsk);
11755         task_numa_free(tsk);
11756         security_task_free(tsk);
11757 @@ -393,7 +405,18 @@ void __put_task_struct(struct task_struct *tsk)
11758         if (!profile_handoff_task(tsk))
11759                 free_task(tsk);
11760  }
11761 +#ifndef CONFIG_PREEMPT_RT_BASE
11762  EXPORT_SYMBOL_GPL(__put_task_struct);
11763 +#else
11764 +void __put_task_struct_cb(struct rcu_head *rhp)
11765 +{
11766 +       struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
11767 +
11768 +       __put_task_struct(tsk);
11769 +
11770 +}
11771 +EXPORT_SYMBOL_GPL(__put_task_struct_cb);
11772 +#endif
11773  
11774  void __init __weak arch_task_cache_init(void) { }
11775  
11776 @@ -852,6 +875,19 @@ void __mmdrop(struct mm_struct *mm)
11777  }
11778  EXPORT_SYMBOL_GPL(__mmdrop);
11779  
11780 +#ifdef CONFIG_PREEMPT_RT_BASE
11781 +/*
11782 + * RCU callback for delayed mm drop. Not strictly rcu, but we don't
11783 + * want another facility to make this work.
11784 + */
11785 +void __mmdrop_delayed(struct rcu_head *rhp)
11786 +{
11787 +       struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
11788 +
11789 +       __mmdrop(mm);
11790 +}
11791 +#endif
11792 +
11793  static inline void __mmput(struct mm_struct *mm)
11794  {
11795         VM_BUG_ON(atomic_read(&mm->mm_users));
11796 @@ -1426,6 +1462,9 @@ static void rt_mutex_init_task(struct task_struct *p)
11797   */
11798  static void posix_cpu_timers_init(struct task_struct *tsk)
11799  {
11800 +#ifdef CONFIG_PREEMPT_RT_BASE
11801 +       tsk->posix_timer_list = NULL;
11802 +#endif
11803         tsk->cputime_expires.prof_exp = 0;
11804         tsk->cputime_expires.virt_exp = 0;
11805         tsk->cputime_expires.sched_exp = 0;
11806 @@ -1552,6 +1591,7 @@ static __latent_entropy struct task_struct *copy_process(
11807         spin_lock_init(&p->alloc_lock);
11808  
11809         init_sigpending(&p->pending);
11810 +       p->sigqueue_cache = NULL;
11811  
11812         p->utime = p->stime = p->gtime = 0;
11813         p->utimescaled = p->stimescaled = 0;
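
On PREEMPT_RT_BASE the fork.c hunks make __put_task_struct() static and export an RCU callback instead, and add __mmdrop_delayed() as the mm counterpart. The matching inline callers live in headers touched elsewhere in this patch; a sketch of their assumed shape (the put_rcu and delayed_drop members are taken from the callbacks above):

    static inline void put_task_struct(struct task_struct *t)
    {
            if (atomic_dec_and_test(&t->usage))
                    call_rcu(&t->put_rcu, __put_task_struct_cb);
    }

    static inline void mmdrop_delayed(struct mm_struct *mm)
    {
            if (atomic_dec_and_test(&mm->mm_count))
                    call_rcu(&mm->delayed_drop, __mmdrop_delayed);
    }

Routing the final put through RCU presumably lets it be issued from contexts that must not take the sleeping locks which freeing may need on RT.
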
11814 diff --git a/kernel/futex.c b/kernel/futex.c
11815 index 38b68c2735c5..6450a8d81667 100644
11816 --- a/kernel/futex.c
11817 +++ b/kernel/futex.c
11818 @@ -904,7 +904,9 @@ void exit_pi_state_list(struct task_struct *curr)
11819                  * task still owns the PI-state:
11820                  */
11821                 if (head->next != next) {
11822 +                       raw_spin_unlock_irq(&curr->pi_lock);
11823                         spin_unlock(&hb->lock);
11824 +                       raw_spin_lock_irq(&curr->pi_lock);
11825                         continue;
11826                 }
11827  
11828 @@ -1299,6 +1301,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
11829         struct futex_pi_state *pi_state = this->pi_state;
11830         u32 uninitialized_var(curval), newval;
11831         WAKE_Q(wake_q);
11832 +       WAKE_Q(wake_sleeper_q);
11833         bool deboost;
11834         int ret = 0;
11835  
11836 @@ -1365,7 +1368,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
11837  
11838         raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
11839  
11840 -       deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
11841 +       deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
11842 +                                       &wake_sleeper_q);
11843  
11844         /*
11845          * First unlock HB so the waiter does not spin on it once he got woken
11846 @@ -1373,8 +1377,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
11847          * deboost first (and lose our higher priority), then the task might get
11848          * scheduled away before the wake up can take place.
11849          */
11850 -       spin_unlock(&hb->lock);
11851 +       deboost |= spin_unlock_no_deboost(&hb->lock);
11852         wake_up_q(&wake_q);
11853 +       wake_up_q_sleeper(&wake_sleeper_q);
11854         if (deboost)
11855                 rt_mutex_adjust_prio(current);
11856  
11857 @@ -1924,6 +1929,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
11858                                 requeue_pi_wake_futex(this, &key2, hb2);
11859                                 drop_count++;
11860                                 continue;
11861 +                       } else if (ret == -EAGAIN) {
11862 +                               /*
11863 +                                * Waiter was woken by timeout or
11864 +                                * signal and has set pi_blocked_on to
11865 +                                * PI_WAKEUP_INPROGRESS before we
11866 +                                * tried to enqueue it on the rtmutex.
11867 +                                */
11868 +                               this->pi_state = NULL;
11869 +                               put_pi_state(pi_state);
11870 +                               continue;
11871                         } else if (ret) {
11872                                 /*
11873                                  * rt_mutex_start_proxy_lock() detected a
11874 @@ -2814,7 +2829,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
11875         struct hrtimer_sleeper timeout, *to = NULL;
11876         struct rt_mutex_waiter rt_waiter;
11877         struct rt_mutex *pi_mutex = NULL;
11878 -       struct futex_hash_bucket *hb;
11879 +       struct futex_hash_bucket *hb, *hb2;
11880         union futex_key key2 = FUTEX_KEY_INIT;
11881         struct futex_q q = futex_q_init;
11882         int res, ret;
11883 @@ -2839,10 +2854,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
11884          * The waiter is allocated on our stack, manipulated by the requeue
11885          * code while we sleep on uaddr.
11886          */
11887 -       debug_rt_mutex_init_waiter(&rt_waiter);
11888 -       RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
11889 -       RB_CLEAR_NODE(&rt_waiter.tree_entry);
11890 -       rt_waiter.task = NULL;
11891 +       rt_mutex_init_waiter(&rt_waiter, false);
11892  
11893         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
11894         if (unlikely(ret != 0))
11895 @@ -2873,20 +2885,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
11896         /* Queue the futex_q, drop the hb lock, wait for wakeup. */
11897         futex_wait_queue_me(hb, &q, to);
11898  
11899 -       spin_lock(&hb->lock);
11900 -       ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
11901 -       spin_unlock(&hb->lock);
11902 -       if (ret)
11903 -               goto out_put_keys;
11904 +       /*
11905 +        * On RT we must avoid races with requeue and trying to block
11906 +        * on two mutexes (hb->lock and uaddr2's rtmutex) by
11907 +        * serializing access to pi_blocked_on with pi_lock.
11908 +        */
11909 +       raw_spin_lock_irq(&current->pi_lock);
11910 +       if (current->pi_blocked_on) {
11911 +               /*
11912 +                * We have been requeued or are in the process of
11913 +                * being requeued.
11914 +                */
11915 +               raw_spin_unlock_irq(&current->pi_lock);
11916 +       } else {
11917 +               /*
11918 +                * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
11919 +                * prevents a concurrent requeue from moving us to the
11920 +                * uaddr2 rtmutex. After that we can safely acquire
11921 +                * (and possibly block on) hb->lock.
11922 +                */
11923 +               current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
11924 +               raw_spin_unlock_irq(&current->pi_lock);
11925 +
11926 +               spin_lock(&hb->lock);
11927 +
11928 +               /*
11929 +                * Clean up pi_blocked_on. We might otherwise leak it
11930 +                * if we succeeded with the hb->lock in the fast
11931 +                * path.
11932 +                */
11933 +               raw_spin_lock_irq(&current->pi_lock);
11934 +               current->pi_blocked_on = NULL;
11935 +               raw_spin_unlock_irq(&current->pi_lock);
11936 +
11937 +               ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
11938 +               spin_unlock(&hb->lock);
11939 +               if (ret)
11940 +                       goto out_put_keys;
11941 +       }
11942  
11943         /*
11944 -        * In order for us to be here, we know our q.key == key2, and since
11945 -        * we took the hb->lock above, we also know that futex_requeue() has
11946 -        * completed and we no longer have to concern ourselves with a wakeup
11947 -        * race with the atomic proxy lock acquisition by the requeue code. The
11948 -        * futex_requeue dropped our key1 reference and incremented our key2
11949 -        * reference count.
11950 +        * In order to be here, we have either been requeued, are in
11951 +        * the process of being requeued, or the requeue code has
11952 +        * successfully acquired uaddr2 on our behalf.  If pi_blocked_on was
11953 +        * non-null above, we may be racing with a requeue.  Do not
11954 +        * rely on q->lock_ptr to be hb2->lock until after blocking on
11955 +        * hb->lock or hb2->lock. The futex_requeue dropped our key1
11956 +        * reference and incremented our key2 reference count.
11957          */
11958 +       hb2 = hash_futex(&key2);
11959  
11960         /* Check if the requeue code acquired the second futex for us. */
11961         if (!q.rt_waiter) {
11962 @@ -2895,14 +2942,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
11963                  * did a lock-steal - fix up the PI-state in that case.
11964                  */
11965                 if (q.pi_state && (q.pi_state->owner != current)) {
11966 -                       spin_lock(q.lock_ptr);
11967 +                       spin_lock(&hb2->lock);
11968 +                       BUG_ON(&hb2->lock != q.lock_ptr);
11969                         ret = fixup_pi_state_owner(uaddr2, &q, current);
11970                         /*
11971                          * Drop the reference to the pi state which
11972                          * the requeue_pi() code acquired for us.
11973                          */
11974                         put_pi_state(q.pi_state);
11975 -                       spin_unlock(q.lock_ptr);
11976 +                       spin_unlock(&hb2->lock);
11977                 }
11978         } else {
11979                 /*
11980 @@ -2915,7 +2963,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
11981                 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
11982                 debug_rt_mutex_free_waiter(&rt_waiter);
11983  
11984 -               spin_lock(q.lock_ptr);
11985 +               spin_lock(&hb2->lock);
11986 +               BUG_ON(&hb2->lock != q.lock_ptr);
11987                 /*
11988                  * Fixup the pi_state owner and possibly acquire the lock if we
11989                  * haven't already.
11990 diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
11991 index d3f24905852c..f87aa8fdcc51 100644
11992 --- a/kernel/irq/handle.c
11993 +++ b/kernel/irq/handle.c
11994 @@ -181,10 +181,16 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
11995  {
11996         irqreturn_t retval;
11997         unsigned int flags = 0;
11998 +       struct pt_regs *regs = get_irq_regs();
11999 +       u64 ip = regs ? instruction_pointer(regs) : 0;
12000  
12001         retval = __handle_irq_event_percpu(desc, &flags);
12002  
12003 -       add_interrupt_randomness(desc->irq_data.irq, flags);
12004 +#ifdef CONFIG_PREEMPT_RT_FULL
12005 +       desc->random_ip = ip;
12006 +#else
12007 +       add_interrupt_randomness(desc->irq_data.irq, flags, ip);
12008 +#endif
12009  
12010         if (!noirqdebug)
12011                 note_interrupt(desc, retval);
12012 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
12013 index 6b669593e7eb..e357bf6c59d5 100644
12014 --- a/kernel/irq/manage.c
12015 +++ b/kernel/irq/manage.c
12016 @@ -22,6 +22,7 @@
12017  #include "internals.h"
12018  
12019  #ifdef CONFIG_IRQ_FORCED_THREADING
12020 +# ifndef CONFIG_PREEMPT_RT_BASE
12021  __read_mostly bool force_irqthreads;
12022  
12023  static int __init setup_forced_irqthreads(char *arg)
12024 @@ -30,6 +31,7 @@ static int __init setup_forced_irqthreads(char *arg)
12025         return 0;
12026  }
12027  early_param("threadirqs", setup_forced_irqthreads);
12028 +# endif
12029  #endif
12030  
12031  static void __synchronize_hardirq(struct irq_desc *desc)
12032 @@ -233,7 +235,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
12033  
12034         if (desc->affinity_notify) {
12035                 kref_get(&desc->affinity_notify->kref);
12036 +
12037 +#ifdef CONFIG_PREEMPT_RT_BASE
12038 +               swork_queue(&desc->affinity_notify->swork);
12039 +#else
12040                 schedule_work(&desc->affinity_notify->work);
12041 +#endif
12042         }
12043         irqd_set(data, IRQD_AFFINITY_SET);
12044  
12045 @@ -271,10 +278,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
12046  }
12047  EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
12048  
12049 -static void irq_affinity_notify(struct work_struct *work)
12050 +static void _irq_affinity_notify(struct irq_affinity_notify *notify)
12051  {
12052 -       struct irq_affinity_notify *notify =
12053 -               container_of(work, struct irq_affinity_notify, work);
12054         struct irq_desc *desc = irq_to_desc(notify->irq);
12055         cpumask_var_t cpumask;
12056         unsigned long flags;
12057 @@ -296,6 +301,35 @@ static void irq_affinity_notify(struct work_struct *work)
12058         kref_put(&notify->kref, notify->release);
12059  }
12060  
12061 +#ifdef CONFIG_PREEMPT_RT_BASE
12062 +static void init_helper_thread(void)
12063 +{
12064 +       static int init_sworker_once;
12065 +
12066 +       if (init_sworker_once)
12067 +               return;
12068 +       if (WARN_ON(swork_get()))
12069 +               return;
12070 +       init_sworker_once = 1;
12071 +}
12072 +
12073 +static void irq_affinity_notify(struct swork_event *swork)
12074 +{
12075 +       struct irq_affinity_notify *notify =
12076 +               container_of(swork, struct irq_affinity_notify, swork);
12077 +       _irq_affinity_notify(notify);
12078 +}
12079 +
12080 +#else
12081 +
12082 +static void irq_affinity_notify(struct work_struct *work)
12083 +{
12084 +       struct irq_affinity_notify *notify =
12085 +               container_of(work, struct irq_affinity_notify, work);
12086 +       _irq_affinity_notify(notify);
12087 +}
12088 +#endif
12089 +
12090  /**
12091   *     irq_set_affinity_notifier - control notification of IRQ affinity changes
12092   *     @irq:           Interrupt for which to enable/disable notification
12093 @@ -324,7 +358,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
12094         if (notify) {
12095                 notify->irq = irq;
12096                 kref_init(&notify->kref);
12097 +#ifdef CONFIG_PREEMPT_RT_BASE
12098 +               INIT_SWORK(&notify->swork, irq_affinity_notify);
12099 +               init_helper_thread();
12100 +#else
12101                 INIT_WORK(&notify->work, irq_affinity_notify);
12102 +#endif
12103         }
12104  
12105         raw_spin_lock_irqsave(&desc->lock, flags);
12106 @@ -879,7 +918,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
12107         local_bh_disable();
12108         ret = action->thread_fn(action->irq, action->dev_id);
12109         irq_finalize_oneshot(desc, action);
12110 -       local_bh_enable();
12111 +       /*
12112 +        * Interrupts which have real time requirements can be set up
12113 +        * to avoid softirq processing in the thread handler. This is
12114 +        * safe as these interrupts do not raise soft interrupts.
12115 +        */
12116 +       if (irq_settings_no_softirq_call(desc))
12117 +               _local_bh_enable();
12118 +       else
12119 +               local_bh_enable();
12120         return ret;
12121  }
12122  
12123 @@ -976,6 +1023,12 @@ static int irq_thread(void *data)
12124                 if (action_ret == IRQ_WAKE_THREAD)
12125                         irq_wake_secondary(desc, action);
12126  
12127 +#ifdef CONFIG_PREEMPT_RT_FULL
12128 +               migrate_disable();
12129 +               add_interrupt_randomness(action->irq, 0,
12130 +                                desc->random_ip ^ (unsigned long) action);
12131 +               migrate_enable();
12132 +#endif
12133                 wake_threads_waitq(desc);
12134         }
12135  
12136 @@ -1336,6 +1389,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
12137                         irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
12138                 }
12139  
12140 +               if (new->flags & IRQF_NO_SOFTIRQ_CALL)
12141 +                       irq_settings_set_no_softirq_call(desc);
12142 +
12143                 /* Set default affinity mask once everything is setup */
12144                 setup_affinity(desc, mask);
12145  
12146 @@ -2061,7 +2117,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
12147   *     This call sets the internal irqchip state of an interrupt,
12148   *     depending on the value of @which.
12149   *
12150 - *     This function should be called with preemption disabled if the
12151 + *     This function should be called with migration disabled if the
12152   *     interrupt controller has per-cpu registers.
12153   */
12154  int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
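
__setup_irq() above wires the new IRQF_NO_SOFTIRQ_CALL flag to irq_settings_set_no_softirq_call(), and irq_forced_thread_fn() then uses _local_bh_enable() so the threaded handler does not pick up pending softirq work. A hedged sketch of how a latency-critical driver would request such an interrupt (driver name, irq number and handler are invented; the flag is the one added above):

    #include <linux/interrupt.h>

    static irqreturn_t fast_dev_irq_thread(int irq, void *dev_id)
    {
            /* Handler work that must not be tail-ended by softirq processing. */
            return IRQ_HANDLED;
    }

    static int fast_dev_setup(int irq, void *dev)
    {
            return request_threaded_irq(irq, NULL, fast_dev_irq_thread,
                                        IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
                                        "fast_dev", dev);
    }

As the comment in the hunk says, this is only safe for handlers that never raise softirqs themselves.
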
12155 diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
12156 index 320579d89091..2df2d4445b1e 100644
12157 --- a/kernel/irq/settings.h
12158 +++ b/kernel/irq/settings.h
12159 @@ -16,6 +16,7 @@ enum {
12160         _IRQ_PER_CPU_DEVID      = IRQ_PER_CPU_DEVID,
12161         _IRQ_IS_POLLED          = IRQ_IS_POLLED,
12162         _IRQ_DISABLE_UNLAZY     = IRQ_DISABLE_UNLAZY,
12163 +       _IRQ_NO_SOFTIRQ_CALL    = IRQ_NO_SOFTIRQ_CALL,
12164         _IRQF_MODIFY_MASK       = IRQF_MODIFY_MASK,
12165  };
12166  
12167 @@ -30,6 +31,7 @@ enum {
12168  #define IRQ_PER_CPU_DEVID      GOT_YOU_MORON
12169  #define IRQ_IS_POLLED          GOT_YOU_MORON
12170  #define IRQ_DISABLE_UNLAZY     GOT_YOU_MORON
12171 +#define IRQ_NO_SOFTIRQ_CALL    GOT_YOU_MORON
12172  #undef IRQF_MODIFY_MASK
12173  #define IRQF_MODIFY_MASK       GOT_YOU_MORON
12174  
12175 @@ -40,6 +42,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
12176         desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
12177  }
12178  
12179 +static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
12180 +{
12181 +       return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
12182 +}
12183 +
12184 +static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
12185 +{
12186 +       desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
12187 +}
12188 +
12189  static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
12190  {
12191         return desc->status_use_accessors & _IRQ_PER_CPU;
12192 diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
12193 index 5707f97a3e6a..73f38dc7a7fb 100644
12194 --- a/kernel/irq/spurious.c
12195 +++ b/kernel/irq/spurious.c
12196 @@ -442,6 +442,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
12197  
12198  static int __init irqfixup_setup(char *str)
12199  {
12200 +#ifdef CONFIG_PREEMPT_RT_BASE
12201 +       pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
12202 +       return 1;
12203 +#endif
12204         irqfixup = 1;
12205         printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
12206         printk(KERN_WARNING "This may impact system performance.\n");
12207 @@ -454,6 +458,10 @@ module_param(irqfixup, int, 0644);
12208  
12209  static int __init irqpoll_setup(char *str)
12210  {
12211 +#ifdef CONFIG_PREEMPT_RT_BASE
12212 +       pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
12213 +       return 1;
12214 +#endif
12215         irqfixup = 2;
12216         printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
12217                                 "enabled\n");
12218 diff --git a/kernel/irq_work.c b/kernel/irq_work.c
12219 index bcf107ce0854..2899ba0d23d1 100644
12220 --- a/kernel/irq_work.c
12221 +++ b/kernel/irq_work.c
12222 @@ -17,6 +17,7 @@
12223  #include <linux/cpu.h>
12224  #include <linux/notifier.h>
12225  #include <linux/smp.h>
12226 +#include <linux/interrupt.h>
12227  #include <asm/processor.h>
12228  
12229  
12230 @@ -65,6 +66,8 @@ void __weak arch_irq_work_raise(void)
12231   */
12232  bool irq_work_queue_on(struct irq_work *work, int cpu)
12233  {
12234 +       struct llist_head *list;
12235 +
12236         /* All work should have been flushed before going offline */
12237         WARN_ON_ONCE(cpu_is_offline(cpu));
12238  
12239 @@ -75,7 +78,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
12240         if (!irq_work_claim(work))
12241                 return false;
12242  
12243 -       if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
12244 +       if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
12245 +               list = &per_cpu(lazy_list, cpu);
12246 +       else
12247 +               list = &per_cpu(raised_list, cpu);
12248 +
12249 +       if (llist_add(&work->llnode, list))
12250                 arch_send_call_function_single_ipi(cpu);
12251  
12252         return true;
12253 @@ -86,6 +94,9 @@ EXPORT_SYMBOL_GPL(irq_work_queue_on);
12254  /* Enqueue the irq work @work on the current CPU */
12255  bool irq_work_queue(struct irq_work *work)
12256  {
12257 +       struct llist_head *list;
12258 +       bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
12259 +
12260         /* Only queue if not already pending */
12261         if (!irq_work_claim(work))
12262                 return false;
12263 @@ -93,13 +104,15 @@ bool irq_work_queue(struct irq_work *work)
12264         /* Queue the entry and raise the IPI if needed. */
12265         preempt_disable();
12266  
12267 -       /* If the work is "lazy", handle it from next tick if any */
12268 -       if (work->flags & IRQ_WORK_LAZY) {
12269 -               if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
12270 -                   tick_nohz_tick_stopped())
12271 -                       arch_irq_work_raise();
12272 -       } else {
12273 -               if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
12274 +       lazy_work = work->flags & IRQ_WORK_LAZY;
12275 +
12276 +       if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
12277 +               list = this_cpu_ptr(&lazy_list);
12278 +       else
12279 +               list = this_cpu_ptr(&raised_list);
12280 +
12281 +       if (llist_add(&work->llnode, list)) {
12282 +               if (!lazy_work || tick_nohz_tick_stopped())
12283                         arch_irq_work_raise();
12284         }
12285  
12286 @@ -116,9 +129,8 @@ bool irq_work_needs_cpu(void)
12287         raised = this_cpu_ptr(&raised_list);
12288         lazy = this_cpu_ptr(&lazy_list);
12289  
12290 -       if (llist_empty(raised) || arch_irq_work_has_interrupt())
12291 -               if (llist_empty(lazy))
12292 -                       return false;
12293 +       if (llist_empty(raised) && llist_empty(lazy))
12294 +               return false;
12295  
12296         /* All work should have been flushed before going offline */
12297         WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
12298 @@ -132,7 +144,7 @@ static void irq_work_run_list(struct llist_head *list)
12299         struct irq_work *work;
12300         struct llist_node *llnode;
12301  
12302 -       BUG_ON(!irqs_disabled());
12303 +       BUG_ON_NONRT(!irqs_disabled());
12304  
12305         if (llist_empty(list))
12306                 return;
12307 @@ -169,7 +181,16 @@ static void irq_work_run_list(struct llist_head *list)
12308  void irq_work_run(void)
12309  {
12310         irq_work_run_list(this_cpu_ptr(&raised_list));
12311 -       irq_work_run_list(this_cpu_ptr(&lazy_list));
12312 +       if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
12313 +               /*
12314 +                * NOTE: we raise softirq via IPI for safety,
12315 +                * and execute in irq_work_tick() to move the
12316 +                * overhead from hard to soft irq context.
12317 +                */
12318 +               if (!llist_empty(this_cpu_ptr(&lazy_list)))
12319 +                       raise_softirq(TIMER_SOFTIRQ);
12320 +       } else
12321 +               irq_work_run_list(this_cpu_ptr(&lazy_list));
12322  }
12323  EXPORT_SYMBOL_GPL(irq_work_run);
12324  
12325 @@ -179,8 +200,17 @@ void irq_work_tick(void)
12326  
12327         if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
12328                 irq_work_run_list(raised);
12329 +
12330 +       if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
12331 +               irq_work_run_list(this_cpu_ptr(&lazy_list));
12332 +}
12333 +
12334 +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
12335 +void irq_work_tick_soft(void)
12336 +{
12337         irq_work_run_list(this_cpu_ptr(&lazy_list));
12338  }
12339 +#endif
12340  
12341  /*
12342   * Synchronize against the irq_work @entry, ensures the entry is not
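
With the irq_work changes above, RT funnels ordinary irq_work into the lazy list and runs it from the timer softirq via irq_work_tick_soft() (the call site is in the timer code elsewhere in this patch); only work flagged IRQ_WORK_HARD_IRQ stays on the raised list and runs from hard interrupt context. A small sketch of queueing work that must not be deferred to the softirq thread (the callback and its purpose are invented; the flag is the one tested in the hunks above):

    #include <linux/irq_work.h>

    static void urgent_work_fn(struct irq_work *work)
    {
            /* Executes from hard irq context even on PREEMPT_RT_FULL. */
    }

    static struct irq_work urgent_work = {
            .flags = IRQ_WORK_HARD_IRQ,
            .func  = urgent_work_fn,
    };

    static void kick_urgent_work(void)
    {
            irq_work_queue(&urgent_work);
    }
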
12343 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
12344 index ee1bc1bb8feb..ddef07958840 100644
12345 --- a/kernel/ksysfs.c
12346 +++ b/kernel/ksysfs.c
12347 @@ -136,6 +136,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
12348  
12349  #endif /* CONFIG_KEXEC_CORE */
12350  
12351 +#if defined(CONFIG_PREEMPT_RT_FULL)
12352 +static ssize_t  realtime_show(struct kobject *kobj,
12353 +                             struct kobj_attribute *attr, char *buf)
12354 +{
12355 +       return sprintf(buf, "%d\n", 1);
12356 +}
12357 +KERNEL_ATTR_RO(realtime);
12358 +#endif
12359 +
12360  /* whether file capabilities are enabled */
12361  static ssize_t fscaps_show(struct kobject *kobj,
12362                                   struct kobj_attribute *attr, char *buf)
12363 @@ -225,6 +234,9 @@ static struct attribute * kernel_attrs[] = {
12364         &rcu_expedited_attr.attr,
12365         &rcu_normal_attr.attr,
12366  #endif
12367 +#ifdef CONFIG_PREEMPT_RT_FULL
12368 +       &realtime_attr.attr,
12369 +#endif
12370         NULL
12371  };
12372  
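
The ksysfs.c hunk exposes a read-only /sys/kernel/realtime attribute that simply reports 1 on a PREEMPT_RT_FULL kernel; the file is absent otherwise. A tiny userspace probe, as a sketch:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/kernel/realtime", "r");
            int rt = 0;

            if (f) {
                    if (fscanf(f, "%d", &rt) != 1)
                            rt = 0;
                    fclose(f);
            }
            printf("PREEMPT_RT_FULL kernel: %s\n", rt == 1 ? "yes" : "no");
            return 0;
    }
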
12373 diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
12374 index 6f88e352cd4f..5e27fb1079e7 100644
12375 --- a/kernel/locking/Makefile
12376 +++ b/kernel/locking/Makefile
12377 @@ -2,7 +2,7 @@
12378  # and is generally not a function of system call inputs.
12379  KCOV_INSTRUMENT                := n
12380  
12381 -obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
12382 +obj-y += semaphore.o percpu-rwsem.o
12383  
12384  ifdef CONFIG_FUNCTION_TRACER
12385  CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
12386 @@ -11,7 +11,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
12387  CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
12388  endif
12389  
12390 +ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
12391 +obj-y += mutex.o
12392  obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
12393 +obj-y += rwsem.o
12394 +endif
12395  obj-$(CONFIG_LOCKDEP) += lockdep.o
12396  ifeq ($(CONFIG_PROC_FS),y)
12397  obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
12398 @@ -24,7 +28,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
12399  obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
12400  obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
12401  obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
12402 +ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
12403  obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
12404  obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
12405 +endif
12406 +obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
12407  obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
12408  obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
12409 diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
12410 index 4d7ffc0a0d00..3d157b3128eb 100644
12411 --- a/kernel/locking/lockdep.c
12412 +++ b/kernel/locking/lockdep.c
12413 @@ -658,6 +658,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
12414         struct lockdep_subclass_key *key;
12415         struct hlist_head *hash_head;
12416         struct lock_class *class;
12417 +       bool is_static = false;
12418  
12419         if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
12420                 debug_locks_off();
12421 @@ -671,10 +672,23 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
12422  
12423         /*
12424          * Static locks do not have their class-keys yet - for them the key
12425 -        * is the lock object itself:
12426 +        * is the lock object itself. If the lock is in the per cpu area,
12427 +        * the canonical address of the lock (per cpu offset removed) is
12428 +        * used.
12429          */
12430 -       if (unlikely(!lock->key))
12431 -               lock->key = (void *)lock;
12432 +       if (unlikely(!lock->key)) {
12433 +               unsigned long can_addr, addr = (unsigned long)lock;
12434 +
12435 +               if (__is_kernel_percpu_address(addr, &can_addr))
12436 +                       lock->key = (void *)can_addr;
12437 +               else if (__is_module_percpu_address(addr, &can_addr))
12438 +                       lock->key = (void *)can_addr;
12439 +               else if (static_obj(lock))
12440 +                       lock->key = (void *)lock;
12441 +               else
12442 +                       return ERR_PTR(-EINVAL);
12443 +               is_static = true;
12444 +       }
12445  
12446         /*
12447          * NOTE: the class-key must be unique. For dynamic locks, a static
12448 @@ -706,7 +720,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
12449                 }
12450         }
12451  
12452 -       return NULL;
12453 +       return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
12454  }
12455  
12456  /*
12457 @@ -724,19 +738,18 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
12458         DEBUG_LOCKS_WARN_ON(!irqs_disabled());
12459  
12460         class = look_up_lock_class(lock, subclass);
12461 -       if (likely(class))
12462 +       if (likely(!IS_ERR_OR_NULL(class)))
12463                 goto out_set_class_cache;
12464  
12465         /*
12466          * Debug-check: all keys must be persistent!
12467 -        */
12468 -       if (!static_obj(lock->key)) {
12469 +        */
12470 +       if (IS_ERR(class)) {
12471                 debug_locks_off();
12472                 printk("INFO: trying to register non-static key.\n");
12473                 printk("the code is fine but needs lockdep annotation.\n");
12474                 printk("turning off the locking correctness validator.\n");
12475                 dump_stack();
12476 -
12477                 return NULL;
12478         }
12479  
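The lockdep change above amounts to a new key-selection order for locks that have no class key yet: use the canonical per-CPU address (per-CPU offset stripped) if the lock lives in a per-CPU area, fall back to the lock's own address for static objects, and reject anything else as a non-static key. A minimal stand-alone C sketch of that decision order, assuming hypothetical is_percpu_address()/is_static_object() helpers in place of __is_kernel_percpu_address() and static_obj():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's address classification helpers. */
static bool is_percpu_address(uintptr_t addr, uintptr_t *canonical)
{
	/* Pretend the per-CPU area is [0x1000, 0x2000) with this CPU's offset 0x100. */
	if (addr >= 0x1000 && addr < 0x2000) {
		*canonical = addr - 0x100;	/* strip the per-CPU offset */
		return true;
	}
	return false;
}

static bool is_static_object(uintptr_t addr)
{
	return addr >= 0x8000;			/* pretend static data starts here */
}

/* Mirrors the key selection added to look_up_lock_class(). */
static const void *pick_lock_key(uintptr_t lock_addr)
{
	uintptr_t canonical;

	if (is_percpu_address(lock_addr, &canonical))
		return (const void *)canonical;	/* same key on every CPU */
	if (is_static_object(lock_addr))
		return (const void *)lock_addr;	/* static lock: key is the lock itself */
	return NULL;				/* dynamic lock without a key: -EINVAL */
}

int main(void)
{
	printf("%p\n", (void *)pick_lock_key(0x1180));	/* per-CPU: canonical 0x1080 */
	printf("%p\n", (void *)pick_lock_key(0x9000));	/* static object: own address */
	printf("%p\n", (void *)pick_lock_key(0x10));	/* neither: NULL, caller errors out */
	return 0;
}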
12480 @@ -3410,7 +3423,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
12481                  * Clearly if the lock hasn't been acquired _ever_, we're not
12482                  * holding it either, so report failure.
12483                  */
12484 -               if (!class)
12485 +               if (IS_ERR_OR_NULL(class))
12486                         return 0;
12487  
12488                 /*
12489 @@ -3689,6 +3702,7 @@ static void check_flags(unsigned long flags)
12490                 }
12491         }
12492  
12493 +#ifndef CONFIG_PREEMPT_RT_FULL
12494         /*
12495          * We dont accurately track softirq state in e.g.
12496          * hardirq contexts (such as on 4KSTACKS), so only
12497 @@ -3703,6 +3717,7 @@ static void check_flags(unsigned long flags)
12498                         DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
12499                 }
12500         }
12501 +#endif
12502  
12503         if (!debug_locks)
12504                 print_irqtrace_events(current);
12505 @@ -4159,7 +4174,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
12506                  * If the class exists we look it up and zap it:
12507                  */
12508                 class = look_up_lock_class(lock, j);
12509 -               if (class)
12510 +               if (!IS_ERR_OR_NULL(class))
12511                         zap_class(class);
12512         }
12513         /*
12514 diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
12515 index f8c5af52a131..788068773e61 100644
12516 --- a/kernel/locking/locktorture.c
12517 +++ b/kernel/locking/locktorture.c
12518 @@ -26,7 +26,6 @@
12519  #include <linux/kthread.h>
12520  #include <linux/sched/rt.h>
12521  #include <linux/spinlock.h>
12522 -#include <linux/rwlock.h>
12523  #include <linux/mutex.h>
12524  #include <linux/rwsem.h>
12525  #include <linux/smp.h>
12526 diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
12527 index ce182599cf2e..2ad3a1e8344c 100644
12528 --- a/kernel/locking/percpu-rwsem.c
12529 +++ b/kernel/locking/percpu-rwsem.c
12530 @@ -18,7 +18,7 @@ int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
12531         /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
12532         rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
12533         __init_rwsem(&sem->rw_sem, name, rwsem_key);
12534 -       init_waitqueue_head(&sem->writer);
12535 +       init_swait_queue_head(&sem->writer);
12536         sem->readers_block = 0;
12537         return 0;
12538  }
12539 @@ -103,7 +103,7 @@ void __percpu_up_read(struct percpu_rw_semaphore *sem)
12540         __this_cpu_dec(*sem->read_count);
12541  
12542         /* Prod writer to recheck readers_active */
12543 -       wake_up(&sem->writer);
12544 +       swake_up(&sem->writer);
12545  }
12546  EXPORT_SYMBOL_GPL(__percpu_up_read);
12547  
12548 @@ -160,7 +160,7 @@ void percpu_down_write(struct percpu_rw_semaphore *sem)
12549          */
12550  
12551         /* Wait for all now active readers to complete. */
12552 -       wait_event(sem->writer, readers_active_check(sem));
12553 +       swait_event(sem->writer, readers_active_check(sem));
12554  }
12555  EXPORT_SYMBOL_GPL(percpu_down_write);
12556  
12557 diff --git a/kernel/locking/rt.c b/kernel/locking/rt.c
12558 new file mode 100644
12559 index 000000000000..665754c00e1e
12560 --- /dev/null
12561 +++ b/kernel/locking/rt.c
12562 @@ -0,0 +1,498 @@
12563 +/*
12564 + * kernel/rt.c
12565 + *
12566 + * Real-Time Preemption Support
12567 + *
12568 + * started by Ingo Molnar:
12569 + *
12570 + *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
12571 + *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
12572 + *
12573 + * historic credit for proving that Linux spinlocks can be implemented via
12574 + * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
12575 + * and others) who prototyped it on 2.4 and did lots of comparative
12576 + * research and analysis; TimeSys, for proving that you can implement a
12577 + * fully preemptible kernel via the use of IRQ threading and mutexes;
12578 + * Bill Huey for persuasively arguing on lkml that the mutex model is the
12579 + * right one; and to MontaVista, who ported pmutexes to 2.6.
12580 + *
12581 + * This code is a from-scratch implementation and is not based on pmutexes,
12582 + * but the idea of converting spinlocks to mutexes is used here too.
12583 + *
12584 + * lock debugging, locking tree, deadlock detection:
12585 + *
12586 + *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
12587 + *  Released under the General Public License (GPL).
12588 + *
12589 + * Includes portions of the generic R/W semaphore implementation from:
12590 + *
12591 + *  Copyright (c) 2001   David Howells (dhowells@redhat.com).
12592 + *  - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
12593 + *  - Derived also from comments by Linus
12594 + *
12595 + * Pending ownership of locks and ownership stealing:
12596 + *
12597 + *  Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
12598 + *
12599 + *   (also by Steven Rostedt)
12600 + *    - Converted single pi_lock to individual task locks.
12601 + *
12602 + * By Esben Nielsen:
12603 + *    Doing priority inheritance with help of the scheduler.
12604 + *
12605 + *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
12606 + *  - major rework based on Esben Nielsen's initial patch
12607 + *  - replaced thread_info references by task_struct refs
12608 + *  - removed task->pending_owner dependency
12609 + *  - BKL drop/reacquire for semaphore style locks to avoid deadlocks
12610 + *    in the scheduler return path as discussed with Steven Rostedt
12611 + *
12612 + *  Copyright (C) 2006, Kihon Technologies Inc.
12613 + *    Steven Rostedt <rostedt@goodmis.org>
12614 + *  - debugged and patched Thomas Gleixner's rework.
12615 + *  - added back the cmpxchg to the rework.
12616 + *  - turned atomic require back on for SMP.
12617 + */
12618 +
12619 +#include <linux/spinlock.h>
12620 +#include <linux/rtmutex.h>
12621 +#include <linux/sched.h>
12622 +#include <linux/delay.h>
12623 +#include <linux/module.h>
12624 +#include <linux/kallsyms.h>
12625 +#include <linux/syscalls.h>
12626 +#include <linux/interrupt.h>
12627 +#include <linux/plist.h>
12628 +#include <linux/fs.h>
12629 +#include <linux/futex.h>
12630 +#include <linux/hrtimer.h>
12631 +
12632 +#include "rtmutex_common.h"
12633 +
12634 +/*
12635 + * struct mutex functions
12636 + */
12637 +void __mutex_do_init(struct mutex *mutex, const char *name,
12638 +                    struct lock_class_key *key)
12639 +{
12640 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
12641 +       /*
12642 +        * Make sure we are not reinitializing a held lock:
12643 +        */
12644 +       debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
12645 +       lockdep_init_map(&mutex->dep_map, name, key, 0);
12646 +#endif
12647 +       mutex->lock.save_state = 0;
12648 +}
12649 +EXPORT_SYMBOL(__mutex_do_init);
12650 +
12651 +void __lockfunc _mutex_lock(struct mutex *lock)
12652 +{
12653 +       mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
12654 +       rt_mutex_lock(&lock->lock);
12655 +}
12656 +EXPORT_SYMBOL(_mutex_lock);
12657 +
12658 +int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
12659 +{
12660 +       int ret;
12661 +
12662 +       mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
12663 +       ret = rt_mutex_lock_interruptible(&lock->lock);
12664 +       if (ret)
12665 +               mutex_release(&lock->dep_map, 1, _RET_IP_);
12666 +       return ret;
12667 +}
12668 +EXPORT_SYMBOL(_mutex_lock_interruptible);
12669 +
12670 +int __lockfunc _mutex_lock_killable(struct mutex *lock)
12671 +{
12672 +       int ret;
12673 +
12674 +       mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
12675 +       ret = rt_mutex_lock_killable(&lock->lock);
12676 +       if (ret)
12677 +               mutex_release(&lock->dep_map, 1, _RET_IP_);
12678 +       return ret;
12679 +}
12680 +EXPORT_SYMBOL(_mutex_lock_killable);
12681 +
12682 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
12683 +void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
12684 +{
12685 +       mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
12686 +       rt_mutex_lock(&lock->lock);
12687 +}
12688 +EXPORT_SYMBOL(_mutex_lock_nested);
12689 +
12690 +void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
12691 +{
12692 +       mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
12693 +       rt_mutex_lock(&lock->lock);
12694 +}
12695 +EXPORT_SYMBOL(_mutex_lock_nest_lock);
12696 +
12697 +int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
12698 +{
12699 +       int ret;
12700 +
12701 +       mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
12702 +       ret = rt_mutex_lock_interruptible(&lock->lock);
12703 +       if (ret)
12704 +               mutex_release(&lock->dep_map, 1, _RET_IP_);
12705 +       return ret;
12706 +}
12707 +EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
12708 +
12709 +int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
12710 +{
12711 +       int ret;
12712 +
12713 +       mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
12714 +       ret = rt_mutex_lock_killable(&lock->lock);
12715 +       if (ret)
12716 +               mutex_release(&lock->dep_map, 1, _RET_IP_);
12717 +       return ret;
12718 +}
12719 +EXPORT_SYMBOL(_mutex_lock_killable_nested);
12720 +#endif
12721 +
12722 +int __lockfunc _mutex_trylock(struct mutex *lock)
12723 +{
12724 +       int ret = rt_mutex_trylock(&lock->lock);
12725 +
12726 +       if (ret)
12727 +               mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
12728 +
12729 +       return ret;
12730 +}
12731 +EXPORT_SYMBOL(_mutex_trylock);
12732 +
12733 +void __lockfunc _mutex_unlock(struct mutex *lock)
12734 +{
12735 +       mutex_release(&lock->dep_map, 1, _RET_IP_);
12736 +       rt_mutex_unlock(&lock->lock);
12737 +}
12738 +EXPORT_SYMBOL(_mutex_unlock);
12739 +
12740 +/*
12741 + * rwlock_t functions
12742 + */
12743 +int __lockfunc rt_write_trylock(rwlock_t *rwlock)
12744 +{
12745 +       int ret;
12746 +
12747 +       migrate_disable();
12748 +       ret = rt_mutex_trylock(&rwlock->lock);
12749 +       if (ret)
12750 +               rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
12751 +       else
12752 +               migrate_enable();
12753 +
12754 +       return ret;
12755 +}
12756 +EXPORT_SYMBOL(rt_write_trylock);
12757 +
12758 +int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
12759 +{
12760 +       int ret;
12761 +
12762 +       *flags = 0;
12763 +       ret = rt_write_trylock(rwlock);
12764 +       return ret;
12765 +}
12766 +EXPORT_SYMBOL(rt_write_trylock_irqsave);
12767 +
12768 +int __lockfunc rt_read_trylock(rwlock_t *rwlock)
12769 +{
12770 +       struct rt_mutex *lock = &rwlock->lock;
12771 +       int ret = 1;
12772 +
12773 +       /*
12774 +        * recursive read locks succeed when current owns the lock,
12775 +        * but not when read_depth == 0 which means that the lock is
12776 +        * write locked.
12777 +        */
12778 +       if (rt_mutex_owner(lock) != current) {
12779 +               migrate_disable();
12780 +               ret = rt_mutex_trylock(lock);
12781 +               if (ret)
12782 +                       rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
12783 +               else
12784 +                       migrate_enable();
12785 +
12786 +       } else if (!rwlock->read_depth) {
12787 +               ret = 0;
12788 +       }
12789 +
12790 +       if (ret)
12791 +               rwlock->read_depth++;
12792 +
12793 +       return ret;
12794 +}
12795 +EXPORT_SYMBOL(rt_read_trylock);
12796 +
12797 +void __lockfunc rt_write_lock(rwlock_t *rwlock)
12798 +{
12799 +       rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
12800 +       __rt_spin_lock(&rwlock->lock);
12801 +}
12802 +EXPORT_SYMBOL(rt_write_lock);
12803 +
12804 +void __lockfunc rt_read_lock(rwlock_t *rwlock)
12805 +{
12806 +       struct rt_mutex *lock = &rwlock->lock;
12807 +
12808 +
12809 +       /*
12810 +        * recursive read locks succeed when current owns the lock
12811 +        */
12812 +       if (rt_mutex_owner(lock) != current) {
12813 +               rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
12814 +               __rt_spin_lock(lock);
12815 +       }
12816 +       rwlock->read_depth++;
12817 +}
12818 +
12819 +EXPORT_SYMBOL(rt_read_lock);
12820 +
12821 +void __lockfunc rt_write_unlock(rwlock_t *rwlock)
12822 +{
12823 +       /* NOTE: we always pass in '1' for nested, for simplicity */
12824 +       rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
12825 +       __rt_spin_unlock(&rwlock->lock);
12826 +       migrate_enable();
12827 +}
12828 +EXPORT_SYMBOL(rt_write_unlock);
12829 +
12830 +void __lockfunc rt_read_unlock(rwlock_t *rwlock)
12831 +{
12832 +       /* Release the lock only when read_depth is down to 0 */
12833 +       if (--rwlock->read_depth == 0) {
12834 +               rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
12835 +               __rt_spin_unlock(&rwlock->lock);
12836 +               migrate_enable();
12837 +       }
12838 +}
12839 +EXPORT_SYMBOL(rt_read_unlock);
12840 +
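Taken together, rt_read_lock(), rt_read_trylock() and rt_read_unlock() above implement reader recursion with nothing more than a per-lock read_depth counter: the underlying rt_mutex is taken only on a task's outermost read acquisition and dropped when the counter falls back to zero. A hedged user-space sketch of that accounting, with a pthread mutex standing in for the rt_mutex (the real code checks rt_mutex_owner() rather than caching an owner field, and read_depth is only ever touched by the owning task):

#include <pthread.h>
#include <stdio.h>

struct rt_rwlock_sketch {
	pthread_mutex_t lock;	/* stand-in for the underlying rt_mutex */
	pthread_t owner;	/* task holding the sketch lock */
	int read_depth;		/* recursion counter, as in rwlock_t */
};

static void sketch_read_lock(struct rt_rwlock_sketch *rw)
{
	/* Only the first (outermost) read acquisition takes the real lock;
	 * recursive acquisitions by the owner just bump the counter. */
	if (rw->read_depth == 0 || !pthread_equal(rw->owner, pthread_self())) {
		pthread_mutex_lock(&rw->lock);
		rw->owner = pthread_self();
	}
	rw->read_depth++;
}

static void sketch_read_unlock(struct rt_rwlock_sketch *rw)
{
	/* Release the real lock only when the outermost reader unlocks. */
	if (--rw->read_depth == 0)
		pthread_mutex_unlock(&rw->lock);
}

int main(void)
{
	struct rt_rwlock_sketch rw = { .lock = PTHREAD_MUTEX_INITIALIZER };

	sketch_read_lock(&rw);
	sketch_read_lock(&rw);			/* nested read: no second lock() */
	printf("depth=%d\n", rw.read_depth);	/* 2 */
	sketch_read_unlock(&rw);
	sketch_read_unlock(&rw);		/* depth hits 0: mutex released */
	return 0;
}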
12841 +unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
12842 +{
12843 +       rt_write_lock(rwlock);
12844 +
12845 +       return 0;
12846 +}
12847 +EXPORT_SYMBOL(rt_write_lock_irqsave);
12848 +
12849 +unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
12850 +{
12851 +       rt_read_lock(rwlock);
12852 +
12853 +       return 0;
12854 +}
12855 +EXPORT_SYMBOL(rt_read_lock_irqsave);
12856 +
12857 +void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
12858 +{
12859 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
12860 +       /*
12861 +        * Make sure we are not reinitializing a held lock:
12862 +        */
12863 +       debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
12864 +       lockdep_init_map(&rwlock->dep_map, name, key, 0);
12865 +#endif
12866 +       rwlock->lock.save_state = 1;
12867 +       rwlock->read_depth = 0;
12868 +}
12869 +EXPORT_SYMBOL(__rt_rwlock_init);
12870 +
12871 +/*
12872 + * rw_semaphores
12873 + */
12874 +
12875 +void  rt_up_write(struct rw_semaphore *rwsem)
12876 +{
12877 +       rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
12878 +       rt_mutex_unlock(&rwsem->lock);
12879 +}
12880 +EXPORT_SYMBOL(rt_up_write);
12881 +
12882 +void __rt_up_read(struct rw_semaphore *rwsem)
12883 +{
12884 +       if (--rwsem->read_depth == 0)
12885 +               rt_mutex_unlock(&rwsem->lock);
12886 +}
12887 +
12888 +void  rt_up_read(struct rw_semaphore *rwsem)
12889 +{
12890 +       rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
12891 +       __rt_up_read(rwsem);
12892 +}
12893 +EXPORT_SYMBOL(rt_up_read);
12894 +
12895 +/*
12896 + * downgrade a write lock into a read lock
12897 + * - just wake up any readers at the front of the queue
12898 + */
12899 +void  rt_downgrade_write(struct rw_semaphore *rwsem)
12900 +{
12901 +       BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
12902 +       rwsem->read_depth = 1;
12903 +}
12904 +EXPORT_SYMBOL(rt_downgrade_write);
12905 +
12906 +int  rt_down_write_trylock(struct rw_semaphore *rwsem)
12907 +{
12908 +       int ret = rt_mutex_trylock(&rwsem->lock);
12909 +
12910 +       if (ret)
12911 +               rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
12912 +       return ret;
12913 +}
12914 +EXPORT_SYMBOL(rt_down_write_trylock);
12915 +
12916 +void  rt_down_write(struct rw_semaphore *rwsem)
12917 +{
12918 +       rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
12919 +       rt_mutex_lock(&rwsem->lock);
12920 +}
12921 +EXPORT_SYMBOL(rt_down_write);
12922 +
12923 +int rt_down_write_killable(struct rw_semaphore *rwsem)
12924 +{
12925 +       int ret;
12926 +
12927 +       rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
12928 +       ret = rt_mutex_lock_killable(&rwsem->lock);
12929 +       if (ret)
12930 +               rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
12931 +       return ret;
12932 +}
12933 +EXPORT_SYMBOL(rt_down_write_killable);
12934 +
12935 +int rt_down_write_killable_nested(struct rw_semaphore *rwsem, int subclass)
12936 +{
12937 +       int ret;
12938 +
12939 +       rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
12940 +       ret = rt_mutex_lock_killable(&rwsem->lock);
12941 +       if (ret)
12942 +               rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
12943 +       return ret;
12944 +}
12945 +EXPORT_SYMBOL(rt_down_write_killable_nested);
12946 +
12947 +void  rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
12948 +{
12949 +       rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
12950 +       rt_mutex_lock(&rwsem->lock);
12951 +}
12952 +EXPORT_SYMBOL(rt_down_write_nested);
12953 +
12954 +void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
12955 +                              struct lockdep_map *nest)
12956 +{
12957 +       rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_);
12958 +       rt_mutex_lock(&rwsem->lock);
12959 +}
12960 +EXPORT_SYMBOL(rt_down_write_nested_lock);
12961 +
12962 +int rt__down_read_trylock(struct rw_semaphore *rwsem)
12963 +{
12964 +       struct rt_mutex *lock = &rwsem->lock;
12965 +       int ret = 1;
12966 +
12967 +       /*
12968 +        * recursive read locks succeed when current owns the rwsem,
12969 +        * but not when read_depth == 0 which means that the rwsem is
12970 +        * write locked.
12971 +        */
12972 +       if (rt_mutex_owner(lock) != current)
12973 +               ret = rt_mutex_trylock(&rwsem->lock);
12974 +       else if (!rwsem->read_depth)
12975 +               ret = 0;
12976 +
12977 +       if (ret)
12978 +               rwsem->read_depth++;
12979 +       return ret;
12980 +
12981 +}
12982 +
12983 +int  rt_down_read_trylock(struct rw_semaphore *rwsem)
12984 +{
12985 +       int ret;
12986 +
12987 +       ret = rt__down_read_trylock(rwsem);
12988 +       if (ret)
12989 +               rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
12990 +
12991 +       return ret;
12992 +}
12993 +EXPORT_SYMBOL(rt_down_read_trylock);
12994 +
12995 +void rt__down_read(struct rw_semaphore *rwsem)
12996 +{
12997 +       struct rt_mutex *lock = &rwsem->lock;
12998 +
12999 +       if (rt_mutex_owner(lock) != current)
13000 +               rt_mutex_lock(&rwsem->lock);
13001 +       rwsem->read_depth++;
13002 +}
13003 +EXPORT_SYMBOL(rt__down_read);
13004 +
13005 +static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
13006 +{
13007 +       rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
13008 +       rt__down_read(rwsem);
13009 +}
13010 +
13011 +void  rt_down_read(struct rw_semaphore *rwsem)
13012 +{
13013 +       __rt_down_read(rwsem, 0);
13014 +}
13015 +EXPORT_SYMBOL(rt_down_read);
13016 +
13017 +void  rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
13018 +{
13019 +       __rt_down_read(rwsem, subclass);
13020 +}
13021 +EXPORT_SYMBOL(rt_down_read_nested);
13022 +
13023 +void  __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
13024 +                             struct lock_class_key *key)
13025 +{
13026 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
13027 +       /*
13028 +        * Make sure we are not reinitializing a held lock:
13029 +        */
13030 +       debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
13031 +       lockdep_init_map(&rwsem->dep_map, name, key, 0);
13032 +#endif
13033 +       rwsem->read_depth = 0;
13034 +       rwsem->lock.save_state = 0;
13035 +}
13036 +EXPORT_SYMBOL(__rt_rwsem_init);
13037 +
13038 +/**
13039 + * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
13040 + * @cnt: the atomic which we are to dec
13041 + * @lock: the mutex to return holding if we dec to 0
13042 + *
13043 + * return true and hold lock if we dec to 0, return false otherwise
13044 + */
13045 +int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
13046 +{
13047 +       /* dec if we can't possibly hit 0 */
13048 +       if (atomic_add_unless(cnt, -1, 1))
13049 +               return 0;
13050 +       /* we might hit 0, so take the lock */
13051 +       mutex_lock(lock);
13052 +       if (!atomic_dec_and_test(cnt)) {
13053 +               /* when we actually did the dec, we didn't hit 0 */
13054 +               mutex_unlock(lock);
13055 +               return 0;
13056 +       }
13057 +       /* we hit 0, and we hold the lock */
13058 +       return 1;
13059 +}
13060 +EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
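atomic_dec_and_mutex_lock() above is the sleeping-lock flavour of the classic dec-and-lock idiom: drop a reference cheaply while the count cannot possibly reach zero, and take the lock only when this put might be the last one, so that teardown runs under the lock. A hedged user-space sketch of the pattern and of a typical caller; dec_and_lock(), struct shared_obj and put_obj() are illustrative names, not kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_obj {
	atomic_int refcount;
	pthread_mutex_t teardown_lock;	/* serializes the destruction path */
};

/* Same contract as atomic_dec_and_mutex_lock(): returns true with the lock
 * held iff this decrement took the reference count to zero. */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	/* Fast path: decrement without the lock while we cannot hit zero. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return false;
	}

	/* We might hit zero: take the lock and do the final decrement. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) != 1) {
		pthread_mutex_unlock(lock);	/* someone else still holds a reference */
		return false;
	}
	return true;				/* last reference, lock held */
}

static void put_obj(struct shared_obj *obj)
{
	if (dec_and_lock(&obj->refcount, &obj->teardown_lock)) {
		/* last user: tear down while holding the lock */
		pthread_mutex_unlock(&obj->teardown_lock);
		free(obj);
	}
}

int main(void)
{
	struct shared_obj *obj = malloc(sizeof(*obj));

	atomic_init(&obj->refcount, 2);
	pthread_mutex_init(&obj->teardown_lock, NULL);
	put_obj(obj);		/* 2 -> 1: no teardown, lock never taken */
	put_obj(obj);		/* 1 -> 0: teardown under the lock */
	printf("done\n");
	return 0;
}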
13061 diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
13062 index 2c49d76f96c3..4f1a7663c34d 100644
13063 --- a/kernel/locking/rtmutex.c
13064 +++ b/kernel/locking/rtmutex.c
13065 @@ -7,6 +7,11 @@
13066   *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
13067   *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
13068   *  Copyright (C) 2006 Esben Nielsen
13069 + *  Adaptive Spinlocks:
13070 + *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
13071 + *                                  and Peter Morreale,
13072 + * Adaptive Spinlocks simplification:
13073 + *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
13074   *
13075   *  See Documentation/locking/rt-mutex-design.txt for details.
13076   */
13077 @@ -16,6 +21,7 @@
13078  #include <linux/sched/rt.h>
13079  #include <linux/sched/deadline.h>
13080  #include <linux/timer.h>
13081 +#include <linux/ww_mutex.h>
13082  
13083  #include "rtmutex_common.h"
13084  
13085 @@ -133,6 +139,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
13086                 WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
13087  }
13088  
13089 +static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
13090 +{
13091 +       return waiter && waiter != PI_WAKEUP_INPROGRESS &&
13092 +               waiter != PI_REQUEUE_INPROGRESS;
13093 +}
13094 +
13095  /*
13096   * We can speed up the acquire/release, if there's no debugging state to be
13097   * set up.
13098 @@ -414,6 +426,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
13099         return debug_rt_mutex_detect_deadlock(waiter, chwalk);
13100  }
13101  
13102 +static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
13103 +{
13104 +       if (waiter->savestate)
13105 +               wake_up_lock_sleeper(waiter->task);
13106 +       else
13107 +               wake_up_process(waiter->task);
13108 +}
13109 +
13110  /*
13111   * Max number of times we'll walk the boosting chain:
13112   */
13113 @@ -421,7 +441,8 @@ int max_lock_depth = 1024;
13114  
13115  static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
13116  {
13117 -       return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
13118 +       return rt_mutex_real_waiter(p->pi_blocked_on) ?
13119 +               p->pi_blocked_on->lock : NULL;
13120  }
13121  
13122  /*
13123 @@ -557,7 +578,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
13124          * reached or the state of the chain has changed while we
13125          * dropped the locks.
13126          */
13127 -       if (!waiter)
13128 +       if (!rt_mutex_real_waiter(waiter))
13129                 goto out_unlock_pi;
13130  
13131         /*
13132 @@ -719,13 +740,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
13133          * follow here. This is the end of the chain we are walking.
13134          */
13135         if (!rt_mutex_owner(lock)) {
13136 +               struct rt_mutex_waiter *lock_top_waiter;
13137 +
13138                 /*
13139                  * If the requeue [7] above changed the top waiter,
13140                  * then we need to wake the new top waiter up to try
13141                  * to get the lock.
13142                  */
13143 -               if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
13144 -                       wake_up_process(rt_mutex_top_waiter(lock)->task);
13145 +               lock_top_waiter = rt_mutex_top_waiter(lock);
13146 +               if (prerequeue_top_waiter != lock_top_waiter)
13147 +                       rt_mutex_wake_waiter(lock_top_waiter);
13148                 raw_spin_unlock_irq(&lock->wait_lock);
13149                 return 0;
13150         }
13151 @@ -818,6 +842,25 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
13152         return ret;
13153  }
13154  
13155 +
13156 +#define STEAL_NORMAL  0
13157 +#define STEAL_LATERAL 1
13158 +
13159 +/*
13160 + * Note that RT tasks are excluded from lateral-steals to prevent the
13161 + * introduction of an unbounded latency
13162 + */
13163 +static inline int lock_is_stealable(struct task_struct *task,
13164 +                                   struct task_struct *pendowner, int mode)
13165 +{
13166 +    if (mode == STEAL_NORMAL || rt_task(task)) {
13167 +           if (task->prio >= pendowner->prio)
13168 +                   return 0;
13169 +    } else if (task->prio > pendowner->prio)
13170 +           return 0;
13171 +    return 1;
13172 +}
13173 +
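The lateral-steal policy above reduces to a priority comparison whose strictness depends on the mode and on whether the would-be thief is an RT task: RT tasks (and anyone in STEAL_NORMAL mode) may only take the lock from a strictly lower-priority top waiter, while lateral steals also allow equal priority. A self-contained sketch of that decision, with lower numeric prio meaning higher priority (the kernel's internal convention) and the rt flag standing in for rt_task():

#include <stdbool.h>
#include <stdio.h>

#define STEAL_NORMAL	0
#define STEAL_LATERAL	1

struct task_sketch {
	int prio;	/* lower value = higher priority */
	bool rt;	/* stand-in for rt_task() */
};

/* Mirrors lock_is_stealable(): returns true if 'thief' may take the lock
 * ahead of the current top waiter. */
static bool can_steal(const struct task_sketch *thief,
		      const struct task_sketch *top_waiter, int mode)
{
	if (mode == STEAL_NORMAL || thief->rt)
		return thief->prio < top_waiter->prio;	/* strictly higher priority */
	return thief->prio <= top_waiter->prio;		/* lateral: equal is enough */
}

int main(void)
{
	struct task_sketch top     = { .prio = 50, .rt = false };
	struct task_sketch same    = { .prio = 50, .rt = false };
	struct task_sketch same_rt = { .prio = 50, .rt = true };

	printf("%d\n", can_steal(&same, &top, STEAL_NORMAL));	 /* 0 */
	printf("%d\n", can_steal(&same, &top, STEAL_LATERAL));	 /* 1 */
	printf("%d\n", can_steal(&same_rt, &top, STEAL_LATERAL)); /* 0: RT tasks excluded */
	return 0;
}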
13174  /*
13175   * Try to take an rt-mutex
13176   *
13177 @@ -828,8 +871,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
13178   * @waiter: The waiter that is queued to the lock's wait tree if the
13179   *         callsite called task_blocked_on_lock(), otherwise NULL
13180   */
13181 -static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
13182 -                               struct rt_mutex_waiter *waiter)
13183 +static int __try_to_take_rt_mutex(struct rt_mutex *lock,
13184 +                                 struct task_struct *task,
13185 +                                 struct rt_mutex_waiter *waiter, int mode)
13186  {
13187         /*
13188          * Before testing whether we can acquire @lock, we set the
13189 @@ -866,8 +910,10 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
13190                  * If waiter is not the highest priority waiter of
13191                  * @lock, give up.
13192                  */
13193 -               if (waiter != rt_mutex_top_waiter(lock))
13194 +               if (waiter != rt_mutex_top_waiter(lock)) {
13195 +                       /* XXX lock_is_stealable() ? */
13196                         return 0;
13197 +               }
13198  
13199                 /*
13200                  * We can acquire the lock. Remove the waiter from the
13201 @@ -885,14 +931,10 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
13202                  * not need to be dequeued.
13203                  */
13204                 if (rt_mutex_has_waiters(lock)) {
13205 -                       /*
13206 -                        * If @task->prio is greater than or equal to
13207 -                        * the top waiter priority (kernel view),
13208 -                        * @task lost.
13209 -                        */
13210 -                       if (task->prio >= rt_mutex_top_waiter(lock)->prio)
13211 -                               return 0;
13212 +                       struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
13213  
13214 +                       if (task != pown && !lock_is_stealable(task, pown, mode))
13215 +                               return 0;
13216                         /*
13217                          * The current top waiter stays enqueued. We
13218                          * don't have to change anything in the lock
13219 @@ -941,6 +983,433 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
13220         return 1;
13221  }
13222  
13223 +#ifdef CONFIG_PREEMPT_RT_FULL
13224 +/*
13225 + * preemptible spin_lock functions:
13226 + */
13227 +static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
13228 +                                        void  (*slowfn)(struct rt_mutex *lock,
13229 +                                                        bool mg_off),
13230 +                                        bool do_mig_dis)
13231 +{
13232 +       might_sleep_no_state_check();
13233 +
13234 +       if (do_mig_dis)
13235 +               migrate_disable();
13236 +
13237 +       if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
13238 +               rt_mutex_deadlock_account_lock(lock, current);
13239 +       else
13240 +               slowfn(lock, do_mig_dis);
13241 +}
13242 +
13243 +static inline int rt_spin_lock_fastunlock(struct rt_mutex *lock,
13244 +                                         int (*slowfn)(struct rt_mutex *lock))
13245 +{
13246 +       if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
13247 +               rt_mutex_deadlock_account_unlock(current);
13248 +               return 0;
13249 +       }
13250 +       return slowfn(lock);
13251 +}
13252 +#ifdef CONFIG_SMP
13253 +/*
13254 + * Note that owner is a speculative pointer and dereferencing relies
13255 + * on rcu_read_lock() and the check against the lock owner.
13256 + */
13257 +static int adaptive_wait(struct rt_mutex *lock,
13258 +                        struct task_struct *owner)
13259 +{
13260 +       int res = 0;
13261 +
13262 +       rcu_read_lock();
13263 +       for (;;) {
13264 +               if (owner != rt_mutex_owner(lock))
13265 +                       break;
13266 +               /*
13267 +                * Ensure that owner->on_cpu is dereferenced _after_
13268 +                * checking the above to be valid.
13269 +                */
13270 +               barrier();
13271 +               if (!owner->on_cpu) {
13272 +                       res = 1;
13273 +                       break;
13274 +               }
13275 +               cpu_relax();
13276 +       }
13277 +       rcu_read_unlock();
13278 +       return res;
13279 +}
13280 +#else
13281 +static int adaptive_wait(struct rt_mutex *lock,
13282 +                        struct task_struct *orig_owner)
13283 +{
13284 +       return 1;
13285 +}
13286 +#endif
13287 +
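adaptive_wait() above encodes the adaptive-spinning heuristic: as long as the lock owner is still running on another CPU the waiter keeps spinning, because the owner is likely to release the lock shortly; once the owner is preempted or ownership changes, spinning is pointless and the waiter should block or retry. A hedged user-space sketch of the same decision loop; struct owner_view and its fields are illustrative, where the kernel reads owner->on_cpu under rcu_read_lock():

#include <stdbool.h>
#include <stdio.h>

/* Illustrative view of owner state; in the kernel these fields change
 * concurrently under the scheduler, hence volatile here. */
struct owner_view {
	volatile int current_owner;	/* id of the task owning the lock */
	volatile bool owner_on_cpu;	/* stand-in for owner->on_cpu */
};

/* Returns true when the waiter should stop spinning and block,
 * false when ownership changed and the lock is worth retrying. */
static bool adaptive_wait_sketch(const struct owner_view *v, int spinning_on)
{
	for (;;) {
		if (v->current_owner != spinning_on)
			return false;	/* owner changed under us: retry the lock */
		if (!v->owner_on_cpu)
			return true;	/* owner preempted or sleeping: block */
		/* owner still running on another CPU: keep spinning */
	}
}

int main(void)
{
	struct owner_view a = { .current_owner = 1, .owner_on_cpu = false };
	struct owner_view b = { .current_owner = 2, .owner_on_cpu = true  };

	printf("%d\n", adaptive_wait_sketch(&a, 1));	/* 1: owner off CPU, sleep */
	printf("%d\n", adaptive_wait_sketch(&b, 1));	/* 0: owner changed, retry */
	return 0;
}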
13288 +static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
13289 +                                  struct rt_mutex_waiter *waiter,
13290 +                                  struct task_struct *task,
13291 +                                  enum rtmutex_chainwalk chwalk);
13292 +/*
13293 + * Slow path lock function spin_lock style: this variant is very
13294 + * careful not to miss any non-lock wakeups.
13295 + *
13296 + * We store the current state under p->pi_lock in p->saved_state and
13297 + * the try_to_wake_up() code handles this accordingly.
13298 + */
13299 +static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock,
13300 +                                                   bool mg_off)
13301 +{
13302 +       struct task_struct *lock_owner, *self = current;
13303 +       struct rt_mutex_waiter waiter, *top_waiter;
13304 +       unsigned long flags;
13305 +       int ret;
13306 +
13307 +       rt_mutex_init_waiter(&waiter, true);
13308 +
13309 +       raw_spin_lock_irqsave(&lock->wait_lock, flags);
13310 +
13311 +       if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
13312 +               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
13313 +               return;
13314 +       }
13315 +
13316 +       BUG_ON(rt_mutex_owner(lock) == self);
13317 +
13318 +       /*
13319 +        * We save whatever state the task is in and we'll restore it
13320 +        * after acquiring the lock taking real wakeups into account
13321 +        * as well. We are serialized via pi_lock against wakeups. See
13322 +        * try_to_wake_up().
13323 +        */
13324 +       raw_spin_lock(&self->pi_lock);
13325 +       self->saved_state = self->state;
13326 +       __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
13327 +       raw_spin_unlock(&self->pi_lock);
13328 +
13329 +       ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
13330 +       BUG_ON(ret);
13331 +
13332 +       for (;;) {
13333 +               /* Try to acquire the lock again. */
13334 +               if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
13335 +                       break;
13336 +
13337 +               top_waiter = rt_mutex_top_waiter(lock);
13338 +               lock_owner = rt_mutex_owner(lock);
13339 +
13340 +               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
13341 +
13342 +               debug_rt_mutex_print_deadlock(&waiter);
13343 +
13344 +               if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) {
13345 +                       if (mg_off)
13346 +                               migrate_enable();
13347 +                       schedule();
13348 +                       if (mg_off)
13349 +                               migrate_disable();
13350 +               }
13351 +
13352 +               raw_spin_lock_irqsave(&lock->wait_lock, flags);
13353 +
13354 +               raw_spin_lock(&self->pi_lock);
13355 +               __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
13356 +               raw_spin_unlock(&self->pi_lock);
13357 +       }
13358 +
13359 +       /*
13360 +        * Restore the task state to current->saved_state. We set it
13361 +        * to the original state above and the try_to_wake_up() code
13362 +        * has possibly updated it when a real (non-rtmutex) wakeup
13363 +        * happened while we were blocked. Clear saved_state so
13364 +        * try_to_wakeup() does not get confused.
13365 +        * try_to_wake_up() does not get confused.
13366 +       raw_spin_lock(&self->pi_lock);
13367 +       __set_current_state_no_track(self->saved_state);
13368 +       self->saved_state = TASK_RUNNING;
13369 +       raw_spin_unlock(&self->pi_lock);
13370 +
13371 +       /*
13372 +        * try_to_take_rt_mutex() sets the waiter bit
13373 +        * unconditionally. We might have to fix that up:
13374 +        */
13375 +       fixup_rt_mutex_waiters(lock);
13376 +
13377 +       BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
13378 +       BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
13379 +
13380 +       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
13381 +
13382 +       debug_rt_mutex_free_waiter(&waiter);
13383 +}
13384 +
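The saved_state handling in the slow path above exists because a task that blocks on a converted spinlock may already have set itself to TASK_INTERRUPTIBLE (or similar) for an unrelated reason; that state is parked in saved_state while the lock wait itself runs in TASK_UNINTERRUPTIBLE, and a genuine wakeup that arrives in the meantime is recorded there so it is not lost when the state is restored. A rough sketch of the pattern only; the state values and helpers below are illustrative, and the real serialization against try_to_wake_up() happens under pi_lock:

#include <stdio.h>

enum task_state { TASK_RUNNING, TASK_INTERRUPTIBLE, TASK_UNINTERRUPTIBLE };

struct task_sketch {
	enum task_state state;
	enum task_state saved_state;
};

/* Entering the spinlock slow path: park whatever state the task was in. */
static void save_state_for_lock_wait(struct task_sketch *t)
{
	t->saved_state = t->state;
	t->state = TASK_UNINTERRUPTIBLE;	/* sleep only for the lock itself */
}

/* A real (non-lock) wakeup racing with the lock wait updates the parked
 * state instead, roughly what try_to_wake_up() does for such tasks. */
static void real_wakeup(struct task_sketch *t)
{
	t->saved_state = TASK_RUNNING;
}

/* Lock acquired: restore whatever the parked state has become. */
static void restore_state_after_lock(struct task_sketch *t)
{
	t->state = t->saved_state;
	t->saved_state = TASK_RUNNING;
}

int main(void)
{
	struct task_sketch t = { .state = TASK_INTERRUPTIBLE };

	save_state_for_lock_wait(&t);
	real_wakeup(&t);			/* wakeup arrives while blocked on the lock */
	restore_state_after_lock(&t);
	printf("state=%d\n", t.state);		/* TASK_RUNNING: the wakeup survived */
	return 0;
}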
13385 +static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
13386 +                                   struct wake_q_head *wake_sleeper_q,
13387 +                                   struct rt_mutex *lock);
13388 +/*
13389 + * Slow path to release a rt_mutex spin_lock style
13390 + */
13391 +static int noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
13392 +{
13393 +       unsigned long flags;
13394 +       WAKE_Q(wake_q);
13395 +       WAKE_Q(wake_sleeper_q);
13396 +
13397 +       raw_spin_lock_irqsave(&lock->wait_lock, flags);
13398 +
13399 +       debug_rt_mutex_unlock(lock);
13400 +
13401 +       rt_mutex_deadlock_account_unlock(current);
13402 +
13403 +       if (!rt_mutex_has_waiters(lock)) {
13404 +               lock->owner = NULL;
13405 +               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
13406 +               return 0;
13407 +       }
13408 +
13409 +       mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
13410 +
13411 +       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
13412 +       wake_up_q(&wake_q);
13413 +       wake_up_q_sleeper(&wake_sleeper_q);
13414 +
13415 +       /* Undo pi boosting when necessary */
13416 +       rt_mutex_adjust_prio(current);
13417 +       return 0;
13418 +}
13419 +
13420 +static int noinline __sched rt_spin_lock_slowunlock_no_deboost(struct rt_mutex *lock)
13421 +{
13422 +       unsigned long flags;
13423 +       WAKE_Q(wake_q);
13424 +       WAKE_Q(wake_sleeper_q);
13425 +
13426 +       raw_spin_lock_irqsave(&lock->wait_lock, flags);
13427 +
13428 +       debug_rt_mutex_unlock(lock);
13429 +
13430 +       rt_mutex_deadlock_account_unlock(current);
13431 +
13432 +       if (!rt_mutex_has_waiters(lock)) {
13433 +               lock->owner = NULL;
13434 +               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
13435 +               return 0;
13436 +       }
13437 +
13438 +       mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
13439 +
13440 +       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
13441 +       wake_up_q(&wake_q);
13442 +       wake_up_q_sleeper(&wake_sleeper_q);
13443 +       return 1;
13444 +}
13445 +
13446 +void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
13447 +{
13448 +       rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false);
13449 +       spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
13450 +}
13451 +EXPORT_SYMBOL(rt_spin_lock__no_mg);
13452 +
13453 +void __lockfunc rt_spin_lock(spinlock_t *lock)
13454 +{
13455 +       rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
13456 +       spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
13457 +}
13458 +EXPORT_SYMBOL(rt_spin_lock);
13459 +
13460 +void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
13461 +{
13462 +       rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true);
13463 +}
13464 +EXPORT_SYMBOL(__rt_spin_lock);
13465 +
13466 +void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
13467 +{
13468 +       rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false);
13469 +}
13470 +EXPORT_SYMBOL(__rt_spin_lock__no_mg);
13471 +
13472 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
13473 +void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
13474 +{
13475 +       spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
13476 +       rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
13477 +}
13478 +EXPORT_SYMBOL(rt_spin_lock_nested);
13479 +#endif
13480 +
13481 +void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
13482 +{
13483 +       /* NOTE: we always pass in '1' for nested, for simplicity */
13484 +       spin_release(&lock->dep_map, 1, _RET_IP_);
13485 +       rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
13486 +}
13487 +EXPORT_SYMBOL(rt_spin_unlock__no_mg);
13488 +
13489 +void __lockfunc rt_spin_unlock(spinlock_t *lock)
13490 +{
13491 +       /* NOTE: we always pass in '1' for nested, for simplicity */
13492 +       spin_release(&lock->dep_map, 1, _RET_IP_);
13493 +       rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
13494 +       migrate_enable();
13495 +}
13496 +EXPORT_SYMBOL(rt_spin_unlock);
13497 +
13498 +int __lockfunc rt_spin_unlock_no_deboost(spinlock_t *lock)
13499 +{
13500 +       int ret;
13501 +
13502 +       /* NOTE: we always pass in '1' for nested, for simplicity */
13503 +       spin_release(&lock->dep_map, 1, _RET_IP_);
13504 +       ret = rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_no_deboost);
13505 +       migrate_enable();
13506 +       return ret;
13507 +}
13508 +
13509 +void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
13510 +{
13511 +       rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
13512 +}
13513 +EXPORT_SYMBOL(__rt_spin_unlock);
13514 +
13515 +/*
13516 + * Wait for the lock to get unlocked: instead of polling for an unlock
13517 + * (like raw spinlocks do), we lock and unlock, to force the kernel to
13518 + * schedule if there's contention:
13519 + */
13520 +void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
13521 +{
13522 +       spin_lock(lock);
13523 +       spin_unlock(lock);
13524 +}
13525 +EXPORT_SYMBOL(rt_spin_unlock_wait);
13526 +
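The comment above rt_spin_unlock_wait() is the whole story: instead of polling the lock word as raw spinlocks do, acquire and release the lock once, which on PREEMPT_RT simply schedules until the current holder is finished. The idiom in isolation, with a pthread mutex standing in for the converted spinlock:

#include <pthread.h>
#include <stdio.h>

/* Same idea as rt_spin_unlock_wait(): if the lock is contended, taking it
 * blocks until the current holder drops it; releasing it immediately
 * afterwards leaves the lock exactly as we found it. */
static void unlock_wait_sketch(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	pthread_mutex_unlock(lock);
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	unlock_wait_sketch(&lock);	/* uncontended here: returns immediately */
	printf("no holder left\n");
	return 0;
}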
13527 +int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
13528 +{
13529 +       int ret;
13530 +
13531 +       ret = rt_mutex_trylock(&lock->lock);
13532 +       if (ret)
13533 +               spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
13534 +       return ret;
13535 +}
13536 +EXPORT_SYMBOL(rt_spin_trylock__no_mg);
13537 +
13538 +int __lockfunc rt_spin_trylock(spinlock_t *lock)
13539 +{
13540 +       int ret;
13541 +
13542 +       migrate_disable();
13543 +       ret = rt_mutex_trylock(&lock->lock);
13544 +       if (ret)
13545 +               spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
13546 +       else
13547 +               migrate_enable();
13548 +       return ret;
13549 +}
13550 +EXPORT_SYMBOL(rt_spin_trylock);
13551 +
13552 +int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
13553 +{
13554 +       int ret;
13555 +
13556 +       local_bh_disable();
13557 +       ret = rt_mutex_trylock(&lock->lock);
13558 +       if (ret) {
13559 +               migrate_disable();
13560 +               spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
13561 +       } else
13562 +               local_bh_enable();
13563 +       return ret;
13564 +}
13565 +EXPORT_SYMBOL(rt_spin_trylock_bh);
13566 +
13567 +int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
13568 +{
13569 +       int ret;
13570 +
13571 +       *flags = 0;
13572 +       ret = rt_mutex_trylock(&lock->lock);
13573 +       if (ret) {
13574 +               migrate_disable();
13575 +               spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
13576 +       }
13577 +       return ret;
13578 +}
13579 +EXPORT_SYMBOL(rt_spin_trylock_irqsave);
13580 +
13581 +int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
13582 +{
13583 +       /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
13584 +       if (atomic_add_unless(atomic, -1, 1))
13585 +               return 0;
13586 +       rt_spin_lock(lock);
13587 +       if (atomic_dec_and_test(atomic))
13588 +               return 1;
13589 +       rt_spin_unlock(lock);
13590 +       return 0;
13591 +}
13592 +EXPORT_SYMBOL(atomic_dec_and_spin_lock);
13593 +
13594 +void
13595 +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
13596 +{
13597 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
13598 +       /*
13599 +        * Make sure we are not reinitializing a held lock:
13600 +        */
13601 +       debug_check_no_locks_freed((void *)lock, sizeof(*lock));
13602 +       lockdep_init_map(&lock->dep_map, name, key, 0);
13603 +#endif
13604 +}
13605 +EXPORT_SYMBOL(__rt_spin_lock_init);
13606 +
13607 +#endif /* PREEMPT_RT_FULL */
13608 +
13609 +#ifdef CONFIG_PREEMPT_RT_FULL
13610 +static inline int __sched
13611 +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
13612 +{
13613 +       struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
13614 +       struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
13615 +
13616 +       if (!hold_ctx)
13617 +               return 0;
13618 +
13619 +       if (unlikely(ctx == hold_ctx))
13620 +               return -EALREADY;
13621 +
13622 +       if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
13623 +           (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
13624 +#ifdef CONFIG_DEBUG_MUTEXES
13625 +               DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
13626 +               ctx->contending_lock = ww;
13627 +#endif
13628 +               return -EDEADLK;
13629 +       }
13630 +
13631 +       return 0;
13632 +}
13633 +#else
13634 +static inline int __sched
13635 +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
13636 +{
13637 +       BUG();
13638 +       return 0;
13639 +}
13640 +
13641 +#endif
13642 +
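The stamp comparison in __mutex_lock_check_stamp() above is the wait/wound ordering rule for ww_mutexes: re-locking a mutex already held by our own context yields -EALREADY, and a context that started later (larger stamp, with wraparound handled by the unsigned subtraction) must back off with -EDEADLK, while an older context simply keeps waiting. A stand-alone sketch of just that ordering decision; the error constants and struct name are illustrative, and the tie-break on equal stamps mirrors the kernel's pointer comparison:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define SK_EALREADY	114
#define SK_EDEADLK	 35

struct acquire_ctx_sketch {
	unsigned long stamp;	/* monotonically increasing acquisition stamp */
};

static int check_stamp(const struct acquire_ctx_sketch *ctx,
		       const struct acquire_ctx_sketch *hold_ctx)
{
	if (!hold_ctx)
		return 0;			/* lock not held by any ww context */
	if (ctx == hold_ctx)
		return -SK_EALREADY;		/* we already hold this lock */
	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp ||
	     (uintptr_t)ctx > (uintptr_t)hold_ctx))
		return -SK_EDEADLK;		/* we are younger: back off */
	return 0;				/* we are older: keep waiting */
}

int main(void)
{
	struct acquire_ctx_sketch older   = { .stamp = 10 };
	struct acquire_ctx_sketch younger = { .stamp = 20 };

	printf("%d\n", check_stamp(&younger, &older));	/* -EDEADLK: back off */
	printf("%d\n", check_stamp(&older, &younger));	/* 0: older ctx waits  */
	printf("%d\n", check_stamp(&older, &older));	/* -EALREADY           */
	return 0;
}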
13643 +static inline int
13644 +try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
13645 +                    struct rt_mutex_waiter *waiter)
13646 +{
13647 +       return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
13648 +}
13649 +
13650  /*
13651   * Task blocks on lock.
13652   *
13653 @@ -971,6 +1440,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
13654                 return -EDEADLK;
13655  
13656         raw_spin_lock(&task->pi_lock);
13657 +
13658 +       /*
13659 +        * In the case of futex requeue PI, this will be a proxy
13660 +        * lock. The task will wake unaware that it is enqueued on
13661 +        * this lock. Avoid blocking on two locks and corrupting
13662 +        * pi_blocked_on via the PI_WAKEUP_INPROGRESS
13663 +        * flag. futex_wait_requeue_pi() sets this when it wakes up
13664 +        * before requeue (due to a signal or timeout). Do not enqueue
13665 +        * the task if PI_WAKEUP_INPROGRESS is set.
13666 +        */
13667 +       if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
13668 +               raw_spin_unlock(&task->pi_lock);
13669 +               return -EAGAIN;
13670 +       }
13671 +
13672 +       BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
13673 +
13674         __rt_mutex_adjust_prio(task);
13675         waiter->task = task;
13676         waiter->lock = lock;
13677 @@ -994,7 +1480,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
13678                 rt_mutex_enqueue_pi(owner, waiter);
13679  
13680                 __rt_mutex_adjust_prio(owner);
13681 -               if (owner->pi_blocked_on)
13682 +               if (rt_mutex_real_waiter(owner->pi_blocked_on))
13683                         chain_walk = 1;
13684         } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
13685                 chain_walk = 1;
13686 @@ -1036,6 +1522,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
13687   * Called with lock->wait_lock held and interrupts disabled.
13688   */
13689  static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
13690 +                                   struct wake_q_head *wake_sleeper_q,
13691                                     struct rt_mutex *lock)
13692  {
13693         struct rt_mutex_waiter *waiter;
13694 @@ -1064,7 +1551,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
13695  
13696         raw_spin_unlock(&current->pi_lock);
13697  
13698 -       wake_q_add(wake_q, waiter->task);
13699 +       if (waiter->savestate)
13700 +               wake_q_add(wake_sleeper_q, waiter->task);
13701 +       else
13702 +               wake_q_add(wake_q, waiter->task);
13703  }
13704  
13705  /*
13706 @@ -1078,7 +1568,7 @@ static void remove_waiter(struct rt_mutex *lock,
13707  {
13708         bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
13709         struct task_struct *owner = rt_mutex_owner(lock);
13710 -       struct rt_mutex *next_lock;
13711 +       struct rt_mutex *next_lock = NULL;
13712  
13713         raw_spin_lock(&current->pi_lock);
13714         rt_mutex_dequeue(lock, waiter);
13715 @@ -1102,7 +1592,8 @@ static void remove_waiter(struct rt_mutex *lock,
13716         __rt_mutex_adjust_prio(owner);
13717  
13718         /* Store the lock on which owner is blocked or NULL */
13719 -       next_lock = task_blocked_on_lock(owner);
13720 +       if (rt_mutex_real_waiter(owner->pi_blocked_on))
13721 +               next_lock = task_blocked_on_lock(owner);
13722  
13723         raw_spin_unlock(&owner->pi_lock);
13724  
13725 @@ -1138,17 +1629,17 @@ void rt_mutex_adjust_pi(struct task_struct *task)
13726         raw_spin_lock_irqsave(&task->pi_lock, flags);
13727  
13728         waiter = task->pi_blocked_on;
13729 -       if (!waiter || (waiter->prio == task->prio &&
13730 +       if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio &&
13731                         !dl_prio(task->prio))) {
13732                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
13733                 return;
13734         }
13735         next_lock = waiter->lock;
13736 -       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
13737  
13738         /* gets dropped in rt_mutex_adjust_prio_chain()! */
13739         get_task_struct(task);
13740  
13741 +       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
13742         rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
13743                                    next_lock, NULL, task);
13744  }
13745 @@ -1166,7 +1657,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
13746  static int __sched
13747  __rt_mutex_slowlock(struct rt_mutex *lock, int state,
13748                     struct hrtimer_sleeper *timeout,
13749 -                   struct rt_mutex_waiter *waiter)
13750 +                   struct rt_mutex_waiter *waiter,
13751 +                   struct ww_acquire_ctx *ww_ctx)
13752  {
13753         int ret = 0;
13754  
13755 @@ -1189,6 +1681,12 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
13756                                 break;
13757                 }
13758  
13759 +               if (ww_ctx && ww_ctx->acquired > 0) {
13760 +                       ret = __mutex_lock_check_stamp(lock, ww_ctx);
13761 +                       if (ret)
13762 +                               break;
13763 +               }
13764 +
13765                 raw_spin_unlock_irq(&lock->wait_lock);
13766  
13767                 debug_rt_mutex_print_deadlock(waiter);
13768 @@ -1223,21 +1721,96 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
13769         }
13770  }
13771  
13772 +static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
13773 +                                                  struct ww_acquire_ctx *ww_ctx)
13774 +{
13775 +#ifdef CONFIG_DEBUG_MUTEXES
13776 +       /*
13777 +        * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
13778 +        * but released with a normal mutex_unlock in this call.
13779 +        *
13780 +        * This should never happen, always use ww_mutex_unlock.
13781 +        */
13782 +       DEBUG_LOCKS_WARN_ON(ww->ctx);
13783 +
13784 +       /*
13785 +        * Not quite done after calling ww_acquire_done() ?
13786 +        */
13787 +       DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
13788 +
13789 +       if (ww_ctx->contending_lock) {
13790 +               /*
13791 +                * After -EDEADLK you tried to
13792 +                * acquire a different ww_mutex? Bad!
13793 +                */
13794 +               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
13795 +
13796 +               /*
13797 +                * You called ww_mutex_lock after receiving -EDEADLK,
13798 +                * but 'forgot' to unlock everything else first?
13799 +                */
13800 +               DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
13801 +               ww_ctx->contending_lock = NULL;
13802 +       }
13803 +
13804 +       /*
13805 +        * Naughty, using a different class will lead to undefined behavior!
13806 +        */
13807 +       DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
13808 +#endif
13809 +       ww_ctx->acquired++;
13810 +}
13811 +
13812 +#ifdef CONFIG_PREEMPT_RT_FULL
13813 +static void ww_mutex_account_lock(struct rt_mutex *lock,
13814 +                                 struct ww_acquire_ctx *ww_ctx)
13815 +{
13816 +       struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
13817 +       struct rt_mutex_waiter *waiter, *n;
13818 +
13819 +       /*
13820 +        * This branch gets optimized out for the common case,
13821 +        * and is only important for ww_mutex_lock.
13822 +        */
13823 +       ww_mutex_lock_acquired(ww, ww_ctx);
13824 +       ww->ctx = ww_ctx;
13825 +
13826 +       /*
13827 +        * Give any possible sleeping processes the chance to wake up,
13828 +        * so they can recheck if they have to back off.
13829 +        */
13830 +       rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
13831 +                                            tree_entry) {
13832 +               /* XXX debug rt mutex waiter wakeup */
13833 +
13834 +               BUG_ON(waiter->lock != lock);
13835 +               rt_mutex_wake_waiter(waiter);
13836 +       }
13837 +}
13838 +
13839 +#else
13840 +
13841 +static void ww_mutex_account_lock(struct rt_mutex *lock,
13842 +                                 struct ww_acquire_ctx *ww_ctx)
13843 +{
13844 +       BUG();
13845 +}
13846 +#endif
13847 +
13848  /*
13849   * Slow path lock function:
13850   */
13851  static int __sched
13852  rt_mutex_slowlock(struct rt_mutex *lock, int state,
13853                   struct hrtimer_sleeper *timeout,
13854 -                 enum rtmutex_chainwalk chwalk)
13855 +                 enum rtmutex_chainwalk chwalk,
13856 +                 struct ww_acquire_ctx *ww_ctx)
13857  {
13858         struct rt_mutex_waiter waiter;
13859         unsigned long flags;
13860         int ret = 0;
13861  
13862 -       debug_rt_mutex_init_waiter(&waiter);
13863 -       RB_CLEAR_NODE(&waiter.pi_tree_entry);
13864 -       RB_CLEAR_NODE(&waiter.tree_entry);
13865 +       rt_mutex_init_waiter(&waiter, false);
13866  
13867         /*
13868          * Technically we could use raw_spin_[un]lock_irq() here, but this can
13869 @@ -1251,6 +1824,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
13870  
13871         /* Try to acquire the lock again: */
13872         if (try_to_take_rt_mutex(lock, current, NULL)) {
13873 +               if (ww_ctx)
13874 +                       ww_mutex_account_lock(lock, ww_ctx);
13875                 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
13876                 return 0;
13877         }
13878 @@ -1265,13 +1840,23 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
13879  
13880         if (likely(!ret))
13881                 /* sleep on the mutex */
13882 -               ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
13883 +               ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
13884 +                                         ww_ctx);
13885 +       else if (ww_ctx) {
13886 +               /* ww_mutex received EDEADLK, let it become EALREADY */
13887 +               ret = __mutex_lock_check_stamp(lock, ww_ctx);
13888 +               BUG_ON(!ret);
13889 +       }
13890  
13891         if (unlikely(ret)) {
13892                 __set_current_state(TASK_RUNNING);
13893                 if (rt_mutex_has_waiters(lock))
13894                         remove_waiter(lock, &waiter);
13895 -               rt_mutex_handle_deadlock(ret, chwalk, &waiter);
13896 +               /* ww_mutex wants to report EDEADLK/EALREADY, let it */
13897 +               if (!ww_ctx)
13898 +                       rt_mutex_handle_deadlock(ret, chwalk, &waiter);
13899 +       } else if (ww_ctx) {
13900 +               ww_mutex_account_lock(lock, ww_ctx);
13901         }
13902  
13903         /*
13904 @@ -1331,7 +1916,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
13905   * Return whether the current task needs to undo a potential priority boosting.
13906   */
13907  static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
13908 -                                       struct wake_q_head *wake_q)
13909 +                                       struct wake_q_head *wake_q,
13910 +                                       struct wake_q_head *wake_sleeper_q)
13911  {
13912         unsigned long flags;
13913  
13914 @@ -1387,7 +1973,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
13915          *
13916          * Queue the next waiter for wakeup once we release the wait_lock.
13917          */
13918 -       mark_wakeup_next_waiter(wake_q, lock);
13919 +       mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
13920  
13921         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
13922  
13923 @@ -1403,31 +1989,36 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
13924   */
13925  static inline int
13926  rt_mutex_fastlock(struct rt_mutex *lock, int state,
13927 +                 struct ww_acquire_ctx *ww_ctx,
13928                   int (*slowfn)(struct rt_mutex *lock, int state,
13929                                 struct hrtimer_sleeper *timeout,
13930 -                               enum rtmutex_chainwalk chwalk))
13931 +                               enum rtmutex_chainwalk chwalk,
13932 +                               struct ww_acquire_ctx *ww_ctx))
13933  {
13934         if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
13935                 rt_mutex_deadlock_account_lock(lock, current);
13936                 return 0;
13937         } else
13938 -               return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
13939 +               return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK,
13940 +                             ww_ctx);
13941  }
13942  
13943  static inline int
13944  rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
13945                         struct hrtimer_sleeper *timeout,
13946                         enum rtmutex_chainwalk chwalk,
13947 +                       struct ww_acquire_ctx *ww_ctx,
13948                         int (*slowfn)(struct rt_mutex *lock, int state,
13949                                       struct hrtimer_sleeper *timeout,
13950 -                                     enum rtmutex_chainwalk chwalk))
13951 +                                     enum rtmutex_chainwalk chwalk,
13952 +                                     struct ww_acquire_ctx *ww_ctx))
13953  {
13954         if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
13955             likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
13956                 rt_mutex_deadlock_account_lock(lock, current);
13957                 return 0;
13958         } else
13959 -               return slowfn(lock, state, timeout, chwalk);
13960 +               return slowfn(lock, state, timeout, chwalk, ww_ctx);
13961  }
13962  
13963  static inline int
13964 @@ -1444,17 +2035,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
13965  static inline void
13966  rt_mutex_fastunlock(struct rt_mutex *lock,
13967                     bool (*slowfn)(struct rt_mutex *lock,
13968 -                                  struct wake_q_head *wqh))
13969 +                                  struct wake_q_head *wqh,
13970 +                                  struct wake_q_head *wq_sleeper))
13971  {
13972         WAKE_Q(wake_q);
13973 +       WAKE_Q(wake_sleeper_q);
13974  
13975         if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
13976                 rt_mutex_deadlock_account_unlock(current);
13977  
13978         } else {
13979 -               bool deboost = slowfn(lock, &wake_q);
13980 +               bool deboost = slowfn(lock, &wake_q, &wake_sleeper_q);
13981  
13982                 wake_up_q(&wake_q);
13983 +               wake_up_q_sleeper(&wake_sleeper_q);
13984  
13985                 /* Undo pi boosting if necessary: */
13986                 if (deboost)
13987 @@ -1471,7 +2065,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
13988  {
13989         might_sleep();
13990  
13991 -       rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
13992 +       rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
13993  }
13994  EXPORT_SYMBOL_GPL(rt_mutex_lock);
13995  
13996 @@ -1488,7 +2082,7 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
13997  {
13998         might_sleep();
13999  
14000 -       return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
14001 +       return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock);
14002  }
14003  EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
14004  
14005 @@ -1501,11 +2095,30 @@ int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
14006         might_sleep();
14007  
14008         return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
14009 -                                      RT_MUTEX_FULL_CHAINWALK,
14010 +                                      RT_MUTEX_FULL_CHAINWALK, NULL,
14011                                        rt_mutex_slowlock);
14012  }
14013  
14014  /**
14015 + * rt_mutex_lock_killable - lock a rt_mutex killable
14016 + *
14017 + * @lock:              the rt_mutex to be locked
14018 + *
14019 + *
14020 + * Returns:
14021 + *  0          on success
14022 + * -EINTR      when interrupted by a signal
14023 + * -EDEADLK    when the lock would deadlock (when deadlock detection is on)
14024 + */
14025 +int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
14026 +{
14027 +       might_sleep();
14028 +
14029 +       return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock);
14030 +}
14031 +EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
14032 +
14033 +/**
14034   * rt_mutex_timed_lock - lock a rt_mutex interruptible
14035   *                     the timeout structure is provided
14036   *                     by the caller
14037 @@ -1525,6 +2138,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
14038  
14039         return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
14040                                        RT_MUTEX_MIN_CHAINWALK,
14041 +                                      NULL,
14042                                        rt_mutex_slowlock);
14043  }
14044  EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
14045 @@ -1542,7 +2156,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
14046   */
14047  int __sched rt_mutex_trylock(struct rt_mutex *lock)
14048  {
14049 +#ifdef CONFIG_PREEMPT_RT_FULL
14050 +       if (WARN_ON_ONCE(in_irq() || in_nmi()))
14051 +#else
14052         if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
14053 +#endif
14054                 return 0;
14055  
14056         return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
14057 @@ -1568,13 +2186,14 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
14058   * required or not.
14059   */
14060  bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
14061 -                                  struct wake_q_head *wqh)
14062 +                                  struct wake_q_head *wqh,
14063 +                                  struct wake_q_head *wq_sleeper)
14064  {
14065         if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
14066                 rt_mutex_deadlock_account_unlock(current);
14067                 return false;
14068         }
14069 -       return rt_mutex_slowunlock(lock, wqh);
14070 +       return rt_mutex_slowunlock(lock, wqh, wq_sleeper);
14071  }
14072  
14073  /**
14074 @@ -1607,13 +2226,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
14075  void __rt_mutex_init(struct rt_mutex *lock, const char *name)
14076  {
14077         lock->owner = NULL;
14078 -       raw_spin_lock_init(&lock->wait_lock);
14079         lock->waiters = RB_ROOT;
14080         lock->waiters_leftmost = NULL;
14081  
14082         debug_rt_mutex_init(lock, name);
14083  }
14084 -EXPORT_SYMBOL_GPL(__rt_mutex_init);
14085 +EXPORT_SYMBOL(__rt_mutex_init);
14086  
14087  /**
14088   * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
14089 @@ -1628,7 +2246,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
14090  void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
14091                                 struct task_struct *proxy_owner)
14092  {
14093 -       __rt_mutex_init(lock, NULL);
14094 +       rt_mutex_init(lock);
14095         debug_rt_mutex_proxy_lock(lock, proxy_owner);
14096         rt_mutex_set_owner(lock, proxy_owner);
14097         rt_mutex_deadlock_account_lock(lock, proxy_owner);
14098 @@ -1676,6 +2294,35 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
14099                 return 1;
14100         }
14101  
14102 +#ifdef CONFIG_PREEMPT_RT_FULL
14103 +       /*
14104 +        * In PREEMPT_RT there's an added race.
14105 +        * If the task that we are about to requeue times out,
14106 +        * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
14107 +        * to skip this task. But right after the task sets
14108 +        * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
14109 +        * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
14110 +        * This will replace the PI_WAKEUP_INPROGRESS with the actual
14111 +        * lock that it blocks on. We *must not* place this task
14112 +        * on this proxy lock in that case.
14113 +        *
14114 +        * To prevent this race, we first take the task's pi_lock
14115 +        * and check if it has updated its pi_blocked_on. If it has,
14116 +        * we assume that it woke up and we return -EAGAIN.
14117 +        * Otherwise, we set the task's pi_blocked_on to
14118 +        * PI_REQUEUE_INPROGRESS, so that if the task is waking up
14119 +        * it will know that we are in the process of requeuing it.
14120 +        */
14121 +       raw_spin_lock(&task->pi_lock);
14122 +       if (task->pi_blocked_on) {
14123 +               raw_spin_unlock(&task->pi_lock);
14124 +               raw_spin_unlock_irq(&lock->wait_lock);
14125 +               return -EAGAIN;
14126 +       }
14127 +       task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
14128 +       raw_spin_unlock(&task->pi_lock);
14129 +#endif
14130 +
14131         /* We enforce deadlock detection for futexes */
14132         ret = task_blocks_on_rt_mutex(lock, waiter, task,
14133                                       RT_MUTEX_FULL_CHAINWALK);
14134 @@ -1690,7 +2337,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
14135                 ret = 0;
14136         }
14137  
14138 -       if (unlikely(ret))
14139 +       if (ret && rt_mutex_has_waiters(lock))
14140                 remove_waiter(lock, waiter);
14141  
14142         raw_spin_unlock_irq(&lock->wait_lock);
14143 @@ -1746,7 +2393,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
14144         set_current_state(TASK_INTERRUPTIBLE);
14145  
14146         /* sleep on the mutex */
14147 -       ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
14148 +       ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
14149  
14150         if (unlikely(ret))
14151                 remove_waiter(lock, waiter);
14152 @@ -1761,3 +2408,89 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
14153  
14154         return ret;
14155  }
14156 +
14157 +static inline int
14158 +ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
14159 +{
14160 +#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
14161 +       unsigned tmp;
14162 +
14163 +       if (ctx->deadlock_inject_countdown-- == 0) {
14164 +               tmp = ctx->deadlock_inject_interval;
14165 +               if (tmp > UINT_MAX/4)
14166 +                       tmp = UINT_MAX;
14167 +               else
14168 +                       tmp = tmp*2 + tmp + tmp/2;
14169 +
14170 +               ctx->deadlock_inject_interval = tmp;
14171 +               ctx->deadlock_inject_countdown = tmp;
14172 +               ctx->contending_lock = lock;
14173 +
14174 +               ww_mutex_unlock(lock);
14175 +
14176 +               return -EDEADLK;
14177 +       }
14178 +#endif
14179 +
14180 +       return 0;
14181 +}
14182 +
14183 +#ifdef CONFIG_PREEMPT_RT_FULL
14184 +int __sched
14185 +__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
14186 +{
14187 +       int ret;
14188 +
14189 +       might_sleep();
14190 +
14191 +       mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
14192 +       ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
14193 +       if (ret)
14194 +               mutex_release(&lock->base.dep_map, 1, _RET_IP_);
14195 +       else if (!ret && ww_ctx->acquired > 1)
14196 +               return ww_mutex_deadlock_injection(lock, ww_ctx);
14197 +
14198 +       return ret;
14199 +}
14200 +EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
14201 +
14202 +int __sched
14203 +__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
14204 +{
14205 +       int ret;
14206 +
14207 +       might_sleep();
14208 +
14209 +       mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
14210 +       ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
14211 +       if (ret)
14212 +               mutex_release(&lock->base.dep_map, 1, _RET_IP_);
14213 +       else if (!ret && ww_ctx->acquired > 1)
14214 +               return ww_mutex_deadlock_injection(lock, ww_ctx);
14215 +
14216 +       return ret;
14217 +}
14218 +EXPORT_SYMBOL_GPL(__ww_mutex_lock);
14219 +
14220 +void __sched ww_mutex_unlock(struct ww_mutex *lock)
14221 +{
14222 +       int nest = !!lock->ctx;
14223 +
14224 +       /*
14225 +        * The unlocking fastpath is the 0->1 transition from 'locked'
14226 +        * into 'unlocked' state:
14227 +        */
14228 +       if (nest) {
14229 +#ifdef CONFIG_DEBUG_MUTEXES
14230 +               DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
14231 +#endif
14232 +               if (lock->ctx->acquired > 0)
14233 +                       lock->ctx->acquired--;
14234 +               lock->ctx = NULL;
14235 +       }
14236 +
14237 +       mutex_release(&lock->base.dep_map, nest, _RET_IP_);
14238 +       rt_mutex_unlock(&lock->base.lock);
14239 +}
14240 +EXPORT_SYMBOL(ww_mutex_unlock);
14241 +#endif
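
The hunk above gives PREEMPT_RT its own ww_mutex implementation on top of rt_mutex while keeping the generic wound/wait API. As a reading aid (not part of the patch; the demo_* identifiers are invented and the sketch assumes only the stock <linux/ww_mutex.h> interface), this is the caller-side protocol that the -EDEADLK/-EALREADY handling above serves: take the locks in any order, and on -EDEADLK drop everything, block on the contended lock with ww_mutex_lock_slow(), then retry.

#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

struct demo_obj {
        struct ww_mutex lock;   /* set up elsewhere with ww_mutex_init(&obj->lock, &demo_ww_class) */
        int val;
};

/* Lock two objects in either order; back off and retry on -EDEADLK. */
static int demo_lock_pair(struct demo_obj *first, struct demo_obj *second)
{
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &demo_ww_class);
retry:
        ret = ww_mutex_lock(&first->lock, &ctx);
        if (ret == -EALREADY)
                ret = 0;                /* still held from the backoff path below */
        if (ret)
                goto out_fini;

        ret = ww_mutex_lock(&second->lock, &ctx);
        if (ret == -EDEADLK) {
                /* Wounded: release, sleep on the contended lock, retry. */
                ww_mutex_unlock(&first->lock);
                ww_mutex_lock_slow(&second->lock, &ctx);
                swap(first, second);
                goto retry;
        }
        if (ret) {
                ww_mutex_unlock(&first->lock);
                goto out_fini;
        }

        ww_acquire_done(&ctx);
        /* ... both objects are held, do the real work here ... */
        ww_mutex_unlock(&second->lock);
        ww_mutex_unlock(&first->lock);
out_fini:
        ww_acquire_fini(&ctx);
        return ret;
}

The ww_mutex_deadlock_injection() helper added above exercises exactly this backoff path under CONFIG_DEBUG_WW_MUTEX_SLOWPATH by periodically returning -EDEADLK on its own.
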
14242 diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
14243 index e317e1cbb3eb..f457c7574920 100644
14244 --- a/kernel/locking/rtmutex_common.h
14245 +++ b/kernel/locking/rtmutex_common.h
14246 @@ -27,6 +27,7 @@ struct rt_mutex_waiter {
14247         struct rb_node          pi_tree_entry;
14248         struct task_struct      *task;
14249         struct rt_mutex         *lock;
14250 +       bool                    savestate;
14251  #ifdef CONFIG_DEBUG_RT_MUTEXES
14252         unsigned long           ip;
14253         struct pid              *deadlock_task_pid;
14254 @@ -98,6 +99,9 @@ enum rtmutex_chainwalk {
14255  /*
14256   * PI-futex support (proxy locking functions, etc.):
14257   */
14258 +#define PI_WAKEUP_INPROGRESS   ((struct rt_mutex_waiter *) 1)
14259 +#define PI_REQUEUE_INPROGRESS  ((struct rt_mutex_waiter *) 2)
14260 +
14261  extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
14262  extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
14263                                        struct task_struct *proxy_owner);
14264 @@ -111,7 +115,8 @@ extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
14265                                       struct rt_mutex_waiter *waiter);
14266  extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
14267  extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
14268 -                                 struct wake_q_head *wqh);
14269 +                                 struct wake_q_head *wqh,
14270 +                                 struct wake_q_head *wq_sleeper);
14271  extern void rt_mutex_adjust_prio(struct task_struct *task);
14272  
14273  #ifdef CONFIG_DEBUG_RT_MUTEXES
14274 @@ -120,4 +125,14 @@ extern void rt_mutex_adjust_prio(struct task_struct *task);
14275  # include "rtmutex.h"
14276  #endif
14277  
14278 +static inline void
14279 +rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
14280 +{
14281 +       debug_rt_mutex_init_waiter(waiter);
14282 +       waiter->task = NULL;
14283 +       waiter->savestate = savestate;
14284 +       RB_CLEAR_NODE(&waiter->pi_tree_entry);
14285 +       RB_CLEAR_NODE(&waiter->tree_entry);
14286 +}
14287 +
14288  #endif
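
rt_mutex_init_waiter() now takes a savestate flag so that the PREEMPT_RT sleeping-spinlock slow path can preserve and later restore the blocked task's state, while futex and plain rt_mutex callers keep the existing behaviour. A minimal illustration of the two call styles (illustrative only; the real callers live in other hunks of this patch):

#include "rtmutex_common.h"

static void demo_init_waiters(void)
{
        struct rt_mutex_waiter mutex_waiter, spin_waiter;

        /* Plain rt_mutex / futex paths: ordinary sleep semantics. */
        rt_mutex_init_waiter(&mutex_waiter, false);

        /*
         * The rt_spin_lock slow path presumably passes true so the
         * waiter machinery saves the interrupted task state.
         */
        rt_mutex_init_waiter(&spin_waiter, true);
}
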
14289 diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
14290 index db3ccb1dd614..909779647bd1 100644
14291 --- a/kernel/locking/spinlock.c
14292 +++ b/kernel/locking/spinlock.c
14293 @@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)           \
14294   *         __[spin|read|write]_lock_bh()
14295   */
14296  BUILD_LOCK_OPS(spin, raw_spinlock);
14297 +
14298 +#ifndef CONFIG_PREEMPT_RT_FULL
14299  BUILD_LOCK_OPS(read, rwlock);
14300  BUILD_LOCK_OPS(write, rwlock);
14301 +#endif
14302  
14303  #endif
14304  
14305 @@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
14306  EXPORT_SYMBOL(_raw_spin_unlock_bh);
14307  #endif
14308  
14309 +#ifndef CONFIG_PREEMPT_RT_FULL
14310 +
14311  #ifndef CONFIG_INLINE_READ_TRYLOCK
14312  int __lockfunc _raw_read_trylock(rwlock_t *lock)
14313  {
14314 @@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
14315  EXPORT_SYMBOL(_raw_write_unlock_bh);
14316  #endif
14317  
14318 +#endif /* !PREEMPT_RT_FULL */
14319 +
14320  #ifdef CONFIG_DEBUG_LOCK_ALLOC
14321  
14322  void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
14323 diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
14324 index 0374a596cffa..94970338d518 100644
14325 --- a/kernel/locking/spinlock_debug.c
14326 +++ b/kernel/locking/spinlock_debug.c
14327 @@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
14328  
14329  EXPORT_SYMBOL(__raw_spin_lock_init);
14330  
14331 +#ifndef CONFIG_PREEMPT_RT_FULL
14332  void __rwlock_init(rwlock_t *lock, const char *name,
14333                    struct lock_class_key *key)
14334  {
14335 @@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
14336  }
14337  
14338  EXPORT_SYMBOL(__rwlock_init);
14339 +#endif
14340  
14341  static void spin_dump(raw_spinlock_t *lock, const char *msg)
14342  {
14343 @@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
14344         arch_spin_unlock(&lock->raw_lock);
14345  }
14346  
14347 +#ifndef CONFIG_PREEMPT_RT_FULL
14348  static void rwlock_bug(rwlock_t *lock, const char *msg)
14349  {
14350         if (!debug_locks_off())
14351 @@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock)
14352         debug_write_unlock(lock);
14353         arch_write_unlock(&lock->raw_lock);
14354  }
14355 +
14356 +#endif
14357 diff --git a/kernel/module.c b/kernel/module.c
14358 index 0e54d5bf0097..3483a3743b44 100644
14359 --- a/kernel/module.c
14360 +++ b/kernel/module.c
14361 @@ -660,16 +660,7 @@ static void percpu_modcopy(struct module *mod,
14362                 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
14363  }
14364  
14365 -/**
14366 - * is_module_percpu_address - test whether address is from module static percpu
14367 - * @addr: address to test
14368 - *
14369 - * Test whether @addr belongs to module static percpu area.
14370 - *
14371 - * RETURNS:
14372 - * %true if @addr is from module static percpu area
14373 - */
14374 -bool is_module_percpu_address(unsigned long addr)
14375 +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
14376  {
14377         struct module *mod;
14378         unsigned int cpu;
14379 @@ -683,9 +674,11 @@ bool is_module_percpu_address(unsigned long addr)
14380                         continue;
14381                 for_each_possible_cpu(cpu) {
14382                         void *start = per_cpu_ptr(mod->percpu, cpu);
14383 +                       void *va = (void *)addr;
14384  
14385 -                       if ((void *)addr >= start &&
14386 -                           (void *)addr < start + mod->percpu_size) {
14387 +                       if (va >= start && va < start + mod->percpu_size) {
14388 +                               if (can_addr)
14389 +                                       *can_addr = (unsigned long) (va - start);
14390                                 preempt_enable();
14391                                 return true;
14392                         }
14393 @@ -696,6 +689,20 @@ bool is_module_percpu_address(unsigned long addr)
14394         return false;
14395  }
14396  
14397 +/**
14398 + * is_module_percpu_address - test whether address is from module static percpu
14399 + * @addr: address to test
14400 + *
14401 + * Test whether @addr belongs to module static percpu area.
14402 + *
14403 + * RETURNS:
14404 + * %true if @addr is from module static percpu area
14405 + */
14406 +bool is_module_percpu_address(unsigned long addr)
14407 +{
14408 +       return __is_module_percpu_address(addr, NULL);
14409 +}
14410 +
14411  #else /* ... !CONFIG_SMP */
14412  
14413  static inline void __percpu *mod_percpu(struct module *mod)
14414 @@ -727,6 +734,11 @@ bool is_module_percpu_address(unsigned long addr)
14415         return false;
14416  }
14417  
14418 +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
14419 +{
14420 +       return false;
14421 +}
14422 +
14423  #endif /* CONFIG_SMP */
14424  
14425  #define MODINFO_ATTR(field)    \
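
The reworked module helper reports not only whether an address lies in a module's static per-cpu area but also, through the new can_addr argument, its offset inside that area, which is the same on every CPU. A hedged sketch of a caller follows (demo_percpu_key() is invented, and it assumes the matching __is_module_percpu_address() declaration that this series adds to <linux/module.h>; in the RT tree the intended consumer is lockdep, which needs a CPU-independent key for statically allocated per-cpu locks):

#include <linux/module.h>

/* Turn a pointer into some CPU's copy of module per-cpu data into a stable key. */
static unsigned long demo_percpu_key(unsigned long addr)
{
        unsigned long can_addr;

        if (__is_module_percpu_address(addr, &can_addr))
                return can_addr;        /* offset in the per-cpu area, CPU-independent */

        return addr;                    /* not module per-cpu: use the address itself */
}
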
14426 diff --git a/kernel/panic.c b/kernel/panic.c
14427 index e6480e20379e..7e9c1918a94e 100644
14428 --- a/kernel/panic.c
14429 +++ b/kernel/panic.c
14430 @@ -482,9 +482,11 @@ static u64 oops_id;
14431  
14432  static int init_oops_id(void)
14433  {
14434 +#ifndef CONFIG_PREEMPT_RT_FULL
14435         if (!oops_id)
14436                 get_random_bytes(&oops_id, sizeof(oops_id));
14437         else
14438 +#endif
14439                 oops_id++;
14440  
14441         return 0;
14442 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
14443 index b26dbc48c75b..968255f27a33 100644
14444 --- a/kernel/power/hibernate.c
14445 +++ b/kernel/power/hibernate.c
14446 @@ -286,6 +286,8 @@ static int create_image(int platform_mode)
14447  
14448         local_irq_disable();
14449  
14450 +       system_state = SYSTEM_SUSPEND;
14451 +
14452         error = syscore_suspend();
14453         if (error) {
14454                 printk(KERN_ERR "PM: Some system devices failed to power down, "
14455 @@ -317,6 +319,7 @@ static int create_image(int platform_mode)
14456         syscore_resume();
14457  
14458   Enable_irqs:
14459 +       system_state = SYSTEM_RUNNING;
14460         local_irq_enable();
14461  
14462   Enable_cpus:
14463 @@ -446,6 +449,7 @@ static int resume_target_kernel(bool platform_mode)
14464                 goto Enable_cpus;
14465  
14466         local_irq_disable();
14467 +       system_state = SYSTEM_SUSPEND;
14468  
14469         error = syscore_suspend();
14470         if (error)
14471 @@ -479,6 +483,7 @@ static int resume_target_kernel(bool platform_mode)
14472         syscore_resume();
14473  
14474   Enable_irqs:
14475 +       system_state = SYSTEM_RUNNING;
14476         local_irq_enable();
14477  
14478   Enable_cpus:
14479 @@ -564,6 +569,7 @@ int hibernation_platform_enter(void)
14480                 goto Enable_cpus;
14481  
14482         local_irq_disable();
14483 +       system_state = SYSTEM_SUSPEND;
14484         syscore_suspend();
14485         if (pm_wakeup_pending()) {
14486                 error = -EAGAIN;
14487 @@ -576,6 +582,7 @@ int hibernation_platform_enter(void)
14488  
14489   Power_up:
14490         syscore_resume();
14491 +       system_state = SYSTEM_RUNNING;
14492         local_irq_enable();
14493  
14494   Enable_cpus:
14495 @@ -676,6 +683,10 @@ static int load_image_and_restore(void)
14496         return error;
14497  }
14498  
14499 +#ifndef CONFIG_SUSPEND
14500 +bool pm_in_action;
14501 +#endif
14502 +
14503  /**
14504   * hibernate - Carry out system hibernation, including saving the image.
14505   */
14506 @@ -689,6 +700,8 @@ int hibernate(void)
14507                 return -EPERM;
14508         }
14509  
14510 +       pm_in_action = true;
14511 +
14512         lock_system_sleep();
14513         /* The snapshot device should not be opened while we're running */
14514         if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
14515 @@ -766,6 +779,7 @@ int hibernate(void)
14516         atomic_inc(&snapshot_device_available);
14517   Unlock:
14518         unlock_system_sleep();
14519 +       pm_in_action = false;
14520         return error;
14521  }
14522  
14523 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
14524 index 6ccb08f57fcb..c8cbb5ed2fe3 100644
14525 --- a/kernel/power/suspend.c
14526 +++ b/kernel/power/suspend.c
14527 @@ -369,6 +369,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
14528         arch_suspend_disable_irqs();
14529         BUG_ON(!irqs_disabled());
14530  
14531 +       system_state = SYSTEM_SUSPEND;
14532 +
14533         error = syscore_suspend();
14534         if (!error) {
14535                 *wakeup = pm_wakeup_pending();
14536 @@ -385,6 +387,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
14537                 syscore_resume();
14538         }
14539  
14540 +       system_state = SYSTEM_RUNNING;
14541 +
14542         arch_suspend_enable_irqs();
14543         BUG_ON(irqs_disabled());
14544  
14545 @@ -527,6 +531,8 @@ static int enter_state(suspend_state_t state)
14546         return error;
14547  }
14548  
14549 +bool pm_in_action;
14550 +
14551  /**
14552   * pm_suspend - Externally visible function for suspending the system.
14553   * @state: System sleep state to enter.
14554 @@ -541,6 +547,8 @@ int pm_suspend(suspend_state_t state)
14555         if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
14556                 return -EINVAL;
14557  
14558 +       pm_in_action = true;
14559 +
14560         error = enter_state(state);
14561         if (error) {
14562                 suspend_stats.fail++;
14563 @@ -548,6 +556,7 @@ int pm_suspend(suspend_state_t state)
14564         } else {
14565                 suspend_stats.success++;
14566         }
14567 +       pm_in_action = false;
14568         return error;
14569  }
14570  EXPORT_SYMBOL(pm_suspend);
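
Both the hibernation and suspend paths now set system_state to SYSTEM_SUSPEND for the interrupts-disabled window around syscore_suspend()/syscore_resume() (the enum value itself is introduced elsewhere in this patch), and pm_in_action flags a sleep transition as a whole. A sketch of the kind of guard this enables on PREEMPT_RT, where spinlock_t is a sleeping lock; struct demo_dev and demo_count_event() are invented:

#include <linux/kernel.h>
#include <linux/spinlock.h>

struct demo_dev {
        spinlock_t lock;        /* a sleeping rt_mutex under PREEMPT_RT_FULL */
        unsigned long events;
};

static void demo_count_event(struct demo_dev *dev)
{
        /*
         * Inside the SYSTEM_SUSPEND window interrupts are hard-disabled,
         * so a sleeping "spinlock" must not be taken; drop the event.
         */
        if (system_state == SYSTEM_SUSPEND)
                return;

        spin_lock(&dev->lock);
        dev->events++;
        spin_unlock(&dev->lock);
}
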
14571 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
14572 index 9c5b231684d0..cf15bdb6855b 100644
14573 --- a/kernel/printk/printk.c
14574 +++ b/kernel/printk/printk.c
14575 @@ -351,6 +351,65 @@ __packed __aligned(4)
14576   */
14577  DEFINE_RAW_SPINLOCK(logbuf_lock);
14578  
14579 +#ifdef CONFIG_EARLY_PRINTK
14580 +struct console *early_console;
14581 +
14582 +static void early_vprintk(const char *fmt, va_list ap)
14583 +{
14584 +       if (early_console) {
14585 +               char buf[512];
14586 +               int n = vscnprintf(buf, sizeof(buf), fmt, ap);
14587 +
14588 +               early_console->write(early_console, buf, n);
14589 +       }
14590 +}
14591 +
14592 +asmlinkage void early_printk(const char *fmt, ...)
14593 +{
14594 +       va_list ap;
14595 +
14596 +       va_start(ap, fmt);
14597 +       early_vprintk(fmt, ap);
14598 +       va_end(ap);
14599 +}
14600 +
14601 +/*
14602 + * This is independent of any log levels - a global
14603 + * kill switch that turns off all of printk.
14604 + *
14605 + * Used by the NMI watchdog if early-printk is enabled.
14606 + */
14607 +static bool __read_mostly printk_killswitch;
14608 +
14609 +static int __init force_early_printk_setup(char *str)
14610 +{
14611 +       printk_killswitch = true;
14612 +       return 0;
14613 +}
14614 +early_param("force_early_printk", force_early_printk_setup);
14615 +
14616 +void printk_kill(void)
14617 +{
14618 +       printk_killswitch = true;
14619 +}
14620 +
14621 +#ifdef CONFIG_PRINTK
14622 +static int forced_early_printk(const char *fmt, va_list ap)
14623 +{
14624 +       if (!printk_killswitch)
14625 +               return 0;
14626 +       early_vprintk(fmt, ap);
14627 +       return 1;
14628 +}
14629 +#endif
14630 +
14631 +#else
14632 +static inline int forced_early_printk(const char *fmt, va_list ap)
14633 +{
14634 +       return 0;
14635 +}
14636 +#endif
14637 +
14638  #ifdef CONFIG_PRINTK
14639  DECLARE_WAIT_QUEUE_HEAD(log_wait);
14640  /* the next printk record to read by syslog(READ) or /proc/kmsg */
14641 @@ -1337,6 +1396,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
14642  {
14643         char *text;
14644         int len = 0;
14645 +       int attempts = 0;
14646  
14647         text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
14648         if (!text)
14649 @@ -1348,6 +1408,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
14650                 u64 seq;
14651                 u32 idx;
14652                 enum log_flags prev;
14653 +               int num_msg;
14654 +try_again:
14655 +               attempts++;
14656 +               if (attempts > 10) {
14657 +                       len = -EBUSY;
14658 +                       goto out;
14659 +               }
14660 +               num_msg = 0;
14661  
14662                 /*
14663                  * Find first record that fits, including all following records,
14664 @@ -1363,6 +1431,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
14665                         prev = msg->flags;
14666                         idx = log_next(idx);
14667                         seq++;
14668 +                       num_msg++;
14669 +                       if (num_msg > 5) {
14670 +                               num_msg = 0;
14671 +                               raw_spin_unlock_irq(&logbuf_lock);
14672 +                               raw_spin_lock_irq(&logbuf_lock);
14673 +                               if (clear_seq < log_first_seq)
14674 +                                       goto try_again;
14675 +                       }
14676                 }
14677  
14678                 /* move first record forward until length fits into the buffer */
14679 @@ -1376,6 +1452,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
14680                         prev = msg->flags;
14681                         idx = log_next(idx);
14682                         seq++;
14683 +                       num_msg++;
14684 +                       if (num_msg > 5) {
14685 +                               num_msg = 0;
14686 +                               raw_spin_unlock_irq(&logbuf_lock);
14687 +                               raw_spin_lock_irq(&logbuf_lock);
14688 +                               if (clear_seq < log_first_seq)
14689 +                                       goto try_again;
14690 +                       }
14691                 }
14692  
14693                 /* last message fitting into this dump */
14694 @@ -1416,6 +1500,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
14695                 clear_seq = log_next_seq;
14696                 clear_idx = log_next_idx;
14697         }
14698 +out:
14699         raw_spin_unlock_irq(&logbuf_lock);
14700  
14701         kfree(text);
14702 @@ -1569,6 +1654,12 @@ static void call_console_drivers(int level,
14703         if (!console_drivers)
14704                 return;
14705  
14706 +       if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
14707 +               if (in_irq() || in_nmi())
14708 +                       return;
14709 +       }
14710 +
14711 +       migrate_disable();
14712         for_each_console(con) {
14713                 if (exclusive_console && con != exclusive_console)
14714                         continue;
14715 @@ -1584,6 +1675,7 @@ static void call_console_drivers(int level,
14716                 else
14717                         con->write(con, text, len);
14718         }
14719 +       migrate_enable();
14720  }
14721  
14722  /*
14723 @@ -1781,6 +1873,13 @@ asmlinkage int vprintk_emit(int facility, int level,
14724         /* cpu currently holding logbuf_lock in this function */
14725         static unsigned int logbuf_cpu = UINT_MAX;
14726  
14727 +       /*
14728 +        * Fall back to early_printk if a debugging subsystem has
14729 +        * killed printk output
14730 +        */
14731 +       if (unlikely(forced_early_printk(fmt, args)))
14732 +               return 1;
14733 +
14734         if (level == LOGLEVEL_SCHED) {
14735                 level = LOGLEVEL_DEFAULT;
14736                 in_sched = true;
14737 @@ -1885,13 +1984,23 @@ asmlinkage int vprintk_emit(int facility, int level,
14738  
14739         /* If called from the scheduler, we can not call up(). */
14740         if (!in_sched) {
14741 +               int may_trylock = 1;
14742 +
14743                 lockdep_off();
14744 +#ifdef CONFIG_PREEMPT_RT_FULL
14745 +               /*
14746 +                * we can't take a sleeping lock with IRQs or preemption disabled
14747 +                * so we can't print in these contexts
14748 +                */
14749 +               if (!(preempt_count() == 0 && !irqs_disabled()))
14750 +                       may_trylock = 0;
14751 +#endif
14752                 /*
14753                  * Try to acquire and then immediately release the console
14754                  * semaphore.  The release will print out buffers and wake up
14755                  * /dev/kmsg and syslog() users.
14756                  */
14757 -               if (console_trylock())
14758 +               if (may_trylock && console_trylock())
14759                         console_unlock();
14760                 lockdep_on();
14761         }
14762 @@ -2014,26 +2123,6 @@ DEFINE_PER_CPU(printk_func_t, printk_func);
14763  
14764  #endif /* CONFIG_PRINTK */
14765  
14766 -#ifdef CONFIG_EARLY_PRINTK
14767 -struct console *early_console;
14768 -
14769 -asmlinkage __visible void early_printk(const char *fmt, ...)
14770 -{
14771 -       va_list ap;
14772 -       char buf[512];
14773 -       int n;
14774 -
14775 -       if (!early_console)
14776 -               return;
14777 -
14778 -       va_start(ap, fmt);
14779 -       n = vscnprintf(buf, sizeof(buf), fmt, ap);
14780 -       va_end(ap);
14781 -
14782 -       early_console->write(early_console, buf, n);
14783 -}
14784 -#endif
14785 -
14786  static int __add_preferred_console(char *name, int idx, char *options,
14787                                    char *brl_options)
14788  {
14789 @@ -2303,11 +2392,16 @@ static void console_cont_flush(char *text, size_t size)
14790                 goto out;
14791  
14792         len = cont_print_text(text, size);
14793 +#ifdef CONFIG_PREEMPT_RT_FULL
14794 +       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
14795 +       call_console_drivers(cont.level, NULL, 0, text, len);
14796 +#else
14797         raw_spin_unlock(&logbuf_lock);
14798         stop_critical_timings();
14799         call_console_drivers(cont.level, NULL, 0, text, len);
14800         start_critical_timings();
14801         local_irq_restore(flags);
14802 +#endif
14803         return;
14804  out:
14805         raw_spin_unlock_irqrestore(&logbuf_lock, flags);
14806 @@ -2431,13 +2525,17 @@ void console_unlock(void)
14807                 console_idx = log_next(console_idx);
14808                 console_seq++;
14809                 console_prev = msg->flags;
14810 +#ifdef CONFIG_PREEMPT_RT_FULL
14811 +               raw_spin_unlock_irqrestore(&logbuf_lock, flags);
14812 +               call_console_drivers(level, ext_text, ext_len, text, len);
14813 +#else
14814                 raw_spin_unlock(&logbuf_lock);
14815  
14816                 stop_critical_timings();        /* don't trace print latency */
14817                 call_console_drivers(level, ext_text, ext_len, text, len);
14818                 start_critical_timings();
14819                 local_irq_restore(flags);
14820 -
14821 +#endif
14822                 if (do_cond_resched)
14823                         cond_resched();
14824         }
14825 @@ -2489,6 +2587,11 @@ void console_unblank(void)
14826  {
14827         struct console *c;
14828  
14829 +       if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
14830 +               if (in_irq() || in_nmi())
14831 +                       return;
14832 +       }
14833 +
14834         /*
14835          * console_unblank can no longer be called in interrupt context unless
14836          * oops_in_progress is set to 1..
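
The printk changes add a synchronous early_printk() path plus a kill switch: once printk_kill() has been called, or force_early_printk is given on the command line together with an early console, vprintk_emit() short-circuits into early_vprintk() instead of taking the printk locks. A sketch of how a lockup detector might use it (demo_report_stall() is invented and assumes the printk_kill()/early_printk() declarations this series adds to <linux/printk.h>):

#include <linux/printk.h>
#include <linux/smp.h>

static void demo_report_stall(void)
{
        /*
         * We may be interrupting a CPU that holds logbuf_lock or the
         * console lock, so stop routing output through printk() first.
         */
        printk_kill();

        early_printk("demo: CPU%d appears stuck\n", raw_smp_processor_id());
        dump_stack();   /* its printk() calls now fall back to early_vprintk() */
}
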
14837 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
14838 index 49ba7c1ade9d..44f44b47ec07 100644
14839 --- a/kernel/ptrace.c
14840 +++ b/kernel/ptrace.c
14841 @@ -166,7 +166,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
14842  
14843         spin_lock_irq(&task->sighand->siglock);
14844         if (task_is_traced(task) && !__fatal_signal_pending(task)) {
14845 -               task->state = __TASK_TRACED;
14846 +               unsigned long flags;
14847 +
14848 +               raw_spin_lock_irqsave(&task->pi_lock, flags);
14849 +               if (task->state & __TASK_TRACED)
14850 +                       task->state = __TASK_TRACED;
14851 +               else
14852 +                       task->saved_state = __TASK_TRACED;
14853 +               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
14854                 ret = true;
14855         }
14856         spin_unlock_irq(&task->sighand->siglock);
14857 diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
14858 index bf08fee53dc7..eeb8ce4ad7b6 100644
14859 --- a/kernel/rcu/rcutorture.c
14860 +++ b/kernel/rcu/rcutorture.c
14861 @@ -404,6 +404,7 @@ static struct rcu_torture_ops rcu_ops = {
14862         .name           = "rcu"
14863  };
14864  
14865 +#ifndef CONFIG_PREEMPT_RT_FULL
14866  /*
14867   * Definitions for rcu_bh torture testing.
14868   */
14869 @@ -443,6 +444,12 @@ static struct rcu_torture_ops rcu_bh_ops = {
14870         .name           = "rcu_bh"
14871  };
14872  
14873 +#else
14874 +static struct rcu_torture_ops rcu_bh_ops = {
14875 +       .ttype          = INVALID_RCU_FLAVOR,
14876 +};
14877 +#endif
14878 +
14879  /*
14880   * Don't even think about trying any of these in real life!!!
14881   * The names includes "busted", and they really means it!
14882 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
14883 index 10f62c6f48e7..dbee19478f09 100644
14884 --- a/kernel/rcu/tree.c
14885 +++ b/kernel/rcu/tree.c
14886 @@ -55,6 +55,11 @@
14887  #include <linux/random.h>
14888  #include <linux/trace_events.h>
14889  #include <linux/suspend.h>
14890 +#include <linux/delay.h>
14891 +#include <linux/gfp.h>
14892 +#include <linux/oom.h>
14893 +#include <linux/smpboot.h>
14894 +#include "../time/tick-internal.h"
14895  
14896  #include "tree.h"
14897  #include "rcu.h"
14898 @@ -260,6 +265,19 @@ void rcu_sched_qs(void)
14899                            this_cpu_ptr(&rcu_sched_data), true);
14900  }
14901  
14902 +#ifdef CONFIG_PREEMPT_RT_FULL
14903 +static void rcu_preempt_qs(void);
14904 +
14905 +void rcu_bh_qs(void)
14906 +{
14907 +       unsigned long flags;
14908 +
14909 +       /* Callers to this function, rcu_preempt_qs(), must disable irqs. */
14910 +       local_irq_save(flags);
14911 +       rcu_preempt_qs();
14912 +       local_irq_restore(flags);
14913 +}
14914 +#else
14915  void rcu_bh_qs(void)
14916  {
14917         if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
14918 @@ -269,6 +287,7 @@ void rcu_bh_qs(void)
14919                 __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
14920         }
14921  }
14922 +#endif
14923  
14924  static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
14925  
14926 @@ -449,11 +468,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
14927  /*
14928   * Return the number of RCU BH batches started thus far for debug & stats.
14929   */
14930 +#ifndef CONFIG_PREEMPT_RT_FULL
14931  unsigned long rcu_batches_started_bh(void)
14932  {
14933         return rcu_bh_state.gpnum;
14934  }
14935  EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
14936 +#endif
14937  
14938  /*
14939   * Return the number of RCU batches completed thus far for debug & stats.
14940 @@ -473,6 +494,7 @@ unsigned long rcu_batches_completed_sched(void)
14941  }
14942  EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
14943  
14944 +#ifndef CONFIG_PREEMPT_RT_FULL
14945  /*
14946   * Return the number of RCU BH batches completed thus far for debug & stats.
14947   */
14948 @@ -481,6 +503,7 @@ unsigned long rcu_batches_completed_bh(void)
14949         return rcu_bh_state.completed;
14950  }
14951  EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
14952 +#endif
14953  
14954  /*
14955   * Return the number of RCU expedited batches completed thus far for
14956 @@ -504,6 +527,7 @@ unsigned long rcu_exp_batches_completed_sched(void)
14957  }
14958  EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
14959  
14960 +#ifndef CONFIG_PREEMPT_RT_FULL
14961  /*
14962   * Force a quiescent state.
14963   */
14964 @@ -522,6 +546,13 @@ void rcu_bh_force_quiescent_state(void)
14965  }
14966  EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
14967  
14968 +#else
14969 +void rcu_force_quiescent_state(void)
14970 +{
14971 +}
14972 +EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
14973 +#endif
14974 +
14975  /*
14976   * Force a quiescent state for RCU-sched.
14977   */
14978 @@ -572,9 +603,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
14979         case RCU_FLAVOR:
14980                 rsp = rcu_state_p;
14981                 break;
14982 +#ifndef CONFIG_PREEMPT_RT_FULL
14983         case RCU_BH_FLAVOR:
14984                 rsp = &rcu_bh_state;
14985                 break;
14986 +#endif
14987         case RCU_SCHED_FLAVOR:
14988                 rsp = &rcu_sched_state;
14989                 break;
14990 @@ -3016,18 +3049,17 @@ __rcu_process_callbacks(struct rcu_state *rsp)
14991  /*
14992   * Do RCU core processing for the current CPU.
14993   */
14994 -static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
14995 +static __latent_entropy void rcu_process_callbacks(void)
14996  {
14997         struct rcu_state *rsp;
14998  
14999         if (cpu_is_offline(smp_processor_id()))
15000                 return;
15001 -       trace_rcu_utilization(TPS("Start RCU core"));
15002         for_each_rcu_flavor(rsp)
15003                 __rcu_process_callbacks(rsp);
15004 -       trace_rcu_utilization(TPS("End RCU core"));
15005  }
15006  
15007 +static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
15008  /*
15009   * Schedule RCU callback invocation.  If the specified type of RCU
15010   * does not support RCU priority boosting, just do a direct call,
15011 @@ -3039,19 +3071,106 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
15012  {
15013         if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
15014                 return;
15015 -       if (likely(!rsp->boost)) {
15016 -               rcu_do_batch(rsp, rdp);
15017 -               return;
15018 -       }
15019 -       invoke_rcu_callbacks_kthread();
15020 +       rcu_do_batch(rsp, rdp);
15021  }
15022  
15023 +static void rcu_wake_cond(struct task_struct *t, int status)
15024 +{
15025 +       /*
15026 +        * If the thread is yielding, only wake it when this
15027 +        * is invoked from idle
15028 +        */
15029 +       if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
15030 +               wake_up_process(t);
15031 +}
15032 +
15033 +/*
15034 + * Wake up this CPU's rcuc kthread to do RCU core processing.
15035 + */
15036  static void invoke_rcu_core(void)
15037  {
15038 -       if (cpu_online(smp_processor_id()))
15039 -               raise_softirq(RCU_SOFTIRQ);
15040 +       unsigned long flags;
15041 +       struct task_struct *t;
15042 +
15043 +       if (!cpu_online(smp_processor_id()))
15044 +               return;
15045 +       local_irq_save(flags);
15046 +       __this_cpu_write(rcu_cpu_has_work, 1);
15047 +       t = __this_cpu_read(rcu_cpu_kthread_task);
15048 +       if (t != NULL && current != t)
15049 +               rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
15050 +       local_irq_restore(flags);
15051  }
15052  
15053 +static void rcu_cpu_kthread_park(unsigned int cpu)
15054 +{
15055 +       per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
15056 +}
15057 +
15058 +static int rcu_cpu_kthread_should_run(unsigned int cpu)
15059 +{
15060 +       return __this_cpu_read(rcu_cpu_has_work);
15061 +}
15062 +
15063 +/*
15064 + * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
15065 + * RCU softirq used in flavors and configurations of RCU that do not
15066 + * support RCU priority boosting.
15067 + */
15068 +static void rcu_cpu_kthread(unsigned int cpu)
15069 +{
15070 +       unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
15071 +       char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
15072 +       int spincnt;
15073 +
15074 +       for (spincnt = 0; spincnt < 10; spincnt++) {
15075 +               trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
15076 +               local_bh_disable();
15077 +               *statusp = RCU_KTHREAD_RUNNING;
15078 +               this_cpu_inc(rcu_cpu_kthread_loops);
15079 +               local_irq_disable();
15080 +               work = *workp;
15081 +               *workp = 0;
15082 +               local_irq_enable();
15083 +               if (work)
15084 +                       rcu_process_callbacks();
15085 +               local_bh_enable();
15086 +               if (*workp == 0) {
15087 +                       trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
15088 +                       *statusp = RCU_KTHREAD_WAITING;
15089 +                       return;
15090 +               }
15091 +       }
15092 +       *statusp = RCU_KTHREAD_YIELDING;
15093 +       trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
15094 +       schedule_timeout_interruptible(2);
15095 +       trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
15096 +       *statusp = RCU_KTHREAD_WAITING;
15097 +}
15098 +
15099 +static struct smp_hotplug_thread rcu_cpu_thread_spec = {
15100 +       .store                  = &rcu_cpu_kthread_task,
15101 +       .thread_should_run      = rcu_cpu_kthread_should_run,
15102 +       .thread_fn              = rcu_cpu_kthread,
15103 +       .thread_comm            = "rcuc/%u",
15104 +       .setup                  = rcu_cpu_kthread_setup,
15105 +       .park                   = rcu_cpu_kthread_park,
15106 +};
15107 +
15108 +/*
15109 + * Spawn per-CPU RCU core processing kthreads.
15110 + */
15111 +static int __init rcu_spawn_core_kthreads(void)
15112 +{
15113 +       int cpu;
15114 +
15115 +       for_each_possible_cpu(cpu)
15116 +               per_cpu(rcu_cpu_has_work, cpu) = 0;
15117 +       BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
15118 +       return 0;
15119 +}
15120 +early_initcall(rcu_spawn_core_kthreads);
15121 +
15122  /*
15123   * Handle any core-RCU processing required by a call_rcu() invocation.
15124   */
15125 @@ -3195,6 +3314,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
15126  }
15127  EXPORT_SYMBOL_GPL(call_rcu_sched);
15128  
15129 +#ifndef CONFIG_PREEMPT_RT_FULL
15130  /*
15131   * Queue an RCU callback for invocation after a quicker grace period.
15132   */
15133 @@ -3203,6 +3323,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
15134         __call_rcu(head, func, &rcu_bh_state, -1, 0);
15135  }
15136  EXPORT_SYMBOL_GPL(call_rcu_bh);
15137 +#endif
15138  
15139  /*
15140   * Queue an RCU callback for lazy invocation after a grace period.
15141 @@ -3294,6 +3415,7 @@ void synchronize_sched(void)
15142  }
15143  EXPORT_SYMBOL_GPL(synchronize_sched);
15144  
15145 +#ifndef CONFIG_PREEMPT_RT_FULL
15146  /**
15147   * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
15148   *
15149 @@ -3320,6 +3442,7 @@ void synchronize_rcu_bh(void)
15150                 wait_rcu_gp(call_rcu_bh);
15151  }
15152  EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
15153 +#endif
15154  
15155  /**
15156   * get_state_synchronize_rcu - Snapshot current RCU state
15157 @@ -3698,6 +3821,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
15158         mutex_unlock(&rsp->barrier_mutex);
15159  }
15160  
15161 +#ifndef CONFIG_PREEMPT_RT_FULL
15162  /**
15163   * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
15164   */
15165 @@ -3706,6 +3830,7 @@ void rcu_barrier_bh(void)
15166         _rcu_barrier(&rcu_bh_state);
15167  }
15168  EXPORT_SYMBOL_GPL(rcu_barrier_bh);
15169 +#endif
15170  
15171  /**
15172   * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
15173 @@ -4227,12 +4352,13 @@ void __init rcu_init(void)
15174  
15175         rcu_bootup_announce();
15176         rcu_init_geometry();
15177 +#ifndef CONFIG_PREEMPT_RT_FULL
15178         rcu_init_one(&rcu_bh_state);
15179 +#endif
15180         rcu_init_one(&rcu_sched_state);
15181         if (dump_tree)
15182                 rcu_dump_rcu_node_tree(&rcu_sched_state);
15183         __rcu_init_preempt();
15184 -       open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
15185  
15186         /*
15187          * We don't need protection against CPU-hotplug here because
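
With PREEMPT_RT_FULL the rcu_bh entry points above disappear from tree.c: softirq handlers run in preemptible thread context, so a separate bh flavor buys nothing, and RCU core processing itself moves from RCU_SOFTIRQ to the per-CPU rcuc/%u kthreads registered above. Code meant to build on both configurations is safest written against the flavor-agnostic API, as in this sketch (struct demo_node and the demo_* helpers are invented):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_node {
        int key;
        struct rcu_head rcu;
};

static void demo_node_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct demo_node, rcu));
}

static void demo_node_retire(struct demo_node *n)
{
        /*
         * call_rcu() and rcu_read_lock() behave the same with and without
         * PREEMPT_RT_FULL, with no dependency on the rcu_bh flavor that is
         * compiled out above.
         */
        call_rcu(&n->rcu, demo_node_free_rcu);
}
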
15188 diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
15189 index e99a5234d9ed..958ac107062c 100644
15190 --- a/kernel/rcu/tree.h
15191 +++ b/kernel/rcu/tree.h
15192 @@ -588,18 +588,18 @@ extern struct list_head rcu_struct_flavors;
15193   */
15194  extern struct rcu_state rcu_sched_state;
15195  
15196 +#ifndef CONFIG_PREEMPT_RT_FULL
15197  extern struct rcu_state rcu_bh_state;
15198 +#endif
15199  
15200  #ifdef CONFIG_PREEMPT_RCU
15201  extern struct rcu_state rcu_preempt_state;
15202  #endif /* #ifdef CONFIG_PREEMPT_RCU */
15203  
15204 -#ifdef CONFIG_RCU_BOOST
15205  DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
15206  DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
15207  DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
15208  DECLARE_PER_CPU(char, rcu_cpu_has_work);
15209 -#endif /* #ifdef CONFIG_RCU_BOOST */
15210  
15211  #ifndef RCU_TREE_NONCORE
15212  
15213 @@ -619,10 +619,9 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
15214  static void __init __rcu_init_preempt(void);
15215  static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
15216  static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
15217 -static void invoke_rcu_callbacks_kthread(void);
15218  static bool rcu_is_callbacks_kthread(void);
15219 +static void rcu_cpu_kthread_setup(unsigned int cpu);
15220  #ifdef CONFIG_RCU_BOOST
15221 -static void rcu_preempt_do_callbacks(void);
15222  static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
15223                                                  struct rcu_node *rnp);
15224  #endif /* #ifdef CONFIG_RCU_BOOST */
15225 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
15226 index 56583e764ebf..7c656f8e192f 100644
15227 --- a/kernel/rcu/tree_plugin.h
15228 +++ b/kernel/rcu/tree_plugin.h
15229 @@ -24,25 +24,10 @@
15230   *        Paul E. McKenney <paulmck@linux.vnet.ibm.com>
15231   */
15232  
15233 -#include <linux/delay.h>
15234 -#include <linux/gfp.h>
15235 -#include <linux/oom.h>
15236 -#include <linux/smpboot.h>
15237 -#include "../time/tick-internal.h"
15238 -
15239  #ifdef CONFIG_RCU_BOOST
15240  
15241  #include "../locking/rtmutex_common.h"
15242  
15243 -/*
15244 - * Control variables for per-CPU and per-rcu_node kthreads.  These
15245 - * handle all flavors of RCU.
15246 - */
15247 -static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
15248 -DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
15249 -DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
15250 -DEFINE_PER_CPU(char, rcu_cpu_has_work);
15251 -
15252  #else /* #ifdef CONFIG_RCU_BOOST */
15253  
15254  /*
15255 @@ -55,6 +40,14 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
15256  
15257  #endif /* #else #ifdef CONFIG_RCU_BOOST */
15258  
15259 +/*
15260 + * Control variables for per-CPU and per-rcu_node kthreads.  These
15261 + * handle all flavors of RCU.
15262 + */
15263 +DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
15264 +DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
15265 +DEFINE_PER_CPU(char, rcu_cpu_has_work);
15266 +
15267  #ifdef CONFIG_RCU_NOCB_CPU
15268  static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
15269  static bool have_rcu_nocb_mask;            /* Was rcu_nocb_mask allocated? */
15270 @@ -426,7 +419,7 @@ void rcu_read_unlock_special(struct task_struct *t)
15271         }
15272  
15273         /* Hardware IRQ handlers cannot block, complain if they get here. */
15274 -       if (in_irq() || in_serving_softirq()) {
15275 +       if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
15276                 lockdep_rcu_suspicious(__FILE__, __LINE__,
15277                                        "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
15278                 pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
15279 @@ -632,15 +625,6 @@ static void rcu_preempt_check_callbacks(void)
15280                 t->rcu_read_unlock_special.b.need_qs = true;
15281  }
15282  
15283 -#ifdef CONFIG_RCU_BOOST
15284 -
15285 -static void rcu_preempt_do_callbacks(void)
15286 -{
15287 -       rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
15288 -}
15289 -
15290 -#endif /* #ifdef CONFIG_RCU_BOOST */
15291 -
15292  /*
15293   * Queue a preemptible-RCU callback for invocation after a grace period.
15294   */
15295 @@ -829,6 +813,19 @@ void exit_rcu(void)
15296  
15297  #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
15298  
15299 +/*
15300 + * If boosting, set rcuc kthreads to realtime priority.
15301 + */
15302 +static void rcu_cpu_kthread_setup(unsigned int cpu)
15303 +{
15304 +#ifdef CONFIG_RCU_BOOST
15305 +       struct sched_param sp;
15306 +
15307 +       sp.sched_priority = kthread_prio;
15308 +       sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
15309 +#endif /* #ifdef CONFIG_RCU_BOOST */
15310 +}
15311 +
15312  #ifdef CONFIG_RCU_BOOST
15313  
15314  #include "../locking/rtmutex_common.h"
15315 @@ -860,16 +857,6 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
15316  
15317  #endif /* #else #ifdef CONFIG_RCU_TRACE */
15318  
15319 -static void rcu_wake_cond(struct task_struct *t, int status)
15320 -{
15321 -       /*
15322 -        * If the thread is yielding, only wake it when this
15323 -        * is invoked from idle
15324 -        */
15325 -       if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
15326 -               wake_up_process(t);
15327 -}
15328 -
15329  /*
15330   * Carry out RCU priority boosting on the task indicated by ->exp_tasks
15331   * or ->boost_tasks, advancing the pointer to the next task in the
15332 @@ -1013,23 +1000,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
15333  }
15334  
15335  /*
15336 - * Wake up the per-CPU kthread to invoke RCU callbacks.
15337 - */
15338 -static void invoke_rcu_callbacks_kthread(void)
15339 -{
15340 -       unsigned long flags;
15341 -
15342 -       local_irq_save(flags);
15343 -       __this_cpu_write(rcu_cpu_has_work, 1);
15344 -       if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
15345 -           current != __this_cpu_read(rcu_cpu_kthread_task)) {
15346 -               rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
15347 -                             __this_cpu_read(rcu_cpu_kthread_status));
15348 -       }
15349 -       local_irq_restore(flags);
15350 -}
15351 -
15352 -/*
15353   * Is the current CPU running the RCU-callbacks kthread?
15354   * Caller must have preemption disabled.
15355   */
15356 @@ -1083,67 +1053,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
15357         return 0;
15358  }
15359  
15360 -static void rcu_kthread_do_work(void)
15361 -{
15362 -       rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
15363 -       rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
15364 -       rcu_preempt_do_callbacks();
15365 -}
15366 -
15367 -static void rcu_cpu_kthread_setup(unsigned int cpu)
15368 -{
15369 -       struct sched_param sp;
15370 -
15371 -       sp.sched_priority = kthread_prio;
15372 -       sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
15373 -}
15374 -
15375 -static void rcu_cpu_kthread_park(unsigned int cpu)
15376 -{
15377 -       per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
15378 -}
15379 -
15380 -static int rcu_cpu_kthread_should_run(unsigned int cpu)
15381 -{
15382 -       return __this_cpu_read(rcu_cpu_has_work);
15383 -}
15384 -
15385 -/*
15386 - * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
15387 - * RCU softirq used in flavors and configurations of RCU that do not
15388 - * support RCU priority boosting.
15389 - */
15390 -static void rcu_cpu_kthread(unsigned int cpu)
15391 -{
15392 -       unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
15393 -       char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
15394 -       int spincnt;
15395 -
15396 -       for (spincnt = 0; spincnt < 10; spincnt++) {
15397 -               trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
15398 -               local_bh_disable();
15399 -               *statusp = RCU_KTHREAD_RUNNING;
15400 -               this_cpu_inc(rcu_cpu_kthread_loops);
15401 -               local_irq_disable();
15402 -               work = *workp;
15403 -               *workp = 0;
15404 -               local_irq_enable();
15405 -               if (work)
15406 -                       rcu_kthread_do_work();
15407 -               local_bh_enable();
15408 -               if (*workp == 0) {
15409 -                       trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
15410 -                       *statusp = RCU_KTHREAD_WAITING;
15411 -                       return;
15412 -               }
15413 -       }
15414 -       *statusp = RCU_KTHREAD_YIELDING;
15415 -       trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
15416 -       schedule_timeout_interruptible(2);
15417 -       trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
15418 -       *statusp = RCU_KTHREAD_WAITING;
15419 -}
15420 -
15421  /*
15422   * Set the per-rcu_node kthread's affinity to cover all CPUs that are
15423   * served by the rcu_node in question.  The CPU hotplug lock is still
15424 @@ -1174,26 +1083,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
15425         free_cpumask_var(cm);
15426  }
15427  
15428 -static struct smp_hotplug_thread rcu_cpu_thread_spec = {
15429 -       .store                  = &rcu_cpu_kthread_task,
15430 -       .thread_should_run      = rcu_cpu_kthread_should_run,
15431 -       .thread_fn              = rcu_cpu_kthread,
15432 -       .thread_comm            = "rcuc/%u",
15433 -       .setup                  = rcu_cpu_kthread_setup,
15434 -       .park                   = rcu_cpu_kthread_park,
15435 -};
15436 -
15437  /*
15438   * Spawn boost kthreads -- called as soon as the scheduler is running.
15439   */
15440  static void __init rcu_spawn_boost_kthreads(void)
15441  {
15442         struct rcu_node *rnp;
15443 -       int cpu;
15444 -
15445 -       for_each_possible_cpu(cpu)
15446 -               per_cpu(rcu_cpu_has_work, cpu) = 0;
15447 -       BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
15448         rcu_for_each_leaf_node(rcu_state_p, rnp)
15449                 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
15450  }
15451 @@ -1216,11 +1111,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
15452         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
15453  }
15454  
15455 -static void invoke_rcu_callbacks_kthread(void)
15456 -{
15457 -       WARN_ON_ONCE(1);
15458 -}
15459 -
15460  static bool rcu_is_callbacks_kthread(void)
15461  {
15462         return false;
15463 @@ -1244,7 +1134,7 @@ static void rcu_prepare_kthreads(int cpu)
15464  
15465  #endif /* #else #ifdef CONFIG_RCU_BOOST */
15466  
15467 -#if !defined(CONFIG_RCU_FAST_NO_HZ)
15468 +#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
15469  
15470  /*
15471   * Check to see if any future RCU-related work will need to be done
15472 @@ -1261,7 +1151,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
15473         return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
15474                ? 0 : rcu_cpu_has_callbacks(NULL);
15475  }
15476 +#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
15477  
15478 +#if !defined(CONFIG_RCU_FAST_NO_HZ)
15479  /*
15480   * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
15481   * after it.
15482 @@ -1357,6 +1249,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
15483         return cbs_ready;
15484  }
15485  
15486 +#ifndef CONFIG_PREEMPT_RT_FULL
15487 +
15488  /*
15489   * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
15490   * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
15491 @@ -1402,6 +1296,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
15492         *nextevt = basemono + dj * TICK_NSEC;
15493         return 0;
15494  }
15495 +#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
15496  
15497  /*
15498   * Prepare a CPU for idle from an RCU perspective.  The first major task
15499 diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
15500 index 4f6db7e6a117..ee02e1e1b3e5 100644
15501 --- a/kernel/rcu/update.c
15502 +++ b/kernel/rcu/update.c
15503 @@ -62,7 +62,7 @@
15504  #ifndef CONFIG_TINY_RCU
15505  module_param(rcu_expedited, int, 0);
15506  module_param(rcu_normal, int, 0);
15507 -static int rcu_normal_after_boot;
15508 +static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
15509  module_param(rcu_normal_after_boot, int, 0);
15510  #endif /* #ifndef CONFIG_TINY_RCU */
15511  
15512 @@ -132,8 +132,7 @@ bool rcu_gp_is_normal(void)
15513  }
15514  EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
15515  
15516 -static atomic_t rcu_expedited_nesting =
15517 -       ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
15518 +static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
15519  
15520  /*
15521   * Should normal grace-period primitives be expedited?  Intended for
15522 @@ -182,8 +181,7 @@ EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
15523   */
15524  void rcu_end_inkernel_boot(void)
15525  {
15526 -       if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
15527 -               rcu_unexpedite_gp();
15528 +       rcu_unexpedite_gp();
15529         if (rcu_normal_after_boot)
15530                 WRITE_ONCE(rcu_normal, 1);
15531  }
15532 @@ -298,6 +296,7 @@ int rcu_read_lock_held(void)
15533  }
15534  EXPORT_SYMBOL_GPL(rcu_read_lock_held);
15535  
15536 +#ifndef CONFIG_PREEMPT_RT_FULL
15537  /**
15538   * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
15539   *
15540 @@ -324,6 +323,7 @@ int rcu_read_lock_bh_held(void)
15541         return in_softirq() || irqs_disabled();
15542  }
15543  EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
15544 +#endif
15545  
15546  #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
15547  
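For illustration only (not part of the patch): the hunks above make boot-time expediting unconditional (rcu_expedited_nesting now starts at 1 and is dropped by rcu_end_inkernel_boot()) and, on PREEMPT_RT_FULL, default rcu_normal_after_boot to 1, so expedited grace-period requests are demoted to normal ones once booting has finished. A minimal sketch of what a caller sees, assuming the usual behaviour of the expedited primitives when rcu_normal is set:

#include <linux/rcupdate.h>

/*
 * Sketch: on an RT kernel, after rcu_end_inkernel_boot() has run, the
 * rcu_normal_after_boot default set above turns this into a normal
 * (non-IPI) grace period; during early boot it is still expedited.
 */
static void __maybe_unused flush_readers(void)
{
	synchronize_rcu_expedited();
}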
15548 diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
15549 index 5e59b832ae2b..7337a7f60e3f 100644
15550 --- a/kernel/sched/Makefile
15551 +++ b/kernel/sched/Makefile
15552 @@ -17,7 +17,7 @@ endif
15553  
15554  obj-y += core.o loadavg.o clock.o cputime.o
15555  obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
15556 -obj-y += wait.o swait.o completion.o idle.o
15557 +obj-y += wait.o swait.o swork.o completion.o idle.o
15558  obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
15559  obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
15560  obj-$(CONFIG_SCHEDSTATS) += stats.o
15561 diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
15562 index 8d0f35debf35..b62cf6400fe0 100644
15563 --- a/kernel/sched/completion.c
15564 +++ b/kernel/sched/completion.c
15565 @@ -30,10 +30,10 @@ void complete(struct completion *x)
15566  {
15567         unsigned long flags;
15568  
15569 -       spin_lock_irqsave(&x->wait.lock, flags);
15570 +       raw_spin_lock_irqsave(&x->wait.lock, flags);
15571         x->done++;
15572 -       __wake_up_locked(&x->wait, TASK_NORMAL, 1);
15573 -       spin_unlock_irqrestore(&x->wait.lock, flags);
15574 +       swake_up_locked(&x->wait);
15575 +       raw_spin_unlock_irqrestore(&x->wait.lock, flags);
15576  }
15577  EXPORT_SYMBOL(complete);
15578  
15579 @@ -50,10 +50,10 @@ void complete_all(struct completion *x)
15580  {
15581         unsigned long flags;
15582  
15583 -       spin_lock_irqsave(&x->wait.lock, flags);
15584 +       raw_spin_lock_irqsave(&x->wait.lock, flags);
15585         x->done += UINT_MAX/2;
15586 -       __wake_up_locked(&x->wait, TASK_NORMAL, 0);
15587 -       spin_unlock_irqrestore(&x->wait.lock, flags);
15588 +       swake_up_all_locked(&x->wait);
15589 +       raw_spin_unlock_irqrestore(&x->wait.lock, flags);
15590  }
15591  EXPORT_SYMBOL(complete_all);
15592  
15593 @@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x,
15594                    long (*action)(long), long timeout, int state)
15595  {
15596         if (!x->done) {
15597 -               DECLARE_WAITQUEUE(wait, current);
15598 +               DECLARE_SWAITQUEUE(wait);
15599  
15600 -               __add_wait_queue_tail_exclusive(&x->wait, &wait);
15601 +               __prepare_to_swait(&x->wait, &wait);
15602                 do {
15603                         if (signal_pending_state(state, current)) {
15604                                 timeout = -ERESTARTSYS;
15605                                 break;
15606                         }
15607                         __set_current_state(state);
15608 -                       spin_unlock_irq(&x->wait.lock);
15609 +                       raw_spin_unlock_irq(&x->wait.lock);
15610                         timeout = action(timeout);
15611 -                       spin_lock_irq(&x->wait.lock);
15612 +                       raw_spin_lock_irq(&x->wait.lock);
15613                 } while (!x->done && timeout);
15614 -               __remove_wait_queue(&x->wait, &wait);
15615 +               __finish_swait(&x->wait, &wait);
15616                 if (!x->done)
15617                         return timeout;
15618         }
15619 @@ -89,9 +89,9 @@ __wait_for_common(struct completion *x,
15620  {
15621         might_sleep();
15622  
15623 -       spin_lock_irq(&x->wait.lock);
15624 +       raw_spin_lock_irq(&x->wait.lock);
15625         timeout = do_wait_for_common(x, action, timeout, state);
15626 -       spin_unlock_irq(&x->wait.lock);
15627 +       raw_spin_unlock_irq(&x->wait.lock);
15628         return timeout;
15629  }
15630  
15631 @@ -277,12 +277,12 @@ bool try_wait_for_completion(struct completion *x)
15632         if (!READ_ONCE(x->done))
15633                 return 0;
15634  
15635 -       spin_lock_irqsave(&x->wait.lock, flags);
15636 +       raw_spin_lock_irqsave(&x->wait.lock, flags);
15637         if (!x->done)
15638                 ret = 0;
15639         else
15640                 x->done--;
15641 -       spin_unlock_irqrestore(&x->wait.lock, flags);
15642 +       raw_spin_unlock_irqrestore(&x->wait.lock, flags);
15643         return ret;
15644  }
15645  EXPORT_SYMBOL(try_wait_for_completion);
15646 @@ -311,7 +311,7 @@ bool completion_done(struct completion *x)
15647          * after it's acquired the lock.
15648          */
15649         smp_rmb();
15650 -       spin_unlock_wait(&x->wait.lock);
15651 +       raw_spin_unlock_wait(&x->wait.lock);
15652         return true;
15653  }
15654  EXPORT_SYMBOL(completion_done);
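For illustration only (not part of the patch): the conversion above moves struct completion onto a simple waitqueue protected by a raw spinlock, so complete() remains usable from hard-IRQ context on PREEMPT_RT_FULL, while complete_all() is expected to have only a few waiters (the swake_up_all_locked() helper added later in this patch warns above two). The caller-side API is unchanged; a small sketch with an invented device and IRQ handler:

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Invented for illustration: a device waiting for an IRQ-signalled event. */
struct my_dev {
	struct completion done;
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	/* Raw-spinlock based now, so this stays legal in hard IRQ on RT. */
	complete(&dev->done);
	return IRQ_HANDLED;
}

static int my_dev_wait(struct my_dev *dev)
{
	init_completion(&dev->done);
	/* ... start the hardware operation that ends in my_irq_handler() ... */
	if (!wait_for_completion_timeout(&dev->done, HZ))
		return -ETIMEDOUT;
	return 0;
}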
15655 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
15656 index 154fd689fe02..a6aa5801b21e 100644
15657 --- a/kernel/sched/core.c
15658 +++ b/kernel/sched/core.c
15659 @@ -129,7 +129,11 @@ const_debug unsigned int sysctl_sched_features =
15660   * Number of tasks to iterate in a single balance run.
15661   * Limited because this is done with IRQs disabled.
15662   */
15663 +#ifndef CONFIG_PREEMPT_RT_FULL
15664  const_debug unsigned int sysctl_sched_nr_migrate = 32;
15665 +#else
15666 +const_debug unsigned int sysctl_sched_nr_migrate = 8;
15667 +#endif
15668  
15669  /*
15670   * period over which we average the RT time consumption, measured
15671 @@ -345,6 +349,7 @@ static void init_rq_hrtick(struct rq *rq)
15672  
15673         hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
15674         rq->hrtick_timer.function = hrtick;
15675 +       rq->hrtick_timer.irqsafe = 1;
15676  }
15677  #else  /* CONFIG_SCHED_HRTICK */
15678  static inline void hrtick_clear(struct rq *rq)
15679 @@ -449,7 +454,7 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
15680         head->lastp = &node->next;
15681  }
15682  
15683 -void wake_up_q(struct wake_q_head *head)
15684 +void __wake_up_q(struct wake_q_head *head, bool sleeper)
15685  {
15686         struct wake_q_node *node = head->first;
15687  
15688 @@ -466,7 +471,10 @@ void wake_up_q(struct wake_q_head *head)
15689                  * wake_up_process() implies a wmb() to pair with the queueing
15690                  * in wake_q_add() so as not to miss wakeups.
15691                  */
15692 -               wake_up_process(task);
15693 +               if (sleeper)
15694 +                       wake_up_lock_sleeper(task);
15695 +               else
15696 +                       wake_up_process(task);
15697                 put_task_struct(task);
15698         }
15699  }
15700 @@ -502,6 +510,38 @@ void resched_curr(struct rq *rq)
15701                 trace_sched_wake_idle_without_ipi(cpu);
15702  }
15703  
15704 +#ifdef CONFIG_PREEMPT_LAZY
15705 +void resched_curr_lazy(struct rq *rq)
15706 +{
15707 +       struct task_struct *curr = rq->curr;
15708 +       int cpu;
15709 +
15710 +       if (!sched_feat(PREEMPT_LAZY)) {
15711 +               resched_curr(rq);
15712 +               return;
15713 +       }
15714 +
15715 +       lockdep_assert_held(&rq->lock);
15716 +
15717 +       if (test_tsk_need_resched(curr))
15718 +               return;
15719 +
15720 +       if (test_tsk_need_resched_lazy(curr))
15721 +               return;
15722 +
15723 +       set_tsk_need_resched_lazy(curr);
15724 +
15725 +       cpu = cpu_of(rq);
15726 +       if (cpu == smp_processor_id())
15727 +               return;
15728 +
15729 +       /* NEED_RESCHED_LAZY must be visible before we test polling */
15730 +       smp_mb();
15731 +       if (!tsk_is_polling(curr))
15732 +               smp_send_reschedule(cpu);
15733 +}
15734 +#endif
15735 +
15736  void resched_cpu(int cpu)
15737  {
15738         struct rq *rq = cpu_rq(cpu);
15739 @@ -525,11 +565,14 @@ void resched_cpu(int cpu)
15740   */
15741  int get_nohz_timer_target(void)
15742  {
15743 -       int i, cpu = smp_processor_id();
15744 +       int i, cpu;
15745         struct sched_domain *sd;
15746  
15747 +       preempt_disable_rt();
15748 +       cpu = smp_processor_id();
15749 +
15750         if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
15751 -               return cpu;
15752 +               goto preempt_en_rt;
15753  
15754         rcu_read_lock();
15755         for_each_domain(cpu, sd) {
15756 @@ -548,6 +591,8 @@ int get_nohz_timer_target(void)
15757                 cpu = housekeeping_any_cpu();
15758  unlock:
15759         rcu_read_unlock();
15760 +preempt_en_rt:
15761 +       preempt_enable_rt();
15762         return cpu;
15763  }
15764  /*
15765 @@ -1100,6 +1145,11 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
15766  
15767         lockdep_assert_held(&p->pi_lock);
15768  
15769 +       if (__migrate_disabled(p)) {
15770 +               cpumask_copy(&p->cpus_allowed, new_mask);
15771 +               return;
15772 +       }
15773 +
15774         queued = task_on_rq_queued(p);
15775         running = task_current(rq, p);
15776  
15777 @@ -1122,6 +1172,84 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
15778                 set_curr_task(rq, p);
15779  }
15780  
15781 +static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
15782 +static DEFINE_MUTEX(sched_down_mutex);
15783 +static cpumask_t sched_down_cpumask;
15784 +
15785 +void tell_sched_cpu_down_begin(int cpu)
15786 +{
15787 +       mutex_lock(&sched_down_mutex);
15788 +       cpumask_set_cpu(cpu, &sched_down_cpumask);
15789 +       mutex_unlock(&sched_down_mutex);
15790 +}
15791 +
15792 +void tell_sched_cpu_down_done(int cpu)
15793 +{
15794 +       mutex_lock(&sched_down_mutex);
15795 +       cpumask_clear_cpu(cpu, &sched_down_cpumask);
15796 +       mutex_unlock(&sched_down_mutex);
15797 +}
15798 +
15799 +/**
15800 + * migrate_me - try to move the current task off this cpu
15801 + *
15802 + * Used by the pin_current_cpu() code to try to get tasks
15803 + * to move off the current CPU as it is going down.
15804 + * It will only move the task if the task isn't pinned to
15805 + * the CPU (with migrate_disable, affinity or NO_SETAFFINITY)
15806 +        * and the task is in a RUNNING state. Otherwise the
15807 +        * movement of the task would wake it up (change its state
15808 + * to running) when the task did not expect it.
15809 + *
15810 + * Returns 1 if it succeeded in moving the current task
15811 + *         0 otherwise.
15812 + */
15813 +int migrate_me(void)
15814 +{
15815 +       struct task_struct *p = current;
15816 +       struct migration_arg arg;
15817 +       struct cpumask *cpumask;
15818 +       struct cpumask *mask;
15819 +       unsigned int dest_cpu;
15820 +       struct rq_flags rf;
15821 +       struct rq *rq;
15822 +
15823 +       /*
15824 +        * We cannot migrate tasks bound to a CPU or tasks that are not
15825 +        * running. The movement of the task will wake it up.
15826 +        */
15827 +       if (p->flags & PF_NO_SETAFFINITY || p->state)
15828 +               return 0;
15829 +
15830 +       mutex_lock(&sched_down_mutex);
15831 +       rq = task_rq_lock(p, &rf);
15832 +
15833 +       cpumask = this_cpu_ptr(&sched_cpumasks);
15834 +       mask = &p->cpus_allowed;
15835 +
15836 +       cpumask_andnot(cpumask, mask, &sched_down_cpumask);
15837 +
15838 +       if (!cpumask_weight(cpumask)) {
15839 +               /* It's only on this CPU? */
15840 +               task_rq_unlock(rq, p, &rf);
15841 +               mutex_unlock(&sched_down_mutex);
15842 +               return 0;
15843 +       }
15844 +
15845 +       dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
15846 +
15847 +       arg.task = p;
15848 +       arg.dest_cpu = dest_cpu;
15849 +
15850 +       task_rq_unlock(rq, p, &rf);
15851 +
15852 +       stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
15853 +       tlb_migrate_finish(p->mm);
15854 +       mutex_unlock(&sched_down_mutex);
15855 +
15856 +       return 1;
15857 +}
15858 +
15859  /*
15860   * Change a given task's CPU affinity. Migrate the thread to a
15861   * proper CPU and schedule it away if the CPU it's executing on
15862 @@ -1179,7 +1307,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
15863         }
15864  
15865         /* Can the task run on the task's current CPU? If so, we're done */
15866 -       if (cpumask_test_cpu(task_cpu(p), new_mask))
15867 +       if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
15868                 goto out;
15869  
15870         dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
15871 @@ -1366,6 +1494,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
15872         return ret;
15873  }
15874  
15875 +static bool check_task_state(struct task_struct *p, long match_state)
15876 +{
15877 +       bool match = false;
15878 +
15879 +       raw_spin_lock_irq(&p->pi_lock);
15880 +       if (p->state == match_state || p->saved_state == match_state)
15881 +               match = true;
15882 +       raw_spin_unlock_irq(&p->pi_lock);
15883 +
15884 +       return match;
15885 +}
15886 +
15887  /*
15888   * wait_task_inactive - wait for a thread to unschedule.
15889   *
15890 @@ -1410,7 +1550,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
15891                  * is actually now running somewhere else!
15892                  */
15893                 while (task_running(rq, p)) {
15894 -                       if (match_state && unlikely(p->state != match_state))
15895 +                       if (match_state && !check_task_state(p, match_state))
15896                                 return 0;
15897                         cpu_relax();
15898                 }
15899 @@ -1425,7 +1565,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
15900                 running = task_running(rq, p);
15901                 queued = task_on_rq_queued(p);
15902                 ncsw = 0;
15903 -               if (!match_state || p->state == match_state)
15904 +               if (!match_state || p->state == match_state ||
15905 +                   p->saved_state == match_state)
15906                         ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
15907                 task_rq_unlock(rq, p, &rf);
15908  
15909 @@ -1680,10 +1821,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
15910  {
15911         activate_task(rq, p, en_flags);
15912         p->on_rq = TASK_ON_RQ_QUEUED;
15913 -
15914 -       /* if a worker is waking up, notify workqueue */
15915 -       if (p->flags & PF_WQ_WORKER)
15916 -               wq_worker_waking_up(p, cpu_of(rq));
15917  }
15918  
15919  /*
15920 @@ -2018,8 +2155,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
15921          */
15922         smp_mb__before_spinlock();
15923         raw_spin_lock_irqsave(&p->pi_lock, flags);
15924 -       if (!(p->state & state))
15925 +       if (!(p->state & state)) {
15926 +               /*
15927 +                * The task might be running due to a spinlock sleeper
15928 +                * wakeup. Check the saved state and set it to running
15929 +                * if the wakeup condition is true.
15930 +                */
15931 +               if (!(wake_flags & WF_LOCK_SLEEPER)) {
15932 +                       if (p->saved_state & state) {
15933 +                               p->saved_state = TASK_RUNNING;
15934 +                               success = 1;
15935 +                       }
15936 +               }
15937                 goto out;
15938 +       }
15939 +
15940 +       /*
15941 +        * If this is a regular wakeup, then we can unconditionally
15942 +        * clear the saved state of a "lock sleeper".
15943 +        */
15944 +       if (!(wake_flags & WF_LOCK_SLEEPER))
15945 +               p->saved_state = TASK_RUNNING;
15946  
15947         trace_sched_waking(p);
15948  
15949 @@ -2102,53 +2258,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
15950  }
15951  
15952  /**
15953 - * try_to_wake_up_local - try to wake up a local task with rq lock held
15954 - * @p: the thread to be awakened
15955 - * @cookie: context's cookie for pinning
15956 - *
15957 - * Put @p on the run-queue if it's not already there. The caller must
15958 - * ensure that this_rq() is locked, @p is bound to this_rq() and not
15959 - * the current task.
15960 - */
15961 -static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
15962 -{
15963 -       struct rq *rq = task_rq(p);
15964 -
15965 -       if (WARN_ON_ONCE(rq != this_rq()) ||
15966 -           WARN_ON_ONCE(p == current))
15967 -               return;
15968 -
15969 -       lockdep_assert_held(&rq->lock);
15970 -
15971 -       if (!raw_spin_trylock(&p->pi_lock)) {
15972 -               /*
15973 -                * This is OK, because current is on_cpu, which avoids it being
15974 -                * picked for load-balance and preemption/IRQs are still
15975 -                * disabled avoiding further scheduler activity on it and we've
15976 -                * not yet picked a replacement task.
15977 -                */
15978 -               lockdep_unpin_lock(&rq->lock, cookie);
15979 -               raw_spin_unlock(&rq->lock);
15980 -               raw_spin_lock(&p->pi_lock);
15981 -               raw_spin_lock(&rq->lock);
15982 -               lockdep_repin_lock(&rq->lock, cookie);
15983 -       }
15984 -
15985 -       if (!(p->state & TASK_NORMAL))
15986 -               goto out;
15987 -
15988 -       trace_sched_waking(p);
15989 -
15990 -       if (!task_on_rq_queued(p))
15991 -               ttwu_activate(rq, p, ENQUEUE_WAKEUP);
15992 -
15993 -       ttwu_do_wakeup(rq, p, 0, cookie);
15994 -       ttwu_stat(p, smp_processor_id(), 0);
15995 -out:
15996 -       raw_spin_unlock(&p->pi_lock);
15997 -}
15998 -
15999 -/**
16000   * wake_up_process - Wake up a specific process
16001   * @p: The process to be woken up.
16002   *
16003 @@ -2166,6 +2275,18 @@ int wake_up_process(struct task_struct *p)
16004  }
16005  EXPORT_SYMBOL(wake_up_process);
16006  
16007 +/**
16008 + * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
16009 + * @p: The process to be woken up.
16010 + *
16011 + * Same as wake_up_process() above, but wake_flags is set to WF_LOCK_SLEEPER
16012 + * to indicate the nature of the wakeup.
16013 + */
16014 +int wake_up_lock_sleeper(struct task_struct *p)
16015 +{
16016 +       return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
16017 +}
16018 +
16019  int wake_up_state(struct task_struct *p, unsigned int state)
16020  {
16021         return try_to_wake_up(p, state, 0);
16022 @@ -2442,6 +2563,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
16023         p->on_cpu = 0;
16024  #endif
16025         init_task_preempt_count(p);
16026 +#ifdef CONFIG_HAVE_PREEMPT_LAZY
16027 +       task_thread_info(p)->preempt_lazy_count = 0;
16028 +#endif
16029  #ifdef CONFIG_SMP
16030         plist_node_init(&p->pushable_tasks, MAX_PRIO);
16031         RB_CLEAR_NODE(&p->pushable_dl_tasks);
16032 @@ -2770,21 +2894,16 @@ static struct rq *finish_task_switch(struct task_struct *prev)
16033         finish_arch_post_lock_switch();
16034  
16035         fire_sched_in_preempt_notifiers(current);
16036 +       /*
16037 +        * We use mmdrop_delayed() here so we don't have to do the
16038 +        * full __mmdrop() when we are the last user.
16039 +        */
16040         if (mm)
16041 -               mmdrop(mm);
16042 +               mmdrop_delayed(mm);
16043         if (unlikely(prev_state == TASK_DEAD)) {
16044                 if (prev->sched_class->task_dead)
16045                         prev->sched_class->task_dead(prev);
16046  
16047 -               /*
16048 -                * Remove function-return probe instances associated with this
16049 -                * task and put them back on the free list.
16050 -                */
16051 -               kprobe_flush_task(prev);
16052 -
16053 -               /* Task is done with its stack. */
16054 -               put_task_stack(prev);
16055 -
16056                 put_task_struct(prev);
16057         }
16058  
16059 @@ -3252,6 +3371,77 @@ static inline void schedule_debug(struct task_struct *prev)
16060         schedstat_inc(this_rq()->sched_count);
16061  }
16062  
16063 +#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
16064 +
16065 +void migrate_disable(void)
16066 +{
16067 +       struct task_struct *p = current;
16068 +
16069 +       if (in_atomic() || irqs_disabled()) {
16070 +#ifdef CONFIG_SCHED_DEBUG
16071 +               p->migrate_disable_atomic++;
16072 +#endif
16073 +               return;
16074 +       }
16075 +
16076 +#ifdef CONFIG_SCHED_DEBUG
16077 +       if (unlikely(p->migrate_disable_atomic)) {
16078 +               tracing_off();
16079 +               WARN_ON_ONCE(1);
16080 +       }
16081 +#endif
16082 +
16083 +       if (p->migrate_disable) {
16084 +               p->migrate_disable++;
16085 +               return;
16086 +       }
16087 +
16088 +       preempt_disable();
16089 +       preempt_lazy_disable();
16090 +       pin_current_cpu();
16091 +       p->migrate_disable = 1;
16092 +       preempt_enable();
16093 +}
16094 +EXPORT_SYMBOL(migrate_disable);
16095 +
16096 +void migrate_enable(void)
16097 +{
16098 +       struct task_struct *p = current;
16099 +
16100 +       if (in_atomic() || irqs_disabled()) {
16101 +#ifdef CONFIG_SCHED_DEBUG
16102 +               p->migrate_disable_atomic--;
16103 +#endif
16104 +               return;
16105 +       }
16106 +
16107 +#ifdef CONFIG_SCHED_DEBUG
16108 +       if (unlikely(p->migrate_disable_atomic)) {
16109 +               tracing_off();
16110 +               WARN_ON_ONCE(1);
16111 +       }
16112 +#endif
16113 +       WARN_ON_ONCE(p->migrate_disable <= 0);
16114 +
16115 +       if (p->migrate_disable > 1) {
16116 +               p->migrate_disable--;
16117 +               return;
16118 +       }
16119 +
16120 +       preempt_disable();
16121 +       /*
16122 +        * Clearing migrate_disable causes tsk_cpus_allowed to
16123 +        * show the task's original cpu affinity.
16124 +        */
16125 +       p->migrate_disable = 0;
16126 +
16127 +       unpin_current_cpu();
16128 +       preempt_enable();
16129 +       preempt_lazy_enable();
16130 +}
16131 +EXPORT_SYMBOL(migrate_enable);
16132 +#endif
16133 +
16134  /*
16135   * Pick up the highest-prio task:
16136   */
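For illustration only (not part of the patch): migrate_disable()/migrate_enable() above pin the current task to its CPU while leaving the section preemptible (preemption is only disabled briefly to flip the flag), which is what RT code uses where mainline would reach for preempt_disable(). A minimal sketch of the calling pattern, assuming the prototypes from the RT-patched headers elsewhere in this series:

#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/printk.h>

static int my_report_cpu(void)
{
	int cpu;

	/*
	 * Pin this task to its current CPU without disabling preemption:
	 * smp_processor_id() stays stable across the (possibly sleeping)
	 * work below, but other tasks may still run on this CPU, so any
	 * shared data still needs its own locking.
	 */
	migrate_disable();
	cpu = smp_processor_id();
	pr_info("running pinned on CPU%d\n", cpu);
	/* ... work that must observe a stable CPU number ... */
	migrate_enable();

	return cpu;
}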
16137 @@ -3368,19 +3558,6 @@ static void __sched notrace __schedule(bool preempt)
16138                 } else {
16139                         deactivate_task(rq, prev, DEQUEUE_SLEEP);
16140                         prev->on_rq = 0;
16141 -
16142 -                       /*
16143 -                        * If a worker went to sleep, notify and ask workqueue
16144 -                        * whether it wants to wake up a task to maintain
16145 -                        * concurrency.
16146 -                        */
16147 -                       if (prev->flags & PF_WQ_WORKER) {
16148 -                               struct task_struct *to_wakeup;
16149 -
16150 -                               to_wakeup = wq_worker_sleeping(prev);
16151 -                               if (to_wakeup)
16152 -                                       try_to_wake_up_local(to_wakeup, cookie);
16153 -                       }
16154                 }
16155                 switch_count = &prev->nvcsw;
16156         }
16157 @@ -3390,6 +3567,7 @@ static void __sched notrace __schedule(bool preempt)
16158  
16159         next = pick_next_task(rq, prev, cookie);
16160         clear_tsk_need_resched(prev);
16161 +       clear_tsk_need_resched_lazy(prev);
16162         clear_preempt_need_resched();
16163         rq->clock_skip_update = 0;
16164  
16165 @@ -3437,9 +3615,20 @@ void __noreturn do_task_dead(void)
16166  
16167  static inline void sched_submit_work(struct task_struct *tsk)
16168  {
16169 -       if (!tsk->state || tsk_is_pi_blocked(tsk))
16170 +       if (!tsk->state)
16171                 return;
16172         /*
16173 +        * If a worker went to sleep, notify and ask workqueue whether
16174 +        * it wants to wake up a task to maintain concurrency.
16175 +        */
16176 +       if (tsk->flags & PF_WQ_WORKER)
16177 +               wq_worker_sleeping(tsk);
16178 +
16179 +
16180 +       if (tsk_is_pi_blocked(tsk))
16181 +               return;
16182 +
16183 +       /*
16184          * If we are going to sleep and we have plugged IO queued,
16185          * make sure to submit it to avoid deadlocks.
16186          */
16187 @@ -3447,6 +3636,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
16188                 blk_schedule_flush_plug(tsk);
16189  }
16190  
16191 +static void sched_update_worker(struct task_struct *tsk)
16192 +{
16193 +       if (tsk->flags & PF_WQ_WORKER)
16194 +               wq_worker_running(tsk);
16195 +}
16196 +
16197  asmlinkage __visible void __sched schedule(void)
16198  {
16199         struct task_struct *tsk = current;
16200 @@ -3457,6 +3652,7 @@ asmlinkage __visible void __sched schedule(void)
16201                 __schedule(false);
16202                 sched_preempt_enable_no_resched();
16203         } while (need_resched());
16204 +       sched_update_worker(tsk);
16205  }
16206  EXPORT_SYMBOL(schedule);
16207  
16208 @@ -3520,6 +3716,30 @@ static void __sched notrace preempt_schedule_common(void)
16209         } while (need_resched());
16210  }
16211  
16212 +#ifdef CONFIG_PREEMPT_LAZY
16213 +/*
16214 + * If TIF_NEED_RESCHED is set then we allow ourselves to be scheduled away,
16215 + * since it is set by an RT task. Otherwise we try to avoid being scheduled
16216 + * out as long as the preempt_lazy_count counter is > 0.
16217 + */
16218 +static __always_inline int preemptible_lazy(void)
16219 +{
16220 +       if (test_thread_flag(TIF_NEED_RESCHED))
16221 +               return 1;
16222 +       if (current_thread_info()->preempt_lazy_count)
16223 +               return 0;
16224 +       return 1;
16225 +}
16226 +
16227 +#else
16228 +
16229 +static inline int preemptible_lazy(void)
16230 +{
16231 +       return 1;
16232 +}
16233 +
16234 +#endif
16235 +
16236  #ifdef CONFIG_PREEMPT
16237  /*
16238   * this is the entry point to schedule() from in-kernel preemption
16239 @@ -3534,7 +3754,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
16240          */
16241         if (likely(!preemptible()))
16242                 return;
16243 -
16244 +       if (!preemptible_lazy())
16245 +               return;
16246         preempt_schedule_common();
16247  }
16248  NOKPROBE_SYMBOL(preempt_schedule);
16249 @@ -3561,6 +3782,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
16250         if (likely(!preemptible()))
16251                 return;
16252  
16253 +       if (!preemptible_lazy())
16254 +               return;
16255 +
16256         do {
16257                 /*
16258                  * Because the function tracer can trace preempt_count_sub()
16259 @@ -3583,7 +3807,16 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
16260                  * an infinite recursion.
16261                  */
16262                 prev_ctx = exception_enter();
16263 +               /*
16264 +                * The add/subtract must not be traced by the function
16265 +                * tracer. But we still want to account for the
16266 +                * preempt off latency tracer. Since the _notrace versions
16267 +                * of add/subtract skip the accounting for the latency tracer,
16268 +                * we must force it manually.
16269 +                */
16270 +               start_critical_timings();
16271                 __schedule(true);
16272 +               stop_critical_timings();
16273                 exception_exit(prev_ctx);
16274  
16275                 preempt_latency_stop(1);
16276 @@ -4939,6 +5172,7 @@ int __cond_resched_lock(spinlock_t *lock)
16277  }
16278  EXPORT_SYMBOL(__cond_resched_lock);
16279  
16280 +#ifndef CONFIG_PREEMPT_RT_FULL
16281  int __sched __cond_resched_softirq(void)
16282  {
16283         BUG_ON(!in_softirq());
16284 @@ -4952,6 +5186,7 @@ int __sched __cond_resched_softirq(void)
16285         return 0;
16286  }
16287  EXPORT_SYMBOL(__cond_resched_softirq);
16288 +#endif
16289  
16290  /**
16291   * yield - yield the current processor to other threads.
16292 @@ -5315,7 +5550,9 @@ void init_idle(struct task_struct *idle, int cpu)
16293  
16294         /* Set the preempt count _outside_ the spinlocks! */
16295         init_idle_preempt_count(idle, cpu);
16296 -
16297 +#ifdef CONFIG_HAVE_PREEMPT_LAZY
16298 +       task_thread_info(idle)->preempt_lazy_count = 0;
16299 +#endif
16300         /*
16301          * The idle tasks have their own, simple scheduling class:
16302          */
16303 @@ -5458,6 +5695,8 @@ void sched_setnuma(struct task_struct *p, int nid)
16304  #endif /* CONFIG_NUMA_BALANCING */
16305  
16306  #ifdef CONFIG_HOTPLUG_CPU
16307 +static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
16308 +
16309  /*
16310   * Ensures that the idle task is using init_mm right before its cpu goes
16311   * offline.
16312 @@ -5472,7 +5711,12 @@ void idle_task_exit(void)
16313                 switch_mm_irqs_off(mm, &init_mm, current);
16314                 finish_arch_post_lock_switch();
16315         }
16316 -       mmdrop(mm);
16317 +       /*
16318 +        * Defer the cleanup to a live CPU. On RT we can neither
16319 +        * call mmdrop() nor mmdrop_delayed() from here.
16320 +        */
16321 +       per_cpu(idle_last_mm, smp_processor_id()) = mm;
16322 +
16323  }
16324  
16325  /*
16326 @@ -7418,6 +7662,10 @@ int sched_cpu_dying(unsigned int cpu)
16327         update_max_interval();
16328         nohz_balance_exit_idle(cpu);
16329         hrtick_clear(rq);
16330 +       if (per_cpu(idle_last_mm, cpu)) {
16331 +               mmdrop_delayed(per_cpu(idle_last_mm, cpu));
16332 +               per_cpu(idle_last_mm, cpu) = NULL;
16333 +       }
16334         return 0;
16335  }
16336  #endif
16337 @@ -7698,7 +7946,7 @@ void __init sched_init(void)
16338  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
16339  static inline int preempt_count_equals(int preempt_offset)
16340  {
16341 -       int nested = preempt_count() + rcu_preempt_depth();
16342 +       int nested = preempt_count() + sched_rcu_preempt_depth();
16343  
16344         return (nested == preempt_offset);
16345  }
16346 diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
16347 index 37e2449186c4..e00accf92a4b 100644
16348 --- a/kernel/sched/deadline.c
16349 +++ b/kernel/sched/deadline.c
16350 @@ -687,6 +687,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
16351  
16352         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
16353         timer->function = dl_task_timer;
16354 +       timer->irqsafe = 1;
16355  }
16356  
16357  static
16358 @@ -1729,12 +1730,11 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
16359  #ifdef CONFIG_SMP
16360                 if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
16361                         queue_push_tasks(rq);
16362 -#else
16363 +#endif
16364                 if (dl_task(rq->curr))
16365                         check_preempt_curr_dl(rq, p, 0);
16366                 else
16367                         resched_curr(rq);
16368 -#endif
16369         }
16370  }
16371  
16372 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
16373 index fa178b62ea79..935224123441 100644
16374 --- a/kernel/sched/debug.c
16375 +++ b/kernel/sched/debug.c
16376 @@ -558,6 +558,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
16377         P(rt_throttled);
16378         PN(rt_time);
16379         PN(rt_runtime);
16380 +#ifdef CONFIG_SMP
16381 +       P(rt_nr_migratory);
16382 +#endif
16383  
16384  #undef PN
16385  #undef P
16386 @@ -953,6 +956,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
16387  #endif
16388         P(policy);
16389         P(prio);
16390 +#ifdef CONFIG_PREEMPT_RT_FULL
16391 +       P(migrate_disable);
16392 +#endif
16393 +       P(nr_cpus_allowed);
16394  #undef PN_SCHEDSTAT
16395  #undef PN
16396  #undef __PN
16397 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
16398 index c242944f5cbd..4aeb2e2e41bc 100644
16399 --- a/kernel/sched/fair.c
16400 +++ b/kernel/sched/fair.c
16401 @@ -3518,7 +3518,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
16402         ideal_runtime = sched_slice(cfs_rq, curr);
16403         delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
16404         if (delta_exec > ideal_runtime) {
16405 -               resched_curr(rq_of(cfs_rq));
16406 +               resched_curr_lazy(rq_of(cfs_rq));
16407                 /*
16408                  * The current task ran long enough, ensure it doesn't get
16409                  * re-elected due to buddy favours.
16410 @@ -3542,7 +3542,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
16411                 return;
16412  
16413         if (delta > ideal_runtime)
16414 -               resched_curr(rq_of(cfs_rq));
16415 +               resched_curr_lazy(rq_of(cfs_rq));
16416  }
16417  
16418  static void
16419 @@ -3684,7 +3684,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
16420          * validating it and just reschedule.
16421          */
16422         if (queued) {
16423 -               resched_curr(rq_of(cfs_rq));
16424 +               resched_curr_lazy(rq_of(cfs_rq));
16425                 return;
16426         }
16427         /*
16428 @@ -3866,7 +3866,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
16429          * hierarchy can be throttled
16430          */
16431         if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
16432 -               resched_curr(rq_of(cfs_rq));
16433 +               resched_curr_lazy(rq_of(cfs_rq));
16434  }
16435  
16436  static __always_inline
16437 @@ -4494,7 +4494,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
16438  
16439                 if (delta < 0) {
16440                         if (rq->curr == p)
16441 -                               resched_curr(rq);
16442 +                               resched_curr_lazy(rq);
16443                         return;
16444                 }
16445                 hrtick_start(rq, delta);
16446 @@ -5905,7 +5905,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
16447         return;
16448  
16449  preempt:
16450 -       resched_curr(rq);
16451 +       resched_curr_lazy(rq);
16452         /*
16453          * Only set the backward buddy when the current task is still
16454          * on the rq. This can happen when a wakeup gets interleaved
16455 @@ -8631,7 +8631,7 @@ static void task_fork_fair(struct task_struct *p)
16456                  * 'current' within the tree based on its new key value.
16457                  */
16458                 swap(curr->vruntime, se->vruntime);
16459 -               resched_curr(rq);
16460 +               resched_curr_lazy(rq);
16461         }
16462  
16463         se->vruntime -= cfs_rq->min_vruntime;
16464 @@ -8655,7 +8655,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
16465          */
16466         if (rq->curr == p) {
16467                 if (p->prio > oldprio)
16468 -                       resched_curr(rq);
16469 +                       resched_curr_lazy(rq);
16470         } else
16471                 check_preempt_curr(rq, p, 0);
16472  }
16473 diff --git a/kernel/sched/features.h b/kernel/sched/features.h
16474 index 69631fa46c2f..6d28fcd08872 100644
16475 --- a/kernel/sched/features.h
16476 +++ b/kernel/sched/features.h
16477 @@ -45,11 +45,19 @@ SCHED_FEAT(LB_BIAS, true)
16478   */
16479  SCHED_FEAT(NONTASK_CAPACITY, true)
16480  
16481 +#ifdef CONFIG_PREEMPT_RT_FULL
16482 +SCHED_FEAT(TTWU_QUEUE, false)
16483 +# ifdef CONFIG_PREEMPT_LAZY
16484 +SCHED_FEAT(PREEMPT_LAZY, true)
16485 +# endif
16486 +#else
16487 +
16488  /*
16489   * Queue remote wakeups on the target CPU and process them
16490   * using the scheduler IPI. Reduces rq->lock contention/bounces.
16491   */
16492  SCHED_FEAT(TTWU_QUEUE, true)
16493 +#endif
16494  
16495  #ifdef HAVE_RT_PUSH_IPI
16496  /*
16497 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
16498 index 2516b8df6dbb..b0691f4e7d49 100644
16499 --- a/kernel/sched/rt.c
16500 +++ b/kernel/sched/rt.c
16501 @@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
16502  
16503         hrtimer_init(&rt_b->rt_period_timer,
16504                         CLOCK_MONOTONIC, HRTIMER_MODE_REL);
16505 +       rt_b->rt_period_timer.irqsafe = 1;
16506         rt_b->rt_period_timer.function = sched_rt_period_timer;
16507  }
16508  
16509 @@ -101,6 +102,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
16510         rt_rq->push_cpu = nr_cpu_ids;
16511         raw_spin_lock_init(&rt_rq->push_lock);
16512         init_irq_work(&rt_rq->push_work, push_irq_work_func);
16513 +       rt_rq->push_work.flags |= IRQ_WORK_HARD_IRQ;
16514  #endif
16515  #endif /* CONFIG_SMP */
16516         /* We start is dequeued state, because no RT tasks are queued */
16517 @@ -2198,10 +2200,9 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
16518  #ifdef CONFIG_SMP
16519                 if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
16520                         queue_push_tasks(rq);
16521 -#else
16522 +#endif /* CONFIG_SMP */
16523                 if (p->prio < rq->curr->prio)
16524                         resched_curr(rq);
16525 -#endif /* CONFIG_SMP */
16526         }
16527  }
16528  
16529 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
16530 index 055f935d4421..19324ac27026 100644
16531 --- a/kernel/sched/sched.h
16532 +++ b/kernel/sched/sched.h
16533 @@ -1163,6 +1163,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
16534  #define WF_SYNC                0x01            /* waker goes to sleep after wakeup */
16535  #define WF_FORK                0x02            /* child wakeup after fork */
16536  #define WF_MIGRATED    0x4             /* internal use, task got migrated */
16537 +#define WF_LOCK_SLEEPER        0x08            /* wakeup spinlock "sleeper" */
16538  
16539  /*
16540   * To aid in avoiding the subversion of "niceness" due to uneven distribution
16541 @@ -1346,6 +1347,15 @@ extern void init_sched_fair_class(void);
16542  extern void resched_curr(struct rq *rq);
16543  extern void resched_cpu(int cpu);
16544  
16545 +#ifdef CONFIG_PREEMPT_LAZY
16546 +extern void resched_curr_lazy(struct rq *rq);
16547 +#else
16548 +static inline void resched_curr_lazy(struct rq *rq)
16549 +{
16550 +       resched_curr(rq);
16551 +}
16552 +#endif
16553 +
16554  extern struct rt_bandwidth def_rt_bandwidth;
16555  extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
16556  
16557 diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
16558 index 82f0dff90030..ef027ff3250a 100644
16559 --- a/kernel/sched/swait.c
16560 +++ b/kernel/sched/swait.c
16561 @@ -1,5 +1,6 @@
16562  #include <linux/sched.h>
16563  #include <linux/swait.h>
16564 +#include <linux/suspend.h>
16565  
16566  void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
16567                              struct lock_class_key *key)
16568 @@ -29,6 +30,25 @@ void swake_up_locked(struct swait_queue_head *q)
16569  }
16570  EXPORT_SYMBOL(swake_up_locked);
16571  
16572 +void swake_up_all_locked(struct swait_queue_head *q)
16573 +{
16574 +       struct swait_queue *curr;
16575 +       int wakes = 0;
16576 +
16577 +       while (!list_empty(&q->task_list)) {
16578 +
16579 +               curr = list_first_entry(&q->task_list, typeof(*curr),
16580 +                                       task_list);
16581 +               wake_up_process(curr->task);
16582 +               list_del_init(&curr->task_list);
16583 +               wakes++;
16584 +       }
16585 +       if (pm_in_action)
16586 +               return;
16587 +       WARN(wakes > 2, "complete_all() with %d waiters\n", wakes);
16588 +}
16589 +EXPORT_SYMBOL(swake_up_all_locked);
16590 +
16591  void swake_up(struct swait_queue_head *q)
16592  {
16593         unsigned long flags;
16594 @@ -54,6 +74,7 @@ void swake_up_all(struct swait_queue_head *q)
16595         if (!swait_active(q))
16596                 return;
16597  
16598 +       WARN_ON(irqs_disabled());
16599         raw_spin_lock_irq(&q->lock);
16600         list_splice_init(&q->task_list, &tmp);
16601         while (!list_empty(&tmp)) {
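For illustration only (not part of the patch): the additions above build on the mainline simple-wait (swait) API, which the rest of this patch leans on heavily. A minimal sketch of that API as it is used here, with an invented flag and kthread:

#include <linux/swait.h>
#include <linux/kthread.h>
#include <linux/compiler.h>

static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
static bool my_data_ready;

static int my_consumer(void *unused)
{
	while (!kthread_should_stop()) {
		/* Sleep until woken and the condition holds. */
		swait_event_interruptible(my_wq,
					  READ_ONCE(my_data_ready) ||
					  kthread_should_stop());
		WRITE_ONCE(my_data_ready, false);
		/* ... consume the data ... */
	}
	return 0;
}

static void my_producer(void)
{
	WRITE_ONCE(my_data_ready, true);
	/* Wakes a single waiter; raw-spinlock based, callable from IRQ. */
	swake_up(&my_wq);
}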
16602 diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c
16603 new file mode 100644
16604 index 000000000000..1950f40ca725
16605 --- /dev/null
16606 +++ b/kernel/sched/swork.c
16607 @@ -0,0 +1,173 @@
16608 +/*
16609 + * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
16610 + *
16611 + * Provides a framework for enqueuing callbacks from irq context in a
16612 + * PREEMPT_RT_FULL-safe way. The callbacks are executed in kthread context.
16613 + */
16614 +
16615 +#include <linux/swait.h>
16616 +#include <linux/swork.h>
16617 +#include <linux/kthread.h>
16618 +#include <linux/slab.h>
16619 +#include <linux/spinlock.h>
16620 +#include <linux/export.h>
16621 +
16622 +#define SWORK_EVENT_PENDING     (1 << 0)
16623 +
16624 +static DEFINE_MUTEX(worker_mutex);
16625 +static struct sworker *glob_worker;
16626 +
16627 +struct sworker {
16628 +       struct list_head events;
16629 +       struct swait_queue_head wq;
16630 +
16631 +       raw_spinlock_t lock;
16632 +
16633 +       struct task_struct *task;
16634 +       int refs;
16635 +};
16636 +
16637 +static bool swork_readable(struct sworker *worker)
16638 +{
16639 +       bool r;
16640 +
16641 +       if (kthread_should_stop())
16642 +               return true;
16643 +
16644 +       raw_spin_lock_irq(&worker->lock);
16645 +       r = !list_empty(&worker->events);
16646 +       raw_spin_unlock_irq(&worker->lock);
16647 +
16648 +       return r;
16649 +}
16650 +
16651 +static int swork_kthread(void *arg)
16652 +{
16653 +       struct sworker *worker = arg;
16654 +
16655 +       for (;;) {
16656 +               swait_event_interruptible(worker->wq,
16657 +                                       swork_readable(worker));
16658 +               if (kthread_should_stop())
16659 +                       break;
16660 +
16661 +               raw_spin_lock_irq(&worker->lock);
16662 +               while (!list_empty(&worker->events)) {
16663 +                       struct swork_event *sev;
16664 +
16665 +                       sev = list_first_entry(&worker->events,
16666 +                                       struct swork_event, item);
16667 +                       list_del(&sev->item);
16668 +                       raw_spin_unlock_irq(&worker->lock);
16669 +
16670 +                       WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
16671 +                                                        &sev->flags));
16672 +                       sev->func(sev);
16673 +                       raw_spin_lock_irq(&worker->lock);
16674 +               }
16675 +               raw_spin_unlock_irq(&worker->lock);
16676 +       }
16677 +       return 0;
16678 +}
16679 +
16680 +static struct sworker *swork_create(void)
16681 +{
16682 +       struct sworker *worker;
16683 +
16684 +       worker = kzalloc(sizeof(*worker), GFP_KERNEL);
16685 +       if (!worker)
16686 +               return ERR_PTR(-ENOMEM);
16687 +
16688 +       INIT_LIST_HEAD(&worker->events);
16689 +       raw_spin_lock_init(&worker->lock);
16690 +       init_swait_queue_head(&worker->wq);
16691 +
16692 +       worker->task = kthread_run(swork_kthread, worker, "kswork");
16693 +       if (IS_ERR(worker->task)) {
16694 +               kfree(worker);
16695 +               return ERR_PTR(-ENOMEM);
16696 +       }
16697 +
16698 +       return worker;
16699 +}
16700 +
16701 +static void swork_destroy(struct sworker *worker)
16702 +{
16703 +       kthread_stop(worker->task);
16704 +
16705 +       WARN_ON(!list_empty(&worker->events));
16706 +       kfree(worker);
16707 +}
16708 +
16709 +/**
16710 + * swork_queue - queue swork
16711 + *
16712 + * Returns %false if @sev was already queued, %true otherwise.
16713 + *
16714 + * The work is queued and processed on an arbitrary CPU.
16715 + */
16716 +bool swork_queue(struct swork_event *sev)
16717 +{
16718 +       unsigned long flags;
16719 +
16720 +       if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
16721 +               return false;
16722 +
16723 +       raw_spin_lock_irqsave(&glob_worker->lock, flags);
16724 +       list_add_tail(&sev->item, &glob_worker->events);
16725 +       raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
16726 +
16727 +       swake_up(&glob_worker->wq);
16728 +       return true;
16729 +}
16730 +EXPORT_SYMBOL_GPL(swork_queue);
16731 +
16732 +/**
16733 + * swork_get - get an instance of the sworker
16734 + *
16735 + * Returns a negative error code if the initialization of the worker did not
16736 + * work, %0 otherwise.
16737 + *
16738 + */
16739 +int swork_get(void)
16740 +{
16741 +       struct sworker *worker;
16742 +
16743 +       mutex_lock(&worker_mutex);
16744 +       if (!glob_worker) {
16745 +               worker = swork_create();
16746 +               if (IS_ERR(worker)) {
16747 +                       mutex_unlock(&worker_mutex);
16748 +                       return -ENOMEM;
16749 +               }
16750 +
16751 +               glob_worker = worker;
16752 +       }
16753 +
16754 +       glob_worker->refs++;
16755 +       mutex_unlock(&worker_mutex);
16756 +
16757 +       return 0;
16758 +}
16759 +EXPORT_SYMBOL_GPL(swork_get);
16760 +
16761 +/**
16762 + * swork_put - puts an instance of the sworker
16763 + *
16764 + * Will destroy the sworker thread. This function must not be called until all
16765 + * queued events have been completed.
16766 + */
16767 +void swork_put(void)
16768 +{
16769 +       mutex_lock(&worker_mutex);
16770 +
16771 +       glob_worker->refs--;
16772 +       if (glob_worker->refs > 0)
16773 +               goto out;
16774 +
16775 +       swork_destroy(glob_worker);
16776 +       glob_worker = NULL;
16777 +out:
16778 +       mutex_unlock(&worker_mutex);
16779 +}
16780 +EXPORT_SYMBOL_GPL(swork_put);
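For illustration only (not part of the patch): a sketch of how a client of the swork API above might look. The struct swork_event layout (item/flags/func) and any init helper live in include/linux/swork.h, which is added elsewhere in this patch and not shown here, so the open-coded initialization below is an assumption:

#include <linux/swork.h>
#include <linux/interrupt.h>
#include <linux/printk.h>
#include <linux/list.h>

static void my_event_fn(struct swork_event *sev)
{
	/* Runs in the "kswork" kthread, fully preemptible on RT. */
	pr_info("deferred event handled\n");
}

static struct swork_event my_event;

static int my_setup(void)
{
	int ret;

	/* Create or take a reference on the global worker thread. */
	ret = swork_get();
	if (ret)
		return ret;

	/* Assumed open-coded init; swork.h presumably provides a helper. */
	INIT_LIST_HEAD(&my_event.item);
	my_event.flags = 0;
	my_event.func = my_event_fn;
	return 0;
}

static irqreturn_t my_irq(int irq, void *data)
{
	/* Legal from hard-IRQ context; returns false if already queued. */
	swork_queue(&my_event);
	return IRQ_HANDLED;
}

static void my_teardown(void)
{
	/* Drop the reference once no more events can be queued. */
	swork_put();
}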
16781 diff --git a/kernel/signal.c b/kernel/signal.c
16782 index 75761acc77cf..ae0773c76bb0 100644
16783 --- a/kernel/signal.c
16784 +++ b/kernel/signal.c
16785 @@ -14,6 +14,7 @@
16786  #include <linux/export.h>
16787  #include <linux/init.h>
16788  #include <linux/sched.h>
16789 +#include <linux/sched/rt.h>
16790  #include <linux/fs.h>
16791  #include <linux/tty.h>
16792  #include <linux/binfmts.h>
16793 @@ -352,13 +353,30 @@ static bool task_participate_group_stop(struct task_struct *task)
16794         return false;
16795  }
16796  
16797 +static inline struct sigqueue *get_task_cache(struct task_struct *t)
16798 +{
16799 +       struct sigqueue *q = t->sigqueue_cache;
16800 +
16801 +       if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
16802 +               return NULL;
16803 +       return q;
16804 +}
16805 +
16806 +static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
16807 +{
16808 +       if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
16809 +               return 0;
16810 +       return 1;
16811 +}
16812 +
16813  /*
16814   * allocate a new signal queue record
16815   * - this may be called without locks if and only if t == current, otherwise an
16816   *   appropriate lock must be held to stop the target task from exiting
16817   */
16818  static struct sigqueue *
16819 -__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
16820 +__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
16821 +                   int override_rlimit, int fromslab)
16822  {
16823         struct sigqueue *q = NULL;
16824         struct user_struct *user;
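For illustration only (not part of the patch): get_task_cache()/put_task_cache() above implement a one-entry, lock-free cache in t->sigqueue_cache using cmpxchg(), so an RT task can recycle its last sigqueue without touching the slab allocator on the fast path. The same single-slot pattern in isolation, with a placeholder object type:

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct obj;	/* placeholder for whatever is being cached */

/* Take the cached object, or NULL if the slot is empty or we raced. */
static struct obj *slot_get(struct obj **slot)
{
	struct obj *o = READ_ONCE(*slot);

	if (o && cmpxchg(slot, o, NULL) == o)
		return o;	/* the slot is now empty and we own 'o' */
	return NULL;
}

/* Stash an object; fails (returns false) if the slot is already occupied. */
static bool slot_put(struct obj **slot, struct obj *o)
{
	return cmpxchg(slot, NULL, o) == NULL;
}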
16825 @@ -375,7 +393,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
16826         if (override_rlimit ||
16827             atomic_read(&user->sigpending) <=
16828                         task_rlimit(t, RLIMIT_SIGPENDING)) {
16829 -               q = kmem_cache_alloc(sigqueue_cachep, flags);
16830 +               if (!fromslab)
16831 +                       q = get_task_cache(t);
16832 +               if (!q)
16833 +                       q = kmem_cache_alloc(sigqueue_cachep, flags);
16834         } else {
16835                 print_dropped_signal(sig);
16836         }
16837 @@ -392,6 +413,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
16838         return q;
16839  }
16840  
16841 +static struct sigqueue *
16842 +__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
16843 +                int override_rlimit)
16844 +{
16845 +       return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
16846 +}
16847 +
16848  static void __sigqueue_free(struct sigqueue *q)
16849  {
16850         if (q->flags & SIGQUEUE_PREALLOC)
16851 @@ -401,6 +429,21 @@ static void __sigqueue_free(struct sigqueue *q)
16852         kmem_cache_free(sigqueue_cachep, q);
16853  }
16854  
16855 +static void sigqueue_free_current(struct sigqueue *q)
16856 +{
16857 +       struct user_struct *up;
16858 +
16859 +       if (q->flags & SIGQUEUE_PREALLOC)
16860 +               return;
16861 +
16862 +       up = q->user;
16863 +       if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
16864 +               atomic_dec(&up->sigpending);
16865 +               free_uid(up);
16866 +       } else
16867 +                 __sigqueue_free(q);
16868 +}
16869 +
16870  void flush_sigqueue(struct sigpending *queue)
16871  {
16872         struct sigqueue *q;
16873 @@ -414,6 +457,21 @@ void flush_sigqueue(struct sigpending *queue)
16874  }
16875  
16876  /*
16877 + * Called from __exit_signal. Flush tsk->pending and
16878 + * tsk->sigqueue_cache
16879 + */
16880 +void flush_task_sigqueue(struct task_struct *tsk)
16881 +{
16882 +       struct sigqueue *q;
16883 +
16884 +       flush_sigqueue(&tsk->pending);
16885 +
16886 +       q = get_task_cache(tsk);
16887 +       if (q)
16888 +               kmem_cache_free(sigqueue_cachep, q);
16889 +}
16890 +
16891 +/*
16892   * Flush all pending signals for this kthread.
16893   */
16894  void flush_signals(struct task_struct *t)
16895 @@ -525,7 +583,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
16896  still_pending:
16897                 list_del_init(&first->list);
16898                 copy_siginfo(info, &first->info);
16899 -               __sigqueue_free(first);
16900 +               sigqueue_free_current(first);
16901         } else {
16902                 /*
16903                  * Ok, it wasn't in the queue.  This must be
16904 @@ -560,6 +618,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
16905  {
16906         int signr;
16907  
16908 +       WARN_ON_ONCE(tsk != current);
16909 +
16910         /* We only dequeue private signals from ourselves, we don't let
16911          * signalfd steal them
16912          */
16913 @@ -1156,8 +1216,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
16914   * We don't want to have recursive SIGSEGV's etc, for example,
16915   * that is why we also clear SIGNAL_UNKILLABLE.
16916   */
16917 -int
16918 -force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
16919 +static int
16920 +do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
16921  {
16922         unsigned long int flags;
16923         int ret, blocked, ignored;
16924 @@ -1182,6 +1242,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
16925         return ret;
16926  }
16927  
16928 +int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
16929 +{
16930 +/*
16931 + * On some archs, PREEMPT_RT has to delay sending a signal from a trap
16932 + * since it cannot enable preemption there, and the signal code's spin_locks
16933 + * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME, which will
16934 + * send the signal on exit from the trap.
16935 + */
16936 +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
16937 +       if (in_atomic()) {
16938 +               if (WARN_ON_ONCE(t != current))
16939 +                       return 0;
16940 +               if (WARN_ON_ONCE(t->forced_info.si_signo))
16941 +                       return 0;
16942 +
16943 +               if (is_si_special(info)) {
16944 +                       WARN_ON_ONCE(info != SEND_SIG_PRIV);
16945 +                       t->forced_info.si_signo = sig;
16946 +                       t->forced_info.si_errno = 0;
16947 +                       t->forced_info.si_code = SI_KERNEL;
16948 +                       t->forced_info.si_pid = 0;
16949 +                       t->forced_info.si_uid = 0;
16950 +               } else {
16951 +                       t->forced_info = *info;
16952 +               }
16953 +
16954 +               set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
16955 +               return 0;
16956 +       }
16957 +#endif
16958 +       return do_force_sig_info(sig, info, t);
16959 +}
16960 +
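The force_sig_info() wrapper above postpones a forced signal raised from atomic (trap) context on RT: the siginfo is parked in t->forced_info and TIF_NOTIFY_RESUME makes the exit-to-user path deliver it once taking the (now sleeping) signal locks is legal again. A minimal single-threaded userspace sketch of the same "record now, deliver at the next safe point" pattern follows; the names and the in_atomic_context flag are stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

struct pending_sig {
	bool valid;
	int signo;
};

static struct pending_sig forced_info;	/* per-task slot, like t->forced_info */
static bool in_atomic_context;		/* stands in for in_atomic() */

/* called from "trap" context: defer if we must not take sleeping locks */
static void force_sig_sketch(int signo)
{
	if (in_atomic_context) {
		forced_info.valid = true;	/* park it ... */
		forced_info.signo = signo;	/* ... and let exit-to-user send it */
		return;
	}
	printf("delivering signal %d immediately\n", signo);
}

/* called at the (simulated) return-to-user checkpoint */
static void exit_to_user_sketch(void)
{
	if (forced_info.valid) {
		printf("delivering deferred signal %d on exit\n", forced_info.signo);
		forced_info.valid = false;
	}
}

int main(void)
{
	in_atomic_context = true;
	force_sig_sketch(11);		/* e.g. a fault hit inside an atomic section */
	in_atomic_context = false;
	exit_to_user_sketch();		/* the parked signal is sent here */
	return 0;
}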
16961  /*
16962   * Nuke all other threads in the group.
16963   */
16964 @@ -1216,12 +1309,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
16965                  * Disable interrupts early to avoid deadlocks.
16966                  * See rcu_read_unlock() comment header for details.
16967                  */
16968 -               local_irq_save(*flags);
16969 +               local_irq_save_nort(*flags);
16970                 rcu_read_lock();
16971                 sighand = rcu_dereference(tsk->sighand);
16972                 if (unlikely(sighand == NULL)) {
16973                         rcu_read_unlock();
16974 -                       local_irq_restore(*flags);
16975 +                       local_irq_restore_nort(*flags);
16976                         break;
16977                 }
16978                 /*
16979 @@ -1242,7 +1335,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
16980                 }
16981                 spin_unlock(&sighand->siglock);
16982                 rcu_read_unlock();
16983 -               local_irq_restore(*flags);
16984 +               local_irq_restore_nort(*flags);
16985         }
16986  
16987         return sighand;
16988 @@ -1485,7 +1578,8 @@ EXPORT_SYMBOL(kill_pid);
16989   */
16990  struct sigqueue *sigqueue_alloc(void)
16991  {
16992 -       struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
16993 +       /* Preallocated sigqueue objects always come from the slab cache! */
16994 +       struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
16995  
16996         if (q)
16997                 q->flags |= SIGQUEUE_PREALLOC;
16998 @@ -1846,15 +1940,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
16999                 if (gstop_done && ptrace_reparented(current))
17000                         do_notify_parent_cldstop(current, false, why);
17001  
17002 -               /*
17003 -                * Don't want to allow preemption here, because
17004 -                * sys_ptrace() needs this task to be inactive.
17005 -                *
17006 -                * XXX: implement read_unlock_no_resched().
17007 -                */
17008 -               preempt_disable();
17009                 read_unlock(&tasklist_lock);
17010 -               preempt_enable_no_resched();
17011                 freezable_schedule();
17012         } else {
17013                 /*
17014 diff --git a/kernel/softirq.c b/kernel/softirq.c
17015 index 744fa611cae0..819bd7cf5ad0 100644
17016 --- a/kernel/softirq.c
17017 +++ b/kernel/softirq.c
17018 @@ -21,10 +21,12 @@
17019  #include <linux/freezer.h>
17020  #include <linux/kthread.h>
17021  #include <linux/rcupdate.h>
17022 +#include <linux/delay.h>
17023  #include <linux/ftrace.h>
17024  #include <linux/smp.h>
17025  #include <linux/smpboot.h>
17026  #include <linux/tick.h>
17027 +#include <linux/locallock.h>
17028  #include <linux/irq.h>
17029  
17030  #define CREATE_TRACE_POINTS
17031 @@ -56,12 +58,108 @@ EXPORT_SYMBOL(irq_stat);
17032  static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
17033  
17034  DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
17035 +#ifdef CONFIG_PREEMPT_RT_FULL
17036 +#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
17037 +DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd);
17038 +#endif
17039  
17040  const char * const softirq_to_name[NR_SOFTIRQS] = {
17041         "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
17042         "TASKLET", "SCHED", "HRTIMER", "RCU"
17043  };
17044  
17045 +#ifdef CONFIG_NO_HZ_COMMON
17046 +# ifdef CONFIG_PREEMPT_RT_FULL
17047 +
17048 +struct softirq_runner {
17049 +       struct task_struct *runner[NR_SOFTIRQS];
17050 +};
17051 +
17052 +static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
17053 +
17054 +static inline void softirq_set_runner(unsigned int sirq)
17055 +{
17056 +       struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
17057 +
17058 +       sr->runner[sirq] = current;
17059 +}
17060 +
17061 +static inline void softirq_clr_runner(unsigned int sirq)
17062 +{
17063 +       struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
17064 +
17065 +       sr->runner[sirq] = NULL;
17066 +}
17067 +
17068 +/*
17069 + * On preempt-rt a softirq running context might be blocked on a
17070 + * lock. There might be no other runnable task on this CPU because the
17071 + * lock owner runs on some other CPU. So we have to go into idle with
17072 + * the pending bit set. Therefore we need to check this, otherwise we
17073 + * warn about false positives which confuse users and defeat the
17074 + * whole purpose of this test.
17075 + *
17076 + * This code is called with interrupts disabled.
17077 + */
17078 +void softirq_check_pending_idle(void)
17079 +{
17080 +       static int rate_limit;
17081 +       struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
17082 +       u32 warnpending;
17083 +       int i;
17084 +
17085 +       if (rate_limit >= 10)
17086 +               return;
17087 +
17088 +       warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
17089 +       for (i = 0; i < NR_SOFTIRQS; i++) {
17090 +               struct task_struct *tsk = sr->runner[i];
17091 +
17092 +               /*
17093 +                * The wakeup code in rtmutex.c wakes up the task
17094 +                * _before_ it sets pi_blocked_on to NULL under
17095 +                * tsk->pi_lock. So we need to check for both: state
17096 +                * and pi_blocked_on.
17097 +                */
17098 +               if (tsk) {
17099 +                       raw_spin_lock(&tsk->pi_lock);
17100 +                       if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
17101 +                               /* Clear all bits pending in that task */
17102 +                               warnpending &= ~(tsk->softirqs_raised);
17103 +                               warnpending &= ~(1 << i);
17104 +                       }
17105 +                       raw_spin_unlock(&tsk->pi_lock);
17106 +               }
17107 +       }
17108 +
17109 +       if (warnpending) {
17110 +               printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
17111 +                      warnpending);
17112 +               rate_limit++;
17113 +       }
17114 +}
17115 +# else
17116 +/*
17117 + * On !PREEMPT_RT we just printk rate limited:
17118 + */
17119 +void softirq_check_pending_idle(void)
17120 +{
17121 +       static int rate_limit;
17122 +
17123 +       if (rate_limit < 10 &&
17124 +                       (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
17125 +               printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
17126 +                      local_softirq_pending());
17127 +               rate_limit++;
17128 +       }
17129 +}
17130 +# endif
17131 +
17132 +#else /* !CONFIG_NO_HZ_COMMON */
17133 +static inline void softirq_set_runner(unsigned int sirq) { }
17134 +static inline void softirq_clr_runner(unsigned int sirq) { }
17135 +#endif
17136 +
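softirq_check_pending_idle() above only warns about softirqs that are still pending when the CPU goes idle if nobody is going to handle them: bits whose runner task is merely blocked on a lock (or already runnable) are filtered out, and the message is rate limited to ten occurrences. A much simplified userspace sketch of that filtering follows; the runner bookkeeping is faked with plain flags instead of task state and pi_blocked_on.

#include <stdbool.h>
#include <stdio.h>

#define NR_SOFTIRQS 10

static unsigned int pending;			/* local_softirq_pending() stand-in */
static bool runner_blocked[NR_SOFTIRQS];	/* "owner exists but is blocked on a lock" */

static void check_pending_idle(void)
{
	static int rate_limit;
	unsigned int warnpending = pending;
	int i;

	if (rate_limit >= 10)
		return;

	for (i = 0; i < NR_SOFTIRQS; i++) {
		/* a blocked runner will finish the job once it gets its lock back */
		if (runner_blocked[i])
			warnpending &= ~(1U << i);
	}

	if (warnpending) {
		printf("NOHZ: local_softirq_pending %02x\n", warnpending);
		rate_limit++;
	}
}

int main(void)
{
	pending = 0x09;			/* vectors 0 and 3 pending */
	runner_blocked[3] = true;	/* vector 3 has a blocked owner: don't warn for it */
	check_pending_idle();		/* warns only about vector 0 */
	return 0;
}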
17137  /*
17138   * we cannot loop indefinitely here to avoid userspace starvation,
17139   * but we also don't want to introduce a worst case 1/HZ latency
17140 @@ -77,6 +175,38 @@ static void wakeup_softirqd(void)
17141                 wake_up_process(tsk);
17142  }
17143  
17144 +#ifdef CONFIG_PREEMPT_RT_FULL
17145 +static void wakeup_timer_softirqd(void)
17146 +{
17147 +       /* Interrupts are disabled: no need to stop preemption */
17148 +       struct task_struct *tsk = __this_cpu_read(ktimer_softirqd);
17149 +
17150 +       if (tsk && tsk->state != TASK_RUNNING)
17151 +               wake_up_process(tsk);
17152 +}
17153 +#endif
17154 +
17155 +static void handle_softirq(unsigned int vec_nr)
17156 +{
17157 +       struct softirq_action *h = softirq_vec + vec_nr;
17158 +       int prev_count;
17159 +
17160 +       prev_count = preempt_count();
17161 +
17162 +       kstat_incr_softirqs_this_cpu(vec_nr);
17163 +
17164 +       trace_softirq_entry(vec_nr);
17165 +       h->action(h);
17166 +       trace_softirq_exit(vec_nr);
17167 +       if (unlikely(prev_count != preempt_count())) {
17168 +               pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
17169 +                      vec_nr, softirq_to_name[vec_nr], h->action,
17170 +                      prev_count, preempt_count());
17171 +               preempt_count_set(prev_count);
17172 +       }
17173 +}
17174 +
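handle_softirq() above snapshots preempt_count before invoking the handler and, if the handler returns with a different value, prints the mismatch and restores the snapshot so one buggy handler cannot poison the rest of the loop. The same guard pattern, reduced to plain C with an ordinary counter standing in for preempt_count:

#include <stdio.h>

static int preempt_count;	/* stand-in for the per-CPU preemption counter */

static void buggy_handler(void)
{
	preempt_count++;	/* "forgets" to drop the count it took */
}

static void run_handler_checked(void (*action)(void), const char *name)
{
	int prev_count = preempt_count;

	action();
	if (prev_count != preempt_count) {
		fprintf(stderr,
			"huh, entered softirq %s with preempt_count %08x, exited with %08x?\n",
			name, prev_count, preempt_count);
		preempt_count = prev_count;	/* repair the damage and carry on */
	}
}

int main(void)
{
	run_handler_checked(buggy_handler, "BUGGY");
	printf("preempt_count after repair: %d\n", preempt_count);
	return 0;
}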
17175 +#ifndef CONFIG_PREEMPT_RT_FULL
17176  /*
17177   * If ksoftirqd is scheduled, we do not want to process pending softirqs
17178   * right now. Let ksoftirqd handle this at its own rate, to get fairness.
17179 @@ -88,6 +218,47 @@ static bool ksoftirqd_running(void)
17180         return tsk && (tsk->state == TASK_RUNNING);
17181  }
17182  
17183 +static inline int ksoftirqd_softirq_pending(void)
17184 +{
17185 +       return local_softirq_pending();
17186 +}
17187 +
17188 +static void handle_pending_softirqs(u32 pending)
17189 +{
17190 +       struct softirq_action *h = softirq_vec;
17191 +       int softirq_bit;
17192 +
17193 +       local_irq_enable();
17194 +
17195 +       h = softirq_vec;
17196 +
17197 +       while ((softirq_bit = ffs(pending))) {
17198 +               unsigned int vec_nr;
17199 +
17200 +               h += softirq_bit - 1;
17201 +               vec_nr = h - softirq_vec;
17202 +               handle_softirq(vec_nr);
17203 +
17204 +               h++;
17205 +               pending >>= softirq_bit;
17206 +       }
17207 +
17208 +       rcu_bh_qs();
17209 +       local_irq_disable();
17210 +}
17211 +
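handle_pending_softirqs() walks the pending word with ffs(): each iteration advances the handler pointer by the distance to the next set bit, runs that vector, then shifts the handled bit (and everything below it) out of the local copy, so every set bit is visited exactly once in ascending order. A standalone version of the same walk, with an index playing the role of the moving h pointer:

#include <stdio.h>
#include <strings.h>	/* ffs() */

static void handler(unsigned int vec_nr)
{
	printf("running vector %u\n", vec_nr);
}

static void handle_pending(unsigned int pending)
{
	unsigned int base = 0;	/* plays the role of the moving 'h' pointer */
	int bit;

	while ((bit = ffs(pending))) {
		base += bit - 1;	/* index of the next pending vector */
		handler(base);
		base++;			/* h++ in the original */
		pending >>= bit;	/* drop the handled bit and everything below it */
	}
}

int main(void)
{
	handle_pending(0x25);	/* vectors 0, 2 and 5 run in that order */
	return 0;
}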
17212 +static void run_ksoftirqd(unsigned int cpu)
17213 +{
17214 +       local_irq_disable();
17215 +       if (ksoftirqd_softirq_pending()) {
17216 +               __do_softirq();
17217 +               local_irq_enable();
17218 +               cond_resched_rcu_qs();
17219 +               return;
17220 +       }
17221 +       local_irq_enable();
17222 +}
17223 +
17224  /*
17225   * preempt_count and SOFTIRQ_OFFSET usage:
17226   * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
17227 @@ -243,10 +414,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
17228         unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
17229         unsigned long old_flags = current->flags;
17230         int max_restart = MAX_SOFTIRQ_RESTART;
17231 -       struct softirq_action *h;
17232         bool in_hardirq;
17233         __u32 pending;
17234 -       int softirq_bit;
17235  
17236         /*
17237          * Mask out PF_MEMALLOC as current task context is borrowed for the
17238 @@ -265,36 +434,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
17239         /* Reset the pending bitmask before enabling irqs */
17240         set_softirq_pending(0);
17241  
17242 -       local_irq_enable();
17243 -
17244 -       h = softirq_vec;
17245 -
17246 -       while ((softirq_bit = ffs(pending))) {
17247 -               unsigned int vec_nr;
17248 -               int prev_count;
17249 -
17250 -               h += softirq_bit - 1;
17251 -
17252 -               vec_nr = h - softirq_vec;
17253 -               prev_count = preempt_count();
17254 -
17255 -               kstat_incr_softirqs_this_cpu(vec_nr);
17256 -
17257 -               trace_softirq_entry(vec_nr);
17258 -               h->action(h);
17259 -               trace_softirq_exit(vec_nr);
17260 -               if (unlikely(prev_count != preempt_count())) {
17261 -                       pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
17262 -                              vec_nr, softirq_to_name[vec_nr], h->action,
17263 -                              prev_count, preempt_count());
17264 -                       preempt_count_set(prev_count);
17265 -               }
17266 -               h++;
17267 -               pending >>= softirq_bit;
17268 -       }
17269 -
17270 -       rcu_bh_qs();
17271 -       local_irq_disable();
17272 +       handle_pending_softirqs(pending);
17273  
17274         pending = local_softirq_pending();
17275         if (pending) {
17276 @@ -331,6 +471,309 @@ asmlinkage __visible void do_softirq(void)
17277  }
17278  
17279  /*
17280 + * This function must run with irqs disabled!
17281 + */
17282 +void raise_softirq_irqoff(unsigned int nr)
17283 +{
17284 +       __raise_softirq_irqoff(nr);
17285 +
17286 +       /*
17287 +        * If we're in an interrupt or softirq, we're done
17288 +        * (this also catches softirq-disabled code). We will
17289 +        * actually run the softirq once we return from
17290 +        * the irq or softirq.
17291 +        *
17292 +        * Otherwise we wake up ksoftirqd to make sure we
17293 +        * schedule the softirq soon.
17294 +        */
17295 +       if (!in_interrupt())
17296 +               wakeup_softirqd();
17297 +}
17298 +
17299 +void __raise_softirq_irqoff(unsigned int nr)
17300 +{
17301 +       trace_softirq_raise(nr);
17302 +       or_softirq_pending(1UL << nr);
17303 +}
17304 +
17305 +static inline void local_bh_disable_nort(void) { local_bh_disable(); }
17306 +static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
17307 +static void ksoftirqd_set_sched_params(unsigned int cpu) { }
17308 +
17309 +#else /* !PREEMPT_RT_FULL */
17310 +
17311 +/*
17312 + * On RT we serialize softirq execution with a cpu local lock per softirq
17313 + */
17314 +static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
17315 +
17316 +void __init softirq_early_init(void)
17317 +{
17318 +       int i;
17319 +
17320 +       for (i = 0; i < NR_SOFTIRQS; i++)
17321 +               local_irq_lock_init(local_softirq_locks[i]);
17322 +}
17323 +
17324 +static void lock_softirq(int which)
17325 +{
17326 +       local_lock(local_softirq_locks[which]);
17327 +}
17328 +
17329 +static void unlock_softirq(int which)
17330 +{
17331 +       local_unlock(local_softirq_locks[which]);
17332 +}
17333 +
17334 +static void do_single_softirq(int which)
17335 +{
17336 +       unsigned long old_flags = current->flags;
17337 +
17338 +       current->flags &= ~PF_MEMALLOC;
17339 +       vtime_account_irq_enter(current);
17340 +       current->flags |= PF_IN_SOFTIRQ;
17341 +       lockdep_softirq_enter();
17342 +       local_irq_enable();
17343 +       handle_softirq(which);
17344 +       local_irq_disable();
17345 +       lockdep_softirq_exit();
17346 +       current->flags &= ~PF_IN_SOFTIRQ;
17347 +       vtime_account_irq_enter(current);
17348 +       tsk_restore_flags(current, old_flags, PF_MEMALLOC);
17349 +}
17350 +
17351 +/*
17352 + * Called with interrupts disabled. Process softirqs which were raised
17353 + * in current context (or on behalf of ksoftirqd).
17354 + */
17355 +static void do_current_softirqs(void)
17356 +{
17357 +       while (current->softirqs_raised) {
17358 +               int i = __ffs(current->softirqs_raised);
17359 +               unsigned int pending, mask = (1U << i);
17360 +
17361 +               current->softirqs_raised &= ~mask;
17362 +               local_irq_enable();
17363 +
17364 +               /*
17365 +                * If the lock is contended, we boost the owner to
17366 +                * process the softirq or leave the critical section
17367 +                * now.
17368 +                */
17369 +               lock_softirq(i);
17370 +               local_irq_disable();
17371 +               softirq_set_runner(i);
17372 +               /*
17373 +                * Check with the local_softirq_pending() bits,
17374 +                * Check the local_softirq_pending() bits to see whether
17375 +                * we still need to process this softirq or whether someone
17376 +                * else already took care of it.
17377 +               pending = local_softirq_pending();
17378 +               if (pending & mask) {
17379 +                       set_softirq_pending(pending & ~mask);
17380 +                       do_single_softirq(i);
17381 +               }
17382 +               softirq_clr_runner(i);
17383 +               WARN_ON(current->softirq_nestcnt != 1);
17384 +               local_irq_enable();
17385 +               unlock_softirq(i);
17386 +               local_irq_disable();
17387 +       }
17388 +}
17389 +
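With per-softirq local locks, do_current_softirqs() above clears the bit in softirqs_raised, takes the lock for that vector and then re-reads the global pending word under the lock, because a contended lock means someone else may already have processed the vector on our behalf. A single-threaded userspace sketch of that "recheck under the lock" step, with pthread mutexes standing in for the per-CPU local locks (no real concurrency is exercised here; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define NR_VECS 10

static pthread_mutex_t vec_lock[NR_VECS];
static unsigned int pending;		/* local_softirq_pending() stand-in */
static unsigned int raised;		/* current->softirqs_raised stand-in */

static void run_vector(int i)
{
	printf("processing vector %d\n", i);
}

static void do_current_softirqs_sketch(void)
{
	while (raised) {
		int i = ffs(raised) - 1;
		unsigned int mask = 1U << i;

		raised &= ~mask;
		pthread_mutex_lock(&vec_lock[i]);	/* serializes with other contexts */
		/*
		 * Recheck under the lock: if the bit is already gone, whoever
		 * held the lock before us took care of this vector.
		 */
		if (pending & mask) {
			pending &= ~mask;
			run_vector(i);
		}
		pthread_mutex_unlock(&vec_lock[i]);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < NR_VECS; i++)
		pthread_mutex_init(&vec_lock[i], NULL);

	raised = pending = (1U << 1) | (1U << 6);	/* two vectors raised */
	do_current_softirqs_sketch();
	return 0;
}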
17390 +void __local_bh_disable(void)
17391 +{
17392 +       if (++current->softirq_nestcnt == 1)
17393 +               migrate_disable();
17394 +}
17395 +EXPORT_SYMBOL(__local_bh_disable);
17396 +
17397 +void __local_bh_enable(void)
17398 +{
17399 +       if (WARN_ON(current->softirq_nestcnt == 0))
17400 +               return;
17401 +
17402 +       local_irq_disable();
17403 +       if (current->softirq_nestcnt == 1 && current->softirqs_raised)
17404 +               do_current_softirqs();
17405 +       local_irq_enable();
17406 +
17407 +       if (--current->softirq_nestcnt == 0)
17408 +               migrate_enable();
17409 +}
17410 +EXPORT_SYMBOL(__local_bh_enable);
17411 +
17412 +void _local_bh_enable(void)
17413 +{
17414 +       if (WARN_ON(current->softirq_nestcnt == 0))
17415 +               return;
17416 +       if (--current->softirq_nestcnt == 0)
17417 +               migrate_enable();
17418 +}
17419 +EXPORT_SYMBOL(_local_bh_enable);
17420 +
17421 +int in_serving_softirq(void)
17422 +{
17423 +       return current->flags & PF_IN_SOFTIRQ;
17424 +}
17425 +EXPORT_SYMBOL(in_serving_softirq);
17426 +
17427 +/* Called with preemption disabled */
17428 +static void run_ksoftirqd(unsigned int cpu)
17429 +{
17430 +       local_irq_disable();
17431 +       current->softirq_nestcnt++;
17432 +
17433 +       do_current_softirqs();
17434 +       current->softirq_nestcnt--;
17435 +       local_irq_enable();
17436 +       cond_resched_rcu_qs();
17437 +}
17438 +
17439 +/*
17440 + * Called from netif_rx_ni(). Preemption enabled, but migration
17441 + * disabled. So the cpu can't go away under us.
17442 + */
17443 +void thread_do_softirq(void)
17444 +{
17445 +       if (!in_serving_softirq() && current->softirqs_raised) {
17446 +               current->softirq_nestcnt++;
17447 +               do_current_softirqs();
17448 +               current->softirq_nestcnt--;
17449 +       }
17450 +}
17451 +
17452 +static void do_raise_softirq_irqoff(unsigned int nr)
17453 +{
17454 +       unsigned int mask;
17455 +
17456 +       mask = 1UL << nr;
17457 +
17458 +       trace_softirq_raise(nr);
17459 +       or_softirq_pending(mask);
17460 +
17461 +       /*
17462 +        * If we are not in a hard interrupt and inside a bh disabled
17463 +        * region, we simply raise the flag on current. local_bh_enable()
17464 +        * will make sure that the softirq is executed. Otherwise we
17465 +        * delegate it to ksoftirqd.
17466 +        */
17467 +       if (!in_irq() && current->softirq_nestcnt)
17468 +               current->softirqs_raised |= mask;
17469 +       else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd))
17470 +               return;
17471 +
17472 +       if (mask & TIMER_SOFTIRQS)
17473 +               __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
17474 +       else
17475 +               __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
17476 +}
17477 +
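do_raise_softirq_irqoff() above decides who will run a freshly raised vector: in thread context inside a bh-disabled region the bit goes onto current (local_bh_enable() will flush it), otherwise timer and hrtimer vectors are handed to the ktimersoftd thread and everything else to ksoftirqd. A small decision-table sketch of that routing; the vector numbers follow the softirq_to_name[] order quoted earlier in this diff, the rest is invented for the example.

#include <stdbool.h>
#include <stdio.h>

#define TIMER_SOFTIRQ		1
#define NET_RX_SOFTIRQ		3
#define HRTIMER_SOFTIRQ		8
#define TIMER_SOFTIRQS		((1U << TIMER_SOFTIRQ) | (1U << HRTIMER_SOFTIRQ))

static const char *route_softirq(unsigned int nr, bool in_hardirq, int bh_nestcnt)
{
	unsigned int mask = 1U << nr;

	if (!in_hardirq && bh_nestcnt)
		return "current task (flushed by local_bh_enable)";
	if (mask & TIMER_SOFTIRQS)
		return "ktimersoftd thread";
	return "ksoftirqd thread";
}

int main(void)
{
	printf("TIMER raised from hardirq      -> %s\n",
	       route_softirq(TIMER_SOFTIRQ, true, 0));
	printf("NET_RX raised under bh-disable -> %s\n",
	       route_softirq(NET_RX_SOFTIRQ, false, 1));
	printf("NET_RX raised from hardirq     -> %s\n",
	       route_softirq(NET_RX_SOFTIRQ, true, 0));
	return 0;
}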
17478 +static void wakeup_proper_softirq(unsigned int nr)
17479 +{
17480 +       if ((1UL << nr) & TIMER_SOFTIRQS)
17481 +               wakeup_timer_softirqd();
17482 +       else
17483 +               wakeup_softirqd();
17484 +}
17485 +
17486 +void __raise_softirq_irqoff(unsigned int nr)
17487 +{
17488 +       do_raise_softirq_irqoff(nr);
17489 +       if (!in_irq() && !current->softirq_nestcnt)
17490 +               wakeup_proper_softirq(nr);
17491 +}
17492 +
17493 +/*
17494 + * Same as __raise_softirq_irqoff() but will process them in ksoftirqd
17495 + */
17496 +void __raise_softirq_irqoff_ksoft(unsigned int nr)
17497 +{
17498 +       unsigned int mask;
17499 +
17500 +       if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) ||
17501 +                        !__this_cpu_read(ktimer_softirqd)))
17502 +               return;
17503 +       mask = 1UL << nr;
17504 +
17505 +       trace_softirq_raise(nr);
17506 +       or_softirq_pending(mask);
17507 +       if (mask & TIMER_SOFTIRQS)
17508 +               __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
17509 +       else
17510 +               __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
17511 +       wakeup_proper_softirq(nr);
17512 +}
17513 +
17514 +/*
17515 + * This function must run with irqs disabled!
17516 + */
17517 +void raise_softirq_irqoff(unsigned int nr)
17518 +{
17519 +       do_raise_softirq_irqoff(nr);
17520 +
17521 +       /*
17522 +        * If we're in a hard interrupt we let the irq return code deal
17523 +        * with the wakeup of ksoftirqd.
17524 +        */
17525 +       if (in_irq())
17526 +               return;
17527 +       /*
17528 +        * If we are in thread context but outside of a bh disabled
17529 +        * region, we need to wake ksoftirqd as well.
17530 +        *
17531 +        * CHECKME: Some of the places which do that could be wrapped
17532 +        * into local_bh_disable/enable pairs. Though it's unclear
17533 +        * whether this is worth the effort. To find those places just
17534 +        * raise a WARN() if the condition is met.
17535 +        */
17536 +       if (!current->softirq_nestcnt)
17537 +               wakeup_proper_softirq(nr);
17538 +}
17539 +
17540 +static inline int ksoftirqd_softirq_pending(void)
17541 +{
17542 +       return current->softirqs_raised;
17543 +}
17544 +
17545 +static inline void local_bh_disable_nort(void) { }
17546 +static inline void _local_bh_enable_nort(void) { }
17547 +
17548 +static inline void ksoftirqd_set_sched_params(unsigned int cpu)
17549 +{
17550 +       /* Take over all but timer pending softirqs when starting */
17551 +       local_irq_disable();
17552 +       current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS;
17553 +       local_irq_enable();
17554 +}
17555 +
17556 +static inline void ktimer_softirqd_set_sched_params(unsigned int cpu)
17557 +{
17558 +       struct sched_param param = { .sched_priority = 1 };
17559 +
17560 +       sched_setscheduler(current, SCHED_FIFO, &param);
17561 +
17562 +       /* Take over timer pending softirqs when starting */
17563 +       local_irq_disable();
17564 +       current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS;
17565 +       local_irq_enable();
17566 +}
17567 +
17568 +static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu,
17569 +                                                   bool online)
17570 +{
17571 +       struct sched_param param = { .sched_priority = 0 };
17572 +
17573 +       sched_setscheduler(current, SCHED_NORMAL, &param);
17574 +}
17575 +
17576 +static int ktimer_softirqd_should_run(unsigned int cpu)
17577 +{
17578 +       return current->softirqs_raised;
17579 +}
17580 +
17581 +#endif /* PREEMPT_RT_FULL */
17582 +/*
17583   * Enter an interrupt context.
17584   */
17585  void irq_enter(void)
17586 @@ -341,9 +784,9 @@ void irq_enter(void)
17587                  * Prevent raise_softirq from needlessly waking up ksoftirqd
17588                  * here, as softirq will be serviced on return from interrupt.
17589                  */
17590 -               local_bh_disable();
17591 +               local_bh_disable_nort();
17592                 tick_irq_enter();
17593 -               _local_bh_enable();
17594 +               _local_bh_enable_nort();
17595         }
17596  
17597         __irq_enter();
17598 @@ -351,6 +794,7 @@ void irq_enter(void)
17599  
17600  static inline void invoke_softirq(void)
17601  {
17602 +#ifndef CONFIG_PREEMPT_RT_FULL
17603         if (ksoftirqd_running())
17604                 return;
17605  
17606 @@ -373,6 +817,18 @@ static inline void invoke_softirq(void)
17607         } else {
17608                 wakeup_softirqd();
17609         }
17610 +#else /* PREEMPT_RT_FULL */
17611 +       unsigned long flags;
17612 +
17613 +       local_irq_save(flags);
17614 +       if (__this_cpu_read(ksoftirqd) &&
17615 +                       __this_cpu_read(ksoftirqd)->softirqs_raised)
17616 +               wakeup_softirqd();
17617 +       if (__this_cpu_read(ktimer_softirqd) &&
17618 +                       __this_cpu_read(ktimer_softirqd)->softirqs_raised)
17619 +               wakeup_timer_softirqd();
17620 +       local_irq_restore(flags);
17621 +#endif
17622  }
17623  
17624  static inline void tick_irq_exit(void)
17625 @@ -409,26 +865,6 @@ void irq_exit(void)
17626         trace_hardirq_exit(); /* must be last! */
17627  }
17628  
17629 -/*
17630 - * This function must run with irqs disabled!
17631 - */
17632 -inline void raise_softirq_irqoff(unsigned int nr)
17633 -{
17634 -       __raise_softirq_irqoff(nr);
17635 -
17636 -       /*
17637 -        * If we're in an interrupt or softirq, we're done
17638 -        * (this also catches softirq-disabled code). We will
17639 -        * actually run the softirq once we return from
17640 -        * the irq or softirq.
17641 -        *
17642 -        * Otherwise we wake up ksoftirqd to make sure we
17643 -        * schedule the softirq soon.
17644 -        */
17645 -       if (!in_interrupt())
17646 -               wakeup_softirqd();
17647 -}
17648 -
17649  void raise_softirq(unsigned int nr)
17650  {
17651         unsigned long flags;
17652 @@ -438,12 +874,6 @@ void raise_softirq(unsigned int nr)
17653         local_irq_restore(flags);
17654  }
17655  
17656 -void __raise_softirq_irqoff(unsigned int nr)
17657 -{
17658 -       trace_softirq_raise(nr);
17659 -       or_softirq_pending(1UL << nr);
17660 -}
17661 -
17662  void open_softirq(int nr, void (*action)(struct softirq_action *))
17663  {
17664         softirq_vec[nr].action = action;
17665 @@ -460,15 +890,45 @@ struct tasklet_head {
17666  static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
17667  static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
17668  
17669 +static inline void
17670 +__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
17671 +{
17672 +       if (tasklet_trylock(t)) {
17673 +again:
17674 +               /* We may have been preempted before tasklet_trylock
17675 +                * and __tasklet_action may have already run.
17676 +                * So double check the sched bit while the tasklet
17677 +                * is locked before adding it to the list.
17678 +                */
17679 +               if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
17680 +                       t->next = NULL;
17681 +                       *head->tail = t;
17682 +                       head->tail = &(t->next);
17683 +                       raise_softirq_irqoff(nr);
17684 +                       tasklet_unlock(t);
17685 +               } else {
17686 +                       /* This is subtle. If we hit the corner case above
17687 +                       /* This is subtle. If we hit the corner case above,
17688 +                        * it is possible that we get preempted right here,
17689 +                        * tasklet_schedule(), then this function, and
17690 +                        * failed on the trylock. Thus we must be sure
17691 +                        * before releasing the tasklet lock, that the
17692 +                        * SCHED_BIT is clear. Otherwise the tasklet
17693 +                        * may get its SCHED_BIT set, but not added to the
17694 +                        * list
17695 +                        */
17696 +                       if (!tasklet_tryunlock(t))
17697 +                               goto again;
17698 +               }
17699 +       }
17700 +}
17701 +
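The scheduling helper above only queues the tasklet while holding its RUN lock and only if the SCHED bit is still set; on the other branch it keeps retrying, because tasklet_tryunlock() (per the comment in __tasklet_action below) only allows the RUN -> 0 transition and therefore fails whenever SCHED was set again in the meantime, which would otherwise leave the tasklet marked scheduled but never queued. A tiny userspace sketch of just that state rule, using C11 atomics (single-threaded, no real race is exercised):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TASKLET_STATE_SCHED	(1UL << 0)
#define TASKLET_STATE_RUN	(1UL << 1)

struct tasklet { atomic_ulong state; };

/* RUN -> 0 only; fails if SCHED (or anything else) is set as well */
static bool tasklet_tryunlock(struct tasklet *t)
{
	unsigned long expected = TASKLET_STATE_RUN;

	return atomic_compare_exchange_strong(&t->state, &expected, 0);
}

int main(void)
{
	struct tasklet t = { 0 };

	/* we hold the RUN lock and SCHED is clear: the unlock succeeds */
	atomic_store(&t.state, TASKLET_STATE_RUN);
	printf("plain unlock:              %s\n",
	       tasklet_tryunlock(&t) ? "ok" : "retry");

	/* SCHED got set while we held RUN: the unlock must fail so we recheck */
	atomic_store(&t.state, TASKLET_STATE_RUN | TASKLET_STATE_SCHED);
	printf("unlock with SCHED pending: %s\n",
	       tasklet_tryunlock(&t) ? "ok" : "retry");
	return 0;
}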
17702  void __tasklet_schedule(struct tasklet_struct *t)
17703  {
17704         unsigned long flags;
17705  
17706         local_irq_save(flags);
17707 -       t->next = NULL;
17708 -       *__this_cpu_read(tasklet_vec.tail) = t;
17709 -       __this_cpu_write(tasklet_vec.tail, &(t->next));
17710 -       raise_softirq_irqoff(TASKLET_SOFTIRQ);
17711 +       __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
17712         local_irq_restore(flags);
17713  }
17714  EXPORT_SYMBOL(__tasklet_schedule);
17715 @@ -478,10 +938,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
17716         unsigned long flags;
17717  
17718         local_irq_save(flags);
17719 -       t->next = NULL;
17720 -       *__this_cpu_read(tasklet_hi_vec.tail) = t;
17721 -       __this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
17722 -       raise_softirq_irqoff(HI_SOFTIRQ);
17723 +       __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
17724         local_irq_restore(flags);
17725  }
17726  EXPORT_SYMBOL(__tasklet_hi_schedule);
17727 @@ -490,82 +947,122 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
17728  {
17729         BUG_ON(!irqs_disabled());
17730  
17731 -       t->next = __this_cpu_read(tasklet_hi_vec.head);
17732 -       __this_cpu_write(tasklet_hi_vec.head, t);
17733 -       __raise_softirq_irqoff(HI_SOFTIRQ);
17734 +       __tasklet_hi_schedule(t);
17735  }
17736  EXPORT_SYMBOL(__tasklet_hi_schedule_first);
17737  
17738 -static __latent_entropy void tasklet_action(struct softirq_action *a)
17739 +void  tasklet_enable(struct tasklet_struct *t)
17740  {
17741 -       struct tasklet_struct *list;
17742 +       if (!atomic_dec_and_test(&t->count))
17743 +               return;
17744 +       if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
17745 +               tasklet_schedule(t);
17746 +}
17747 +EXPORT_SYMBOL(tasklet_enable);
17748  
17749 -       local_irq_disable();
17750 -       list = __this_cpu_read(tasklet_vec.head);
17751 -       __this_cpu_write(tasklet_vec.head, NULL);
17752 -       __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
17753 -       local_irq_enable();
17754 +static void __tasklet_action(struct softirq_action *a,
17755 +                            struct tasklet_struct *list)
17756 +{
17757 +       int loops = 1000000;
17758  
17759         while (list) {
17760                 struct tasklet_struct *t = list;
17761  
17762                 list = list->next;
17763  
17764 -               if (tasklet_trylock(t)) {
17765 -                       if (!atomic_read(&t->count)) {
17766 -                               if (!test_and_clear_bit(TASKLET_STATE_SCHED,
17767 -                                                       &t->state))
17768 -                                       BUG();
17769 -                               t->func(t->data);
17770 -                               tasklet_unlock(t);
17771 -                               continue;
17772 -                       }
17773 -                       tasklet_unlock(t);
17774 +               /*
17775 +                * Should always succeed - after a tasklet got on the
17776 +                * list (after getting the SCHED bit set from 0 to 1),
17777 +                * nothing but the tasklet softirq it got queued to can
17778 +                * lock it:
17779 +                */
17780 +               if (!tasklet_trylock(t)) {
17781 +                       WARN_ON(1);
17782 +                       continue;
17783                 }
17784  
17785 -               local_irq_disable();
17786                 t->next = NULL;
17787 -               *__this_cpu_read(tasklet_vec.tail) = t;
17788 -               __this_cpu_write(tasklet_vec.tail, &(t->next));
17789 -               __raise_softirq_irqoff(TASKLET_SOFTIRQ);
17790 -               local_irq_enable();
17791 +
17792 +               /*
17793 +                * If we cannot handle the tasklet because it's disabled,
17794 +                * mark it as pending. tasklet_enable() will later
17795 +                * re-schedule the tasklet.
17796 +                */
17797 +               if (unlikely(atomic_read(&t->count))) {
17798 +out_disabled:
17799 +                       /* implicit unlock: */
17800 +                       wmb();
17801 +                       t->state = TASKLET_STATEF_PENDING;
17802 +                       continue;
17803 +               }
17804 +
17805 +               /*
17806 +                * After this point on the tasklet might be rescheduled
17807 +                * on another CPU, but it can only be added to another
17808 +                * CPU's tasklet list if we unlock the tasklet (which we
17809 +                * don't do yet).
17810 +                */
17811 +               if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
17812 +                       WARN_ON(1);
17813 +
17814 +again:
17815 +               t->func(t->data);
17816 +
17817 +               /*
17818 +                * Try to unlock the tasklet. We must use cmpxchg, because
17819 +                * another CPU might have scheduled or disabled the tasklet.
17820 +                * We only allow the STATE_RUN -> 0 transition here.
17821 +                */
17822 +               while (!tasklet_tryunlock(t)) {
17823 +                       /*
17824 +                        * If it got disabled meanwhile, bail out:
17825 +                        */
17826 +                       if (atomic_read(&t->count))
17827 +                               goto out_disabled;
17828 +                       /*
17829 +                        * If it got scheduled meanwhile, re-execute
17830 +                        * the tasklet function:
17831 +                        */
17832 +                       if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
17833 +                               goto again;
17834 +                       if (!--loops) {
17835 +                               printk("hm, tasklet state: %08lx\n", t->state);
17836 +                               WARN_ON(1);
17837 +                               tasklet_unlock(t);
17838 +                               break;
17839 +                       }
17840 +               }
17841         }
17842  }
17843  
17844 +static void tasklet_action(struct softirq_action *a)
17845 +{
17846 +       struct tasklet_struct *list;
17847 +
17848 +       local_irq_disable();
17849 +
17850 +       list = __this_cpu_read(tasklet_vec.head);
17851 +       __this_cpu_write(tasklet_vec.head, NULL);
17852 +       __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
17853 +
17854 +       local_irq_enable();
17855 +
17856 +       __tasklet_action(a, list);
17857 +}
17858 +
17859  static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
17860  {
17861         struct tasklet_struct *list;
17862  
17863         local_irq_disable();
17864 +
17865         list = __this_cpu_read(tasklet_hi_vec.head);
17866         __this_cpu_write(tasklet_hi_vec.head, NULL);
17867         __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
17868 +
17869         local_irq_enable();
17870  
17871 -       while (list) {
17872 -               struct tasklet_struct *t = list;
17873 -
17874 -               list = list->next;
17875 -
17876 -               if (tasklet_trylock(t)) {
17877 -                       if (!atomic_read(&t->count)) {
17878 -                               if (!test_and_clear_bit(TASKLET_STATE_SCHED,
17879 -                                                       &t->state))
17880 -                                       BUG();
17881 -                               t->func(t->data);
17882 -                               tasklet_unlock(t);
17883 -                               continue;
17884 -                       }
17885 -                       tasklet_unlock(t);
17886 -               }
17887 -
17888 -               local_irq_disable();
17889 -               t->next = NULL;
17890 -               *__this_cpu_read(tasklet_hi_vec.tail) = t;
17891 -               __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
17892 -               __raise_softirq_irqoff(HI_SOFTIRQ);
17893 -               local_irq_enable();
17894 -       }
17895 +       __tasklet_action(a, list);
17896  }
17897  
17898  void tasklet_init(struct tasklet_struct *t,
17899 @@ -586,7 +1083,7 @@ void tasklet_kill(struct tasklet_struct *t)
17900  
17901         while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
17902                 do {
17903 -                       yield();
17904 +                       msleep(1);
17905                 } while (test_bit(TASKLET_STATE_SCHED, &t->state));
17906         }
17907         tasklet_unlock_wait(t);
17908 @@ -660,25 +1157,26 @@ void __init softirq_init(void)
17909         open_softirq(HI_SOFTIRQ, tasklet_hi_action);
17910  }
17911  
17912 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
17913 +void tasklet_unlock_wait(struct tasklet_struct *t)
17914 +{
17915 +       while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
17916 +               /*
17917 +                * Hack for now to avoid this busy-loop:
17918 +                */
17919 +#ifdef CONFIG_PREEMPT_RT_FULL
17920 +               msleep(1);
17921 +#else
17922 +               barrier();
17923 +#endif
17924 +       }
17925 +}
17926 +EXPORT_SYMBOL(tasklet_unlock_wait);
17927 +#endif
17928 +
17929  static int ksoftirqd_should_run(unsigned int cpu)
17930  {
17931 -       return local_softirq_pending();
17932 -}
17933 -
17934 -static void run_ksoftirqd(unsigned int cpu)
17935 -{
17936 -       local_irq_disable();
17937 -       if (local_softirq_pending()) {
17938 -               /*
17939 -                * We can safely run softirq on inline stack, as we are not deep
17940 -                * in the task stack here.
17941 -                */
17942 -               __do_softirq();
17943 -               local_irq_enable();
17944 -               cond_resched_rcu_qs();
17945 -               return;
17946 -       }
17947 -       local_irq_enable();
17948 +       return ksoftirqd_softirq_pending();
17949  }
17950  
17951  #ifdef CONFIG_HOTPLUG_CPU
17952 @@ -745,17 +1243,31 @@ static int takeover_tasklets(unsigned int cpu)
17953  
17954  static struct smp_hotplug_thread softirq_threads = {
17955         .store                  = &ksoftirqd,
17956 +       .setup                  = ksoftirqd_set_sched_params,
17957         .thread_should_run      = ksoftirqd_should_run,
17958         .thread_fn              = run_ksoftirqd,
17959         .thread_comm            = "ksoftirqd/%u",
17960  };
17961  
17962 +#ifdef CONFIG_PREEMPT_RT_FULL
17963 +static struct smp_hotplug_thread softirq_timer_threads = {
17964 +       .store                  = &ktimer_softirqd,
17965 +       .setup                  = ktimer_softirqd_set_sched_params,
17966 +       .cleanup                = ktimer_softirqd_clr_sched_params,
17967 +       .thread_should_run      = ktimer_softirqd_should_run,
17968 +       .thread_fn              = run_ksoftirqd,
17969 +       .thread_comm            = "ktimersoftd/%u",
17970 +};
17971 +#endif
17972 +
17973  static __init int spawn_ksoftirqd(void)
17974  {
17975         cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
17976                                   takeover_tasklets);
17977         BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
17978 -
17979 +#ifdef CONFIG_PREEMPT_RT_FULL
17980 +       BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads));
17981 +#endif
17982         return 0;
17983  }
17984  early_initcall(spawn_ksoftirqd);
17985 diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
17986 index ec9ab2f01489..8b89dbedeaff 100644
17987 --- a/kernel/stop_machine.c
17988 +++ b/kernel/stop_machine.c
17989 @@ -36,7 +36,7 @@ struct cpu_stop_done {
17990  struct cpu_stopper {
17991         struct task_struct      *thread;
17992  
17993 -       spinlock_t              lock;
17994 +       raw_spinlock_t          lock;
17995         bool                    enabled;        /* is this stopper enabled? */
17996         struct list_head        works;          /* list of pending works */
17997  
17998 @@ -78,14 +78,14 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
17999         unsigned long flags;
18000         bool enabled;
18001  
18002 -       spin_lock_irqsave(&stopper->lock, flags);
18003 +       raw_spin_lock_irqsave(&stopper->lock, flags);
18004         enabled = stopper->enabled;
18005         if (enabled)
18006                 __cpu_stop_queue_work(stopper, work);
18007         else if (work->done)
18008                 cpu_stop_signal_done(work->done);
18009 -       spin_unlock_irqrestore(&stopper->lock, flags);
18010  
18011 +       raw_spin_unlock_irqrestore(&stopper->lock, flags);
18012         return enabled;
18013  }
18014  
18015 @@ -231,8 +231,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
18016         struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
18017         int err;
18018  retry:
18019 -       spin_lock_irq(&stopper1->lock);
18020 -       spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
18021 +       raw_spin_lock_irq(&stopper1->lock);
18022 +       raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
18023  
18024         err = -ENOENT;
18025         if (!stopper1->enabled || !stopper2->enabled)
18026 @@ -255,8 +255,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
18027         __cpu_stop_queue_work(stopper1, work1);
18028         __cpu_stop_queue_work(stopper2, work2);
18029  unlock:
18030 -       spin_unlock(&stopper2->lock);
18031 -       spin_unlock_irq(&stopper1->lock);
18032 +       raw_spin_unlock(&stopper2->lock);
18033 +       raw_spin_unlock_irq(&stopper1->lock);
18034  
18035         if (unlikely(err == -EDEADLK)) {
18036                 while (stop_cpus_in_progress)
18037 @@ -448,9 +448,9 @@ static int cpu_stop_should_run(unsigned int cpu)
18038         unsigned long flags;
18039         int run;
18040  
18041 -       spin_lock_irqsave(&stopper->lock, flags);
18042 +       raw_spin_lock_irqsave(&stopper->lock, flags);
18043         run = !list_empty(&stopper->works);
18044 -       spin_unlock_irqrestore(&stopper->lock, flags);
18045 +       raw_spin_unlock_irqrestore(&stopper->lock, flags);
18046         return run;
18047  }
18048  
18049 @@ -461,13 +461,13 @@ static void cpu_stopper_thread(unsigned int cpu)
18050  
18051  repeat:
18052         work = NULL;
18053 -       spin_lock_irq(&stopper->lock);
18054 +       raw_spin_lock_irq(&stopper->lock);
18055         if (!list_empty(&stopper->works)) {
18056                 work = list_first_entry(&stopper->works,
18057                                         struct cpu_stop_work, list);
18058                 list_del_init(&work->list);
18059         }
18060 -       spin_unlock_irq(&stopper->lock);
18061 +       raw_spin_unlock_irq(&stopper->lock);
18062  
18063         if (work) {
18064                 cpu_stop_fn_t fn = work->fn;
18065 @@ -475,6 +475,8 @@ static void cpu_stopper_thread(unsigned int cpu)
18066                 struct cpu_stop_done *done = work->done;
18067                 int ret;
18068  
18069 +               /* XXX */
18070 +
18071                 /* cpu stop callbacks must not sleep, make in_atomic() == T */
18072                 preempt_count_inc();
18073                 ret = fn(arg);
18074 @@ -541,7 +543,7 @@ static int __init cpu_stop_init(void)
18075         for_each_possible_cpu(cpu) {
18076                 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
18077  
18078 -               spin_lock_init(&stopper->lock);
18079 +               raw_spin_lock_init(&stopper->lock);
18080                 INIT_LIST_HEAD(&stopper->works);
18081         }
18082  
18083 diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
18084 index bb5ec425dfe0..8338b14ed3a3 100644
18085 --- a/kernel/time/hrtimer.c
18086 +++ b/kernel/time/hrtimer.c
18087 @@ -53,6 +53,7 @@
18088  #include <asm/uaccess.h>
18089  
18090  #include <trace/events/timer.h>
18091 +#include <trace/events/hist.h>
18092  
18093  #include "tick-internal.h"
18094  
18095 @@ -695,6 +696,29 @@ static void hrtimer_switch_to_hres(void)
18096         retrigger_next_event(NULL);
18097  }
18098  
18099 +#ifdef CONFIG_PREEMPT_RT_FULL
18100 +
18101 +static struct swork_event clock_set_delay_work;
18102 +
18103 +static void run_clock_set_delay(struct swork_event *event)
18104 +{
18105 +       clock_was_set();
18106 +}
18107 +
18108 +void clock_was_set_delayed(void)
18109 +{
18110 +       swork_queue(&clock_set_delay_work);
18111 +}
18112 +
18113 +static __init int create_clock_set_delay_thread(void)
18114 +{
18115 +       WARN_ON(swork_get());
18116 +       INIT_SWORK(&clock_set_delay_work, run_clock_set_delay);
18117 +       return 0;
18118 +}
18119 +early_initcall(create_clock_set_delay_thread);
18120 +#else /* PREEMPT_RT_FULL */
18121 +
18122  static void clock_was_set_work(struct work_struct *work)
18123  {
18124         clock_was_set();
18125 @@ -710,6 +734,7 @@ void clock_was_set_delayed(void)
18126  {
18127         schedule_work(&hrtimer_work);
18128  }
18129 +#endif
18130  
18131  #else
18132  
18133 @@ -719,11 +744,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
18134  static inline void hrtimer_switch_to_hres(void) { }
18135  static inline void
18136  hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
18137 -static inline int hrtimer_reprogram(struct hrtimer *timer,
18138 -                                   struct hrtimer_clock_base *base)
18139 -{
18140 -       return 0;
18141 -}
18142 +static inline void hrtimer_reprogram(struct hrtimer *timer,
18143 +                                    struct hrtimer_clock_base *base) { }
18144  static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
18145  static inline void retrigger_next_event(void *arg) { }
18146  
18147 @@ -855,6 +877,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
18148  }
18149  EXPORT_SYMBOL_GPL(hrtimer_forward);
18150  
18151 +#ifdef CONFIG_PREEMPT_RT_BASE
18152 +# define wake_up_timer_waiters(b)      wake_up(&(b)->wait)
18153 +
18154 +/**
18155 + * hrtimer_wait_for_timer - Wait for a running timer
18156 + *
18157 + * @timer:     timer to wait for
18158 + *
18159 + * The function waits on the waitqueue of the timer base in case the
18160 + * timer's callback function is currently executing. The
18161 + * waitqueue is woken up after the timer callback function has
18162 + * finished execution.
18163 + */
18164 +void hrtimer_wait_for_timer(const struct hrtimer *timer)
18165 +{
18166 +       struct hrtimer_clock_base *base = timer->base;
18167 +
18168 +       if (base && base->cpu_base && !timer->irqsafe)
18169 +               wait_event(base->cpu_base->wait,
18170 +                               !(hrtimer_callback_running(timer)));
18171 +}
18172 +
18173 +#else
18174 +# define wake_up_timer_waiters(b)      do { } while (0)
18175 +#endif
18176 +
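hrtimer_wait_for_timer() above is what hrtimer_cancel() calls (later in this diff) instead of spinning on cpu_relax(): the canceller sleeps on the base's waitqueue until the callback is no longer running, and wake_up_timer_waiters() on the callback side wakes it when the deferred callbacks have finished. The same wait/notify shape with a condition variable, as a userspace pthreads sketch (not the kernel waitqueue API; build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;	/* base->wait stand-in */
static bool callback_running = true;	/* "the callback has been dispatched" */

static void *timer_callback(void *arg)
{
	usleep(10000);				/* pretend the handler does some work */

	pthread_mutex_lock(&lock);
	callback_running = false;
	pthread_cond_broadcast(&done);		/* wake_up_timer_waiters() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void wait_for_timer(void)
{
	pthread_mutex_lock(&lock);
	while (callback_running)		/* wait_event(..., !callback_running) */
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t cb;

	pthread_create(&cb, NULL, timer_callback, NULL);
	wait_for_timer();			/* sleeps instead of busy-waiting */
	printf("callback finished, cancel can proceed\n");
	pthread_join(cb, NULL);
	return 0;
}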
18177  /*
18178   * enqueue_hrtimer - internal function to (re)start a timer
18179   *
18180 @@ -896,6 +944,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
18181         if (!(state & HRTIMER_STATE_ENQUEUED))
18182                 return;
18183  
18184 +       if (unlikely(!list_empty(&timer->cb_entry))) {
18185 +               list_del_init(&timer->cb_entry);
18186 +               return;
18187 +       }
18188 +
18189         if (!timerqueue_del(&base->active, &timer->node))
18190                 cpu_base->active_bases &= ~(1 << base->index);
18191  
18192 @@ -991,7 +1044,16 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
18193         new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
18194  
18195         timer_stats_hrtimer_set_start_info(timer);
18196 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
18197 +       {
18198 +               ktime_t now = new_base->get_time();
18199  
18200 +               if (ktime_to_ns(tim) < ktime_to_ns(now))
18201 +                       timer->praecox = now;
18202 +               else
18203 +                       timer->praecox = ktime_set(0, 0);
18204 +       }
18205 +#endif
18206         leftmost = enqueue_hrtimer(timer, new_base);
18207         if (!leftmost)
18208                 goto unlock;
18209 @@ -1063,7 +1125,7 @@ int hrtimer_cancel(struct hrtimer *timer)
18210  
18211                 if (ret >= 0)
18212                         return ret;
18213 -               cpu_relax();
18214 +               hrtimer_wait_for_timer(timer);
18215         }
18216  }
18217  EXPORT_SYMBOL_GPL(hrtimer_cancel);
18218 @@ -1127,6 +1189,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
18219  
18220         base = hrtimer_clockid_to_base(clock_id);
18221         timer->base = &cpu_base->clock_base[base];
18222 +       INIT_LIST_HEAD(&timer->cb_entry);
18223         timerqueue_init(&timer->node);
18224  
18225  #ifdef CONFIG_TIMER_STATS
18226 @@ -1167,6 +1230,7 @@ bool hrtimer_active(const struct hrtimer *timer)
18227                 seq = raw_read_seqcount_begin(&cpu_base->seq);
18228  
18229                 if (timer->state != HRTIMER_STATE_INACTIVE ||
18230 +                   cpu_base->running_soft == timer ||
18231                     cpu_base->running == timer)
18232                         return true;
18233  
18234 @@ -1265,10 +1329,112 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
18235         cpu_base->running = NULL;
18236  }
18237  
18238 +#ifdef CONFIG_PREEMPT_RT_BASE
18239 +static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
18240 +                                struct hrtimer_clock_base *base)
18241 +{
18242 +       int leftmost;
18243 +
18244 +       if (restart != HRTIMER_NORESTART &&
18245 +           !(timer->state & HRTIMER_STATE_ENQUEUED)) {
18246 +
18247 +               leftmost = enqueue_hrtimer(timer, base);
18248 +               if (!leftmost)
18249 +                       return;
18250 +#ifdef CONFIG_HIGH_RES_TIMERS
18251 +               if (!hrtimer_is_hres_active(timer)) {
18252 +                       /*
18253 +                        * Kick to reschedule the next tick to handle the new timer
18254 +                        * on dynticks target.
18255 +                        */
18256 +                       if (base->cpu_base->nohz_active)
18257 +                               wake_up_nohz_cpu(base->cpu_base->cpu);
18258 +               } else {
18259 +
18260 +                       hrtimer_reprogram(timer, base);
18261 +               }
18262 +#endif
18263 +       }
18264 +}
18265 +
18266 +/*
18267 + * The changes in mainline which removed the callback modes from
18268 + * hrtimer are not yet working with -rt. The non wakeup_process()
18269 + * based callbacks which involve sleeping locks need to be treated
18270 + * seperately.
18271 + */
18272 +static void hrtimer_rt_run_pending(void)
18273 +{
18274 +       enum hrtimer_restart (*fn)(struct hrtimer *);
18275 +       struct hrtimer_cpu_base *cpu_base;
18276 +       struct hrtimer_clock_base *base;
18277 +       struct hrtimer *timer;
18278 +       int index, restart;
18279 +
18280 +       local_irq_disable();
18281 +       cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
18282 +
18283 +       raw_spin_lock(&cpu_base->lock);
18284 +
18285 +       for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
18286 +               base = &cpu_base->clock_base[index];
18287 +
18288 +               while (!list_empty(&base->expired)) {
18289 +                       timer = list_first_entry(&base->expired,
18290 +                                                struct hrtimer, cb_entry);
18291 +
18292 +                       /*
18293 +                        * Same as the above __run_hrtimer function,
18294 +                        * except that we run with interrupts enabled.
18295 +                        */
18296 +                       debug_deactivate(timer);
18297 +                       cpu_base->running_soft = timer;
18298 +                       raw_write_seqcount_barrier(&cpu_base->seq);
18299 +
18300 +                       __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
18301 +                       timer_stats_account_hrtimer(timer);
18302 +                       fn = timer->function;
18303 +
18304 +                       raw_spin_unlock_irq(&cpu_base->lock);
18305 +                       restart = fn(timer);
18306 +                       raw_spin_lock_irq(&cpu_base->lock);
18307 +
18308 +                       hrtimer_rt_reprogram(restart, timer, base);
18309 +                       raw_write_seqcount_barrier(&cpu_base->seq);
18310 +
18311 +                       WARN_ON_ONCE(cpu_base->running_soft != timer);
18312 +                       cpu_base->running_soft = NULL;
18313 +               }
18314 +       }
18315 +
18316 +       raw_spin_unlock_irq(&cpu_base->lock);
18317 +
18318 +       wake_up_timer_waiters(cpu_base);
18319 +}
18320 +
18321 +static int hrtimer_rt_defer(struct hrtimer *timer)
18322 +{
18323 +       if (timer->irqsafe)
18324 +               return 0;
18325 +
18326 +       __remove_hrtimer(timer, timer->base, timer->state, 0);
18327 +       list_add_tail(&timer->cb_entry, &timer->base->expired);
18328 +       return 1;
18329 +}
18330 +
18331 +#else
18332 +
18333 +static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
18334 +
18335 +#endif
18336 +
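hrtimer_rt_defer() above moves every expired timer that is not marked irqsafe onto the base's expired list, and hrtimer_rt_run_pending() later walks that list with interrupts enabled, where a callback may legitimately take sleeping locks. The split can be sketched as "run irqsafe callbacks immediately, queue the rest for a second pass"; the userspace code below invents its own names and a fixed-size list purely for illustration.

#include <stdbool.h>
#include <stdio.h>

#define MAX_EXPIRED 8

struct sketch_timer {
	const char *name;
	bool irqsafe;			/* may run with interrupts disabled */
	void (*function)(struct sketch_timer *);
};

static struct sketch_timer *expired[MAX_EXPIRED];	/* base->expired stand-in */
static int nr_expired;

static void cb(struct sketch_timer *t)
{
	printf("callback %s\n", t->name);
}

/* "hard interrupt" pass: run irqsafe timers, defer the rest */
static void run_queues(struct sketch_timer **timers, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (timers[i]->irqsafe)
			timers[i]->function(timers[i]);
		else if (nr_expired < MAX_EXPIRED)
			expired[nr_expired++] = timers[i];
	}
	if (nr_expired)
		printf("raise HRTIMER_SOFTIRQ for %d deferred timer(s)\n", nr_expired);
}

/* softirq pass: callbacks here are allowed to sleep */
static void run_pending(void)
{
	int i;

	for (i = 0; i < nr_expired; i++)
		expired[i]->function(expired[i]);
	nr_expired = 0;
}

int main(void)
{
	struct sketch_timer wakeup = { "wakeup (irqsafe)", true,  cb };
	struct sketch_timer posix  = { "posix-timer",      false, cb };
	struct sketch_timer *t[] = { &wakeup, &posix };

	run_queues(t, 2);	/* runs the wakeup timer, defers the other one */
	run_pending();		/* the deferred callback runs in "softirq" context */
	return 0;
}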
18337 +static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
18338 +
18339  static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
18340  {
18341         struct hrtimer_clock_base *base = cpu_base->clock_base;
18342         unsigned int active = cpu_base->active_bases;
18343 +       int raise = 0;
18344  
18345         for (; active; base++, active >>= 1) {
18346                 struct timerqueue_node *node;
18347 @@ -1284,6 +1450,15 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
18348  
18349                         timer = container_of(node, struct hrtimer, node);
18350  
18351 +                       trace_hrtimer_interrupt(raw_smp_processor_id(),
18352 +                           ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
18353 +                               timer->praecox : hrtimer_get_expires(timer),
18354 +                               basenow)),
18355 +                           current,
18356 +                           timer->function == hrtimer_wakeup ?
18357 +                           container_of(timer, struct hrtimer_sleeper,
18358 +                               timer)->task : NULL);
18359 +
18360                         /*
18361                          * The immediate goal for using the softexpires is
18362                          * minimizing wakeups, not running timers at the
18363 @@ -1299,9 +1474,14 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
18364                         if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
18365                                 break;
18366  
18367 -                       __run_hrtimer(cpu_base, base, timer, &basenow);
18368 +                       if (!hrtimer_rt_defer(timer))
18369 +                               __run_hrtimer(cpu_base, base, timer, &basenow);
18370 +                       else
18371 +                               raise = 1;
18372                 }
18373         }
18374 +       if (raise)
18375 +               raise_softirq_irqoff(HRTIMER_SOFTIRQ);
18376  }
18377  
18378  #ifdef CONFIG_HIGH_RES_TIMERS
18379 @@ -1464,16 +1644,18 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
18380  void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
18381  {
18382         sl->timer.function = hrtimer_wakeup;
18383 +       sl->timer.irqsafe = 1;
18384         sl->task = task;
18385  }
18386  EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
18387  
18388 -static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
18389 +static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
18390 +                               unsigned long state)
18391  {
18392         hrtimer_init_sleeper(t, current);
18393  
18394         do {
18395 -               set_current_state(TASK_INTERRUPTIBLE);
18396 +               set_current_state(state);
18397                 hrtimer_start_expires(&t->timer, mode);
18398  
18399                 if (likely(t->task))
18400 @@ -1515,7 +1697,8 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
18401                                 HRTIMER_MODE_ABS);
18402         hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
18403  
18404 -       if (do_nanosleep(&t, HRTIMER_MODE_ABS))
18405 +       /* cpu_chill() does not care about restart state. */
18406 +       if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE))
18407                 goto out;
18408  
18409         rmtp = restart->nanosleep.rmtp;
18410 @@ -1532,8 +1715,10 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
18411         return ret;
18412  }
18413  
18414 -long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
18415 -                      const enum hrtimer_mode mode, const clockid_t clockid)
18416 +static long
18417 +__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
18418 +                   const enum hrtimer_mode mode, const clockid_t clockid,
18419 +                   unsigned long state)
18420  {
18421         struct restart_block *restart;
18422         struct hrtimer_sleeper t;
18423 @@ -1546,7 +1731,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
18424  
18425         hrtimer_init_on_stack(&t.timer, clockid, mode);
18426         hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
18427 -       if (do_nanosleep(&t, mode))
18428 +       if (do_nanosleep(&t, mode, state))
18429                 goto out;
18430  
18431         /* Absolute timers do not update the rmtp value and restart: */
18432 @@ -1573,6 +1758,12 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
18433         return ret;
18434  }
18435  
18436 +long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
18437 +                      const enum hrtimer_mode mode, const clockid_t clockid)
18438 +{
18439 +       return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
18440 +}
18441 +
18442  SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
18443                 struct timespec __user *, rmtp)
18444  {
18445 @@ -1587,6 +1778,26 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
18446         return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
18447  }
18448  
18449 +#ifdef CONFIG_PREEMPT_RT_FULL
18450 +/*
18451 + * Sleep for 1 ms in the hope that whoever holds what we want will let it go.
18452 + */
18453 +void cpu_chill(void)
18454 +{
18455 +       struct timespec tu = {
18456 +               .tv_nsec = NSEC_PER_MSEC,
18457 +       };
18458 +       unsigned int freeze_flag = current->flags & PF_NOFREEZE;
18459 +
18460 +       current->flags |= PF_NOFREEZE;
18461 +       __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
18462 +                           TASK_UNINTERRUPTIBLE);
18463 +       if (!freeze_flag)
18464 +               current->flags &= ~PF_NOFREEZE;
18465 +}
18466 +EXPORT_SYMBOL(cpu_chill);
18467 +#endif
18468 +
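
cpu_chill() is the RT replacement for busy-waiting on something whose owner may itself be preempted; elsewhere in this series it typically stands in for cpu_relax() in retry loops. Below is a minimal sketch of such a loop, assuming (from the rest of the series, not from this hunk) that linux/delay.h declares cpu_chill() and maps it to cpu_relax() on non-RT builds; my_resource_busy() is a hypothetical predicate.

        #include <linux/delay.h>        /* cpu_chill(), assumed to be declared here by this series */
        #include <linux/types.h>

        extern bool my_resource_busy(void);     /* hypothetical predicate */

        static void my_wait_until_free(void)
        {
                while (my_resource_busy()) {
                        /*
                         * Spinning with cpu_relax() could livelock against a
                         * preempted owner on RT; sleeping for 1 ms lets the
                         * owner run and release the resource.
                         */
                        cpu_chill();
                }
        }
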
18469  /*
18470   * Functions related to boot-time initialization:
18471   */
18472 @@ -1598,10 +1809,14 @@ int hrtimers_prepare_cpu(unsigned int cpu)
18473         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
18474                 cpu_base->clock_base[i].cpu_base = cpu_base;
18475                 timerqueue_init_head(&cpu_base->clock_base[i].active);
18476 +               INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
18477         }
18478  
18479         cpu_base->cpu = cpu;
18480         hrtimer_init_hres(cpu_base);
18481 +#ifdef CONFIG_PREEMPT_RT_BASE
18482 +       init_waitqueue_head(&cpu_base->wait);
18483 +#endif
18484         return 0;
18485  }
18486  
18487 @@ -1671,9 +1886,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
18488  
18489  #endif /* CONFIG_HOTPLUG_CPU */
18490  
18491 +#ifdef CONFIG_PREEMPT_RT_BASE
18492 +
18493 +static void run_hrtimer_softirq(struct softirq_action *h)
18494 +{
18495 +       hrtimer_rt_run_pending();
18496 +}
18497 +
18498 +static void hrtimers_open_softirq(void)
18499 +{
18500 +       open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
18501 +}
18502 +
18503 +#else
18504 +static void hrtimers_open_softirq(void) { }
18505 +#endif
18506 +
18507  void __init hrtimers_init(void)
18508  {
18509         hrtimers_prepare_cpu(smp_processor_id());
18510 +       hrtimers_open_softirq();
18511  }
18512  
18513  /**
18514 diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
18515 index 1d5c7204ddc9..184de6751180 100644
18516 --- a/kernel/time/itimer.c
18517 +++ b/kernel/time/itimer.c
18518 @@ -213,6 +213,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
18519                 /* We are sharing ->siglock with it_real_fn() */
18520                 if (hrtimer_try_to_cancel(timer) < 0) {
18521                         spin_unlock_irq(&tsk->sighand->siglock);
18522 +                       hrtimer_wait_for_timer(&tsk->signal->real_timer);
18523                         goto again;
18524                 }
18525                 expires = timeval_to_ktime(value->it_value);
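
The single added line above is the usual RT cancel-and-wait shape: when hrtimer_try_to_cancel() reports a running callback, drop the lock, wait for the callback to finish via hrtimer_wait_for_timer(), then retry, rather than spinning while the callback cannot make progress. A generic sketch follows; my_lock and my_hrtimer are invented, and the hrtimer_wait_for_timer() declaration is assumed to be added to linux/hrtimer.h elsewhere in this series.

        #include <linux/hrtimer.h>
        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(my_lock);
        static struct hrtimer my_hrtimer;       /* assumed to be initialised elsewhere */

        static void my_cancel(void)
        {
        again:
                spin_lock_irq(&my_lock);
                if (hrtimer_try_to_cancel(&my_hrtimer) < 0) {
                        /* Callback is running: drop the lock, wait, retry. */
                        spin_unlock_irq(&my_lock);
                        hrtimer_wait_for_timer(&my_hrtimer);
                        goto again;
                }
                /* Timer is now neither pending nor running. */
                spin_unlock_irq(&my_lock);
        }
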
18526 diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
18527 index 555e21f7b966..a5d6435fabbb 100644
18528 --- a/kernel/time/jiffies.c
18529 +++ b/kernel/time/jiffies.c
18530 @@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = {
18531         .max_cycles     = 10,
18532  };
18533  
18534 -__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
18535 +__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
18536 +__cacheline_aligned_in_smp seqcount_t jiffies_seq;
18537  
18538  #if (BITS_PER_LONG < 64)
18539  u64 get_jiffies_64(void)
18540 @@ -83,9 +84,9 @@ u64 get_jiffies_64(void)
18541         u64 ret;
18542  
18543         do {
18544 -               seq = read_seqbegin(&jiffies_lock);
18545 +               seq = read_seqcount_begin(&jiffies_seq);
18546                 ret = jiffies_64;
18547 -       } while (read_seqretry(&jiffies_lock, seq));
18548 +       } while (read_seqcount_retry(&jiffies_seq, seq));
18549         return ret;
18550  }
18551  EXPORT_SYMBOL(get_jiffies_64);
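
The jiffies changes here and in the tick code below split the old seqlock into a raw spinlock for writers plus a plain seqcount for lockless readers, so the write side never sleeps on RT while readers keep the retry loop shown in get_jiffies_64() above. The sketch below applies the same pattern to an invented variable; my_value, my_lock and my_seq are not names from the patch.

        #include <linux/seqlock.h>
        #include <linux/spinlock.h>
        #include <linux/types.h>

        static u64 my_value;
        static DEFINE_RAW_SPINLOCK(my_lock);
        static seqcount_t my_seq = SEQCNT_ZERO(my_seq);

        static void my_value_update(u64 v)
        {
                raw_spin_lock(&my_lock);        /* serialises writers; never sleeps, even on RT */
                write_seqcount_begin(&my_seq);
                my_value = v;
                write_seqcount_end(&my_seq);
                raw_spin_unlock(&my_lock);
        }

        static u64 my_value_read(void)
        {
                unsigned int seq;
                u64 v;

                do {                            /* lockless reader, retries if a write raced */
                        seq = read_seqcount_begin(&my_seq);
                        v = my_value;
                } while (read_seqcount_retry(&my_seq, seq));

                return v;
        }
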
18552 diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
18553 index 6df8927c58a5..05b7391bf9bd 100644
18554 --- a/kernel/time/ntp.c
18555 +++ b/kernel/time/ntp.c
18556 @@ -17,6 +17,7 @@
18557  #include <linux/module.h>
18558  #include <linux/rtc.h>
18559  #include <linux/math64.h>
18560 +#include <linux/swork.h>
18561  
18562  #include "ntp_internal.h"
18563  #include "timekeeping_internal.h"
18564 @@ -568,10 +569,35 @@ static void sync_cmos_clock(struct work_struct *work)
18565                            &sync_cmos_work, timespec64_to_jiffies(&next));
18566  }
18567  
18568 +#ifdef CONFIG_PREEMPT_RT_FULL
18569 +
18570 +static void run_clock_set_delay(struct swork_event *event)
18571 +{
18572 +       queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
18573 +}
18574 +
18575 +static struct swork_event ntp_cmos_swork;
18576 +
18577 +void ntp_notify_cmos_timer(void)
18578 +{
18579 +       swork_queue(&ntp_cmos_swork);
18580 +}
18581 +
18582 +static __init int create_cmos_delay_thread(void)
18583 +{
18584 +       WARN_ON(swork_get());
18585 +       INIT_SWORK(&ntp_cmos_swork, run_clock_set_delay);
18586 +       return 0;
18587 +}
18588 +early_initcall(create_cmos_delay_thread);
18589 +
18590 +#else
18591 +
18592  void ntp_notify_cmos_timer(void)
18593  {
18594         queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
18595  }
18596 +#endif /* CONFIG_PREEMPT_RT_FULL */
18597  
18598  #else
18599  void ntp_notify_cmos_timer(void) { }
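
ntp_notify_cmos_timer() may be reached from a context where the workqueue enqueue path cannot safely take its locks on RT (they become sleeping locks), so the hunk above defers the call to the swork kthread. A stripped-down sketch of that deferral pattern follows; my_event, my_handler and my_trigger are invented, while swork_get(), INIT_SWORK() and swork_queue() are the RT-series API used above, with swork_get() assumed to return 0 on success as the WARN_ON() suggests.

        #include <linux/init.h>
        #include <linux/swork.h>        /* RT-series simple work API used above */

        static struct swork_event my_event;

        static void my_handler(struct swork_event *event)
        {
                /* Runs in the swork kthread; sleeping is fine here. */
        }

        static __init int my_swork_setup(void)
        {
                int err = swork_get();  /* make sure the worker kthread exists */

                if (err)
                        return err;
                INIT_SWORK(&my_event, my_handler);
                return 0;
        }
        early_initcall(my_swork_setup);

        /* Callable from contexts that must not sleep: */
        static void my_trigger(void)
        {
                swork_queue(&my_event);
        }
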
18600 diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
18601 index 39008d78927a..633f4eaca9e7 100644
18602 --- a/kernel/time/posix-cpu-timers.c
18603 +++ b/kernel/time/posix-cpu-timers.c
18604 @@ -3,6 +3,7 @@
18605   */
18606  
18607  #include <linux/sched.h>
18608 +#include <linux/sched/rt.h>
18609  #include <linux/posix-timers.h>
18610  #include <linux/errno.h>
18611  #include <linux/math64.h>
18612 @@ -620,7 +621,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
18613         /*
18614          * Disarm any old timer after extracting its expiry time.
18615          */
18616 -       WARN_ON_ONCE(!irqs_disabled());
18617 +       WARN_ON_ONCE_NONRT(!irqs_disabled());
18618  
18619         ret = 0;
18620         old_incr = timer->it.cpu.incr;
18621 @@ -1064,7 +1065,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
18622         /*
18623          * Now re-arm for the new expiry time.
18624          */
18625 -       WARN_ON_ONCE(!irqs_disabled());
18626 +       WARN_ON_ONCE_NONRT(!irqs_disabled());
18627         arm_timer(timer);
18628         unlock_task_sighand(p, &flags);
18629  
18630 @@ -1153,13 +1154,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
18631   * already updated our counts.  We need to check if any timers fire now.
18632   * Interrupts are disabled.
18633   */
18634 -void run_posix_cpu_timers(struct task_struct *tsk)
18635 +static void __run_posix_cpu_timers(struct task_struct *tsk)
18636  {
18637         LIST_HEAD(firing);
18638         struct k_itimer *timer, *next;
18639         unsigned long flags;
18640  
18641 -       WARN_ON_ONCE(!irqs_disabled());
18642 +       WARN_ON_ONCE_NONRT(!irqs_disabled());
18643  
18644         /*
18645          * The fast path checks that there are no expired thread or thread
18646 @@ -1213,6 +1214,190 @@ void run_posix_cpu_timers(struct task_struct *tsk)
18647         }
18648  }
18649  
18650 +#ifdef CONFIG_PREEMPT_RT_BASE
18651 +#include <linux/kthread.h>
18652 +#include <linux/cpu.h>
18653 +DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
18654 +DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
18655 +
18656 +static int posix_cpu_timers_thread(void *data)
18657 +{
18658 +       int cpu = (long)data;
18659 +
18660 +       BUG_ON(per_cpu(posix_timer_task,cpu) != current);
18661 +
18662 +       while (!kthread_should_stop()) {
18663 +               struct task_struct *tsk = NULL;
18664 +               struct task_struct *next = NULL;
18665 +
18666 +               if (cpu_is_offline(cpu))
18667 +                       goto wait_to_die;
18668 +
18669 +               /* grab task list */
18670 +               raw_local_irq_disable();
18671 +               tsk = per_cpu(posix_timer_tasklist, cpu);
18672 +               per_cpu(posix_timer_tasklist, cpu) = NULL;
18673 +               raw_local_irq_enable();
18674 +
18675 +               /* It's possible the list is empty; if so, sleep until woken */
18676 +               if (!tsk) {
18677 +                       set_current_state(TASK_INTERRUPTIBLE);
18678 +                       schedule();
18679 +                       __set_current_state(TASK_RUNNING);
18680 +                       continue;
18681 +               }
18682 +
18683 +               /* Process task list */
18684 +               while (1) {
18685 +                       /* save next */
18686 +                       next = tsk->posix_timer_list;
18687 +
18688 +                       /* Run the task's timers, clear its list pointer
18689 +                        * and drop the reference taken when it was queued.
18690 +                        */
18691 +                       __run_posix_cpu_timers(tsk);
18692 +                       tsk->posix_timer_list = NULL;
18693 +                       put_task_struct(tsk);
18694 +
18695 +                       /* check if this is the last on the list */
18696 +                       if (next == tsk)
18697 +                               break;
18698 +                       tsk = next;
18699 +               }
18700 +       }
18701 +       return 0;
18702 +
18703 +wait_to_die:
18704 +       /* Wait for kthread_stop */
18705 +       set_current_state(TASK_INTERRUPTIBLE);
18706 +       while (!kthread_should_stop()) {
18707 +               schedule();
18708 +               set_current_state(TASK_INTERRUPTIBLE);
18709 +       }
18710 +       __set_current_state(TASK_RUNNING);
18711 +       return 0;
18712 +}
18713 +
18714 +static inline int __fastpath_timer_check(struct task_struct *tsk)
18715 +{
18716 +       /* tsk == current, ensure it is safe to use ->signal/sighand */
18717 +       if (unlikely(tsk->exit_state))
18718 +               return 0;
18719 +
18720 +       if (!task_cputime_zero(&tsk->cputime_expires))
18721 +               return 1;
18722 +
18723 +       if (!task_cputime_zero(&tsk->signal->cputime_expires))
18724 +               return 1;
18725 +
18726 +       return 0;
18727 +}
18728 +
18729 +void run_posix_cpu_timers(struct task_struct *tsk)
18730 +{
18731 +       unsigned long cpu = smp_processor_id();
18732 +       struct task_struct *tasklist;
18733 +
18734 +       BUG_ON(!irqs_disabled());
18735 +       if (!per_cpu(posix_timer_task, cpu))
18736 +               return;
18737 +       /* get per-cpu references */
18738 +       tasklist = per_cpu(posix_timer_tasklist, cpu);
18739 +
18740 +       /* check to see if we're already queued */
18741 +       if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
18742 +               get_task_struct(tsk);
18743 +               if (tasklist) {
18744 +                       tsk->posix_timer_list = tasklist;
18745 +               } else {
18746 +                       /*
18747 +                        * The list is terminated by a self-pointing
18748 +                        * task_struct
18749 +                        */
18750 +                       tsk->posix_timer_list = tsk;
18751 +               }
18752 +               per_cpu(posix_timer_tasklist, cpu) = tsk;
18753 +
18754 +               wake_up_process(per_cpu(posix_timer_task, cpu));
18755 +       }
18756 +}
18757 +
18758 +/*
18759 + * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
18760 + * Here we can start up the per-CPU posix CPU timer thread for the new CPU.
18761 + */
18762 +static int posix_cpu_thread_call(struct notifier_block *nfb,
18763 +                                unsigned long action, void *hcpu)
18764 +{
18765 +       int cpu = (long)hcpu;
18766 +       struct task_struct *p;
18767 +       struct sched_param param;
18768 +
18769 +       switch (action) {
18770 +       case CPU_UP_PREPARE:
18771 +               p = kthread_create(posix_cpu_timers_thread, hcpu,
18772 +                                       "posixcputmr/%d",cpu);
18773 +               if (IS_ERR(p))
18774 +                       return NOTIFY_BAD;
18775 +               p->flags |= PF_NOFREEZE;
18776 +               kthread_bind(p, cpu);
18777 +               /* Must be high prio to avoid getting starved */
18778 +               param.sched_priority = MAX_RT_PRIO-1;
18779 +               sched_setscheduler(p, SCHED_FIFO, &param);
18780 +               per_cpu(posix_timer_task,cpu) = p;
18781 +               break;
18782 +       case CPU_ONLINE:
18783 +               /* Strictly unnecessary, as the first user will wake it. */
18784 +               wake_up_process(per_cpu(posix_timer_task,cpu));
18785 +               break;
18786 +#ifdef CONFIG_HOTPLUG_CPU
18787 +       case CPU_UP_CANCELED:
18788 +               /* Unbind it from offline cpu so it can run.  Fall thru. */
18789 +               kthread_bind(per_cpu(posix_timer_task, cpu),
18790 +                            cpumask_any(cpu_online_mask));
18791 +               kthread_stop(per_cpu(posix_timer_task,cpu));
18792 +               per_cpu(posix_timer_task,cpu) = NULL;
18793 +               break;
18794 +       case CPU_DEAD:
18795 +               kthread_stop(per_cpu(posix_timer_task,cpu));
18796 +               per_cpu(posix_timer_task,cpu) = NULL;
18797 +               break;
18798 +#endif
18799 +       }
18800 +       return NOTIFY_OK;
18801 +}
18802 +
18803 +/* Register with elevated priority (10) so the per-CPU timer threads are
18804 + * set up early during CPU bring-up.
18805 + */
18806 +static struct notifier_block posix_cpu_thread_notifier = {
18807 +       .notifier_call = posix_cpu_thread_call,
18808 +       .priority = 10
18809 +};
18810 +
18811 +static int __init posix_cpu_thread_init(void)
18812 +{
18813 +       void *hcpu = (void *)(long)smp_processor_id();
18814 +       /* Start one for boot CPU. */
18815 +       unsigned long cpu;
18816 +
18817 +       /* init the per-cpu posix_timer_tasklets */
18818 +       for_each_possible_cpu(cpu)
18819 +               per_cpu(posix_timer_tasklist, cpu) = NULL;
18820 +
18821 +       posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
18822 +       posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
18823 +       register_cpu_notifier(&posix_cpu_thread_notifier);
18824 +       return 0;
18825 +}
18826 +early_initcall(posix_cpu_thread_init);
18827 +#else /* CONFIG_PREEMPT_RT_BASE */
18828 +void run_posix_cpu_timers(struct task_struct *tsk)
18829 +{
18830 +       __run_posix_cpu_timers(tsk);
18831 +}
18832 +#endif /* CONFIG_PREEMPT_RT_BASE */
18833 +
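
The RT path above queues tasks on a per-CPU singly linked list through ->posix_timer_list with an unusual termination rule: NULL means not queued and the tail node points to itself, so membership is a single pointer test and the drain loop stops when next == node. Below is a self-contained sketch of that list discipline, without the irq-disable protection the real code uses; struct my_node and the helpers are invented.

        #include <linux/stddef.h>

        struct my_node {
                struct my_node *next;   /* NULL: not queued; == self: tail of the list */
        };

        static struct my_node *my_list_head;    /* per-CPU in the code above */

        static void my_list_push(struct my_node *n)
        {
                /* An empty list makes the new node point at itself (tail marker). */
                n->next = my_list_head ? my_list_head : n;
                my_list_head = n;
        }

        static void my_list_drain(void (*fn)(struct my_node *))
        {
                struct my_node *n = my_list_head;
                struct my_node *next;

                my_list_head = NULL;
                while (n) {
                        next = n->next;
                        n->next = NULL;         /* mark it "not queued" again */
                        fn(n);
                        if (next == n)          /* tail reached */
                                break;
                        n = next;
                }
        }
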
18834  /*
18835   * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
18836   * The tsk->sighand->siglock must be held by the caller.
18837 diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
18838 index f2826c35e918..464a98155a0e 100644
18839 --- a/kernel/time/posix-timers.c
18840 +++ b/kernel/time/posix-timers.c
18841 @@ -506,6 +506,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
18842  static struct pid *good_sigevent(sigevent_t * event)
18843  {
18844         struct task_struct *rtn = current->group_leader;
18845 +       int sig = event->sigev_signo;
18846  
18847         if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
18848                 (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
18849 @@ -514,7 +515,8 @@ static struct pid *good_sigevent(sigevent_t * event)
18850                 return NULL;
18851  
18852         if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
18853 -           ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
18854 +           (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
18855 +            sig_kernel_coredump(sig)))
18856                 return NULL;
18857  
18858         return task_pid(rtn);
18859 @@ -826,6 +828,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
18860         return overrun;
18861  }
18862  
18863 +/*
18864 + * Protected by RCU!
18865 + */
18866 +static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
18867 +{
18868 +#ifdef CONFIG_PREEMPT_RT_FULL
18869 +       if (kc->timer_set == common_timer_set)
18870 +               hrtimer_wait_for_timer(&timr->it.real.timer);
18871 +       else
18872 +               /* FIXME: Whacky hack for posix-cpu-timers */
18873 +               schedule_timeout(1);
18874 +#endif
18875 +}
18876 +
18877  /* Set a POSIX.1b interval timer. */
18878  /* timr->it_lock is taken. */
18879  static int
18880 @@ -903,6 +919,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
18881         if (!timr)
18882                 return -EINVAL;
18883  
18884 +       rcu_read_lock();
18885         kc = clockid_to_kclock(timr->it_clock);
18886         if (WARN_ON_ONCE(!kc || !kc->timer_set))
18887                 error = -EINVAL;
18888 @@ -911,9 +928,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
18889  
18890         unlock_timer(timr, flag);
18891         if (error == TIMER_RETRY) {
18892 +               timer_wait_for_callback(kc, timr);
18893                 rtn = NULL;     // We already got the old time...
18894 +               rcu_read_unlock();
18895                 goto retry;
18896         }
18897 +       rcu_read_unlock();
18898  
18899         if (old_setting && !error &&
18900             copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
18901 @@ -951,10 +971,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
18902         if (!timer)
18903                 return -EINVAL;
18904  
18905 +       rcu_read_lock();
18906         if (timer_delete_hook(timer) == TIMER_RETRY) {
18907                 unlock_timer(timer, flags);
18908 +               timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
18909 +                                       timer);
18910 +               rcu_read_unlock();
18911                 goto retry_delete;
18912         }
18913 +       rcu_read_unlock();
18914  
18915         spin_lock(&current->sighand->siglock);
18916         list_del(&timer->list);
18917 @@ -980,8 +1005,18 @@ static void itimer_delete(struct k_itimer *timer)
18918  retry_delete:
18919         spin_lock_irqsave(&timer->it_lock, flags);
18920  
18921 -       if (timer_delete_hook(timer) == TIMER_RETRY) {
18922 +       /* On RT we can race with a deletion */
18923 +       if (!timer->it_signal) {
18924                 unlock_timer(timer, flags);
18925 +               return;
18926 +       }
18927 +
18928 +       if (timer_delete_hook(timer) == TIMER_RETRY) {
18929 +               rcu_read_lock();
18930 +               unlock_timer(timer, flags);
18931 +               timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
18932 +                                       timer);
18933 +               rcu_read_unlock();
18934                 goto retry_delete;
18935         }
18936         list_del(&timer->list);
18937 diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
18938 index 690b797f522e..fe8ba1619879 100644
18939 --- a/kernel/time/tick-broadcast-hrtimer.c
18940 +++ b/kernel/time/tick-broadcast-hrtimer.c
18941 @@ -107,5 +107,6 @@ void tick_setup_hrtimer_broadcast(void)
18942  {
18943         hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
18944         bctimer.function = bc_handler;
18945 +       bctimer.irqsafe = true;
18946         clockevents_register_device(&ce_broadcast_hrtimer);
18947  }
18948 diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
18949 index 4fcd99e12aa0..5a47f2e98faf 100644
18950 --- a/kernel/time/tick-common.c
18951 +++ b/kernel/time/tick-common.c
18952 @@ -79,13 +79,15 @@ int tick_is_oneshot_available(void)
18953  static void tick_periodic(int cpu)
18954  {
18955         if (tick_do_timer_cpu == cpu) {
18956 -               write_seqlock(&jiffies_lock);
18957 +               raw_spin_lock(&jiffies_lock);
18958 +               write_seqcount_begin(&jiffies_seq);
18959  
18960                 /* Keep track of the next tick event */
18961                 tick_next_period = ktime_add(tick_next_period, tick_period);
18962  
18963                 do_timer(1);
18964 -               write_sequnlock(&jiffies_lock);
18965 +               write_seqcount_end(&jiffies_seq);
18966 +               raw_spin_unlock(&jiffies_lock);
18967                 update_wall_time();
18968         }
18969  
18970 @@ -157,9 +159,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
18971                 ktime_t next;
18972  
18973                 do {
18974 -                       seq = read_seqbegin(&jiffies_lock);
18975 +                       seq = read_seqcount_begin(&jiffies_seq);
18976                         next = tick_next_period;
18977 -               } while (read_seqretry(&jiffies_lock, seq));
18978 +               } while (read_seqcount_retry(&jiffies_seq, seq));
18979  
18980                 clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
18981  
18982 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
18983 index 3bcb61b52f6c..66d85482a96e 100644
18984 --- a/kernel/time/tick-sched.c
18985 +++ b/kernel/time/tick-sched.c
18986 @@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(ktime_t now)
18987                 return;
18988  
18989         /* Reevaluate with jiffies_lock held */
18990 -       write_seqlock(&jiffies_lock);
18991 +       raw_spin_lock(&jiffies_lock);
18992 +       write_seqcount_begin(&jiffies_seq);
18993  
18994         delta = ktime_sub(now, last_jiffies_update);
18995         if (delta.tv64 >= tick_period.tv64) {
18996 @@ -85,10 +86,12 @@ static void tick_do_update_jiffies64(ktime_t now)
18997                 /* Keep the tick_next_period variable up to date */
18998                 tick_next_period = ktime_add(last_jiffies_update, tick_period);
18999         } else {
19000 -               write_sequnlock(&jiffies_lock);
19001 +               write_seqcount_end(&jiffies_seq);
19002 +               raw_spin_unlock(&jiffies_lock);
19003                 return;
19004         }
19005 -       write_sequnlock(&jiffies_lock);
19006 +       write_seqcount_end(&jiffies_seq);
19007 +       raw_spin_unlock(&jiffies_lock);
19008         update_wall_time();
19009  }
19010  
19011 @@ -99,12 +102,14 @@ static ktime_t tick_init_jiffy_update(void)
19012  {
19013         ktime_t period;
19014  
19015 -       write_seqlock(&jiffies_lock);
19016 +       raw_spin_lock(&jiffies_lock);
19017 +       write_seqcount_begin(&jiffies_seq);
19018         /* Did we start the jiffies update yet ? */
19019         if (last_jiffies_update.tv64 == 0)
19020                 last_jiffies_update = tick_next_period;
19021         period = last_jiffies_update;
19022 -       write_sequnlock(&jiffies_lock);
19023 +       write_seqcount_end(&jiffies_seq);
19024 +       raw_spin_unlock(&jiffies_lock);
19025         return period;
19026  }
19027  
19028 @@ -215,6 +220,7 @@ static void nohz_full_kick_func(struct irq_work *work)
19029  
19030  static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
19031         .func = nohz_full_kick_func,
19032 +       .flags = IRQ_WORK_HARD_IRQ,
19033  };
19034  
19035  /*
19036 @@ -673,10 +679,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
19037  
19038         /* Read jiffies and the time when jiffies were updated last */
19039         do {
19040 -               seq = read_seqbegin(&jiffies_lock);
19041 +               seq = read_seqcount_begin(&jiffies_seq);
19042                 basemono = last_jiffies_update.tv64;
19043                 basejiff = jiffies;
19044 -       } while (read_seqretry(&jiffies_lock, seq));
19045 +       } while (read_seqcount_retry(&jiffies_seq, seq));
19046         ts->last_jiffies = basejiff;
19047  
19048         if (rcu_needs_cpu(basemono, &next_rcu) ||
19049 @@ -877,14 +883,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
19050                 return false;
19051  
19052         if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
19053 -               static int ratelimit;
19054 -
19055 -               if (ratelimit < 10 &&
19056 -                   (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
19057 -                       pr_warn("NOHZ: local_softirq_pending %02x\n",
19058 -                               (unsigned int) local_softirq_pending());
19059 -                       ratelimit++;
19060 -               }
19061 +               softirq_check_pending_idle();
19062                 return false;
19063         }
19064  
19065 @@ -1193,6 +1192,7 @@ void tick_setup_sched_timer(void)
19066          * Emulate tick processing via per-CPU hrtimers:
19067          */
19068         hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
19069 +       ts->sched_timer.irqsafe = 1;
19070         ts->sched_timer.function = tick_sched_timer;
19071  
19072         /* Get the next period (per-CPU) */
19073 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
19074 index 46e312e9be38..fa75cf5d9253 100644
19075 --- a/kernel/time/timekeeping.c
19076 +++ b/kernel/time/timekeeping.c
19077 @@ -2328,8 +2328,10 @@ EXPORT_SYMBOL(hardpps);
19078   */
19079  void xtime_update(unsigned long ticks)
19080  {
19081 -       write_seqlock(&jiffies_lock);
19082 +       raw_spin_lock(&jiffies_lock);
19083 +       write_seqcount_begin(&jiffies_seq);
19084         do_timer(ticks);
19085 -       write_sequnlock(&jiffies_lock);
19086 +       write_seqcount_end(&jiffies_seq);
19087 +       raw_spin_unlock(&jiffies_lock);
19088         update_wall_time();
19089  }
19090 diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
19091 index 704f595ce83f..763a3e5121ff 100644
19092 --- a/kernel/time/timekeeping.h
19093 +++ b/kernel/time/timekeeping.h
19094 @@ -19,7 +19,8 @@ extern void timekeeping_resume(void);
19095  extern void do_timer(unsigned long ticks);
19096  extern void update_wall_time(void);
19097  
19098 -extern seqlock_t jiffies_lock;
19099 +extern raw_spinlock_t jiffies_lock;
19100 +extern seqcount_t jiffies_seq;
19101  
19102  #define CS_NAME_LEN    32
19103  
19104 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
19105 index c611c47de884..cdff4411f8f6 100644
19106 --- a/kernel/time/timer.c
19107 +++ b/kernel/time/timer.c
19108 @@ -193,8 +193,11 @@ EXPORT_SYMBOL(jiffies_64);
19109  #endif
19110  
19111  struct timer_base {
19112 -       spinlock_t              lock;
19113 +       raw_spinlock_t          lock;
19114         struct timer_list       *running_timer;
19115 +#ifdef CONFIG_PREEMPT_RT_FULL
19116 +       struct swait_queue_head wait_for_running_timer;
19117 +#endif
19118         unsigned long           clk;
19119         unsigned long           next_expiry;
19120         unsigned int            cpu;
19121 @@ -203,6 +206,8 @@ struct timer_base {
19122         bool                    is_idle;
19123         DECLARE_BITMAP(pending_map, WHEEL_SIZE);
19124         struct hlist_head       vectors[WHEEL_SIZE];
19125 +       struct hlist_head       expired_lists[LVL_DEPTH];
19126 +       int                     expired_count;
19127  } ____cacheline_aligned;
19128  
19129  static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
19130 @@ -948,10 +953,10 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
19131  
19132                 if (!(tf & TIMER_MIGRATING)) {
19133                         base = get_timer_base(tf);
19134 -                       spin_lock_irqsave(&base->lock, *flags);
19135 +                       raw_spin_lock_irqsave(&base->lock, *flags);
19136                         if (timer->flags == tf)
19137                                 return base;
19138 -                       spin_unlock_irqrestore(&base->lock, *flags);
19139 +                       raw_spin_unlock_irqrestore(&base->lock, *flags);
19140                 }
19141                 cpu_relax();
19142         }
19143 @@ -1023,9 +1028,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
19144                         /* See the comment in lock_timer_base() */
19145                         timer->flags |= TIMER_MIGRATING;
19146  
19147 -                       spin_unlock(&base->lock);
19148 +                       raw_spin_unlock(&base->lock);
19149                         base = new_base;
19150 -                       spin_lock(&base->lock);
19151 +                       raw_spin_lock(&base->lock);
19152                         WRITE_ONCE(timer->flags,
19153                                    (timer->flags & ~TIMER_BASEMASK) | base->cpu);
19154                 }
19155 @@ -1050,7 +1055,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
19156         }
19157  
19158  out_unlock:
19159 -       spin_unlock_irqrestore(&base->lock, flags);
19160 +       raw_spin_unlock_irqrestore(&base->lock, flags);
19161  
19162         return ret;
19163  }
19164 @@ -1144,19 +1149,46 @@ void add_timer_on(struct timer_list *timer, int cpu)
19165         if (base != new_base) {
19166                 timer->flags |= TIMER_MIGRATING;
19167  
19168 -               spin_unlock(&base->lock);
19169 +               raw_spin_unlock(&base->lock);
19170                 base = new_base;
19171 -               spin_lock(&base->lock);
19172 +               raw_spin_lock(&base->lock);
19173                 WRITE_ONCE(timer->flags,
19174                            (timer->flags & ~TIMER_BASEMASK) | cpu);
19175         }
19176  
19177         debug_activate(timer, timer->expires);
19178         internal_add_timer(base, timer);
19179 -       spin_unlock_irqrestore(&base->lock, flags);
19180 +       raw_spin_unlock_irqrestore(&base->lock, flags);
19181  }
19182  EXPORT_SYMBOL_GPL(add_timer_on);
19183  
19184 +#ifdef CONFIG_PREEMPT_RT_FULL
19185 +/*
19186 + * Wait for a running timer
19187 + */
19188 +static void wait_for_running_timer(struct timer_list *timer)
19189 +{
19190 +       struct timer_base *base;
19191 +       u32 tf = timer->flags;
19192 +
19193 +       if (tf & TIMER_MIGRATING)
19194 +               return;
19195 +
19196 +       base = get_timer_base(tf);
19197 +       swait_event(base->wait_for_running_timer,
19198 +                  base->running_timer != timer);
19199 +}
19200 +
19201 +# define wakeup_timer_waiters(b)       swake_up_all(&(b)->wait_for_running_timer)
19202 +#else
19203 +static inline void wait_for_running_timer(struct timer_list *timer)
19204 +{
19205 +       cpu_relax();
19206 +}
19207 +
19208 +# define wakeup_timer_waiters(b)       do { } while (0)
19209 +#endif
19210 +
19211  /**
19212   * del_timer - deactive a timer.
19213  * del_timer - deactivate a timer.
19214 @@ -1180,7 +1212,7 @@ int del_timer(struct timer_list *timer)
19215         if (timer_pending(timer)) {
19216                 base = lock_timer_base(timer, &flags);
19217                 ret = detach_if_pending(timer, base, true);
19218 -               spin_unlock_irqrestore(&base->lock, flags);
19219 +               raw_spin_unlock_irqrestore(&base->lock, flags);
19220         }
19221  
19222         return ret;
19223 @@ -1208,13 +1240,13 @@ int try_to_del_timer_sync(struct timer_list *timer)
19224                 timer_stats_timer_clear_start_info(timer);
19225                 ret = detach_if_pending(timer, base, true);
19226         }
19227 -       spin_unlock_irqrestore(&base->lock, flags);
19228 +       raw_spin_unlock_irqrestore(&base->lock, flags);
19229  
19230         return ret;
19231  }
19232  EXPORT_SYMBOL(try_to_del_timer_sync);
19233  
19234 -#ifdef CONFIG_SMP
19235 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
19236  /**
19237   * del_timer_sync - deactivate a timer and wait for the handler to finish.
19238   * @timer: the timer to be deactivated
19239 @@ -1274,7 +1306,7 @@ int del_timer_sync(struct timer_list *timer)
19240                 int ret = try_to_del_timer_sync(timer);
19241                 if (ret >= 0)
19242                         return ret;
19243 -               cpu_relax();
19244 +               wait_for_running_timer(timer);
19245         }
19246  }
19247  EXPORT_SYMBOL(del_timer_sync);
19248 @@ -1323,7 +1355,8 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
19249         }
19250  }
19251  
19252 -static void expire_timers(struct timer_base *base, struct hlist_head *head)
19253 +static inline void __expire_timers(struct timer_base *base,
19254 +                                  struct hlist_head *head)
19255  {
19256         while (!hlist_empty(head)) {
19257                 struct timer_list *timer;
19258 @@ -1339,33 +1372,53 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
19259                 fn = timer->function;
19260                 data = timer->data;
19261  
19262 -               if (timer->flags & TIMER_IRQSAFE) {
19263 -                       spin_unlock(&base->lock);
19264 +               if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) &&
19265 +                   timer->flags & TIMER_IRQSAFE) {
19266 +                       raw_spin_unlock(&base->lock);
19267                         call_timer_fn(timer, fn, data);
19268 -                       spin_lock(&base->lock);
19269 +                       base->running_timer = NULL;
19270 +                       raw_spin_lock(&base->lock);
19271                 } else {
19272 -                       spin_unlock_irq(&base->lock);
19273 +                       raw_spin_unlock_irq(&base->lock);
19274                         call_timer_fn(timer, fn, data);
19275 -                       spin_lock_irq(&base->lock);
19276 +                       base->running_timer = NULL;
19277 +                       raw_spin_lock_irq(&base->lock);
19278                 }
19279         }
19280  }
19281  
19282 -static int __collect_expired_timers(struct timer_base *base,
19283 -                                   struct hlist_head *heads)
19284 +static void expire_timers(struct timer_base *base)
19285 +{
19286 +       struct hlist_head *head;
19287 +
19288 +       while (base->expired_count--) {
19289 +               head = base->expired_lists + base->expired_count;
19290 +               __expire_timers(base, head);
19291 +       }
19292 +       base->expired_count = 0;
19293 +}
19294 +
19295 +static void __collect_expired_timers(struct timer_base *base)
19296  {
19297         unsigned long clk = base->clk;
19298         struct hlist_head *vec;
19299 -       int i, levels = 0;
19300 +       int i;
19301         unsigned int idx;
19302  
19303 +       /*
19304 +        * expire_timers() must be called at least once before we can
19305 +        * collect more timers
19306 +        */
19307 +       if (WARN_ON(base->expired_count))
19308 +               return;
19309 +
19310         for (i = 0; i < LVL_DEPTH; i++) {
19311                 idx = (clk & LVL_MASK) + i * LVL_SIZE;
19312  
19313                 if (__test_and_clear_bit(idx, base->pending_map)) {
19314                         vec = base->vectors + idx;
19315 -                       hlist_move_list(vec, heads++);
19316 -                       levels++;
19317 +                       hlist_move_list(vec,
19318 +                               &base->expired_lists[base->expired_count++]);
19319                 }
19320                 /* Is it time to look at the next level? */
19321                 if (clk & LVL_CLK_MASK)
19322 @@ -1373,7 +1426,6 @@ static int __collect_expired_timers(struct timer_base *base,
19323                 /* Shift clock for the next level granularity */
19324                 clk >>= LVL_CLK_SHIFT;
19325         }
19326 -       return levels;
19327  }
19328  
19329  #ifdef CONFIG_NO_HZ_COMMON
19330 @@ -1515,7 +1567,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
19331         if (cpu_is_offline(smp_processor_id()))
19332                 return expires;
19333  
19334 -       spin_lock(&base->lock);
19335 +       raw_spin_lock(&base->lock);
19336         nextevt = __next_timer_interrupt(base);
19337         is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
19338         base->next_expiry = nextevt;
19339 @@ -1543,7 +1595,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
19340                 if ((expires - basem) > TICK_NSEC)
19341                         base->is_idle = true;
19342         }
19343 -       spin_unlock(&base->lock);
19344 +       raw_spin_unlock(&base->lock);
19345  
19346         return cmp_next_hrtimer_event(basem, expires);
19347  }
19348 @@ -1566,8 +1618,7 @@ void timer_clear_idle(void)
19349         base->is_idle = false;
19350  }
19351  
19352 -static int collect_expired_timers(struct timer_base *base,
19353 -                                 struct hlist_head *heads)
19354 +static void collect_expired_timers(struct timer_base *base)
19355  {
19356         /*
19357          * NOHZ optimization. After a long idle sleep we need to forward the
19358 @@ -1584,20 +1635,49 @@ static int collect_expired_timers(struct timer_base *base,
19359                 if (time_after(next, jiffies)) {
19360                         /* The call site will increment clock! */
19361                         base->clk = jiffies - 1;
19362 -                       return 0;
19363 +                       return;
19364                 }
19365                 base->clk = next;
19366         }
19367 -       return __collect_expired_timers(base, heads);
19368 +       __collect_expired_timers(base);
19369  }
19370  #else
19371 -static inline int collect_expired_timers(struct timer_base *base,
19372 -                                        struct hlist_head *heads)
19373 +static inline void collect_expired_timers(struct timer_base *base)
19374  {
19375 -       return __collect_expired_timers(base, heads);
19376 +       __collect_expired_timers(base);
19377  }
19378  #endif
19379  
19380 +static int find_expired_timers(struct timer_base *base)
19381 +{
19382 +       const unsigned long int end_clk = jiffies;
19383 +
19384 +       while (!base->expired_count && time_after_eq(end_clk, base->clk)) {
19385 +               collect_expired_timers(base);
19386 +               base->clk++;
19387 +       }
19388 +
19389 +       return base->expired_count;
19390 +}
19391 +
19392 +/* Called from CPU tick routine to quickly collect expired timers */
19393 +static int tick_find_expired(struct timer_base *base)
19394 +{
19395 +       int count;
19396 +
19397 +       raw_spin_lock(&base->lock);
19398 +
19399 +       if (unlikely(time_after(jiffies, base->clk + HZ))) {
19400 +               /* defer to ktimersoftd; don't spend too long in irq context */
19401 +               count = -1;
19402 +       } else
19403 +               count = find_expired_timers(base);
19404 +
19405 +       raw_spin_unlock(&base->lock);
19406 +
19407 +       return count;
19408 +}
19409 +
19410  /*
19411   * Called from the timer interrupt handler to charge one tick to the current
19412   * process.  user_tick is 1 if the tick is user time, 0 for system.
19413 @@ -1608,13 +1688,13 @@ void update_process_times(int user_tick)
19414  
19415         /* Note: this timer irq context must be accounted for as well. */
19416         account_process_tick(p, user_tick);
19417 +       scheduler_tick();
19418         run_local_timers();
19419         rcu_check_callbacks(user_tick);
19420 -#ifdef CONFIG_IRQ_WORK
19421 +#if defined(CONFIG_IRQ_WORK)
19422         if (in_irq())
19423                 irq_work_tick();
19424  #endif
19425 -       scheduler_tick();
19426         run_posix_cpu_timers(p);
19427  }
19428  
19429 @@ -1624,24 +1704,13 @@ void update_process_times(int user_tick)
19430   */
19431  static inline void __run_timers(struct timer_base *base)
19432  {
19433 -       struct hlist_head heads[LVL_DEPTH];
19434 -       int levels;
19435 +       raw_spin_lock_irq(&base->lock);
19436  
19437 -       if (!time_after_eq(jiffies, base->clk))
19438 -               return;
19439 +       while (find_expired_timers(base))
19440 +               expire_timers(base);
19441  
19442 -       spin_lock_irq(&base->lock);
19443 -
19444 -       while (time_after_eq(jiffies, base->clk)) {
19445 -
19446 -               levels = collect_expired_timers(base, heads);
19447 -               base->clk++;
19448 -
19449 -               while (levels--)
19450 -                       expire_timers(base, heads + levels);
19451 -       }
19452 -       base->running_timer = NULL;
19453 -       spin_unlock_irq(&base->lock);
19454 +       raw_spin_unlock_irq(&base->lock);
19455 +       wakeup_timer_waiters(base);
19456  }
19457  
19458  /*
19459 @@ -1651,6 +1720,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
19460  {
19461         struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
19462  
19463 +       irq_work_tick_soft();
19464 +
19465         __run_timers(base);
19466         if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
19467                 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
19468 @@ -1665,12 +1736,12 @@ void run_local_timers(void)
19469  
19470         hrtimer_run_queues();
19471         /* Raise the softirq only if required. */
19472 -       if (time_before(jiffies, base->clk)) {
19473 +       if (time_before(jiffies, base->clk) || !tick_find_expired(base)) {
19474                 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
19475                         return;
19476                 /* CPU is awake, so check the deferrable base. */
19477                 base++;
19478 -               if (time_before(jiffies, base->clk))
19479 +               if (time_before(jiffies, base->clk) || !tick_find_expired(base))
19480                         return;
19481         }
19482         raise_softirq(TIMER_SOFTIRQ);
19483 @@ -1836,16 +1907,17 @@ int timers_dead_cpu(unsigned int cpu)
19484                  * The caller is globally serialized and nobody else
19485                  * takes two locks at once, deadlock is not possible.
19486                  */
19487 -               spin_lock_irq(&new_base->lock);
19488 -               spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
19489 +               raw_spin_lock_irq(&new_base->lock);
19490 +               raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
19491  
19492                 BUG_ON(old_base->running_timer);
19493 +               BUG_ON(old_base->expired_count);
19494  
19495                 for (i = 0; i < WHEEL_SIZE; i++)
19496                         migrate_timer_list(new_base, old_base->vectors + i);
19497  
19498 -               spin_unlock(&old_base->lock);
19499 -               spin_unlock_irq(&new_base->lock);
19500 +               raw_spin_unlock(&old_base->lock);
19501 +               raw_spin_unlock_irq(&new_base->lock);
19502                 put_cpu_ptr(&timer_bases);
19503         }
19504         return 0;
19505 @@ -1861,8 +1933,12 @@ static void __init init_timer_cpu(int cpu)
19506         for (i = 0; i < NR_BASES; i++) {
19507                 base = per_cpu_ptr(&timer_bases[i], cpu);
19508                 base->cpu = cpu;
19509 -               spin_lock_init(&base->lock);
19510 +               raw_spin_lock_init(&base->lock);
19511                 base->clk = jiffies;
19512 +#ifdef CONFIG_PREEMPT_RT_FULL
19513 +               init_swait_queue_head(&base->wait_for_running_timer);
19514 +#endif
19515 +               base->expired_count = 0;
19516         }
19517  }
19518  
19519 diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
19520 index 2a96b063d659..812e37237eb8 100644
19521 --- a/kernel/trace/Kconfig
19522 +++ b/kernel/trace/Kconfig
19523 @@ -182,6 +182,24 @@ config IRQSOFF_TRACER
19524           enabled. This option and the preempt-off timing option can be
19525           used together or separately.)
19526  
19527 +config INTERRUPT_OFF_HIST
19528 +       bool "Interrupts-off Latency Histogram"
19529 +       depends on IRQSOFF_TRACER
19530 +       help
19531 +         This option generates continuously updated histograms (one per cpu)
19532 +         of the duration of time periods with interrupts disabled. The
19533 +         histograms are disabled by default. To enable them, write a non-zero
19534 +         number to
19535 +
19536 +             /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
19537 +
19538 +         If PREEMPT_OFF_HIST is also selected, additional histograms (one
19539 +         per cpu) are generated that accumulate the duration of time periods
19540 +         when both interrupts and preemption are disabled. The histogram data
19541 +         will be located in the debug file system at
19542 +
19543 +             /sys/kernel/debug/tracing/latency_hist/irqsoff
19544 +
19545  config PREEMPT_TRACER
19546         bool "Preemption-off Latency Tracer"
19547         default n
19548 @@ -206,6 +224,24 @@ config PREEMPT_TRACER
19549           enabled. This option and the irqs-off timing option can be
19550           used together or separately.)
19551  
19552 +config PREEMPT_OFF_HIST
19553 +       bool "Preemption-off Latency Histogram"
19554 +       depends on PREEMPT_TRACER
19555 +       help
19556 +         This option generates continuously updated histograms (one per cpu)
19557 +         of the duration of time periods with preemption disabled. The
19558 +         histograms are disabled by default. To enable them, write a non-zero
19559 +         number to
19560 +
19561 +             /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
19562 +
19563 +         If INTERRUPT_OFF_HIST is also selected, additional histograms (one
19564 +         per cpu) are generated that accumulate the duration of time periods
19565 +         when both interrupts and preemption are disabled. The histogram data
19566 +         will be located in the debug file system at
19567 +
19568 +             /sys/kernel/debug/tracing/latency_hist/preemptoff
19569 +
19570  config SCHED_TRACER
19571         bool "Scheduling Latency Tracer"
19572         select GENERIC_TRACER
19573 @@ -251,6 +287,74 @@ config HWLAT_TRACER
19574          file. Every time a latency is greater than tracing_thresh, it will
19575          be recorded into the ring buffer.
19576  
19577 +config WAKEUP_LATENCY_HIST
19578 +       bool "Scheduling Latency Histogram"
19579 +       depends on SCHED_TRACER
19580 +       help
19581 +         This option generates continuously updated histograms (one per cpu)
19582 +         of the scheduling latency of the highest priority task.
19583 +         The histograms are disabled by default. To enable them, write a
19584 +         non-zero number to
19585 +
19586 +             /sys/kernel/debug/tracing/latency_hist/enable/wakeup
19587 +
19588 +         Two different algorithms are used, one to determine the latency of
19589 +         processes that exclusively use the highest priority of the system and
19590 +         another one to determine the latency of processes that share the
19591 +         highest system priority with other processes. The former is used to
19592 +         improve hardware and system software, the latter to optimize the
19593 +         priority design of a given system. The histogram data will be
19594 +         located in the debug file system at
19595 +
19596 +             /sys/kernel/debug/tracing/latency_hist/wakeup
19597 +
19598 +         and
19599 +
19600 +             /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
19601 +
19602 +         If both Scheduling Latency Histogram and Missed Timer Offsets
19603 +         Histogram are selected, additional histogram data will be collected
19604 +         that contains, in addition to the wakeup latency, the timer latency
19605 +         for wakeups that were triggered by an expired timer. These histograms
19606 +         are available in the
19607 +
19608 +             /sys/kernel/debug/tracing/latency_hist/timerandwakeup
19609 +
19610 +         directory. They reflect the apparent interrupt and scheduling latency
19611 +         and are best suited to determining the worst-case latency of a given
19612 +         system. To enable these histograms, write a non-zero number to
19613 +
19614 +             /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
19615 +
19616 +config MISSED_TIMER_OFFSETS_HIST
19617 +       depends on HIGH_RES_TIMERS
19618 +       select GENERIC_TRACER
19619 +       bool "Missed Timer Offsets Histogram"
19620 +       help
19621 +         Generate a histogram of missed timer offsets in microseconds. The
19622 +         histograms are disabled by default. To enable them, write a non-zero
19623 +         number to
19624 +
19625 +             /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
19626 +
19627 +         The histogram data will be located in the debug file system at
19628 +
19629 +             /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
19630 +
19631 +         If both Scheduling Latency Histogram and Missed Timer Offsets
19632 +         Histogram are selected, additional histogram data will be collected
19633 +         that contains, in addition to the wakeup latency, the timer latency
19634 +         for wakeups that were triggered by an expired timer. These histograms
19635 +         are available in the
19636 +
19637 +             /sys/kernel/debug/tracing/latency_hist/timerandwakeup
19638 +
19639 +         directory. They reflect the apparent interrupt and scheduling latency
19640 +         and are best suited to determining the worst-case latency of a given
19641 +         system. To enable these histograms, write a non-zero number to
19642 +
19643 +             /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
19644 +
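
All four histogram options above are switched on the same way: write a non-zero number to the corresponding file under /sys/kernel/debug/tracing/latency_hist/enable/. The small userspace sketch below does this for the preemptirqsoff case; the path is taken from the help text, and error handling is kept minimal on purpose.

        #include <stdio.h>

        int main(void)
        {
                const char *path =
                        "/sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff";
                FILE *f = fopen(path, "w");

                if (!f) {
                        perror("fopen");        /* typically needs root and a mounted debugfs */
                        return 1;
                }
                fputs("1\n", f);                /* any non-zero value enables the histogram */
                fclose(f);
                return 0;
        }
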
19645  config ENABLE_DEFAULT_TRACERS
19646         bool "Trace process context switches and events"
19647         depends on !GENERIC_TRACER
19648 diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
19649 index e57980845549..83af000b783c 100644
19650 --- a/kernel/trace/Makefile
19651 +++ b/kernel/trace/Makefile
19652 @@ -38,6 +38,10 @@ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
19653  obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
19654  obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
19655  obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
19656 +obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
19657 +obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
19658 +obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
19659 +obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
19660  obj-$(CONFIG_NOP_TRACER) += trace_nop.o
19661  obj-$(CONFIG_STACK_TRACER) += trace_stack.o
19662  obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
19663 diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
19664 new file mode 100644
19665 index 000000000000..7f6ee70dea41
19666 --- /dev/null
19667 +++ b/kernel/trace/latency_hist.c
19668 @@ -0,0 +1,1178 @@
19669 +/*
19670 + * kernel/trace/latency_hist.c
19671 + *
19672 + * Add support for histograms of preemption-off latency,
19673 + * interrupt-off latency and wakeup latency; it depends on
19674 + * Real-Time Preemption Support.
19675 + *
19676 + *  Copyright (C) 2005 MontaVista Software, Inc.
19677 + *  Yi Yang <yyang@ch.mvista.com>
19678 + *
19679 + *  Converted to work with the new latency tracer.
19680 + *  Copyright (C) 2008 Red Hat, Inc.
19681 + *    Steven Rostedt <srostedt@redhat.com>
19682 + *
19683 + */
19684 +#include <linux/module.h>
19685 +#include <linux/debugfs.h>
19686 +#include <linux/seq_file.h>
19687 +#include <linux/percpu.h>
19688 +#include <linux/kallsyms.h>
19689 +#include <linux/uaccess.h>
19690 +#include <linux/sched.h>
19691 +#include <linux/sched/rt.h>
19692 +#include <linux/slab.h>
19693 +#include <linux/atomic.h>
19694 +#include <asm/div64.h>
19695 +
19696 +#include "trace.h"
19697 +#include <trace/events/sched.h>
19698 +
19699 +#define NSECS_PER_USECS 1000L
19700 +
19701 +#define CREATE_TRACE_POINTS
19702 +#include <trace/events/hist.h>
19703 +
19704 +enum {
19705 +       IRQSOFF_LATENCY = 0,
19706 +       PREEMPTOFF_LATENCY,
19707 +       PREEMPTIRQSOFF_LATENCY,
19708 +       WAKEUP_LATENCY,
19709 +       WAKEUP_LATENCY_SHAREDPRIO,
19710 +       MISSED_TIMER_OFFSETS,
19711 +       TIMERANDWAKEUP_LATENCY,
19712 +       MAX_LATENCY_TYPE,
19713 +};
19714 +
19715 +#define MAX_ENTRY_NUM 10240
19716 +
19717 +struct hist_data {
19718 +       atomic_t hist_mode; /* 0 log, 1 don't log */
19719 +       long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
19720 +       long min_lat;
19721 +       long max_lat;
19722 +       unsigned long long below_hist_bound_samples;
19723 +       unsigned long long above_hist_bound_samples;
19724 +       long long accumulate_lat;
19725 +       unsigned long long total_samples;
19726 +       unsigned long long hist_array[MAX_ENTRY_NUM];
19727 +};
19728 +
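
The fields of struct hist_data indicate how each sample is accounted: the latency (plus offset on bipolar scales) indexes hist_array, out-of-range samples go to the below/above counters, and min/max/accumulate/total carry the summary statistics, so the running mean is accumulate_lat / total_samples. The sketch below is inferred from those fields only; the accounting code later in this file, including its hist_mode gating, is authoritative.

        /* Sketch only; relies on struct hist_data and MAX_ENTRY_NUM defined above. */
        static void my_hist_account(struct hist_data *h, long latency)
        {
                long idx = latency + h->offset;         /* offset is non-zero only for bipolar scales */

                /* The real code first consults h->hist_mode to see whether logging is enabled. */

                if (idx < 0)
                        h->below_hist_bound_samples++;
                else if (idx >= MAX_ENTRY_NUM)
                        h->above_hist_bound_samples++;
                else
                        h->hist_array[idx]++;

                if (latency < h->min_lat)
                        h->min_lat = latency;
                if (latency > h->max_lat)
                        h->max_lat = latency;
                h->accumulate_lat += latency;
                h->total_samples++;
        }
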
19729 +struct enable_data {
19730 +       int latency_type;
19731 +       int enabled;
19732 +};
19733 +
19734 +static char *latency_hist_dir_root = "latency_hist";
19735 +
19736 +#ifdef CONFIG_INTERRUPT_OFF_HIST
19737 +static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
19738 +static char *irqsoff_hist_dir = "irqsoff";
19739 +static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
19740 +static DEFINE_PER_CPU(int, hist_irqsoff_counting);
19741 +#endif
19742 +
19743 +#ifdef CONFIG_PREEMPT_OFF_HIST
19744 +static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
19745 +static char *preemptoff_hist_dir = "preemptoff";
19746 +static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
19747 +static DEFINE_PER_CPU(int, hist_preemptoff_counting);
19748 +#endif
19749 +
19750 +#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
19751 +static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
19752 +static char *preemptirqsoff_hist_dir = "preemptirqsoff";
19753 +static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
19754 +static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
19755 +#endif
19756 +
19757 +#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
19758 +static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
19759 +static struct enable_data preemptirqsoff_enabled_data = {
19760 +       .latency_type = PREEMPTIRQSOFF_LATENCY,
19761 +       .enabled = 0,
19762 +};
19763 +#endif
19764 +
19765 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
19766 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
19767 +struct maxlatproc_data {
19768 +       char comm[FIELD_SIZEOF(struct task_struct, comm)];
19769 +       char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
19770 +       int pid;
19771 +       int current_pid;
19772 +       int prio;
19773 +       int current_prio;
19774 +       long latency;
19775 +       long timeroffset;
19776 +       cycle_t timestamp;
19777 +};
19778 +#endif
19779 +
19780 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
19781 +static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
19782 +static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
19783 +static char *wakeup_latency_hist_dir = "wakeup";
19784 +static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
19785 +static notrace void probe_wakeup_latency_hist_start(void *v,
19786 +       struct task_struct *p);
19787 +static notrace void probe_wakeup_latency_hist_stop(void *v,
19788 +       bool preempt, struct task_struct *prev, struct task_struct *next);
19789 +static notrace void probe_sched_migrate_task(void *,
19790 +       struct task_struct *task, int cpu);
19791 +static struct enable_data wakeup_latency_enabled_data = {
19792 +       .latency_type = WAKEUP_LATENCY,
19793 +       .enabled = 0,
19794 +};
19795 +static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
19796 +static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
19797 +static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
19798 +static DEFINE_PER_CPU(int, wakeup_sharedprio);
19799 +static unsigned long wakeup_pid;
19800 +#endif
19801 +
19802 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
19803 +static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
19804 +static char *missed_timer_offsets_dir = "missed_timer_offsets";
19805 +static notrace void probe_hrtimer_interrupt(void *v, int cpu,
19806 +       long long offset, struct task_struct *curr, struct task_struct *task);
19807 +static struct enable_data missed_timer_offsets_enabled_data = {
19808 +       .latency_type = MISSED_TIMER_OFFSETS,
19809 +       .enabled = 0,
19810 +};
19811 +static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
19812 +static unsigned long missed_timer_offsets_pid;
19813 +#endif
19814 +
19815 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
19816 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
19817 +static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
19818 +static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
19819 +static struct enable_data timerandwakeup_enabled_data = {
19820 +       .latency_type = TIMERANDWAKEUP_LATENCY,
19821 +       .enabled = 0,
19822 +};
19823 +static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
19824 +#endif
19825 +
19826 +void notrace latency_hist(int latency_type, int cpu, long latency,
19827 +                         long timeroffset, cycle_t stop,
19828 +                         struct task_struct *p)
19829 +{
19830 +       struct hist_data *my_hist;
19831 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
19832 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
19833 +       struct maxlatproc_data *mp = NULL;
19834 +#endif
19835 +
19836 +       if (!cpu_possible(cpu) || latency_type < 0 ||
19837 +           latency_type >= MAX_LATENCY_TYPE)
19838 +               return;
19839 +
19840 +       switch (latency_type) {
19841 +#ifdef CONFIG_INTERRUPT_OFF_HIST
19842 +       case IRQSOFF_LATENCY:
19843 +               my_hist = &per_cpu(irqsoff_hist, cpu);
19844 +               break;
19845 +#endif
19846 +#ifdef CONFIG_PREEMPT_OFF_HIST
19847 +       case PREEMPTOFF_LATENCY:
19848 +               my_hist = &per_cpu(preemptoff_hist, cpu);
19849 +               break;
19850 +#endif
19851 +#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
19852 +       case PREEMPTIRQSOFF_LATENCY:
19853 +               my_hist = &per_cpu(preemptirqsoff_hist, cpu);
19854 +               break;
19855 +#endif
19856 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
19857 +       case WAKEUP_LATENCY:
19858 +               my_hist = &per_cpu(wakeup_latency_hist, cpu);
19859 +               mp = &per_cpu(wakeup_maxlatproc, cpu);
19860 +               break;
19861 +       case WAKEUP_LATENCY_SHAREDPRIO:
19862 +               my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
19863 +               mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
19864 +               break;
19865 +#endif
19866 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
19867 +       case MISSED_TIMER_OFFSETS:
19868 +               my_hist = &per_cpu(missed_timer_offsets, cpu);
19869 +               mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
19870 +               break;
19871 +#endif
19872 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
19873 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
19874 +       case TIMERANDWAKEUP_LATENCY:
19875 +               my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
19876 +               mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
19877 +               break;
19878 +#endif
19879 +
19880 +       default:
19881 +               return;
19882 +       }
19883 +
19884 +       latency += my_hist->offset;
19885 +
19886 +       if (atomic_read(&my_hist->hist_mode) == 0)
19887 +               return;
19888 +
19889 +       if (latency < 0 || latency >= MAX_ENTRY_NUM) {
19890 +               if (latency < 0)
19891 +                       my_hist->below_hist_bound_samples++;
19892 +               else
19893 +                       my_hist->above_hist_bound_samples++;
19894 +       } else
19895 +               my_hist->hist_array[latency]++;
19896 +
19897 +       if (unlikely(latency > my_hist->max_lat ||
19898 +           my_hist->min_lat == LONG_MAX)) {
19899 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
19900 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
19901 +               if (latency_type == WAKEUP_LATENCY ||
19902 +                   latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
19903 +                   latency_type == MISSED_TIMER_OFFSETS ||
19904 +                   latency_type == TIMERANDWAKEUP_LATENCY) {
19905 +                       strncpy(mp->comm, p->comm, sizeof(mp->comm));
19906 +                       strncpy(mp->current_comm, current->comm,
19907 +                           sizeof(mp->current_comm));
19908 +                       mp->pid = task_pid_nr(p);
19909 +                       mp->current_pid = task_pid_nr(current);
19910 +                       mp->prio = p->prio;
19911 +                       mp->current_prio = current->prio;
19912 +                       mp->latency = latency;
19913 +                       mp->timeroffset = timeroffset;
19914 +                       mp->timestamp = stop;
19915 +               }
19916 +#endif
19917 +               my_hist->max_lat = latency;
19918 +       }
19919 +       if (unlikely(latency < my_hist->min_lat))
19920 +               my_hist->min_lat = latency;
19921 +       my_hist->total_samples++;
19922 +       my_hist->accumulate_lat += latency;
19923 +}
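
latency_hist() shifts each sample by the per-histogram offset, only counts samples
that fall outside [0, MAX_ENTRY_NUM) instead of binning them, and keeps
min/max/average bookkeeping. A minimal stand-alone C sketch of that binning logic
(illustration only; the struct and names are made up here, and the kernel's per-CPU
storage, maxlatproc bookkeeping and tracing hooks are omitted):

#include <limits.h>
#include <stdio.h>

#define MAX_ENTRY_NUM 10240

struct hist {
	long offset;                    /* MAX_ENTRY_NUM/2 for a bipolar scale */
	long min_lat, max_lat;
	long long accumulate_lat;
	unsigned long long below, above, total;
	unsigned long long bins[MAX_ENTRY_NUM];
};

static void account(struct hist *h, long latency_us)
{
	long idx = latency_us + h->offset;      /* shift into the array's index range */

	if (idx < 0)
		h->below++;                     /* counted, but not binned */
	else if (idx >= MAX_ENTRY_NUM)
		h->above++;
	else
		h->bins[idx]++;

	if (idx > h->max_lat || h->min_lat == LONG_MAX)
		h->max_lat = idx;
	if (idx < h->min_lat)
		h->min_lat = idx;
	h->total++;
	h->accumulate_lat += idx;
}

int main(void)
{
	static struct hist h = { .min_lat = LONG_MAX, .max_lat = LONG_MIN };

	account(&h, 12);
	account(&h, 3);
	printf("samples=%llu min=%ld max=%ld avg=%lld\n", h.total,
	       h.min_lat - h.offset, h.max_lat - h.offset,
	       h.accumulate_lat / (long long)h.total - h.offset);
	return 0;
}

As in the kernel code, min/max are stored with the offset applied and the offset is
subtracted again when the histogram is printed.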
19924 +
19925 +static void *l_start(struct seq_file *m, loff_t *pos)
19926 +{
19927 +       loff_t *index_ptr = NULL;
19928 +       loff_t index = *pos;
19929 +       struct hist_data *my_hist = m->private;
19930 +
19931 +       if (index == 0) {
19932 +               char minstr[32], avgstr[32], maxstr[32];
19933 +
19934 +               atomic_dec(&my_hist->hist_mode);
19935 +
19936 +               if (likely(my_hist->total_samples)) {
19937 +                       long avg = (long) div64_s64(my_hist->accumulate_lat,
19938 +                           my_hist->total_samples);
19939 +                       snprintf(minstr, sizeof(minstr), "%ld",
19940 +                           my_hist->min_lat - my_hist->offset);
19941 +                       snprintf(avgstr, sizeof(avgstr), "%ld",
19942 +                           avg - my_hist->offset);
19943 +                       snprintf(maxstr, sizeof(maxstr), "%ld",
19944 +                           my_hist->max_lat - my_hist->offset);
19945 +               } else {
19946 +                       strcpy(minstr, "<undef>");
19947 +                       strcpy(avgstr, minstr);
19948 +                       strcpy(maxstr, minstr);
19949 +               }
19950 +
19951 +               seq_printf(m, "#Minimum latency: %s microseconds\n"
19952 +                          "#Average latency: %s microseconds\n"
19953 +                          "#Maximum latency: %s microseconds\n"
19954 +                          "#Total samples: %llu\n"
19955 +                          "#There are %llu samples lower than %ld"
19956 +                          " microseconds.\n"
19957 +                          "#There are %llu samples greater or equal"
19958 +                          " than %ld microseconds.\n"
19959 +                          "#usecs\t%16s\n",
19960 +                          minstr, avgstr, maxstr,
19961 +                          my_hist->total_samples,
19962 +                          my_hist->below_hist_bound_samples,
19963 +                          -my_hist->offset,
19964 +                          my_hist->above_hist_bound_samples,
19965 +                          MAX_ENTRY_NUM - my_hist->offset,
19966 +                          "samples");
19967 +       }
19968 +       if (index < MAX_ENTRY_NUM) {
19969 +               index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
19970 +               if (index_ptr)
19971 +                       *index_ptr = index;
19972 +       }
19973 +
19974 +       return index_ptr;
19975 +}
19976 +
19977 +static void *l_next(struct seq_file *m, void *p, loff_t *pos)
19978 +{
19979 +       loff_t *index_ptr = p;
19980 +       struct hist_data *my_hist = m->private;
19981 +
19982 +       if (++*pos >= MAX_ENTRY_NUM) {
19983 +               atomic_inc(&my_hist->hist_mode);
19984 +               return NULL;
19985 +       }
19986 +       *index_ptr = *pos;
19987 +       return index_ptr;
19988 +}
19989 +
19990 +static void l_stop(struct seq_file *m, void *p)
19991 +{
19992 +       kfree(p);
19993 +}
19994 +
19995 +static int l_show(struct seq_file *m, void *p)
19996 +{
19997 +       int index = *(loff_t *) p;
19998 +       struct hist_data *my_hist = m->private;
19999 +
20000 +       seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
20001 +           my_hist->hist_array[index]);
20002 +       return 0;
20003 +}
20004 +
20005 +static const struct seq_operations latency_hist_seq_op = {
20006 +       .start = l_start,
20007 +       .next  = l_next,
20008 +       .stop  = l_stop,
20009 +       .show  = l_show
20010 +};
20011 +
20012 +static int latency_hist_open(struct inode *inode, struct file *file)
20013 +{
20014 +       int ret;
20015 +
20016 +       ret = seq_open(file, &latency_hist_seq_op);
20017 +       if (!ret) {
20018 +               struct seq_file *seq = file->private_data;
20019 +               seq->private = inode->i_private;
20020 +       }
20021 +       return ret;
20022 +}
20023 +
20024 +static const struct file_operations latency_hist_fops = {
20025 +       .open = latency_hist_open,
20026 +       .read = seq_read,
20027 +       .llseek = seq_lseek,
20028 +       .release = seq_release,
20029 +};
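
Each per-CPU histogram file is rendered through this seq_file interface: l_start()
emits a header with the minimum/average/maximum latency, the total sample count and
the two out-of-bounds counters, and l_show() then prints one "<usecs>\t<samples>"
line per bin. Assuming debugfs is mounted at /sys/kernel/debug, a histogram can be
read with e.g.:

        # cat /sys/kernel/debug/tracing/latency_hist/irqsoff/CPU0

(the exact directory names are created in latency_hist_init() further below).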
20030 +
20031 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
20032 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
20033 +static void clear_maxlatprocdata(struct maxlatproc_data *mp)
20034 +{
20035 +       mp->comm[0] = mp->current_comm[0] = '\0';
20036 +       mp->prio = mp->current_prio = mp->pid = mp->current_pid =
20037 +           mp->latency = mp->timeroffset = -1;
20038 +       mp->timestamp = 0;
20039 +}
20040 +#endif
20041 +
20042 +static void hist_reset(struct hist_data *hist)
20043 +{
20044 +       atomic_dec(&hist->hist_mode);
20045 +
20046 +       memset(hist->hist_array, 0, sizeof(hist->hist_array));
20047 +       hist->below_hist_bound_samples = 0ULL;
20048 +       hist->above_hist_bound_samples = 0ULL;
20049 +       hist->min_lat = LONG_MAX;
20050 +       hist->max_lat = LONG_MIN;
20051 +       hist->total_samples = 0ULL;
20052 +       hist->accumulate_lat = 0LL;
20053 +
20054 +       atomic_inc(&hist->hist_mode);
20055 +}
20056 +
20057 +static ssize_t
20058 +latency_hist_reset(struct file *file, const char __user *a,
20059 +                  size_t size, loff_t *off)
20060 +{
20061 +       int cpu;
20062 +       struct hist_data *hist = NULL;
20063 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
20064 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
20065 +       struct maxlatproc_data *mp = NULL;
20066 +#endif
20067 +       off_t latency_type = (off_t) file->private_data;
20068 +
20069 +       for_each_online_cpu(cpu) {
20070 +
20071 +               switch (latency_type) {
20072 +#ifdef CONFIG_PREEMPT_OFF_HIST
20073 +               case PREEMPTOFF_LATENCY:
20074 +                       hist = &per_cpu(preemptoff_hist, cpu);
20075 +                       break;
20076 +#endif
20077 +#ifdef CONFIG_INTERRUPT_OFF_HIST
20078 +               case IRQSOFF_LATENCY:
20079 +                       hist = &per_cpu(irqsoff_hist, cpu);
20080 +                       break;
20081 +#endif
20082 +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
20083 +               case PREEMPTIRQSOFF_LATENCY:
20084 +                       hist = &per_cpu(preemptirqsoff_hist, cpu);
20085 +                       break;
20086 +#endif
20087 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
20088 +               case WAKEUP_LATENCY:
20089 +                       hist = &per_cpu(wakeup_latency_hist, cpu);
20090 +                       mp = &per_cpu(wakeup_maxlatproc, cpu);
20091 +                       break;
20092 +               case WAKEUP_LATENCY_SHAREDPRIO:
20093 +                       hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
20094 +                       mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
20095 +                       break;
20096 +#endif
20097 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
20098 +               case MISSED_TIMER_OFFSETS:
20099 +                       hist = &per_cpu(missed_timer_offsets, cpu);
20100 +                       mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
20101 +                       break;
20102 +#endif
20103 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
20104 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
20105 +               case TIMERANDWAKEUP_LATENCY:
20106 +                       hist = &per_cpu(timerandwakeup_latency_hist, cpu);
20107 +                       mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
20108 +                       break;
20109 +#endif
20110 +               }
20111 +
20112 +               hist_reset(hist);
20113 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
20114 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
20115 +               if (latency_type == WAKEUP_LATENCY ||
20116 +                   latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
20117 +                   latency_type == MISSED_TIMER_OFFSETS ||
20118 +                   latency_type == TIMERANDWAKEUP_LATENCY)
20119 +                       clear_maxlatprocdata(mp);
20120 +#endif
20121 +       }
20122 +
20123 +       return size;
20124 +}
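
latency_hist_reset() clears the histograms of the given type on every online CPU,
so a single write to the per-directory "reset" file is enough; the written value
itself is ignored. For example (path assuming the default debugfs mount point):

        # echo 1 > /sys/kernel/debug/tracing/latency_hist/wakeup/reset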
20125 +
20126 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
20127 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
20128 +static ssize_t
20129 +show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
20130 +{
20131 +       char buf[64];
20132 +       int r;
20133 +       unsigned long *this_pid = file->private_data;
20134 +
20135 +       r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
20136 +       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
20137 +}
20138 +
20139 +static ssize_t do_pid(struct file *file, const char __user *ubuf,
20140 +                     size_t cnt, loff_t *ppos)
20141 +{
20142 +       char buf[64];
20143 +       unsigned long pid;
20144 +       unsigned long *this_pid = file->private_data;
20145 +
20146 +       if (cnt >= sizeof(buf))
20147 +               return -EINVAL;
20148 +
20149 +       if (copy_from_user(&buf, ubuf, cnt))
20150 +               return -EFAULT;
20151 +
20152 +       buf[cnt] = '\0';
20153 +
20154 +       if (kstrtoul(buf, 10, &pid))
20155 +               return -EINVAL;
20156 +
20157 +       *this_pid = pid;
20158 +
20159 +       return cnt;
20160 +}
20161 +#endif
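
The "pid" attribute parsed by do_pid() restricts wakeup-latency and missed-timer
accounting to a single task; writing 0 removes the filter again. A hypothetical
example for a task with PID 4711:

        # echo 4711 > /sys/kernel/debug/tracing/latency_hist/wakeup/pid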
20162 +
20163 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
20164 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
20165 +static ssize_t
20166 +show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
20167 +{
20168 +       int r;
20169 +       struct maxlatproc_data *mp = file->private_data;
20170 +       int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
20171 +       unsigned long long t;
20172 +       unsigned long usecs, secs;
20173 +       char *buf;
20174 +
20175 +       if (mp->pid == -1 || mp->current_pid == -1) {
20176 +               buf = "(none)\n";
20177 +               return simple_read_from_buffer(ubuf, cnt, ppos, buf,
20178 +                   strlen(buf));
20179 +       }
20180 +
20181 +       buf = kmalloc(strmaxlen, GFP_KERNEL);
20182 +       if (buf == NULL)
20183 +               return -ENOMEM;
20184 +
20185 +       t = ns2usecs(mp->timestamp);
20186 +       usecs = do_div(t, USEC_PER_SEC);
20187 +       secs = (unsigned long) t;
20188 +       r = snprintf(buf, strmaxlen,
20189 +           "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
20190 +           MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
20191 +           mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
20192 +           secs, usecs);
20193 +       r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
20194 +       kfree(buf);
20195 +       return r;
20196 +}
20197 +#endif
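
show_maxlatproc() prints one line describing the task that has suffered the largest
latency so far on that CPU, in the fixed order: PID, RT priority
(MAX_RT_PRIO-1 - prio), latency in microseconds, (timeroffset), comm, then "<-" and
the PID, priority and comm of the task that was running at the time, and finally the
timestamp in seconds. A hypothetical output line:

        1268 50 123 (0) cyclictest <- 0 -21 swapper/0 1300.234567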
20198 +
20199 +static ssize_t
20200 +show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
20201 +{
20202 +       char buf[64];
20203 +       struct enable_data *ed = file->private_data;
20204 +       int r;
20205 +
20206 +       r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
20207 +       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
20208 +}
20209 +
20210 +static ssize_t
20211 +do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
20212 +{
20213 +       char buf[64];
20214 +       unsigned long enable;
20215 +       struct enable_data *ed = file->private_data;
20216 +
20217 +       if (cnt >= sizeof(buf))
20218 +               return -EINVAL;
20219 +
20220 +       if (copy_from_user(&buf, ubuf, cnt))
20221 +               return -EFAULT;
20222 +
20223 +       buf[cnt] = 0;
20224 +
20225 +       if (kstrtoul(buf, 10, &enable))
20226 +               return -EINVAL;
20227 +
20228 +       if ((enable && ed->enabled) || (!enable && !ed->enabled))
20229 +               return cnt;
20230 +
20231 +       if (enable) {
20232 +               int ret;
20233 +
20234 +               switch (ed->latency_type) {
20235 +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
20236 +               case PREEMPTIRQSOFF_LATENCY:
20237 +                       ret = register_trace_preemptirqsoff_hist(
20238 +                           probe_preemptirqsoff_hist, NULL);
20239 +                       if (ret) {
20240 +                               pr_info("wakeup trace: Couldn't assign "
20241 +                                   "probe_preemptirqsoff_hist "
20242 +                                   "to trace_preemptirqsoff_hist\n");
20243 +                               return ret;
20244 +                       }
20245 +                       break;
20246 +#endif
20247 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
20248 +               case WAKEUP_LATENCY:
20249 +                       ret = register_trace_sched_wakeup(
20250 +                           probe_wakeup_latency_hist_start, NULL);
20251 +                       if (ret) {
20252 +                               pr_info("wakeup trace: Couldn't assign "
20253 +                                   "probe_wakeup_latency_hist_start "
20254 +                                   "to trace_sched_wakeup\n");
20255 +                               return ret;
20256 +                       }
20257 +                       ret = register_trace_sched_wakeup_new(
20258 +                           probe_wakeup_latency_hist_start, NULL);
20259 +                       if (ret) {
20260 +                               pr_info("wakeup trace: Couldn't assign "
20261 +                                   "probe_wakeup_latency_hist_start "
20262 +                                   "to trace_sched_wakeup_new\n");
20263 +                               unregister_trace_sched_wakeup(
20264 +                                   probe_wakeup_latency_hist_start, NULL);
20265 +                               return ret;
20266 +                       }
20267 +                       ret = register_trace_sched_switch(
20268 +                           probe_wakeup_latency_hist_stop, NULL);
20269 +                       if (ret) {
20270 +                               pr_info("wakeup trace: Couldn't assign "
20271 +                                   "probe_wakeup_latency_hist_stop "
20272 +                                   "to trace_sched_switch\n");
20273 +                               unregister_trace_sched_wakeup(
20274 +                                   probe_wakeup_latency_hist_start, NULL);
20275 +                               unregister_trace_sched_wakeup_new(
20276 +                                   probe_wakeup_latency_hist_start, NULL);
20277 +                               return ret;
20278 +                       }
20279 +                       ret = register_trace_sched_migrate_task(
20280 +                           probe_sched_migrate_task, NULL);
20281 +                       if (ret) {
20282 +                               pr_info("wakeup trace: Couldn't assign "
20283 +                                   "probe_sched_migrate_task "
20284 +                                   "to trace_sched_migrate_task\n");
20285 +                               unregister_trace_sched_wakeup(
20286 +                                   probe_wakeup_latency_hist_start, NULL);
20287 +                               unregister_trace_sched_wakeup_new(
20288 +                                   probe_wakeup_latency_hist_start, NULL);
20289 +                               unregister_trace_sched_switch(
20290 +                                   probe_wakeup_latency_hist_stop, NULL);
20291 +                               return ret;
20292 +                       }
20293 +                       break;
20294 +#endif
20295 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
20296 +               case MISSED_TIMER_OFFSETS:
20297 +                       ret = register_trace_hrtimer_interrupt(
20298 +                           probe_hrtimer_interrupt, NULL);
20299 +                       if (ret) {
20300 +                               pr_info("wakeup trace: Couldn't assign "
20301 +                                   "probe_hrtimer_interrupt "
20302 +                                   "to trace_hrtimer_interrupt\n");
20303 +                               return ret;
20304 +                       }
20305 +                       break;
20306 +#endif
20307 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
20308 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
20309 +               case TIMERANDWAKEUP_LATENCY:
20310 +                       if (!wakeup_latency_enabled_data.enabled ||
20311 +                           !missed_timer_offsets_enabled_data.enabled)
20312 +                               return -EINVAL;
20313 +                       break;
20314 +#endif
20315 +               default:
20316 +                       break;
20317 +               }
20318 +       } else {
20319 +               switch (ed->latency_type) {
20320 +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
20321 +               case PREEMPTIRQSOFF_LATENCY:
20322 +                       {
20323 +                               int cpu;
20324 +
20325 +                               unregister_trace_preemptirqsoff_hist(
20326 +                                   probe_preemptirqsoff_hist, NULL);
20327 +                               for_each_online_cpu(cpu) {
20328 +#ifdef CONFIG_INTERRUPT_OFF_HIST
20329 +                                       per_cpu(hist_irqsoff_counting,
20330 +                                           cpu) = 0;
20331 +#endif
20332 +#ifdef CONFIG_PREEMPT_OFF_HIST
20333 +                                       per_cpu(hist_preemptoff_counting,
20334 +                                           cpu) = 0;
20335 +#endif
20336 +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
20337 +                                       per_cpu(hist_preemptirqsoff_counting,
20338 +                                           cpu) = 0;
20339 +#endif
20340 +                               }
20341 +                       }
20342 +                       break;
20343 +#endif
20344 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
20345 +               case WAKEUP_LATENCY:
20346 +                       {
20347 +                               int cpu;
20348 +
20349 +                               unregister_trace_sched_wakeup(
20350 +                                   probe_wakeup_latency_hist_start, NULL);
20351 +                               unregister_trace_sched_wakeup_new(
20352 +                                   probe_wakeup_latency_hist_start, NULL);
20353 +                               unregister_trace_sched_switch(
20354 +                                   probe_wakeup_latency_hist_stop, NULL);
20355 +                               unregister_trace_sched_migrate_task(
20356 +                                   probe_sched_migrate_task, NULL);
20357 +
20358 +                               for_each_online_cpu(cpu) {
20359 +                                       per_cpu(wakeup_task, cpu) = NULL;
20360 +                                       per_cpu(wakeup_sharedprio, cpu) = 0;
20361 +                               }
20362 +                       }
20363 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
20364 +                       timerandwakeup_enabled_data.enabled = 0;
20365 +#endif
20366 +                       break;
20367 +#endif
20368 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
20369 +               case MISSED_TIMER_OFFSETS:
20370 +                       unregister_trace_hrtimer_interrupt(
20371 +                           probe_hrtimer_interrupt, NULL);
20372 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
20373 +                       timerandwakeup_enabled_data.enabled = 0;
20374 +#endif
20375 +                       break;
20376 +#endif
20377 +               default:
20378 +                       break;
20379 +               }
20380 +       }
20381 +       ed->enabled = enable;
20382 +       return cnt;
20383 +}
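
do_enable() registers or unregisters the tracepoint probes when 1 or 0 is written to
the corresponding file under latency_hist/enable/. For example, to start collecting
wakeup latencies and stop again later (path assuming the default debugfs mount):

        # echo 1 > /sys/kernel/debug/tracing/latency_hist/enable/wakeup
        # echo 0 > /sys/kernel/debug/tracing/latency_hist/enable/wakeup

Note that "timerandwakeup" can only be enabled once both "wakeup" and
"missed_timer_offsets" are enabled, as checked above.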
20384 +
20385 +static const struct file_operations latency_hist_reset_fops = {
20386 +       .open = tracing_open_generic,
20387 +       .write = latency_hist_reset,
20388 +};
20389 +
20390 +static const struct file_operations enable_fops = {
20391 +       .open = tracing_open_generic,
20392 +       .read = show_enable,
20393 +       .write = do_enable,
20394 +};
20395 +
20396 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
20397 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
20398 +static const struct file_operations pid_fops = {
20399 +       .open = tracing_open_generic,
20400 +       .read = show_pid,
20401 +       .write = do_pid,
20402 +};
20403 +
20404 +static const struct file_operations maxlatproc_fops = {
20405 +       .open = tracing_open_generic,
20406 +       .read = show_maxlatproc,
20407 +};
20408 +#endif
20409 +
20410 +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
20411 +static notrace void probe_preemptirqsoff_hist(void *v, int reason,
20412 +       int starthist)
20413 +{
20414 +       int cpu = raw_smp_processor_id();
20415 +       int time_set = 0;
20416 +
20417 +       if (starthist) {
20418 +               cycle_t uninitialized_var(start);
20419 +
20420 +               if (!preempt_count() && !irqs_disabled())
20421 +                       return;
20422 +
20423 +#ifdef CONFIG_INTERRUPT_OFF_HIST
20424 +               if ((reason == IRQS_OFF || reason == TRACE_START) &&
20425 +                   !per_cpu(hist_irqsoff_counting, cpu)) {
20426 +                       per_cpu(hist_irqsoff_counting, cpu) = 1;
20427 +                       start = ftrace_now(cpu);
20428 +                       time_set++;
20429 +                       per_cpu(hist_irqsoff_start, cpu) = start;
20430 +               }
20431 +#endif
20432 +
20433 +#ifdef CONFIG_PREEMPT_OFF_HIST
20434 +               if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
20435 +                   !per_cpu(hist_preemptoff_counting, cpu)) {
20436 +                       per_cpu(hist_preemptoff_counting, cpu) = 1;
20437 +                       if (!(time_set++))
20438 +                               start = ftrace_now(cpu);
20439 +                       per_cpu(hist_preemptoff_start, cpu) = start;
20440 +               }
20441 +#endif
20442 +
20443 +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
20444 +               if (per_cpu(hist_irqsoff_counting, cpu) &&
20445 +                   per_cpu(hist_preemptoff_counting, cpu) &&
20446 +                   !per_cpu(hist_preemptirqsoff_counting, cpu)) {
20447 +                       per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
20448 +                       if (!time_set)
20449 +                               start = ftrace_now(cpu);
20450 +                       per_cpu(hist_preemptirqsoff_start, cpu) = start;
20451 +               }
20452 +#endif
20453 +       } else {
20454 +               cycle_t uninitialized_var(stop);
20455 +
20456 +#ifdef CONFIG_INTERRUPT_OFF_HIST
20457 +               if ((reason == IRQS_ON || reason == TRACE_STOP) &&
20458 +                   per_cpu(hist_irqsoff_counting, cpu)) {
20459 +                       cycle_t start = per_cpu(hist_irqsoff_start, cpu);
20460 +
20461 +                       stop = ftrace_now(cpu);
20462 +                       time_set++;
20463 +                       if (start) {
20464 +                               long latency = ((long) (stop - start)) /
20465 +                                   NSECS_PER_USECS;
20466 +
20467 +                               latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
20468 +                                   stop, NULL);
20469 +                       }
20470 +                       per_cpu(hist_irqsoff_counting, cpu) = 0;
20471 +               }
20472 +#endif
20473 +
20474 +#ifdef CONFIG_PREEMPT_OFF_HIST
20475 +               if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
20476 +                   per_cpu(hist_preemptoff_counting, cpu)) {
20477 +                       cycle_t start = per_cpu(hist_preemptoff_start, cpu);
20478 +
20479 +                       if (!(time_set++))
20480 +                               stop = ftrace_now(cpu);
20481 +                       if (start) {
20482 +                               long latency = ((long) (stop - start)) /
20483 +                                   NSECS_PER_USECS;
20484 +
20485 +                               latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
20486 +                                   0, stop, NULL);
20487 +                       }
20488 +                       per_cpu(hist_preemptoff_counting, cpu) = 0;
20489 +               }
20490 +#endif
20491 +
20492 +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
20493 +               if ((!per_cpu(hist_irqsoff_counting, cpu) ||
20494 +                    !per_cpu(hist_preemptoff_counting, cpu)) &&
20495 +                  per_cpu(hist_preemptirqsoff_counting, cpu)) {
20496 +                       cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
20497 +
20498 +                       if (!time_set)
20499 +                               stop = ftrace_now(cpu);
20500 +                       if (start) {
20501 +                               long latency = ((long) (stop - start)) /
20502 +                                   NSECS_PER_USECS;
20503 +
20504 +                               latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
20505 +                                   latency, 0, stop, NULL);
20506 +                       }
20507 +                       per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
20508 +               }
20509 +#endif
20510 +       }
20511 +}
20512 +#endif
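
probe_preemptirqsoff_hist() timestamps the start of an irqs-off and/or preempt-off
section and, at the end, converts the ftrace_now() delta from nanoseconds to
microseconds via NSECS_PER_USECS. For example, a critical section whose delta is
2,345,000 ns is accounted as 2345 us, i.e. bin 2345 of the corresponding per-CPU
histogram (the offset of these histograms is 0, so bin index and latency coincide).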
20513 +
20514 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
20515 +static DEFINE_RAW_SPINLOCK(wakeup_lock);
20516 +static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
20517 +       int cpu)
20518 +{
20519 +       int old_cpu = task_cpu(task);
20520 +
20521 +       if (cpu != old_cpu) {
20522 +               unsigned long flags;
20523 +               struct task_struct *cpu_wakeup_task;
20524 +
20525 +               raw_spin_lock_irqsave(&wakeup_lock, flags);
20526 +
20527 +               cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
20528 +               if (task == cpu_wakeup_task) {
20529 +                       put_task_struct(cpu_wakeup_task);
20530 +                       per_cpu(wakeup_task, old_cpu) = NULL;
20531 +                       cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
20532 +                       get_task_struct(cpu_wakeup_task);
20533 +               }
20534 +
20535 +               raw_spin_unlock_irqrestore(&wakeup_lock, flags);
20536 +       }
20537 +}
20538 +
20539 +static notrace void probe_wakeup_latency_hist_start(void *v,
20540 +       struct task_struct *p)
20541 +{
20542 +       unsigned long flags;
20543 +       struct task_struct *curr = current;
20544 +       int cpu = task_cpu(p);
20545 +       struct task_struct *cpu_wakeup_task;
20546 +
20547 +       raw_spin_lock_irqsave(&wakeup_lock, flags);
20548 +
20549 +       cpu_wakeup_task = per_cpu(wakeup_task, cpu);
20550 +
20551 +       if (wakeup_pid) {
20552 +               if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
20553 +                   p->prio == curr->prio)
20554 +                       per_cpu(wakeup_sharedprio, cpu) = 1;
20555 +               if (likely(wakeup_pid != task_pid_nr(p)))
20556 +                       goto out;
20557 +       } else {
20558 +               if (likely(!rt_task(p)) ||
20559 +                   (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
20560 +                   p->prio > curr->prio)
20561 +                       goto out;
20562 +               if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
20563 +                   p->prio == curr->prio)
20564 +                       per_cpu(wakeup_sharedprio, cpu) = 1;
20565 +       }
20566 +
20567 +       if (cpu_wakeup_task)
20568 +               put_task_struct(cpu_wakeup_task);
20569 +       cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
20570 +       get_task_struct(cpu_wakeup_task);
20571 +       cpu_wakeup_task->preempt_timestamp_hist =
20572 +               ftrace_now(raw_smp_processor_id());
20573 +out:
20574 +       raw_spin_unlock_irqrestore(&wakeup_lock, flags);
20575 +}
20576 +
20577 +static notrace void probe_wakeup_latency_hist_stop(void *v,
20578 +       bool preempt, struct task_struct *prev, struct task_struct *next)
20579 +{
20580 +       unsigned long flags;
20581 +       int cpu = task_cpu(next);
20582 +       long latency;
20583 +       cycle_t stop;
20584 +       struct task_struct *cpu_wakeup_task;
20585 +
20586 +       raw_spin_lock_irqsave(&wakeup_lock, flags);
20587 +
20588 +       cpu_wakeup_task = per_cpu(wakeup_task, cpu);
20589 +
20590 +       if (cpu_wakeup_task == NULL)
20591 +               goto out;
20592 +
20593 +       /* Already running? */
20594 +       if (unlikely(current == cpu_wakeup_task))
20595 +               goto out_reset;
20596 +
20597 +       if (next != cpu_wakeup_task) {
20598 +               if (next->prio < cpu_wakeup_task->prio)
20599 +                       goto out_reset;
20600 +
20601 +               if (next->prio == cpu_wakeup_task->prio)
20602 +                       per_cpu(wakeup_sharedprio, cpu) = 1;
20603 +
20604 +               goto out;
20605 +       }
20606 +
20607 +       if (current->prio == cpu_wakeup_task->prio)
20608 +               per_cpu(wakeup_sharedprio, cpu) = 1;
20609 +
20610 +       /*
20611 +        * The task we are waiting for is about to be switched to.
20612 +        * Calculate latency and store it in histogram.
20613 +        */
20614 +       stop = ftrace_now(raw_smp_processor_id());
20615 +
20616 +       latency = ((long) (stop - next->preempt_timestamp_hist)) /
20617 +           NSECS_PER_USECS;
20618 +
20619 +       if (per_cpu(wakeup_sharedprio, cpu)) {
20620 +               latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
20621 +                   next);
20622 +               per_cpu(wakeup_sharedprio, cpu) = 0;
20623 +       } else {
20624 +               latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
20625 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
20626 +               if (timerandwakeup_enabled_data.enabled) {
20627 +                       latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
20628 +                           next->timer_offset + latency, next->timer_offset,
20629 +                           stop, next);
20630 +               }
20631 +#endif
20632 +       }
20633 +
20634 +out_reset:
20635 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
20636 +       next->timer_offset = 0;
20637 +#endif
20638 +       put_task_struct(cpu_wakeup_task);
20639 +       per_cpu(wakeup_task, cpu) = NULL;
20640 +out:
20641 +       raw_spin_unlock_irqrestore(&wakeup_lock, flags);
20642 +}
20643 +#endif
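
Wakeup latency is measured from the timestamp taken in
probe_wakeup_latency_hist_start() to the sched_switch that finally runs the task,
again in microseconds. If a task of equal priority was involved, the sample goes to
the "sharedprio" histogram instead: e.g. a task woken at time t and switched in
180 us later lands in bin 180 of either wakeup/CPUn or wakeup/sharedprio/CPUn,
depending on the wakeup_sharedprio flag set above.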
20644 +
20645 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
20646 +static notrace void probe_hrtimer_interrupt(void *v, int cpu,
20647 +       long long latency_ns, struct task_struct *curr,
20648 +       struct task_struct *task)
20649 +{
20650 +       if (latency_ns <= 0 && task != NULL && rt_task(task) &&
20651 +           (task->prio < curr->prio ||
20652 +           (task->prio == curr->prio &&
20653 +           !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
20654 +               long latency;
20655 +               cycle_t now;
20656 +
20657 +               if (missed_timer_offsets_pid) {
20658 +                       if (likely(missed_timer_offsets_pid !=
20659 +                           task_pid_nr(task)))
20660 +                               return;
20661 +               }
20662 +
20663 +               now = ftrace_now(cpu);
20664 +               latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
20665 +               latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
20666 +                   task);
20667 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
20668 +               task->timer_offset = latency;
20669 +#endif
20670 +       }
20671 +}
20672 +#endif
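
probe_hrtimer_interrupt() only records expiries that are already late
(latency_ns <= 0) and that concern a realtime task of higher priority than the one
currently running (or of equal priority but not runnable on this CPU). The lateness
is converted to microseconds and, with CONFIG_WAKEUP_LATENCY_HIST, also stashed in
task->timer_offset so the combined timerandwakeup histogram can add it to the
subsequent wakeup latency. For example, a timer that fired 250 us late for a task
that then took 180 us to be scheduled contributes 250 to missed_timer_offsets and
430 to timerandwakeup.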
20673 +
20674 +static __init int latency_hist_init(void)
20675 +{
20676 +       struct dentry *latency_hist_root = NULL;
20677 +       struct dentry *dentry;
20678 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
20679 +       struct dentry *dentry_sharedprio;
20680 +#endif
20681 +       struct dentry *entry;
20682 +       struct dentry *enable_root;
20683 +       int i = 0;
20684 +       struct hist_data *my_hist;
20685 +       char name[64];
20686 +       char *cpufmt = "CPU%d";
20687 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
20688 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
20689 +       char *cpufmt_maxlatproc = "max_latency-CPU%d";
20690 +       struct maxlatproc_data *mp = NULL;
20691 +#endif
20692 +
20693 +       dentry = tracing_init_dentry();
20694 +       latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
20695 +       enable_root = debugfs_create_dir("enable", latency_hist_root);
20696 +
20697 +#ifdef CONFIG_INTERRUPT_OFF_HIST
20698 +       dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
20699 +       for_each_possible_cpu(i) {
20700 +               sprintf(name, cpufmt, i);
20701 +               entry = debugfs_create_file(name, 0444, dentry,
20702 +                   &per_cpu(irqsoff_hist, i), &latency_hist_fops);
20703 +               my_hist = &per_cpu(irqsoff_hist, i);
20704 +               atomic_set(&my_hist->hist_mode, 1);
20705 +               my_hist->min_lat = LONG_MAX;
20706 +       }
20707 +       entry = debugfs_create_file("reset", 0644, dentry,
20708 +           (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
20709 +#endif
20710 +
20711 +#ifdef CONFIG_PREEMPT_OFF_HIST
20712 +       dentry = debugfs_create_dir(preemptoff_hist_dir,
20713 +           latency_hist_root);
20714 +       for_each_possible_cpu(i) {
20715 +               sprintf(name, cpufmt, i);
20716 +               entry = debugfs_create_file(name, 0444, dentry,
20717 +                   &per_cpu(preemptoff_hist, i), &latency_hist_fops);
20718 +               my_hist = &per_cpu(preemptoff_hist, i);
20719 +               atomic_set(&my_hist->hist_mode, 1);
20720 +               my_hist->min_lat = LONG_MAX;
20721 +       }
20722 +       entry = debugfs_create_file("reset", 0644, dentry,
20723 +           (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
20724 +#endif
20725 +
20726 +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
20727 +       dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
20728 +           latency_hist_root);
20729 +       for_each_possible_cpu(i) {
20730 +               sprintf(name, cpufmt, i);
20731 +               entry = debugfs_create_file(name, 0444, dentry,
20732 +                   &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
20733 +               my_hist = &per_cpu(preemptirqsoff_hist, i);
20734 +               atomic_set(&my_hist->hist_mode, 1);
20735 +               my_hist->min_lat = LONG_MAX;
20736 +       }
20737 +       entry = debugfs_create_file("reset", 0644, dentry,
20738 +           (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
20739 +#endif
20740 +
20741 +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
20742 +       entry = debugfs_create_file("preemptirqsoff", 0644,
20743 +           enable_root, (void *)&preemptirqsoff_enabled_data,
20744 +           &enable_fops);
20745 +#endif
20746 +
20747 +#ifdef CONFIG_WAKEUP_LATENCY_HIST
20748 +       dentry = debugfs_create_dir(wakeup_latency_hist_dir,
20749 +           latency_hist_root);
20750 +       dentry_sharedprio = debugfs_create_dir(
20751 +           wakeup_latency_hist_dir_sharedprio, dentry);
20752 +       for_each_possible_cpu(i) {
20753 +               sprintf(name, cpufmt, i);
20754 +
20755 +               entry = debugfs_create_file(name, 0444, dentry,
20756 +                   &per_cpu(wakeup_latency_hist, i),
20757 +                   &latency_hist_fops);
20758 +               my_hist = &per_cpu(wakeup_latency_hist, i);
20759 +               atomic_set(&my_hist->hist_mode, 1);
20760 +               my_hist->min_lat = LONG_MAX;
20761 +
20762 +               entry = debugfs_create_file(name, 0444, dentry_sharedprio,
20763 +                   &per_cpu(wakeup_latency_hist_sharedprio, i),
20764 +                   &latency_hist_fops);
20765 +               my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
20766 +               atomic_set(&my_hist->hist_mode, 1);
20767 +               my_hist->min_lat = LONG_MAX;
20768 +
20769 +               sprintf(name, cpufmt_maxlatproc, i);
20770 +
20771 +               mp = &per_cpu(wakeup_maxlatproc, i);
20772 +               entry = debugfs_create_file(name, 0444, dentry, mp,
20773 +                   &maxlatproc_fops);
20774 +               clear_maxlatprocdata(mp);
20775 +
20776 +               mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
20777 +               entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
20778 +                   &maxlatproc_fops);
20779 +               clear_maxlatprocdata(mp);
20780 +       }
20781 +       entry = debugfs_create_file("pid", 0644, dentry,
20782 +           (void *)&wakeup_pid, &pid_fops);
20783 +       entry = debugfs_create_file("reset", 0644, dentry,
20784 +           (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
20785 +       entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
20786 +           (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
20787 +       entry = debugfs_create_file("wakeup", 0644,
20788 +           enable_root, (void *)&wakeup_latency_enabled_data,
20789 +           &enable_fops);
20790 +#endif
20791 +
20792 +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
20793 +       dentry = debugfs_create_dir(missed_timer_offsets_dir,
20794 +           latency_hist_root);
20795 +       for_each_possible_cpu(i) {
20796 +               sprintf(name, cpufmt, i);
20797 +               entry = debugfs_create_file(name, 0444, dentry,
20798 +                   &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
20799 +               my_hist = &per_cpu(missed_timer_offsets, i);
20800 +               atomic_set(&my_hist->hist_mode, 1);
20801 +               my_hist->min_lat = LONG_MAX;
20802 +
20803 +               sprintf(name, cpufmt_maxlatproc, i);
20804 +               mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
20805 +               entry = debugfs_create_file(name, 0444, dentry, mp,
20806 +                   &maxlatproc_fops);
20807 +               clear_maxlatprocdata(mp);
20808 +       }
20809 +       entry = debugfs_create_file("pid", 0644, dentry,
20810 +           (void *)&missed_timer_offsets_pid, &pid_fops);
20811 +       entry = debugfs_create_file("reset", 0644, dentry,
20812 +           (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
20813 +       entry = debugfs_create_file("missed_timer_offsets", 0644,
20814 +           enable_root, (void *)&missed_timer_offsets_enabled_data,
20815 +           &enable_fops);
20816 +#endif
20817 +
20818 +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
20819 +       defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
20820 +       dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
20821 +           latency_hist_root);
20822 +       for_each_possible_cpu(i) {
20823 +               sprintf(name, cpufmt, i);
20824 +               entry = debugfs_create_file(name, 0444, dentry,
20825 +                   &per_cpu(timerandwakeup_latency_hist, i),
20826 +                   &latency_hist_fops);
20827 +               my_hist = &per_cpu(timerandwakeup_latency_hist, i);
20828 +               atomic_set(&my_hist->hist_mode, 1);
20829 +               my_hist->min_lat = LONG_MAX;
20830 +
20831 +               sprintf(name, cpufmt_maxlatproc, i);
20832 +               mp = &per_cpu(timerandwakeup_maxlatproc, i);
20833 +               entry = debugfs_create_file(name, 0444, dentry, mp,
20834 +                   &maxlatproc_fops);
20835 +               clear_maxlatprocdata(mp);
20836 +       }
20837 +       entry = debugfs_create_file("reset", 0644, dentry,
20838 +           (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
20839 +       entry = debugfs_create_file("timerandwakeup", 0644,
20840 +           enable_root, (void *)&timerandwakeup_enabled_data,
20841 +           &enable_fops);
20842 +#endif
20843 +       return 0;
20844 +}
20845 +
20846 +device_initcall(latency_hist_init);
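
latency_hist_init() builds the following debugfs layout under the tracing directory
(typically /sys/kernel/debug/tracing; each directory exists only when the matching
CONFIG_*_HIST option is set):

        latency_hist/
                enable/{preemptirqsoff, wakeup, missed_timer_offsets, timerandwakeup}
                irqsoff/{CPU0..CPUn, reset}
                preemptoff/{CPU0..CPUn, reset}
                preemptirqsoff/{CPU0..CPUn, reset}
                wakeup/{CPU0..CPUn, max_latency-CPU0..n, pid, reset,
                        sharedprio/{CPU0..CPUn, max_latency-CPU0..n, reset}}
                missed_timer_offsets/{CPU0..CPUn, max_latency-CPU0..n, pid, reset}
                timerandwakeup/{CPU0..CPUn, max_latency-CPU0..n, reset}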
20847 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
20848 index 8696ce6bf2f6..277f048a4695 100644
20849 --- a/kernel/trace/trace.c
20850 +++ b/kernel/trace/trace.c
20851 @@ -1897,6 +1897,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
20852         struct task_struct *tsk = current;
20853  
20854         entry->preempt_count            = pc & 0xff;
20855 +       entry->preempt_lazy_count       = preempt_lazy_count();
20856         entry->pid                      = (tsk) ? tsk->pid : 0;
20857         entry->flags =
20858  #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
20859 @@ -1907,8 +1908,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
20860                 ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
20861                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
20862                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
20863 -               (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
20864 +               (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
20865 +               (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
20866                 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
20867 +
20868 +       entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
20869  }
20870  EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
20871  
20872 @@ -2892,14 +2896,17 @@ get_total_entries(struct trace_buffer *buf,
20873  
20874  static void print_lat_help_header(struct seq_file *m)
20875  {
20876 -       seq_puts(m, "#                  _------=> CPU#            \n"
20877 -                   "#                 / _-----=> irqs-off        \n"
20878 -                   "#                | / _----=> need-resched    \n"
20879 -                   "#                || / _---=> hardirq/softirq \n"
20880 -                   "#                ||| / _--=> preempt-depth   \n"
20881 -                   "#                |||| /     delay            \n"
20882 -                   "#  cmd     pid   ||||| time  |   caller      \n"
20883 -                   "#     \\   /      |||||  \\    |   /         \n");
20884 +       seq_puts(m, "#                  _--------=> CPU#              \n"
20885 +                   "#                 / _-------=> irqs-off          \n"
20886 +                   "#                | / _------=> need-resched      \n"
20887 +                   "#                || / _-----=> need-resched_lazy \n"
20888 +                   "#                ||| / _----=> hardirq/softirq   \n"
20889 +                   "#                |||| / _---=> preempt-depth     \n"
20890 +                   "#                ||||| / _--=> preempt-lazy-depth\n"
20891 +                   "#                |||||| / _-=> migrate-disable   \n"
20892 +                   "#                ||||||| /     delay             \n"
20893 +                   "# cmd     pid    |||||||| time   |  caller       \n"
20894 +                   "#     \\   /      ||||||||   \\    |  /            \n");
20895  }
20896  
20897  static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
20898 @@ -2925,11 +2932,14 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
20899         print_event_info(buf, m);
20900         seq_puts(m, "#                              _-----=> irqs-off\n"
20901                     "#                             / _----=> need-resched\n"
20902 -                   "#                            | / _---=> hardirq/softirq\n"
20903 -                   "#                            || / _--=> preempt-depth\n"
20904 -                   "#                            ||| /     delay\n"
20905 -                   "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
20906 -                   "#              | |       |   ||||       |         |\n");
20907 +                   "#                            |/  _-----=> need-resched_lazy\n"
20908 +                   "#                            || / _---=> hardirq/softirq\n"
20909 +                   "#                            ||| / _--=> preempt-depth\n"
20910 +                   "#                            |||| / _-=> preempt-lazy-depth\n"
20911 +                   "#                            ||||| / _-=> migrate-disable   \n"
20912 +                   "#                            |||||| /    delay\n"
20913 +                   "#           TASK-PID   CPU#  |||||||   TIMESTAMP  FUNCTION\n"
20914 +                   "#              | |       |   |||||||      |         |\n");
20915  }
20916  
20917  void
20918 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
20919 index fd24b1f9ac43..852b2c81be25 100644
20920 --- a/kernel/trace/trace.h
20921 +++ b/kernel/trace/trace.h
20922 @@ -124,6 +124,7 @@ struct kretprobe_trace_entry_head {
20923   *  NEED_RESCHED       - reschedule is requested
20924   *  HARDIRQ            - inside an interrupt handler
20925   *  SOFTIRQ            - inside a softirq handler
20926 + *  NEED_RESCHED_LAZY  - lazy reschedule is requested
20927   */
20928  enum trace_flag_type {
20929         TRACE_FLAG_IRQS_OFF             = 0x01,
20930 @@ -133,6 +134,7 @@ enum trace_flag_type {
20931         TRACE_FLAG_SOFTIRQ              = 0x10,
20932         TRACE_FLAG_PREEMPT_RESCHED      = 0x20,
20933         TRACE_FLAG_NMI                  = 0x40,
20934 +       TRACE_FLAG_NEED_RESCHED_LAZY    = 0x80,
20935  };
20936  
20937  #define TRACE_BUF_SIZE         1024
20938 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
20939 index 03c0a48c3ac4..0b85d516b491 100644
20940 --- a/kernel/trace/trace_events.c
20941 +++ b/kernel/trace/trace_events.c
20942 @@ -187,6 +187,8 @@ static int trace_define_common_fields(void)
20943         __common_field(unsigned char, flags);
20944         __common_field(unsigned char, preempt_count);
20945         __common_field(int, pid);
20946 +       __common_field(unsigned short, migrate_disable);
20947 +       __common_field(unsigned short, padding);
20948  
20949         return ret;
20950  }
20951 diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
20952 index 03cdff84d026..940bd10b4406 100644
20953 --- a/kernel/trace/trace_irqsoff.c
20954 +++ b/kernel/trace/trace_irqsoff.c
20955 @@ -13,6 +13,7 @@
20956  #include <linux/uaccess.h>
20957  #include <linux/module.h>
20958  #include <linux/ftrace.h>
20959 +#include <trace/events/hist.h>
20960  
20961  #include "trace.h"
20962  
20963 @@ -424,11 +425,13 @@ void start_critical_timings(void)
20964  {
20965         if (preempt_trace() || irq_trace())
20966                 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
20967 +       trace_preemptirqsoff_hist_rcuidle(TRACE_START, 1);
20968  }
20969  EXPORT_SYMBOL_GPL(start_critical_timings);
20970  
20971  void stop_critical_timings(void)
20972  {
20973 +       trace_preemptirqsoff_hist_rcuidle(TRACE_STOP, 0);
20974         if (preempt_trace() || irq_trace())
20975                 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
20976  }
20977 @@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
20978  #ifdef CONFIG_PROVE_LOCKING
20979  void time_hardirqs_on(unsigned long a0, unsigned long a1)
20980  {
20981 +       trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
20982         if (!preempt_trace() && irq_trace())
20983                 stop_critical_timing(a0, a1);
20984  }
20985 @@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
20986  {
20987         if (!preempt_trace() && irq_trace())
20988                 start_critical_timing(a0, a1);
20989 +       trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
20990  }
20991  
20992  #else /* !CONFIG_PROVE_LOCKING */
20993 @@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct task_struct *curr)
20994   */
20995  void trace_hardirqs_on(void)
20996  {
20997 +       trace_preemptirqsoff_hist(IRQS_ON, 0);
20998         if (!preempt_trace() && irq_trace())
20999                 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
21000  }
21001 @@ -480,11 +486,13 @@ void trace_hardirqs_off(void)
21002  {
21003         if (!preempt_trace() && irq_trace())
21004                 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
21005 +       trace_preemptirqsoff_hist(IRQS_OFF, 1);
21006  }
21007  EXPORT_SYMBOL(trace_hardirqs_off);
21008  
21009  __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
21010  {
21011 +       trace_preemptirqsoff_hist(IRQS_ON, 0);
21012         if (!preempt_trace() && irq_trace())
21013                 stop_critical_timing(CALLER_ADDR0, caller_addr);
21014  }
21015 @@ -494,6 +502,7 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
21016  {
21017         if (!preempt_trace() && irq_trace())
21018                 start_critical_timing(CALLER_ADDR0, caller_addr);
21019 +       trace_preemptirqsoff_hist(IRQS_OFF, 1);
21020  }
21021  EXPORT_SYMBOL(trace_hardirqs_off_caller);
21022  
21023 @@ -503,12 +512,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
21024  #ifdef CONFIG_PREEMPT_TRACER
21025  void trace_preempt_on(unsigned long a0, unsigned long a1)
21026  {
21027 +       trace_preemptirqsoff_hist(PREEMPT_ON, 0);
21028         if (preempt_trace() && !irq_trace())
21029                 stop_critical_timing(a0, a1);
21030  }
21031  
21032  void trace_preempt_off(unsigned long a0, unsigned long a1)
21033  {
21034 +       trace_preemptirqsoff_hist(PREEMPT_ON, 1);
21035         if (preempt_trace() && !irq_trace())
21036                 start_critical_timing(a0, a1);
21037  }
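
The hunks above add trace_preemptirqsoff_hist*() calls at the points where the irqsoff/preempt tracer already marks the start and end of interrupts-off and preemption-off sections, so the latency-histogram code introduced by this patch (see Documentation/trace/histograms.txt above) can bucket how long each section lasted; the second argument is 1 where a section starts and 0 where it ends. The stand-alone sketch below shows only the underlying idea, take a timestamp at the start and bucket the elapsed time at the end; the bucket layout and helper names are illustrative, not the kernel's implementation:

        /* Illustration only: one histogram bucket per microsecond of latency. */
        #include <stdint.h>
        #include <time.h>

        #define HIST_BUCKETS 128

        static uint64_t hist[HIST_BUCKETS];
        static uint64_t section_start_ns;

        static uint64_t now_ns(void)
        {
                struct timespec ts;

                clock_gettime(CLOCK_MONOTONIC, &ts);
                return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
        }

        /* Analogue of the hooks called with a '1' argument (section starts). */
        static void hist_section_start(void)
        {
                section_start_ns = now_ns();
        }

        /* Analogue of the hooks called with a '0' argument (section ends). */
        static void hist_section_end(void)
        {
                uint64_t us = (now_ns() - section_start_ns) / 1000;

                hist[us < HIST_BUCKETS ? us : HIST_BUCKETS - 1]++;
        }
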
21038 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
21039 index 3fc20422c166..65a6dde71a7d 100644
21040 --- a/kernel/trace/trace_output.c
21041 +++ b/kernel/trace/trace_output.c
21042 @@ -386,6 +386,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
21043  {
21044         char hardsoft_irq;
21045         char need_resched;
21046 +       char need_resched_lazy;
21047         char irqs_off;
21048         int hardirq;
21049         int softirq;
21050 @@ -416,6 +417,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
21051                 break;
21052         }
21053  
21054 +       need_resched_lazy =
21055 +               (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
21056 +
21057         hardsoft_irq =
21058                 (nmi && hardirq)     ? 'Z' :
21059                 nmi                  ? 'z' :
21060 @@ -424,14 +428,25 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
21061                 softirq              ? 's' :
21062                                        '.' ;
21063  
21064 -       trace_seq_printf(s, "%c%c%c",
21065 -                        irqs_off, need_resched, hardsoft_irq);
21066 +       trace_seq_printf(s, "%c%c%c%c",
21067 +                        irqs_off, need_resched, need_resched_lazy,
21068 +                        hardsoft_irq);
21069  
21070         if (entry->preempt_count)
21071                 trace_seq_printf(s, "%x", entry->preempt_count);
21072         else
21073                 trace_seq_putc(s, '.');
21074  
21075 +       if (entry->preempt_lazy_count)
21076 +               trace_seq_printf(s, "%x", entry->preempt_lazy_count);
21077 +       else
21078 +               trace_seq_putc(s, '.');
21079 +
21080 +       if (entry->migrate_disable)
21081 +               trace_seq_printf(s, "%x", entry->migrate_disable);
21082 +       else
21083 +               trace_seq_putc(s, '.');
21084 +
21085         return !trace_seq_has_overflowed(s);
21086  }
21087  
21088 diff --git a/kernel/user.c b/kernel/user.c
21089 index b069ccbfb0b0..1a2e88e98b5e 100644
21090 --- a/kernel/user.c
21091 +++ b/kernel/user.c
21092 @@ -161,11 +161,11 @@ void free_uid(struct user_struct *up)
21093         if (!up)
21094                 return;
21095  
21096 -       local_irq_save(flags);
21097 +       local_irq_save_nort(flags);
21098         if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
21099                 free_user(up, flags);
21100         else
21101 -               local_irq_restore(flags);
21102 +               local_irq_restore_nort(flags);
21103  }
21104  
21105  struct user_struct *alloc_uid(kuid_t uid)
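
free_uid() above switches to the _nort ("not on RT") interrupt helpers. On a non-RT kernel they are identical to local_irq_save()/local_irq_restore(); with PREEMPT_RT_FULL they largely compile away, because uidhash_lock becomes a sleeping lock there and must not be taken with hard interrupts disabled. The mapping is roughly as below; the authoritative definitions are in the include/linux/irqflags.h hunk of this patch, so treat this as an approximation:

        #ifdef CONFIG_PREEMPT_RT_FULL
        /* RT: sleeping locks may not be taken with hard IRQs off,
         * so the _nort helpers do not actually disable interrupts. */
        # define local_irq_save_nort(flags)     local_save_flags(flags)
        # define local_irq_restore_nort(flags)  (void)(flags)
        #else
        /* !RT: same as the plain IRQ-disabling versions. */
        # define local_irq_save_nort(flags)     local_irq_save(flags)
        # define local_irq_restore_nort(flags)  local_irq_restore(flags)
        #endif

The same substitution appears again later in this patch (mm/backing-dev.c, lib/scatterlist.c).
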
21106 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
21107 index 6d1020c03d41..70c6a2f79f7e 100644
21108 --- a/kernel/watchdog.c
21109 +++ b/kernel/watchdog.c
21110 @@ -315,6 +315,8 @@ static int is_softlockup(unsigned long touch_ts)
21111  
21112  #ifdef CONFIG_HARDLOCKUP_DETECTOR
21113  
21114 +static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
21115 +
21116  static struct perf_event_attr wd_hw_attr = {
21117         .type           = PERF_TYPE_HARDWARE,
21118         .config         = PERF_COUNT_HW_CPU_CYCLES,
21119 @@ -348,6 +350,13 @@ static void watchdog_overflow_callback(struct perf_event *event,
21120                 /* only print hardlockups once */
21121                 if (__this_cpu_read(hard_watchdog_warn) == true)
21122                         return;
21123 +               /*
21124 +                * If early-printk is enabled then make sure we do not
21125 +                * lock up in printk() and kill console logging:
21126 +                */
21127 +               printk_kill();
21128 +
21129 +               raw_spin_lock(&watchdog_output_lock);
21130  
21131                 pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
21132                 print_modules();
21133 @@ -365,6 +374,7 @@ static void watchdog_overflow_callback(struct perf_event *event,
21134                                 !test_and_set_bit(0, &hardlockup_allcpu_dumped))
21135                         trigger_allbutself_cpu_backtrace();
21136  
21137 +               raw_spin_unlock(&watchdog_output_lock);
21138                 if (hardlockup_panic)
21139                         nmi_panic(regs, "Hard LOCKUP");
21140  
21141 @@ -512,6 +522,7 @@ static void watchdog_enable(unsigned int cpu)
21142         /* kick off the timer for the hardlockup detector */
21143         hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
21144         hrtimer->function = watchdog_timer_fn;
21145 +       hrtimer->irqsafe = 1;
21146  
21147         /* Enable the perf event */
21148         watchdog_nmi_enable(cpu);
21149 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
21150 index 479d840db286..24eba6620a45 100644
21151 --- a/kernel/workqueue.c
21152 +++ b/kernel/workqueue.c
21153 @@ -48,6 +48,8 @@
21154  #include <linux/nodemask.h>
21155  #include <linux/moduleparam.h>
21156  #include <linux/uaccess.h>
21157 +#include <linux/locallock.h>
21158 +#include <linux/delay.h>
21159  
21160  #include "workqueue_internal.h"
21161  
21162 @@ -121,11 +123,16 @@ enum {
21163   *    cpu or grabbing pool->lock is enough for read access.  If
21164   *    POOL_DISASSOCIATED is set, it's identical to L.
21165   *
21166 + *    On RT we need the extra protection via rt_lock_idle_list() for
21167 + *    the list manipulations against read access from
21168 + *    wq_worker_sleeping(). All other places are nicely serialized via
21169 + *    pool->lock.
21170 + *
21171   * A: pool->attach_mutex protected.
21172   *
21173   * PL: wq_pool_mutex protected.
21174   *
21175 - * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
21176 + * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
21177   *
21178   * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
21179   *
21180 @@ -134,7 +141,7 @@ enum {
21181   *
21182   * WQ: wq->mutex protected.
21183   *
21184 - * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
21185 + * WR: wq->mutex protected for writes.  RCU protected for reads.
21186   *
21187   * MD: wq_mayday_lock protected.
21188   */
21189 @@ -185,7 +192,7 @@ struct worker_pool {
21190         atomic_t                nr_running ____cacheline_aligned_in_smp;
21191  
21192         /*
21193 -        * Destruction of pool is sched-RCU protected to allow dereferences
21194 +        * Destruction of pool is RCU protected to allow dereferences
21195          * from get_work_pool().
21196          */
21197         struct rcu_head         rcu;
21198 @@ -214,7 +221,7 @@ struct pool_workqueue {
21199         /*
21200          * Release of unbound pwq is punted to system_wq.  See put_pwq()
21201          * and pwq_unbound_release_workfn() for details.  pool_workqueue
21202 -        * itself is also sched-RCU protected so that the first pwq can be
21203 +        * itself is also RCU protected so that the first pwq can be
21204          * determined without grabbing wq->mutex.
21205          */
21206         struct work_struct      unbound_release_work;
21207 @@ -348,6 +355,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
21208  struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
21209  EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
21210  
21211 +static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
21212 +
21213  static int worker_thread(void *__worker);
21214  static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
21215  
21216 @@ -355,20 +364,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
21217  #include <trace/events/workqueue.h>
21218  
21219  #define assert_rcu_or_pool_mutex()                                     \
21220 -       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
21221 +       RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
21222                          !lockdep_is_held(&wq_pool_mutex),              \
21223 -                        "sched RCU or wq_pool_mutex should be held")
21224 +                        "RCU or wq_pool_mutex should be held")
21225  
21226  #define assert_rcu_or_wq_mutex(wq)                                     \
21227 -       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
21228 +       RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
21229                          !lockdep_is_held(&wq->mutex),                  \
21230 -                        "sched RCU or wq->mutex should be held")
21231 +                        "RCU or wq->mutex should be held")
21232  
21233  #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                       \
21234 -       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
21235 +       RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
21236                          !lockdep_is_held(&wq->mutex) &&                \
21237                          !lockdep_is_held(&wq_pool_mutex),              \
21238 -                        "sched RCU, wq->mutex or wq_pool_mutex should be held")
21239 +                        "RCU, wq->mutex or wq_pool_mutex should be held")
21240  
21241  #define for_each_cpu_worker_pool(pool, cpu)                            \
21242         for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
21243 @@ -380,7 +389,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
21244   * @pool: iteration cursor
21245   * @pi: integer used for iteration
21246   *
21247 - * This must be called either with wq_pool_mutex held or sched RCU read
21248 + * This must be called either with wq_pool_mutex held or RCU read
21249   * locked.  If the pool needs to be used beyond the locking in effect, the
21250   * caller is responsible for guaranteeing that the pool stays online.
21251   *
21252 @@ -412,7 +421,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
21253   * @pwq: iteration cursor
21254   * @wq: the target workqueue
21255   *
21256 - * This must be called either with wq->mutex held or sched RCU read locked.
21257 + * This must be called either with wq->mutex held or RCU read locked.
21258   * If the pwq needs to be used beyond the locking in effect, the caller is
21259   * responsible for guaranteeing that the pwq stays online.
21260   *
21261 @@ -424,6 +433,31 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
21262                 if (({ assert_rcu_or_wq_mutex(wq); false; })) { }       \
21263                 else
21264  
21265 +#ifdef CONFIG_PREEMPT_RT_BASE
21266 +static inline void rt_lock_idle_list(struct worker_pool *pool)
21267 +{
21268 +       preempt_disable();
21269 +}
21270 +static inline void rt_unlock_idle_list(struct worker_pool *pool)
21271 +{
21272 +       preempt_enable();
21273 +}
21274 +static inline void sched_lock_idle_list(struct worker_pool *pool) { }
21275 +static inline void sched_unlock_idle_list(struct worker_pool *pool) { }
21276 +#else
21277 +static inline void rt_lock_idle_list(struct worker_pool *pool) { }
21278 +static inline void rt_unlock_idle_list(struct worker_pool *pool) { }
21279 +static inline void sched_lock_idle_list(struct worker_pool *pool)
21280 +{
21281 +       spin_lock_irq(&pool->lock);
21282 +}
21283 +static inline void sched_unlock_idle_list(struct worker_pool *pool)
21284 +{
21285 +       spin_unlock_irq(&pool->lock);
21286 +}
21287 +#endif
21288 +
21289 +
21290  #ifdef CONFIG_DEBUG_OBJECTS_WORK
21291  
21292  static struct debug_obj_descr work_debug_descr;
21293 @@ -548,7 +582,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
21294   * @wq: the target workqueue
21295   * @node: the node ID
21296   *
21297 - * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
21298 + * This must be called with any of wq_pool_mutex, wq->mutex or RCU
21299   * read locked.
21300   * If the pwq needs to be used beyond the locking in effect, the caller is
21301   * responsible for guaranteeing that the pwq stays online.
21302 @@ -692,8 +726,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
21303   * @work: the work item of interest
21304   *
21305   * Pools are created and destroyed under wq_pool_mutex, and allows read
21306 - * access under sched-RCU read lock.  As such, this function should be
21307 - * called under wq_pool_mutex or with preemption disabled.
21308 + * access under RCU read lock.  As such, this function should be
21309 + * called under wq_pool_mutex or inside of a rcu_read_lock() region.
21310   *
21311   * All fields of the returned pool are accessible as long as the above
21312   * mentioned locking is in effect.  If the returned pool needs to be used
21313 @@ -830,50 +864,45 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
21314   */
21315  static void wake_up_worker(struct worker_pool *pool)
21316  {
21317 -       struct worker *worker = first_idle_worker(pool);
21318 +       struct worker *worker;
21319 +
21320 +       rt_lock_idle_list(pool);
21321 +
21322 +       worker = first_idle_worker(pool);
21323  
21324         if (likely(worker))
21325                 wake_up_process(worker->task);
21326 +
21327 +       rt_unlock_idle_list(pool);
21328  }
21329  
21330  /**
21331 - * wq_worker_waking_up - a worker is waking up
21332 + * wq_worker_running - a worker is running again
21333   * @task: task waking up
21334 - * @cpu: CPU @task is waking up to
21335   *
21336 - * This function is called during try_to_wake_up() when a worker is
21337 - * being awoken.
21338 - *
21339 - * CONTEXT:
21340 - * spin_lock_irq(rq->lock)
21341 + * This function is called when a worker returns from schedule()
21342   */
21343 -void wq_worker_waking_up(struct task_struct *task, int cpu)
21344 +void wq_worker_running(struct task_struct *task)
21345  {
21346         struct worker *worker = kthread_data(task);
21347  
21348 -       if (!(worker->flags & WORKER_NOT_RUNNING)) {
21349 -               WARN_ON_ONCE(worker->pool->cpu != cpu);
21350 +       if (!worker->sleeping)
21351 +               return;
21352 +       if (!(worker->flags & WORKER_NOT_RUNNING))
21353                 atomic_inc(&worker->pool->nr_running);
21354 -       }
21355 +       worker->sleeping = 0;
21356  }
21357  
21358  /**
21359   * wq_worker_sleeping - a worker is going to sleep
21360   * @task: task going to sleep
21361   *
21362 - * This function is called during schedule() when a busy worker is
21363 - * going to sleep.  Worker on the same cpu can be woken up by
21364 - * returning pointer to its task.
21365 - *
21366 - * CONTEXT:
21367 - * spin_lock_irq(rq->lock)
21368 - *
21369 - * Return:
21370 - * Worker task on @cpu to wake up, %NULL if none.
21371 + * This function is called from schedule() when a busy worker is
21372 + * going to sleep.
21373   */
21374 -struct task_struct *wq_worker_sleeping(struct task_struct *task)
21375 +void wq_worker_sleeping(struct task_struct *task)
21376  {
21377 -       struct worker *worker = kthread_data(task), *to_wakeup = NULL;
21378 +       struct worker *worker = kthread_data(task);
21379         struct worker_pool *pool;
21380  
21381         /*
21382 @@ -882,29 +911,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
21383          * checking NOT_RUNNING.
21384          */
21385         if (worker->flags & WORKER_NOT_RUNNING)
21386 -               return NULL;
21387 +               return;
21388  
21389         pool = worker->pool;
21390  
21391 -       /* this can only happen on the local cpu */
21392 -       if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
21393 -               return NULL;
21394 +       if (WARN_ON_ONCE(worker->sleeping))
21395 +               return;
21396 +
21397 +       worker->sleeping = 1;
21398  
21399         /*
21400          * The counterpart of the following dec_and_test, implied mb,
21401          * worklist not empty test sequence is in insert_work().
21402          * Please read comment there.
21403 -        *
21404 -        * NOT_RUNNING is clear.  This means that we're bound to and
21405 -        * running on the local cpu w/ rq lock held and preemption
21406 -        * disabled, which in turn means that none else could be
21407 -        * manipulating idle_list, so dereferencing idle_list without pool
21408 -        * lock is safe.
21409          */
21410         if (atomic_dec_and_test(&pool->nr_running) &&
21411 -           !list_empty(&pool->worklist))
21412 -               to_wakeup = first_idle_worker(pool);
21413 -       return to_wakeup ? to_wakeup->task : NULL;
21414 +           !list_empty(&pool->worklist)) {
21415 +               sched_lock_idle_list(pool);
21416 +               wake_up_worker(pool);
21417 +               sched_unlock_idle_list(pool);
21418 +       }
21419  }
21420  
21421  /**
21422 @@ -1098,12 +1124,14 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
21423  {
21424         if (pwq) {
21425                 /*
21426 -                * As both pwqs and pools are sched-RCU protected, the
21427 +                * As both pwqs and pools are RCU protected, the
21428                  * following lock operations are safe.
21429                  */
21430 -               spin_lock_irq(&pwq->pool->lock);
21431 +               rcu_read_lock();
21432 +               local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
21433                 put_pwq(pwq);
21434 -               spin_unlock_irq(&pwq->pool->lock);
21435 +               local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
21436 +               rcu_read_unlock();
21437         }
21438  }
21439  
21440 @@ -1207,7 +1235,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
21441         struct worker_pool *pool;
21442         struct pool_workqueue *pwq;
21443  
21444 -       local_irq_save(*flags);
21445 +       local_lock_irqsave(pendingb_lock, *flags);
21446  
21447         /* try to steal the timer if it exists */
21448         if (is_dwork) {
21449 @@ -1226,6 +1254,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
21450         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
21451                 return 0;
21452  
21453 +       rcu_read_lock();
21454         /*
21455          * The queueing is in progress, or it is already queued. Try to
21456          * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
21457 @@ -1264,14 +1293,16 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
21458                 set_work_pool_and_keep_pending(work, pool->id);
21459  
21460                 spin_unlock(&pool->lock);
21461 +               rcu_read_unlock();
21462                 return 1;
21463         }
21464         spin_unlock(&pool->lock);
21465  fail:
21466 -       local_irq_restore(*flags);
21467 +       rcu_read_unlock();
21468 +       local_unlock_irqrestore(pendingb_lock, *flags);
21469         if (work_is_canceling(work))
21470                 return -ENOENT;
21471 -       cpu_relax();
21472 +       cpu_chill();
21473         return -EAGAIN;
21474  }
21475  
21476 @@ -1373,7 +1404,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
21477          * queued or lose PENDING.  Grabbing PENDING and queueing should
21478          * happen with IRQ disabled.
21479          */
21480 -       WARN_ON_ONCE(!irqs_disabled());
21481 +       WARN_ON_ONCE_NONRT(!irqs_disabled());
21482  
21483         debug_work_activate(work);
21484  
21485 @@ -1381,6 +1412,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
21486         if (unlikely(wq->flags & __WQ_DRAINING) &&
21487             WARN_ON_ONCE(!is_chained_work(wq)))
21488                 return;
21489 +       rcu_read_lock();
21490  retry:
21491         if (req_cpu == WORK_CPU_UNBOUND)
21492                 cpu = wq_select_unbound_cpu(raw_smp_processor_id());
21493 @@ -1437,10 +1469,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
21494         /* pwq determined, queue */
21495         trace_workqueue_queue_work(req_cpu, pwq, work);
21496  
21497 -       if (WARN_ON(!list_empty(&work->entry))) {
21498 -               spin_unlock(&pwq->pool->lock);
21499 -               return;
21500 -       }
21501 +       if (WARN_ON(!list_empty(&work->entry)))
21502 +               goto out;
21503  
21504         pwq->nr_in_flight[pwq->work_color]++;
21505         work_flags = work_color_to_flags(pwq->work_color);
21506 @@ -1458,7 +1488,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
21507  
21508         insert_work(pwq, work, worklist, work_flags);
21509  
21510 +out:
21511         spin_unlock(&pwq->pool->lock);
21512 +       rcu_read_unlock();
21513  }
21514  
21515  /**
21516 @@ -1478,14 +1510,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
21517         bool ret = false;
21518         unsigned long flags;
21519  
21520 -       local_irq_save(flags);
21521 +       local_lock_irqsave(pendingb_lock,flags);
21522  
21523         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
21524                 __queue_work(cpu, wq, work);
21525                 ret = true;
21526         }
21527  
21528 -       local_irq_restore(flags);
21529 +       local_unlock_irqrestore(pendingb_lock, flags);
21530         return ret;
21531  }
21532  EXPORT_SYMBOL(queue_work_on);
21533 @@ -1552,14 +1584,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
21534         unsigned long flags;
21535  
21536         /* read the comment in __queue_work() */
21537 -       local_irq_save(flags);
21538 +       local_lock_irqsave(pendingb_lock, flags);
21539  
21540         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
21541                 __queue_delayed_work(cpu, wq, dwork, delay);
21542                 ret = true;
21543         }
21544  
21545 -       local_irq_restore(flags);
21546 +       local_unlock_irqrestore(pendingb_lock, flags);
21547         return ret;
21548  }
21549  EXPORT_SYMBOL(queue_delayed_work_on);
21550 @@ -1594,7 +1626,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
21551  
21552         if (likely(ret >= 0)) {
21553                 __queue_delayed_work(cpu, wq, dwork, delay);
21554 -               local_irq_restore(flags);
21555 +               local_unlock_irqrestore(pendingb_lock, flags);
21556         }
21557  
21558         /* -ENOENT from try_to_grab_pending() becomes %true */
21559 @@ -1627,7 +1659,9 @@ static void worker_enter_idle(struct worker *worker)
21560         worker->last_active = jiffies;
21561  
21562         /* idle_list is LIFO */
21563 +       rt_lock_idle_list(pool);
21564         list_add(&worker->entry, &pool->idle_list);
21565 +       rt_unlock_idle_list(pool);
21566  
21567         if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
21568                 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
21569 @@ -1660,7 +1694,9 @@ static void worker_leave_idle(struct worker *worker)
21570                 return;
21571         worker_clr_flags(worker, WORKER_IDLE);
21572         pool->nr_idle--;
21573 +       rt_lock_idle_list(pool);
21574         list_del_init(&worker->entry);
21575 +       rt_unlock_idle_list(pool);
21576  }
21577  
21578  static struct worker *alloc_worker(int node)
21579 @@ -1826,7 +1862,9 @@ static void destroy_worker(struct worker *worker)
21580         pool->nr_workers--;
21581         pool->nr_idle--;
21582  
21583 +       rt_lock_idle_list(pool);
21584         list_del_init(&worker->entry);
21585 +       rt_unlock_idle_list(pool);
21586         worker->flags |= WORKER_DIE;
21587         wake_up_process(worker->task);
21588  }
21589 @@ -2785,14 +2823,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
21590  
21591         might_sleep();
21592  
21593 -       local_irq_disable();
21594 +       rcu_read_lock();
21595         pool = get_work_pool(work);
21596         if (!pool) {
21597 -               local_irq_enable();
21598 +               rcu_read_unlock();
21599                 return false;
21600         }
21601  
21602 -       spin_lock(&pool->lock);
21603 +       spin_lock_irq(&pool->lock);
21604         /* see the comment in try_to_grab_pending() with the same code */
21605         pwq = get_work_pwq(work);
21606         if (pwq) {
21607 @@ -2821,10 +2859,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
21608         else
21609                 lock_map_acquire_read(&pwq->wq->lockdep_map);
21610         lock_map_release(&pwq->wq->lockdep_map);
21611 -
21612 +       rcu_read_unlock();
21613         return true;
21614  already_gone:
21615         spin_unlock_irq(&pool->lock);
21616 +       rcu_read_unlock();
21617         return false;
21618  }
21619  
21620 @@ -2911,7 +2950,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
21621  
21622         /* tell other tasks trying to grab @work to back off */
21623         mark_work_canceling(work);
21624 -       local_irq_restore(flags);
21625 +       local_unlock_irqrestore(pendingb_lock, flags);
21626  
21627         flush_work(work);
21628         clear_work_data(work);
21629 @@ -2966,10 +3005,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
21630   */
21631  bool flush_delayed_work(struct delayed_work *dwork)
21632  {
21633 -       local_irq_disable();
21634 +       local_lock_irq(pendingb_lock);
21635         if (del_timer_sync(&dwork->timer))
21636                 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
21637 -       local_irq_enable();
21638 +       local_unlock_irq(pendingb_lock);
21639         return flush_work(&dwork->work);
21640  }
21641  EXPORT_SYMBOL(flush_delayed_work);
21642 @@ -2987,7 +3026,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
21643                 return false;
21644  
21645         set_work_pool_and_clear_pending(work, get_work_pool_id(work));
21646 -       local_irq_restore(flags);
21647 +       local_unlock_irqrestore(pendingb_lock, flags);
21648         return ret;
21649  }
21650  
21651 @@ -3245,7 +3284,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
21652   * put_unbound_pool - put a worker_pool
21653   * @pool: worker_pool to put
21654   *
21655 - * Put @pool.  If its refcnt reaches zero, it gets destroyed in sched-RCU
21656 + * Put @pool.  If its refcnt reaches zero, it gets destroyed in RCU
21657   * safe manner.  get_unbound_pool() calls this function on its failure path
21658   * and this function should be able to release pools which went through,
21659   * successfully or not, init_worker_pool().
21660 @@ -3299,8 +3338,8 @@ static void put_unbound_pool(struct worker_pool *pool)
21661         del_timer_sync(&pool->idle_timer);
21662         del_timer_sync(&pool->mayday_timer);
21663  
21664 -       /* sched-RCU protected to allow dereferences from get_work_pool() */
21665 -       call_rcu_sched(&pool->rcu, rcu_free_pool);
21666 +       /* RCU protected to allow dereferences from get_work_pool() */
21667 +       call_rcu(&pool->rcu, rcu_free_pool);
21668  }
21669  
21670  /**
21671 @@ -3407,14 +3446,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
21672         put_unbound_pool(pool);
21673         mutex_unlock(&wq_pool_mutex);
21674  
21675 -       call_rcu_sched(&pwq->rcu, rcu_free_pwq);
21676 +       call_rcu(&pwq->rcu, rcu_free_pwq);
21677  
21678         /*
21679          * If we're the last pwq going away, @wq is already dead and no one
21680          * is gonna access it anymore.  Schedule RCU free.
21681          */
21682         if (is_last)
21683 -               call_rcu_sched(&wq->rcu, rcu_free_wq);
21684 +               call_rcu(&wq->rcu, rcu_free_wq);
21685  }
21686  
21687  /**
21688 @@ -4064,7 +4103,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
21689                  * The base ref is never dropped on per-cpu pwqs.  Directly
21690                  * schedule RCU free.
21691                  */
21692 -               call_rcu_sched(&wq->rcu, rcu_free_wq);
21693 +               call_rcu(&wq->rcu, rcu_free_wq);
21694         } else {
21695                 /*
21696                  * We're the sole accessor of @wq at this point.  Directly
21697 @@ -4157,7 +4196,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
21698         struct pool_workqueue *pwq;
21699         bool ret;
21700  
21701 -       rcu_read_lock_sched();
21702 +       rcu_read_lock();
21703 +       preempt_disable();
21704  
21705         if (cpu == WORK_CPU_UNBOUND)
21706                 cpu = smp_processor_id();
21707 @@ -4168,7 +4208,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
21708                 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
21709  
21710         ret = !list_empty(&pwq->delayed_works);
21711 -       rcu_read_unlock_sched();
21712 +       preempt_enable();
21713 +       rcu_read_unlock();
21714  
21715         return ret;
21716  }
21717 @@ -4194,15 +4235,15 @@ unsigned int work_busy(struct work_struct *work)
21718         if (work_pending(work))
21719                 ret |= WORK_BUSY_PENDING;
21720  
21721 -       local_irq_save(flags);
21722 +       rcu_read_lock();
21723         pool = get_work_pool(work);
21724         if (pool) {
21725 -               spin_lock(&pool->lock);
21726 +               spin_lock_irqsave(&pool->lock, flags);
21727                 if (find_worker_executing_work(pool, work))
21728                         ret |= WORK_BUSY_RUNNING;
21729 -               spin_unlock(&pool->lock);
21730 +               spin_unlock_irqrestore(&pool->lock, flags);
21731         }
21732 -       local_irq_restore(flags);
21733 +       rcu_read_unlock();
21734  
21735         return ret;
21736  }
21737 @@ -4391,7 +4432,7 @@ void show_workqueue_state(void)
21738         unsigned long flags;
21739         int pi;
21740  
21741 -       rcu_read_lock_sched();
21742 +       rcu_read_lock();
21743  
21744         pr_info("Showing busy workqueues and worker pools:\n");
21745  
21746 @@ -4444,7 +4485,7 @@ void show_workqueue_state(void)
21747                 spin_unlock_irqrestore(&pool->lock, flags);
21748         }
21749  
21750 -       rcu_read_unlock_sched();
21751 +       rcu_read_unlock();
21752  }
21753  
21754  /*
21755 @@ -4782,16 +4823,16 @@ bool freeze_workqueues_busy(void)
21756                  * nr_active is monotonically decreasing.  It's safe
21757                  * to peek without lock.
21758                  */
21759 -               rcu_read_lock_sched();
21760 +               rcu_read_lock();
21761                 for_each_pwq(pwq, wq) {
21762                         WARN_ON_ONCE(pwq->nr_active < 0);
21763                         if (pwq->nr_active) {
21764                                 busy = true;
21765 -                               rcu_read_unlock_sched();
21766 +                               rcu_read_unlock();
21767                                 goto out_unlock;
21768                         }
21769                 }
21770 -               rcu_read_unlock_sched();
21771 +               rcu_read_unlock();
21772         }
21773  out_unlock:
21774         mutex_unlock(&wq_pool_mutex);
21775 @@ -4981,7 +5022,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
21776         const char *delim = "";
21777         int node, written = 0;
21778  
21779 -       rcu_read_lock_sched();
21780 +       get_online_cpus();
21781 +       rcu_read_lock();
21782         for_each_node(node) {
21783                 written += scnprintf(buf + written, PAGE_SIZE - written,
21784                                      "%s%d:%d", delim, node,
21785 @@ -4989,7 +5031,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
21786                 delim = " ";
21787         }
21788         written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
21789 -       rcu_read_unlock_sched();
21790 +       rcu_read_unlock();
21791 +       put_online_cpus();
21792  
21793         return written;
21794  }
21795 diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
21796 index 8635417c587b..f000c4d6917e 100644
21797 --- a/kernel/workqueue_internal.h
21798 +++ b/kernel/workqueue_internal.h
21799 @@ -43,6 +43,7 @@ struct worker {
21800         unsigned long           last_active;    /* L: last active timestamp */
21801         unsigned int            flags;          /* X: flags */
21802         int                     id;             /* I: worker id */
21803 +       int                     sleeping;       /* None */
21804  
21805         /*
21806          * Opaque string set with work_set_desc().  Printed out with task
21807 @@ -68,7 +69,7 @@ static inline struct worker *current_wq_worker(void)
21808   * Scheduler hooks for concurrency managed workqueue.  Only to be used from
21809   * sched/core.c and workqueue.c.
21810   */
21811 -void wq_worker_waking_up(struct task_struct *task, int cpu);
21812 -struct task_struct *wq_worker_sleeping(struct task_struct *task);
21813 +void wq_worker_running(struct task_struct *task);
21814 +void wq_worker_sleeping(struct task_struct *task);
21815  
21816  #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
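
The renamed hooks above reverse the direction of the wakeup handshake: instead of try_to_wake_up() calling back into the workqueue code under the runqueue lock (wq_worker_waking_up()), the worker task itself notifies its pool around schedule(). The sketch below approximates how the scheduler side pairs with the new hooks; the helper names are an approximation of the kernel/sched/core.c hunks elsewhere in this patch, so verify against that part:

        /* Before blocking: a busy worker lets its pool wake another worker. */
        static inline void sched_submit_work(struct task_struct *tsk)
        {
                if (tsk->flags & PF_WQ_WORKER)
                        wq_worker_sleeping(tsk);
                /* ... plus the existing plugged-I/O flush ... */
        }

        /* After returning from __schedule(): the worker is running again. */
        static inline void sched_update_worker(struct task_struct *tsk)
        {
                if (tsk->flags & PF_WQ_WORKER)
                        wq_worker_running(tsk);
        }
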
21817 diff --git a/lib/Kconfig b/lib/Kconfig
21818 index 260a80e313b9..b06becb3f477 100644
21819 --- a/lib/Kconfig
21820 +++ b/lib/Kconfig
21821 @@ -400,6 +400,7 @@ config CHECK_SIGNATURE
21822  
21823  config CPUMASK_OFFSTACK
21824         bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
21825 +       depends on !PREEMPT_RT_FULL
21826         help
21827           Use dynamic allocation for cpumask_var_t, instead of putting
21828           them on the stack.  This is a bit more expensive, but avoids
21829 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
21830 index 056052dc8e91..d8494e126de8 100644
21831 --- a/lib/debugobjects.c
21832 +++ b/lib/debugobjects.c
21833 @@ -308,7 +308,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
21834         struct debug_obj *obj;
21835         unsigned long flags;
21836  
21837 -       fill_pool();
21838 +#ifdef CONFIG_PREEMPT_RT_FULL
21839 +       if (preempt_count() == 0 && !irqs_disabled())
21840 +#endif
21841 +               fill_pool();
21842  
21843         db = get_bucket((unsigned long) addr);
21844  
21845 diff --git a/lib/idr.c b/lib/idr.c
21846 index 6098336df267..9decbe914595 100644
21847 --- a/lib/idr.c
21848 +++ b/lib/idr.c
21849 @@ -30,6 +30,7 @@
21850  #include <linux/idr.h>
21851  #include <linux/spinlock.h>
21852  #include <linux/percpu.h>
21853 +#include <linux/locallock.h>
21854  
21855  #define MAX_IDR_SHIFT          (sizeof(int) * 8 - 1)
21856  #define MAX_IDR_BIT            (1U << MAX_IDR_SHIFT)
21857 @@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
21858  static DEFINE_PER_CPU(int, idr_preload_cnt);
21859  static DEFINE_SPINLOCK(simple_ida_lock);
21860  
21861 +#ifdef CONFIG_PREEMPT_RT_FULL
21862 +static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
21863 +
21864 +static inline void idr_preload_lock(void)
21865 +{
21866 +       local_lock(idr_lock);
21867 +}
21868 +
21869 +static inline void idr_preload_unlock(void)
21870 +{
21871 +       local_unlock(idr_lock);
21872 +}
21873 +
21874 +void idr_preload_end(void)
21875 +{
21876 +       idr_preload_unlock();
21877 +}
21878 +EXPORT_SYMBOL(idr_preload_end);
21879 +#else
21880 +static inline void idr_preload_lock(void)
21881 +{
21882 +       preempt_disable();
21883 +}
21884 +
21885 +static inline void idr_preload_unlock(void)
21886 +{
21887 +       preempt_enable();
21888 +}
21889 +#endif
21890 +
21891 +
21892  /* the maximum ID which can be allocated given idr->layers */
21893  static int idr_max(int layers)
21894  {
21895 @@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
21896          * context.  See idr_preload() for details.
21897          */
21898         if (!in_interrupt()) {
21899 -               preempt_disable();
21900 +               idr_preload_lock();
21901                 new = __this_cpu_read(idr_preload_head);
21902                 if (new) {
21903                         __this_cpu_write(idr_preload_head, new->ary[0]);
21904                         __this_cpu_dec(idr_preload_cnt);
21905                         new->ary[0] = NULL;
21906                 }
21907 -               preempt_enable();
21908 +               idr_preload_unlock();
21909                 if (new)
21910                         return new;
21911         }
21912 @@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
21913         idr_mark_full(pa, id);
21914  }
21915  
21916 -
21917  /**
21918   * idr_preload - preload for idr_alloc()
21919   * @gfp_mask: allocation mask to use for preloading
21920 @@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask)
21921         WARN_ON_ONCE(in_interrupt());
21922         might_sleep_if(gfpflags_allow_blocking(gfp_mask));
21923  
21924 -       preempt_disable();
21925 +       idr_preload_lock();
21926  
21927         /*
21928          * idr_alloc() is likely to succeed w/o full idr_layer buffer and
21929 @@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask)
21930         while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
21931                 struct idr_layer *new;
21932  
21933 -               preempt_enable();
21934 +               idr_preload_unlock();
21935                 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
21936 -               preempt_disable();
21937 +               idr_preload_lock();
21938                 if (!new)
21939                         break;
21940  
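
The idr changes above keep the preload cache usable on RT: the preempt_disable()/preempt_enable() bracket around the per-CPU idr_preload_head becomes a local lock, and idr_preload_end() turns into a real exported function that drops it. The caller-visible pattern is unchanged; a typical use of the standard preload API looks like the following, where my_lock, my_idr and ptr are placeholders:

        int id;

        idr_preload(GFP_KERNEL);        /* may sleep, fills the per-CPU cache */
        spin_lock(&my_lock);
        /* GFP_NOWAIT: served from the preloaded per-CPU layers */
        id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
        spin_unlock(&my_lock);
        idr_preload_end();              /* releases the local lock (or preemption) */

        if (id < 0)
                return id;              /* -ENOMEM or -ENOSPC */
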
21941 diff --git a/lib/irq_poll.c b/lib/irq_poll.c
21942 index 1d6565e81030..b23a79761df7 100644
21943 --- a/lib/irq_poll.c
21944 +++ b/lib/irq_poll.c
21945 @@ -36,6 +36,7 @@ void irq_poll_sched(struct irq_poll *iop)
21946         list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
21947         __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
21948         local_irq_restore(flags);
21949 +       preempt_check_resched_rt();
21950  }
21951  EXPORT_SYMBOL(irq_poll_sched);
21952  
21953 @@ -71,6 +72,7 @@ void irq_poll_complete(struct irq_poll *iop)
21954         local_irq_save(flags);
21955         __irq_poll_complete(iop);
21956         local_irq_restore(flags);
21957 +       preempt_check_resched_rt();
21958  }
21959  EXPORT_SYMBOL(irq_poll_complete);
21960  
21961 @@ -95,6 +97,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
21962                 }
21963  
21964                 local_irq_enable();
21965 +               preempt_check_resched_rt();
21966  
21967                 /* Even though interrupts have been re-enabled, this
21968                  * access is safe because interrupts can only add new
21969 @@ -132,6 +135,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
21970                 __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
21971  
21972         local_irq_enable();
21973 +       preempt_check_resched_rt();
21974  }
21975  
21976  /**
21977 @@ -195,6 +199,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
21978                          this_cpu_ptr(&blk_cpu_iopoll));
21979         __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
21980         local_irq_enable();
21981 +       preempt_check_resched_rt();
21982  
21983         return 0;
21984  }
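
preempt_check_resched_rt() is added after each point where irq_poll re-enables interrupts having raised IRQ_POLL_SOFTIRQ. On RT, softirqs run in thread context, so raising one may set NEED_RESCHED for the softirq thread; the explicit check lets it run as soon as interrupts are back on. On a non-RT kernel the helper is expected to be a no-op, roughly as below (the authoritative definition is in the include/linux/preempt.h hunk of this patch):

        #ifdef CONFIG_PREEMPT_RT_FULL
        # define preempt_check_resched_rt()     preempt_check_resched()
        #else
        # define preempt_check_resched_rt()     barrier()
        #endif
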
21985 diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
21986 index f3a217ea0388..4611b156ef79 100644
21987 --- a/lib/locking-selftest.c
21988 +++ b/lib/locking-selftest.c
21989 @@ -590,6 +590,8 @@ GENERATE_TESTCASE(init_held_rsem)
21990  #include "locking-selftest-spin-hardirq.h"
21991  GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
21992  
21993 +#ifndef CONFIG_PREEMPT_RT_FULL
21994 +
21995  #include "locking-selftest-rlock-hardirq.h"
21996  GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
21997  
21998 @@ -605,9 +607,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
21999  #include "locking-selftest-wlock-softirq.h"
22000  GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
22001  
22002 +#endif
22003 +
22004  #undef E1
22005  #undef E2
22006  
22007 +#ifndef CONFIG_PREEMPT_RT_FULL
22008  /*
22009   * Enabling hardirqs with a softirq-safe lock held:
22010   */
22011 @@ -640,6 +645,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
22012  #undef E1
22013  #undef E2
22014  
22015 +#endif
22016 +
22017  /*
22018   * Enabling irqs with an irq-safe lock held:
22019   */
22020 @@ -663,6 +670,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
22021  #include "locking-selftest-spin-hardirq.h"
22022  GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
22023  
22024 +#ifndef CONFIG_PREEMPT_RT_FULL
22025 +
22026  #include "locking-selftest-rlock-hardirq.h"
22027  GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
22028  
22029 @@ -678,6 +687,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
22030  #include "locking-selftest-wlock-softirq.h"
22031  GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
22032  
22033 +#endif
22034 +
22035  #undef E1
22036  #undef E2
22037  
22038 @@ -709,6 +720,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
22039  #include "locking-selftest-spin-hardirq.h"
22040  GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
22041  
22042 +#ifndef CONFIG_PREEMPT_RT_FULL
22043 +
22044  #include "locking-selftest-rlock-hardirq.h"
22045  GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
22046  
22047 @@ -724,6 +737,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
22048  #include "locking-selftest-wlock-softirq.h"
22049  GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
22050  
22051 +#endif
22052 +
22053  #undef E1
22054  #undef E2
22055  #undef E3
22056 @@ -757,6 +772,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
22057  #include "locking-selftest-spin-hardirq.h"
22058  GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
22059  
22060 +#ifndef CONFIG_PREEMPT_RT_FULL
22061 +
22062  #include "locking-selftest-rlock-hardirq.h"
22063  GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
22064  
22065 @@ -772,10 +789,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
22066  #include "locking-selftest-wlock-softirq.h"
22067  GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
22068  
22069 +#endif
22070 +
22071  #undef E1
22072  #undef E2
22073  #undef E3
22074  
22075 +#ifndef CONFIG_PREEMPT_RT_FULL
22076 +
22077  /*
22078   * read-lock / write-lock irq inversion.
22079   *
22080 @@ -838,6 +859,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
22081  #undef E2
22082  #undef E3
22083  
22084 +#endif
22085 +
22086 +#ifndef CONFIG_PREEMPT_RT_FULL
22087 +
22088  /*
22089   * read-lock / write-lock recursion that is actually safe.
22090   */
22091 @@ -876,6 +901,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
22092  #undef E2
22093  #undef E3
22094  
22095 +#endif
22096 +
22097  /*
22098   * read-lock / write-lock recursion that is unsafe.
22099   */
22100 @@ -1858,6 +1885,7 @@ void locking_selftest(void)
22101  
22102         printk("  --------------------------------------------------------------------------\n");
22103  
22104 +#ifndef CONFIG_PREEMPT_RT_FULL
22105         /*
22106          * irq-context testcases:
22107          */
22108 @@ -1870,6 +1898,28 @@ void locking_selftest(void)
22109  
22110         DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
22111  //     DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
22112 +#else
22113 +       /* On -rt, we only do hardirq context test for raw spinlock */
22114 +       DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
22115 +       DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
22116 +
22117 +       DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
22118 +       DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
22119 +
22120 +       DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
22121 +       DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
22122 +       DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
22123 +       DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
22124 +       DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
22125 +       DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
22126 +
22127 +       DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
22128 +       DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
22129 +       DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
22130 +       DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
22131 +       DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
22132 +       DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
22133 +#endif
22134  
22135         ww_tests();
22136  
22137 diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
22138 index 6d40944960de..822a2c027e72 100644
22139 --- a/lib/percpu_ida.c
22140 +++ b/lib/percpu_ida.c
22141 @@ -26,6 +26,9 @@
22142  #include <linux/string.h>
22143  #include <linux/spinlock.h>
22144  #include <linux/percpu_ida.h>
22145 +#include <linux/locallock.h>
22146 +
22147 +static DEFINE_LOCAL_IRQ_LOCK(irq_off_lock);
22148  
22149  struct percpu_ida_cpu {
22150         /*
22151 @@ -148,13 +151,13 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
22152         unsigned long flags;
22153         int tag;
22154  
22155 -       local_irq_save(flags);
22156 +       local_lock_irqsave(irq_off_lock, flags);
22157         tags = this_cpu_ptr(pool->tag_cpu);
22158  
22159         /* Fastpath */
22160         tag = alloc_local_tag(tags);
22161         if (likely(tag >= 0)) {
22162 -               local_irq_restore(flags);
22163 +               local_unlock_irqrestore(irq_off_lock, flags);
22164                 return tag;
22165         }
22166  
22167 @@ -173,6 +176,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
22168  
22169                 if (!tags->nr_free)
22170                         alloc_global_tags(pool, tags);
22171 +
22172                 if (!tags->nr_free)
22173                         steal_tags(pool, tags);
22174  
22175 @@ -184,7 +188,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
22176                 }
22177  
22178                 spin_unlock(&pool->lock);
22179 -               local_irq_restore(flags);
22180 +               local_unlock_irqrestore(irq_off_lock, flags);
22181  
22182                 if (tag >= 0 || state == TASK_RUNNING)
22183                         break;
22184 @@ -196,7 +200,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
22185  
22186                 schedule();
22187  
22188 -               local_irq_save(flags);
22189 +               local_lock_irqsave(irq_off_lock, flags);
22190                 tags = this_cpu_ptr(pool->tag_cpu);
22191         }
22192         if (state != TASK_RUNNING)
22193 @@ -221,7 +225,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
22194  
22195         BUG_ON(tag >= pool->nr_tags);
22196  
22197 -       local_irq_save(flags);
22198 +       local_lock_irqsave(irq_off_lock, flags);
22199         tags = this_cpu_ptr(pool->tag_cpu);
22200  
22201         spin_lock(&tags->lock);
22202 @@ -253,7 +257,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
22203                 spin_unlock(&pool->lock);
22204         }
22205  
22206 -       local_irq_restore(flags);
22207 +       local_unlock_irqrestore(irq_off_lock, flags);
22208  }
22209  EXPORT_SYMBOL_GPL(percpu_ida_free);
22210  
22211 @@ -345,7 +349,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
22212         struct percpu_ida_cpu *remote;
22213         unsigned cpu, i, err = 0;
22214  
22215 -       local_irq_save(flags);
22216 +       local_lock_irqsave(irq_off_lock, flags);
22217         for_each_possible_cpu(cpu) {
22218                 remote = per_cpu_ptr(pool->tag_cpu, cpu);
22219                 spin_lock(&remote->lock);
22220 @@ -367,7 +371,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
22221         }
22222         spin_unlock(&pool->lock);
22223  out:
22224 -       local_irq_restore(flags);
22225 +       local_unlock_irqrestore(irq_off_lock, flags);
22226         return err;
22227  }
22228  EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
22229 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
22230 index 8e6d552c40dd..741da5a77fd5 100644
22231 --- a/lib/radix-tree.c
22232 +++ b/lib/radix-tree.c
22233 @@ -36,7 +36,7 @@
22234  #include <linux/bitops.h>
22235  #include <linux/rcupdate.h>
22236  #include <linux/preempt.h>             /* in_interrupt() */
22237 -
22238 +#include <linux/locallock.h>
22239  
22240  /* Number of nodes in fully populated tree of given height */
22241  static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
22242 @@ -68,6 +68,7 @@ struct radix_tree_preload {
22243         struct radix_tree_node *nodes;
22244  };
22245  static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
22246 +static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);
22247  
22248  static inline void *node_to_entry(void *ptr)
22249  {
22250 @@ -290,13 +291,14 @@ radix_tree_node_alloc(struct radix_tree_root *root)
22251                  * succeed in getting a node here (and never reach
22252                  * kmem_cache_alloc)
22253                  */
22254 -               rtp = this_cpu_ptr(&radix_tree_preloads);
22255 +               rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
22256                 if (rtp->nr) {
22257                         ret = rtp->nodes;
22258                         rtp->nodes = ret->private_data;
22259                         ret->private_data = NULL;
22260                         rtp->nr--;
22261                 }
22262 +               put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
22263                 /*
22264                  * Update the allocation stack trace as this is more useful
22265                  * for debugging.
22266 @@ -357,14 +359,14 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr)
22267          */
22268         gfp_mask &= ~__GFP_ACCOUNT;
22269  
22270 -       preempt_disable();
22271 +       local_lock(radix_tree_preloads_lock);
22272         rtp = this_cpu_ptr(&radix_tree_preloads);
22273         while (rtp->nr < nr) {
22274 -               preempt_enable();
22275 +               local_unlock(radix_tree_preloads_lock);
22276                 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
22277                 if (node == NULL)
22278                         goto out;
22279 -               preempt_disable();
22280 +               local_lock(radix_tree_preloads_lock);
22281                 rtp = this_cpu_ptr(&radix_tree_preloads);
22282                 if (rtp->nr < nr) {
22283                         node->private_data = rtp->nodes;
22284 @@ -406,7 +408,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
22285         if (gfpflags_allow_blocking(gfp_mask))
22286                 return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
22287         /* Preloading doesn't help anything with this gfp mask, skip it */
22288 -       preempt_disable();
22289 +       local_lock(radix_tree_preloads_lock);
22290         return 0;
22291  }
22292  EXPORT_SYMBOL(radix_tree_maybe_preload);
22293 @@ -422,7 +424,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
22294  
22295         /* Preloading doesn't help anything with this gfp mask, skip it */
22296         if (!gfpflags_allow_blocking(gfp_mask)) {
22297 -               preempt_disable();
22298 +               local_lock(radix_tree_preloads_lock);
22299                 return 0;
22300         }
22301  
22302 @@ -456,6 +458,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
22303         return __radix_tree_preload(gfp_mask, nr_nodes);
22304  }
22305  
22306 +void radix_tree_preload_end(void)
22307 +{
22308 +       local_unlock(radix_tree_preloads_lock);
22309 +}
22310 +EXPORT_SYMBOL(radix_tree_preload_end);
22311 +
22312  /*
22313   * The maximum index which can be stored in a radix tree
22314   */
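
As with lib/idr.c, the radix-tree preload path trades preempt_disable()/preempt_enable() for the local lock radix_tree_preloads_lock, and radix_tree_preload_end() becomes an exported function that releases it. The calling convention stays the standard one; for reference (my_tree, my_tree_lock, index and item are placeholders):

        if (radix_tree_preload(GFP_KERNEL) == 0) {      /* may sleep */
                spin_lock(&my_tree_lock);
                radix_tree_insert(&my_tree, index, item);
                spin_unlock(&my_tree_lock);
                radix_tree_preload_end();       /* drops radix_tree_preloads_lock */
        }
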
22315 diff --git a/lib/scatterlist.c b/lib/scatterlist.c
22316 index 004fc70fc56a..ccc46992a517 100644
22317 --- a/lib/scatterlist.c
22318 +++ b/lib/scatterlist.c
22319 @@ -620,7 +620,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
22320                         flush_kernel_dcache_page(miter->page);
22321  
22322                 if (miter->__flags & SG_MITER_ATOMIC) {
22323 -                       WARN_ON_ONCE(preemptible());
22324 +                       WARN_ON_ONCE(!pagefault_disabled());
22325                         kunmap_atomic(miter->addr);
22326                 } else
22327                         kunmap(miter->page);
22328 @@ -664,7 +664,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
22329         if (!sg_miter_skip(&miter, skip))
22330                 return false;
22331  
22332 -       local_irq_save(flags);
22333 +       local_irq_save_nort(flags);
22334  
22335         while (sg_miter_next(&miter) && offset < buflen) {
22336                 unsigned int len;
22337 @@ -681,7 +681,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
22338  
22339         sg_miter_stop(&miter);
22340  
22341 -       local_irq_restore(flags);
22342 +       local_irq_restore_nort(flags);
22343         return offset;
22344  }
22345  EXPORT_SYMBOL(sg_copy_buffer);
22346 diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
22347 index 1afec32de6f2..11fa431046a8 100644
22348 --- a/lib/smp_processor_id.c
22349 +++ b/lib/smp_processor_id.c
22350 @@ -39,8 +39,9 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
22351         if (!printk_ratelimit())
22352                 goto out_enable;
22353  
22354 -       printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
22355 -               what1, what2, preempt_count() - 1, current->comm, current->pid);
22356 +       printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n",
22357 +               what1, what2, preempt_count() - 1, __migrate_disabled(current),
22358 +               current->comm, current->pid);
22359  
22360         print_symbol("caller is %s\n", (long)__builtin_return_address(0));
22361         dump_stack();
22362 diff --git a/localversion-rt b/localversion-rt
22363 new file mode 100644
22364 index 000000000000..6e44e540b927
22365 --- /dev/null
22366 +++ b/localversion-rt
22367 @@ -0,0 +1 @@
22368 +-rt12
22369 diff --git a/mm/Kconfig b/mm/Kconfig
22370 index 86e3e0e74d20..77e5862a1ed2 100644
22371 --- a/mm/Kconfig
22372 +++ b/mm/Kconfig
22373 @@ -410,7 +410,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
22374  
22375  config TRANSPARENT_HUGEPAGE
22376         bool "Transparent Hugepage Support"
22377 -       depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
22378 +       depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
22379         select COMPACTION
22380         select RADIX_TREE_MULTIORDER
22381         help
22382 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
22383 index 6ff2d7744223..b5a91dd53b5f 100644
22384 --- a/mm/backing-dev.c
22385 +++ b/mm/backing-dev.c
22386 @@ -457,9 +457,9 @@ void wb_congested_put(struct bdi_writeback_congested *congested)
22387  {
22388         unsigned long flags;
22389  
22390 -       local_irq_save(flags);
22391 +       local_irq_save_nort(flags);
22392         if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
22393 -               local_irq_restore(flags);
22394 +               local_irq_restore_nort(flags);
22395                 return;
22396         }
22397  
22398 diff --git a/mm/compaction.c b/mm/compaction.c
22399 index 70e6bec46dc2..6678ed58b7c6 100644
22400 --- a/mm/compaction.c
22401 +++ b/mm/compaction.c
22402 @@ -1593,10 +1593,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
22403                                 block_start_pfn(cc->migrate_pfn, cc->order);
22404  
22405                         if (cc->last_migrated_pfn < current_block_start) {
22406 -                               cpu = get_cpu();
22407 +                               cpu = get_cpu_light();
22408 +                               local_lock_irq(swapvec_lock);
22409                                 lru_add_drain_cpu(cpu);
22410 +                               local_unlock_irq(swapvec_lock);
22411                                 drain_local_pages(zone);
22412 -                               put_cpu();
22413 +                               put_cpu_light();
22414                                 /* No more flushing until we migrate again */
22415                                 cc->last_migrated_pfn = 0;
22416                         }
22417 diff --git a/mm/filemap.c b/mm/filemap.c
22418 index d8d7df82c69a..0eac87a07892 100644
22419 --- a/mm/filemap.c
22420 +++ b/mm/filemap.c
22421 @@ -159,9 +159,12 @@ static int page_cache_tree_insert(struct address_space *mapping,
22422                  * node->private_list is protected by
22423                  * mapping->tree_lock.
22424                  */
22425 -               if (!list_empty(&node->private_list))
22426 -                       list_lru_del(&workingset_shadow_nodes,
22427 +               if (!list_empty(&node->private_list)) {
22428 +                       local_lock(workingset_shadow_lock);
22429 +                       list_lru_del(&__workingset_shadow_nodes,
22430                                      &node->private_list);
22431 +                       local_unlock(workingset_shadow_lock);
22432 +               }
22433         }
22434         return 0;
22435  }
22436 @@ -217,8 +220,10 @@ static void page_cache_tree_delete(struct address_space *mapping,
22437                 if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
22438                                 list_empty(&node->private_list)) {
22439                         node->private_data = mapping;
22440 -                       list_lru_add(&workingset_shadow_nodes,
22441 -                                       &node->private_list);
22442 +                       local_lock(workingset_shadow_lock);
22443 +                       list_lru_add(&__workingset_shadow_nodes,
22444 +                                    &node->private_list);
22445 +                       local_unlock(workingset_shadow_lock);
22446                 }
22447         }
22448  
22449 diff --git a/mm/highmem.c b/mm/highmem.c
22450 index 50b4ca6787f0..77518a3b35a1 100644
22451 --- a/mm/highmem.c
22452 +++ b/mm/highmem.c
22453 @@ -29,10 +29,11 @@
22454  #include <linux/kgdb.h>
22455  #include <asm/tlbflush.h>
22456  
22457 -
22458 +#ifndef CONFIG_PREEMPT_RT_FULL
22459  #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
22460  DEFINE_PER_CPU(int, __kmap_atomic_idx);
22461  #endif
22462 +#endif
22463  
22464  /*
22465   * Virtual_count is not a pure "count".
22466 @@ -107,8 +108,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
22467  unsigned long totalhigh_pages __read_mostly;
22468  EXPORT_SYMBOL(totalhigh_pages);
22469  
22470 -
22471 +#ifndef CONFIG_PREEMPT_RT_FULL
22472  EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
22473 +#endif
22474  
22475  unsigned int nr_free_highpages (void)
22476  {
22477 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
22478 index 4c6ade54d833..ba29283aa43d 100644
22479 --- a/mm/memcontrol.c
22480 +++ b/mm/memcontrol.c
22481 @@ -67,6 +67,7 @@
22482  #include <net/sock.h>
22483  #include <net/ip.h>
22484  #include "slab.h"
22485 +#include <linux/locallock.h>
22486  
22487  #include <asm/uaccess.h>
22488  
22489 @@ -92,6 +93,8 @@ int do_swap_account __read_mostly;
22490  #define do_swap_account                0
22491  #endif
22492  
22493 +static DEFINE_LOCAL_IRQ_LOCK(event_lock);
22494 +
22495  /* Whether legacy memory+swap accounting is active */
22496  static bool do_memsw_account(void)
22497  {
22498 @@ -1692,6 +1695,7 @@ struct memcg_stock_pcp {
22499  #define FLUSHING_CACHED_CHARGE 0
22500  };
22501  static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
22502 +static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll);
22503  static DEFINE_MUTEX(percpu_charge_mutex);
22504  
22505  /**
22506 @@ -1714,7 +1718,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
22507         if (nr_pages > CHARGE_BATCH)
22508                 return ret;
22509  
22510 -       local_irq_save(flags);
22511 +       local_lock_irqsave(memcg_stock_ll, flags);
22512  
22513         stock = this_cpu_ptr(&memcg_stock);
22514         if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
22515 @@ -1722,7 +1726,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
22516                 ret = true;
22517         }
22518  
22519 -       local_irq_restore(flags);
22520 +       local_unlock_irqrestore(memcg_stock_ll, flags);
22521  
22522         return ret;
22523  }
22524 @@ -1749,13 +1753,13 @@ static void drain_local_stock(struct work_struct *dummy)
22525         struct memcg_stock_pcp *stock;
22526         unsigned long flags;
22527  
22528 -       local_irq_save(flags);
22529 +       local_lock_irqsave(memcg_stock_ll, flags);
22530  
22531         stock = this_cpu_ptr(&memcg_stock);
22532         drain_stock(stock);
22533         clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
22534  
22535 -       local_irq_restore(flags);
22536 +       local_unlock_irqrestore(memcg_stock_ll, flags);
22537  }
22538  
22539  /*
22540 @@ -1767,7 +1771,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
22541         struct memcg_stock_pcp *stock;
22542         unsigned long flags;
22543  
22544 -       local_irq_save(flags);
22545 +       local_lock_irqsave(memcg_stock_ll, flags);
22546  
22547         stock = this_cpu_ptr(&memcg_stock);
22548         if (stock->cached != memcg) { /* reset if necessary */
22549 @@ -1776,7 +1780,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
22550         }
22551         stock->nr_pages += nr_pages;
22552  
22553 -       local_irq_restore(flags);
22554 +       local_unlock_irqrestore(memcg_stock_ll, flags);
22555  }
22556  
22557  /*
22558 @@ -1792,7 +1796,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
22559                 return;
22560         /* Notify other cpus that system-wide "drain" is running */
22561         get_online_cpus();
22562 -       curcpu = get_cpu();
22563 +       curcpu = get_cpu_light();
22564         for_each_online_cpu(cpu) {
22565                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
22566                 struct mem_cgroup *memcg;
22567 @@ -1809,7 +1813,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
22568                                 schedule_work_on(cpu, &stock->work);
22569                 }
22570         }
22571 -       put_cpu();
22572 +       put_cpu_light();
22573         put_online_cpus();
22574         mutex_unlock(&percpu_charge_mutex);
22575  }
22576 @@ -4548,12 +4552,12 @@ static int mem_cgroup_move_account(struct page *page,
22577  
22578         ret = 0;
22579  
22580 -       local_irq_disable();
22581 +       local_lock_irq(event_lock);
22582         mem_cgroup_charge_statistics(to, page, compound, nr_pages);
22583         memcg_check_events(to, page);
22584         mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
22585         memcg_check_events(from, page);
22586 -       local_irq_enable();
22587 +       local_unlock_irq(event_lock);
22588  out_unlock:
22589         unlock_page(page);
22590  out:
22591 @@ -5428,10 +5432,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
22592  
22593         commit_charge(page, memcg, lrucare);
22594  
22595 -       local_irq_disable();
22596 +       local_lock_irq(event_lock);
22597         mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
22598         memcg_check_events(memcg, page);
22599 -       local_irq_enable();
22600 +       local_unlock_irq(event_lock);
22601  
22602         if (do_memsw_account() && PageSwapCache(page)) {
22603                 swp_entry_t entry = { .val = page_private(page) };
22604 @@ -5487,14 +5491,14 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
22605                 memcg_oom_recover(memcg);
22606         }
22607  
22608 -       local_irq_save(flags);
22609 +       local_lock_irqsave(event_lock, flags);
22610         __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
22611         __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
22612         __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
22613         __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
22614         __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
22615         memcg_check_events(memcg, dummy_page);
22616 -       local_irq_restore(flags);
22617 +       local_unlock_irqrestore(event_lock, flags);
22618  
22619         if (!mem_cgroup_is_root(memcg))
22620                 css_put_many(&memcg->css, nr_pages);
22621 @@ -5649,10 +5653,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
22622  
22623         commit_charge(newpage, memcg, false);
22624  
22625 -       local_irq_save(flags);
22626 +       local_lock_irqsave(event_lock, flags);
22627         mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
22628         memcg_check_events(memcg, newpage);
22629 -       local_irq_restore(flags);
22630 +       local_unlock_irqrestore(event_lock, flags);
22631  }
22632  
22633  DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
22634 @@ -5832,6 +5836,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
22635  {
22636         struct mem_cgroup *memcg, *swap_memcg;
22637         unsigned short oldid;
22638 +       unsigned long flags;
22639  
22640         VM_BUG_ON_PAGE(PageLRU(page), page);
22641         VM_BUG_ON_PAGE(page_count(page), page);
22642 @@ -5872,12 +5877,16 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
22643          * important here to have the interrupts disabled because it is the
22644          * only synchronisation we have for udpating the per-CPU variables.
22645          */
22646 +       local_lock_irqsave(event_lock, flags);
22647 +#ifndef CONFIG_PREEMPT_RT_BASE
22648         VM_BUG_ON(!irqs_disabled());
22649 +#endif
22650         mem_cgroup_charge_statistics(memcg, page, false, -1);
22651         memcg_check_events(memcg, page);
22652  
22653         if (!mem_cgroup_is_root(memcg))
22654                 css_put(&memcg->css);
22655 +       local_unlock_irqrestore(event_lock, flags);
22656  }
22657  
22658  /*
22659 diff --git a/mm/mmu_context.c b/mm/mmu_context.c
22660 index 6f4d27c5bb32..5cd25c745a8f 100644
22661 --- a/mm/mmu_context.c
22662 +++ b/mm/mmu_context.c
22663 @@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm)
22664         struct task_struct *tsk = current;
22665  
22666         task_lock(tsk);
22667 +       preempt_disable_rt();
22668         active_mm = tsk->active_mm;
22669         if (active_mm != mm) {
22670                 atomic_inc(&mm->mm_count);
22671 @@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm)
22672         }
22673         tsk->mm = mm;
22674         switch_mm(active_mm, mm, tsk);
22675 +       preempt_enable_rt();
22676         task_unlock(tsk);
22677  #ifdef finish_arch_post_lock_switch
22678         finish_arch_post_lock_switch();
22679 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
22680 index f4a02e240fb6..2e73f8cfde74 100644
22681 --- a/mm/page_alloc.c
22682 +++ b/mm/page_alloc.c
22683 @@ -61,6 +61,7 @@
22684  #include <linux/page_ext.h>
22685  #include <linux/hugetlb.h>
22686  #include <linux/sched/rt.h>
22687 +#include <linux/locallock.h>
22688  #include <linux/page_owner.h>
22689  #include <linux/kthread.h>
22690  #include <linux/memcontrol.h>
22691 @@ -281,6 +282,18 @@ EXPORT_SYMBOL(nr_node_ids);
22692  EXPORT_SYMBOL(nr_online_nodes);
22693  #endif
22694  
22695 +static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
22696 +
22697 +#ifdef CONFIG_PREEMPT_RT_BASE
22698 +# define cpu_lock_irqsave(cpu, flags)          \
22699 +       local_lock_irqsave_on(pa_lock, flags, cpu)
22700 +# define cpu_unlock_irqrestore(cpu, flags)     \
22701 +       local_unlock_irqrestore_on(pa_lock, flags, cpu)
22702 +#else
22703 +# define cpu_lock_irqsave(cpu, flags)          local_irq_save(flags)
22704 +# define cpu_unlock_irqrestore(cpu, flags)     local_irq_restore(flags)
22705 +#endif
22706 +
22707  int page_group_by_mobility_disabled __read_mostly;
22708  
22709  #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
22710 @@ -1072,7 +1085,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
22711  #endif /* CONFIG_DEBUG_VM */
22712  
22713  /*
22714 - * Frees a number of pages from the PCP lists
22715 + * Frees a number of pages which have been collected from the pcp lists.
22716   * Assumes all pages on list are in same zone, and of same order.
22717   * count is the number of pages to free.
22718   *
22719 @@ -1083,19 +1096,58 @@ static bool bulkfree_pcp_prepare(struct page *page)
22720   * pinned" detection logic.
22721   */
22722  static void free_pcppages_bulk(struct zone *zone, int count,
22723 -                                       struct per_cpu_pages *pcp)
22724 +                              struct list_head *list)
22725  {
22726 -       int migratetype = 0;
22727 -       int batch_free = 0;
22728         unsigned long nr_scanned;
22729         bool isolated_pageblocks;
22730 +       unsigned long flags;
22731 +
22732 +       spin_lock_irqsave(&zone->lock, flags);
22733  
22734 -       spin_lock(&zone->lock);
22735         isolated_pageblocks = has_isolate_pageblock(zone);
22736         nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
22737         if (nr_scanned)
22738                 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
22739  
22740 +       while (!list_empty(list)) {
22741 +               struct page *page;
22742 +               int mt; /* migratetype of the to-be-freed page */
22743 +
22744 +               page = list_first_entry(list, struct page, lru);
22745 +               /* must delete as __free_one_page list manipulates */
22746 +               list_del(&page->lru);
22747 +
22748 +               mt = get_pcppage_migratetype(page);
22749 +               /* MIGRATE_ISOLATE page should not go to pcplists */
22750 +               VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
22751 +               /* Pageblock could have been isolated meanwhile */
22752 +               if (unlikely(isolated_pageblocks))
22753 +                       mt = get_pageblock_migratetype(page);
22754 +
22755 +               if (bulkfree_pcp_prepare(page))
22756 +                       continue;
22757 +
22758 +               __free_one_page(page, page_to_pfn(page), zone, 0, mt);
22759 +               trace_mm_page_pcpu_drain(page, 0, mt);
22760 +               count--;
22761 +       }
22762 +       WARN_ON(count != 0);
22763 +       spin_unlock_irqrestore(&zone->lock, flags);
22764 +}
22765 +
22766 +/*
22767 + * Moves a number of pages from the PCP lists to free list which
22768 + * is freed outside of the locked region.
22769 + *
22770 + * Assumes all pages on list are in same zone, and of same order.
22771 + * count is the number of pages to free.
22772 + */
22773 +static void isolate_pcp_pages(int count, struct per_cpu_pages *src,
22774 +                             struct list_head *dst)
22775 +{
22776 +       int migratetype = 0;
22777 +       int batch_free = 0;
22778 +
22779         while (count) {
22780                 struct page *page;
22781                 struct list_head *list;
22782 @@ -1111,7 +1163,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
22783                         batch_free++;
22784                         if (++migratetype == MIGRATE_PCPTYPES)
22785                                 migratetype = 0;
22786 -                       list = &pcp->lists[migratetype];
22787 +                       list = &src->lists[migratetype];
22788                 } while (list_empty(list));
22789  
22790                 /* This is the only non-empty list. Free them all. */
22791 @@ -1119,27 +1171,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
22792                         batch_free = count;
22793  
22794                 do {
22795 -                       int mt; /* migratetype of the to-be-freed page */
22796 -
22797                         page = list_last_entry(list, struct page, lru);
22798 -                       /* must delete as __free_one_page list manipulates */
22799                         list_del(&page->lru);
22800  
22801 -                       mt = get_pcppage_migratetype(page);
22802 -                       /* MIGRATE_ISOLATE page should not go to pcplists */
22803 -                       VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
22804 -                       /* Pageblock could have been isolated meanwhile */
22805 -                       if (unlikely(isolated_pageblocks))
22806 -                               mt = get_pageblock_migratetype(page);
22807 -
22808 -                       if (bulkfree_pcp_prepare(page))
22809 -                               continue;
22810 -
22811 -                       __free_one_page(page, page_to_pfn(page), zone, 0, mt);
22812 -                       trace_mm_page_pcpu_drain(page, 0, mt);
22813 +                       list_add(&page->lru, dst);
22814                 } while (--count && --batch_free && !list_empty(list));
22815         }
22816 -       spin_unlock(&zone->lock);
22817  }
22818  
22819  static void free_one_page(struct zone *zone,
22820 @@ -1148,7 +1185,9 @@ static void free_one_page(struct zone *zone,
22821                                 int migratetype)
22822  {
22823         unsigned long nr_scanned;
22824 -       spin_lock(&zone->lock);
22825 +       unsigned long flags;
22826 +
22827 +       spin_lock_irqsave(&zone->lock, flags);
22828         nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
22829         if (nr_scanned)
22830                 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
22831 @@ -1158,7 +1197,7 @@ static void free_one_page(struct zone *zone,
22832                 migratetype = get_pfnblock_migratetype(page, pfn);
22833         }
22834         __free_one_page(page, pfn, zone, order, migratetype);
22835 -       spin_unlock(&zone->lock);
22836 +       spin_unlock_irqrestore(&zone->lock, flags);
22837  }
22838  
22839  static void __meminit __init_single_page(struct page *page, unsigned long pfn,
22840 @@ -1244,10 +1283,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
22841                 return;
22842  
22843         migratetype = get_pfnblock_migratetype(page, pfn);
22844 -       local_irq_save(flags);
22845 +       local_lock_irqsave(pa_lock, flags);
22846         __count_vm_events(PGFREE, 1 << order);
22847         free_one_page(page_zone(page), page, pfn, order, migratetype);
22848 -       local_irq_restore(flags);
22849 +       local_unlock_irqrestore(pa_lock, flags);
22850  }
22851  
22852  static void __init __free_pages_boot_core(struct page *page, unsigned int order)
22853 @@ -2246,16 +2285,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
22854  void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
22855  {
22856         unsigned long flags;
22857 +       LIST_HEAD(dst);
22858         int to_drain, batch;
22859  
22860 -       local_irq_save(flags);
22861 +       local_lock_irqsave(pa_lock, flags);
22862         batch = READ_ONCE(pcp->batch);
22863         to_drain = min(pcp->count, batch);
22864         if (to_drain > 0) {
22865 -               free_pcppages_bulk(zone, to_drain, pcp);
22866 +               isolate_pcp_pages(to_drain, pcp, &dst);
22867                 pcp->count -= to_drain;
22868         }
22869 -       local_irq_restore(flags);
22870 +       local_unlock_irqrestore(pa_lock, flags);
22871 +       free_pcppages_bulk(zone, to_drain, &dst);
22872  }
22873  #endif
22874  
22875 @@ -2271,16 +2312,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
22876         unsigned long flags;
22877         struct per_cpu_pageset *pset;
22878         struct per_cpu_pages *pcp;
22879 +       LIST_HEAD(dst);
22880 +       int count;
22881  
22882 -       local_irq_save(flags);
22883 +       cpu_lock_irqsave(cpu, flags);
22884         pset = per_cpu_ptr(zone->pageset, cpu);
22885  
22886         pcp = &pset->pcp;
22887 -       if (pcp->count) {
22888 -               free_pcppages_bulk(zone, pcp->count, pcp);
22889 +       count = pcp->count;
22890 +       if (count) {
22891 +               isolate_pcp_pages(count, pcp, &dst);
22892                 pcp->count = 0;
22893         }
22894 -       local_irq_restore(flags);
22895 +       cpu_unlock_irqrestore(cpu, flags);
22896 +       if (count)
22897 +               free_pcppages_bulk(zone, count, &dst);
22898  }
22899  
22900  /*
22901 @@ -2366,8 +2412,17 @@ void drain_all_pages(struct zone *zone)
22902                 else
22903                         cpumask_clear_cpu(cpu, &cpus_with_pcps);
22904         }
22905 +#ifndef CONFIG_PREEMPT_RT_BASE
22906         on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
22907                                                                 zone, 1);
22908 +#else
22909 +       for_each_cpu(cpu, &cpus_with_pcps) {
22910 +               if (zone)
22911 +                       drain_pages_zone(cpu, zone);
22912 +               else
22913 +                       drain_pages(cpu);
22914 +       }
22915 +#endif
22916  }
22917  
22918  #ifdef CONFIG_HIBERNATION
22919 @@ -2427,7 +2482,7 @@ void free_hot_cold_page(struct page *page, bool cold)
22920  
22921         migratetype = get_pfnblock_migratetype(page, pfn);
22922         set_pcppage_migratetype(page, migratetype);
22923 -       local_irq_save(flags);
22924 +       local_lock_irqsave(pa_lock, flags);
22925         __count_vm_event(PGFREE);
22926  
22927         /*
22928 @@ -2453,12 +2508,17 @@ void free_hot_cold_page(struct page *page, bool cold)
22929         pcp->count++;
22930         if (pcp->count >= pcp->high) {
22931                 unsigned long batch = READ_ONCE(pcp->batch);
22932 -               free_pcppages_bulk(zone, batch, pcp);
22933 +               LIST_HEAD(dst);
22934 +
22935 +               isolate_pcp_pages(batch, pcp, &dst);
22936                 pcp->count -= batch;
22937 +               local_unlock_irqrestore(pa_lock, flags);
22938 +               free_pcppages_bulk(zone, batch, &dst);
22939 +               return;
22940         }
22941  
22942  out:
22943 -       local_irq_restore(flags);
22944 +       local_unlock_irqrestore(pa_lock, flags);
22945  }
22946  
22947  /*
22948 @@ -2600,7 +2660,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
22949                 struct per_cpu_pages *pcp;
22950                 struct list_head *list;
22951  
22952 -               local_irq_save(flags);
22953 +               local_lock_irqsave(pa_lock, flags);
22954                 do {
22955                         pcp = &this_cpu_ptr(zone->pageset)->pcp;
22956                         list = &pcp->lists[migratetype];
22957 @@ -2627,7 +2687,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
22958                  * allocate greater than order-1 page units with __GFP_NOFAIL.
22959                  */
22960                 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
22961 -               spin_lock_irqsave(&zone->lock, flags);
22962 +               local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
22963  
22964                 do {
22965                         page = NULL;
22966 @@ -2639,22 +2699,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
22967                         if (!page)
22968                                 page = __rmqueue(zone, order, migratetype);
22969                 } while (page && check_new_pages(page, order));
22970 -               spin_unlock(&zone->lock);
22971 -               if (!page)
22972 +               if (!page) {
22973 +                       spin_unlock(&zone->lock);
22974                         goto failed;
22975 +               }
22976                 __mod_zone_freepage_state(zone, -(1 << order),
22977                                           get_pcppage_migratetype(page));
22978 +               spin_unlock(&zone->lock);
22979         }
22980  
22981         __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
22982         zone_statistics(preferred_zone, zone, gfp_flags);
22983 -       local_irq_restore(flags);
22984 +       local_unlock_irqrestore(pa_lock, flags);
22985  
22986         VM_BUG_ON_PAGE(bad_range(zone, page), page);
22987         return page;
22988  
22989  failed:
22990 -       local_irq_restore(flags);
22991 +       local_unlock_irqrestore(pa_lock, flags);
22992         return NULL;
22993  }
22994  
22995 @@ -6531,7 +6593,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
22996         int cpu = (unsigned long)hcpu;
22997  
22998         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
22999 +               local_lock_irq_on(swapvec_lock, cpu);
23000                 lru_add_drain_cpu(cpu);
23001 +               local_unlock_irq_on(swapvec_lock, cpu);
23002                 drain_pages(cpu);
23003  
23004                 /*
23005 @@ -6557,6 +6621,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
23006  void __init page_alloc_init(void)
23007  {
23008         hotcpu_notifier(page_alloc_cpu_notify, 0);
23009 +       local_irq_lock_init(pa_lock);
23010  }
23011  
23012  /*
23013 @@ -7385,7 +7450,7 @@ void zone_pcp_reset(struct zone *zone)
23014         struct per_cpu_pageset *pset;
23015  
23016         /* avoid races with drain_pages()  */
23017 -       local_irq_save(flags);
23018 +       local_lock_irqsave(pa_lock, flags);
23019         if (zone->pageset != &boot_pageset) {
23020                 for_each_online_cpu(cpu) {
23021                         pset = per_cpu_ptr(zone->pageset, cpu);
23022 @@ -7394,7 +7459,7 @@ void zone_pcp_reset(struct zone *zone)
23023                 free_percpu(zone->pageset);
23024                 zone->pageset = &boot_pageset;
23025         }
23026 -       local_irq_restore(flags);
23027 +       local_unlock_irqrestore(pa_lock, flags);
23028  }
23029  
23030  #ifdef CONFIG_MEMORY_HOTREMOVE
23031 diff --git a/mm/percpu.c b/mm/percpu.c
23032 index 255714302394..59b529b944a9 100644
23033 --- a/mm/percpu.c
23034 +++ b/mm/percpu.c
23035 @@ -1280,6 +1280,28 @@ void free_percpu(void __percpu *ptr)
23036  }
23037  EXPORT_SYMBOL_GPL(free_percpu);
23038  
23039 +bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
23040 +{
23041 +#ifdef CONFIG_SMP
23042 +       const size_t static_size = __per_cpu_end - __per_cpu_start;
23043 +       void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
23044 +       unsigned int cpu;
23045 +
23046 +       for_each_possible_cpu(cpu) {
23047 +               void *start = per_cpu_ptr(base, cpu);
23048 +               void *va = (void *)addr;
23049 +
23050 +               if (va >= start && va < start + static_size) {
23051 +                       if (can_addr)
23052 +                               *can_addr = (unsigned long) (va - start);
23053 +                       return true;
23054 +               }
23055 +       }
23056 +#endif
23057 +       /* on UP, can't distinguish from other static vars, always false */
23058 +       return false;
23059 +}
23060 +
23061  /**
23062   * is_kernel_percpu_address - test whether address is from static percpu area
23063   * @addr: address to test
23064 @@ -1293,20 +1315,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
23065   */
23066  bool is_kernel_percpu_address(unsigned long addr)
23067  {
23068 -#ifdef CONFIG_SMP
23069 -       const size_t static_size = __per_cpu_end - __per_cpu_start;
23070 -       void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
23071 -       unsigned int cpu;
23072 -
23073 -       for_each_possible_cpu(cpu) {
23074 -               void *start = per_cpu_ptr(base, cpu);
23075 -
23076 -               if ((void *)addr >= start && (void *)addr < start + static_size)
23077 -                       return true;
23078 -        }
23079 -#endif
23080 -       /* on UP, can't distinguish from other static vars, always false */
23081 -       return false;
23082 +       return __is_kernel_percpu_address(addr, NULL);
23083  }
23084  
23085  /**
23086 diff --git a/mm/slab.h b/mm/slab.h
23087 index bc05fdc3edce..610cf61634f0 100644
23088 --- a/mm/slab.h
23089 +++ b/mm/slab.h
23090 @@ -426,7 +426,11 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
23091   * The slab lists for all objects.
23092   */
23093  struct kmem_cache_node {
23094 +#ifdef CONFIG_SLUB
23095 +       raw_spinlock_t list_lock;
23096 +#else
23097         spinlock_t list_lock;
23098 +#endif
23099  
23100  #ifdef CONFIG_SLAB
23101         struct list_head slabs_partial; /* partial list first, better asm code */
23102 diff --git a/mm/slub.c b/mm/slub.c
23103 index 7aa0e97af928..35c873f09201 100644
23104 --- a/mm/slub.c
23105 +++ b/mm/slub.c
23106 @@ -1141,7 +1141,7 @@ static noinline int free_debug_processing(
23107         unsigned long uninitialized_var(flags);
23108         int ret = 0;
23109  
23110 -       spin_lock_irqsave(&n->list_lock, flags);
23111 +       raw_spin_lock_irqsave(&n->list_lock, flags);
23112         slab_lock(page);
23113  
23114         if (s->flags & SLAB_CONSISTENCY_CHECKS) {
23115 @@ -1176,7 +1176,7 @@ static noinline int free_debug_processing(
23116                          bulk_cnt, cnt);
23117  
23118         slab_unlock(page);
23119 -       spin_unlock_irqrestore(&n->list_lock, flags);
23120 +       raw_spin_unlock_irqrestore(&n->list_lock, flags);
23121         if (!ret)
23122                 slab_fix(s, "Object at 0x%p not freed", object);
23123         return ret;
23124 @@ -1304,6 +1304,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
23125  
23126  #endif /* CONFIG_SLUB_DEBUG */
23127  
23128 +struct slub_free_list {
23129 +       raw_spinlock_t          lock;
23130 +       struct list_head        list;
23131 +};
23132 +static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
23133 +
23134  /*
23135   * Hooks for other subsystems that check memory allocations. In a typical
23136   * production configuration these hooks all should produce no code at all.
23137 @@ -1527,10 +1533,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
23138         void *start, *p;
23139         int idx, order;
23140         bool shuffle;
23141 +       bool enableirqs = false;
23142  
23143         flags &= gfp_allowed_mask;
23144  
23145         if (gfpflags_allow_blocking(flags))
23146 +               enableirqs = true;
23147 +#ifdef CONFIG_PREEMPT_RT_FULL
23148 +       if (system_state == SYSTEM_RUNNING)
23149 +               enableirqs = true;
23150 +#endif
23151 +       if (enableirqs)
23152                 local_irq_enable();
23153  
23154         flags |= s->allocflags;
23155 @@ -1605,7 +1618,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
23156         page->frozen = 1;
23157  
23158  out:
23159 -       if (gfpflags_allow_blocking(flags))
23160 +       if (enableirqs)
23161                 local_irq_disable();
23162         if (!page)
23163                 return NULL;
23164 @@ -1664,6 +1677,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
23165         __free_pages(page, order);
23166  }
23167  
23168 +static void free_delayed(struct list_head *h)
23169 +{
23170 +       while(!list_empty(h)) {
23171 +               struct page *page = list_first_entry(h, struct page, lru);
23172 +
23173 +               list_del(&page->lru);
23174 +               __free_slab(page->slab_cache, page);
23175 +       }
23176 +}
23177 +
23178  #define need_reserve_slab_rcu                                          \
23179         (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
23180  
23181 @@ -1695,6 +1718,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
23182                 }
23183  
23184                 call_rcu(head, rcu_free_slab);
23185 +       } else if (irqs_disabled()) {
23186 +               struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
23187 +
23188 +               raw_spin_lock(&f->lock);
23189 +               list_add(&page->lru, &f->list);
23190 +               raw_spin_unlock(&f->lock);
23191         } else
23192                 __free_slab(s, page);
23193  }
23194 @@ -1802,7 +1831,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
23195         if (!n || !n->nr_partial)
23196                 return NULL;
23197  
23198 -       spin_lock(&n->list_lock);
23199 +       raw_spin_lock(&n->list_lock);
23200         list_for_each_entry_safe(page, page2, &n->partial, lru) {
23201                 void *t;
23202  
23203 @@ -1827,7 +1856,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
23204                         break;
23205  
23206         }
23207 -       spin_unlock(&n->list_lock);
23208 +       raw_spin_unlock(&n->list_lock);
23209         return object;
23210  }
23211  
23212 @@ -2073,7 +2102,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
23213                          * that acquire_slab() will see a slab page that
23214                          * is frozen
23215                          */
23216 -                       spin_lock(&n->list_lock);
23217 +                       raw_spin_lock(&n->list_lock);
23218                 }
23219         } else {
23220                 m = M_FULL;
23221 @@ -2084,7 +2113,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
23222                          * slabs from diagnostic functions will not see
23223                          * any frozen slabs.
23224                          */
23225 -                       spin_lock(&n->list_lock);
23226 +                       raw_spin_lock(&n->list_lock);
23227                 }
23228         }
23229  
23230 @@ -2119,7 +2148,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
23231                 goto redo;
23232  
23233         if (lock)
23234 -               spin_unlock(&n->list_lock);
23235 +               raw_spin_unlock(&n->list_lock);
23236  
23237         if (m == M_FREE) {
23238                 stat(s, DEACTIVATE_EMPTY);
23239 @@ -2151,10 +2180,10 @@ static void unfreeze_partials(struct kmem_cache *s,
23240                 n2 = get_node(s, page_to_nid(page));
23241                 if (n != n2) {
23242                         if (n)
23243 -                               spin_unlock(&n->list_lock);
23244 +                               raw_spin_unlock(&n->list_lock);
23245  
23246                         n = n2;
23247 -                       spin_lock(&n->list_lock);
23248 +                       raw_spin_lock(&n->list_lock);
23249                 }
23250  
23251                 do {
23252 @@ -2183,7 +2212,7 @@ static void unfreeze_partials(struct kmem_cache *s,
23253         }
23254  
23255         if (n)
23256 -               spin_unlock(&n->list_lock);
23257 +               raw_spin_unlock(&n->list_lock);
23258  
23259         while (discard_page) {
23260                 page = discard_page;
23261 @@ -2222,14 +2251,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
23262                         pobjects = oldpage->pobjects;
23263                         pages = oldpage->pages;
23264                         if (drain && pobjects > s->cpu_partial) {
23265 +                               struct slub_free_list *f;
23266                                 unsigned long flags;
23267 +                               LIST_HEAD(tofree);
23268                                 /*
23269                                  * partial array is full. Move the existing
23270                                  * set to the per node partial list.
23271                                  */
23272                                 local_irq_save(flags);
23273                                 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
23274 +                               f = this_cpu_ptr(&slub_free_list);
23275 +                               raw_spin_lock(&f->lock);
23276 +                               list_splice_init(&f->list, &tofree);
23277 +                               raw_spin_unlock(&f->lock);
23278                                 local_irq_restore(flags);
23279 +                               free_delayed(&tofree);
23280                                 oldpage = NULL;
23281                                 pobjects = 0;
23282                                 pages = 0;
23283 @@ -2301,7 +2337,22 @@ static bool has_cpu_slab(int cpu, void *info)
23284  
23285  static void flush_all(struct kmem_cache *s)
23286  {
23287 +       LIST_HEAD(tofree);
23288 +       int cpu;
23289 +
23290         on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
23291 +       for_each_online_cpu(cpu) {
23292 +               struct slub_free_list *f;
23293 +
23294 +               if (!has_cpu_slab(cpu, s))
23295 +                       continue;
23296 +
23297 +               f = &per_cpu(slub_free_list, cpu);
23298 +               raw_spin_lock_irq(&f->lock);
23299 +               list_splice_init(&f->list, &tofree);
23300 +               raw_spin_unlock_irq(&f->lock);
23301 +               free_delayed(&tofree);
23302 +       }
23303  }
23304  
23305  /*
23306 @@ -2356,10 +2407,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
23307         unsigned long x = 0;
23308         struct page *page;
23309  
23310 -       spin_lock_irqsave(&n->list_lock, flags);
23311 +       raw_spin_lock_irqsave(&n->list_lock, flags);
23312         list_for_each_entry(page, &n->partial, lru)
23313                 x += get_count(page);
23314 -       spin_unlock_irqrestore(&n->list_lock, flags);
23315 +       raw_spin_unlock_irqrestore(&n->list_lock, flags);
23316         return x;
23317  }
23318  #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
23319 @@ -2497,8 +2548,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
23320   * already disabled (which is the case for bulk allocation).
23321   */
23322  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
23323 -                         unsigned long addr, struct kmem_cache_cpu *c)
23324 +                         unsigned long addr, struct kmem_cache_cpu *c,
23325 +                         struct list_head *to_free)
23326  {
23327 +       struct slub_free_list *f;
23328         void *freelist;
23329         struct page *page;
23330  
23331 @@ -2558,6 +2611,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
23332         VM_BUG_ON(!c->page->frozen);
23333         c->freelist = get_freepointer(s, freelist);
23334         c->tid = next_tid(c->tid);
23335 +
23336 +out:
23337 +       f = this_cpu_ptr(&slub_free_list);
23338 +       raw_spin_lock(&f->lock);
23339 +       list_splice_init(&f->list, to_free);
23340 +       raw_spin_unlock(&f->lock);
23341 +
23342         return freelist;
23343  
23344  new_slab:
23345 @@ -2589,7 +2649,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
23346         deactivate_slab(s, page, get_freepointer(s, freelist));
23347         c->page = NULL;
23348         c->freelist = NULL;
23349 -       return freelist;
23350 +       goto out;
23351  }
23352  
23353  /*
23354 @@ -2601,6 +2661,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
23355  {
23356         void *p;
23357         unsigned long flags;
23358 +       LIST_HEAD(tofree);
23359  
23360         local_irq_save(flags);
23361  #ifdef CONFIG_PREEMPT
23362 @@ -2612,8 +2673,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
23363         c = this_cpu_ptr(s->cpu_slab);
23364  #endif
23365  
23366 -       p = ___slab_alloc(s, gfpflags, node, addr, c);
23367 +       p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
23368         local_irq_restore(flags);
23369 +       free_delayed(&tofree);
23370         return p;
23371  }
23372  
23373 @@ -2799,7 +2861,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
23374  
23375         do {
23376                 if (unlikely(n)) {
23377 -                       spin_unlock_irqrestore(&n->list_lock, flags);
23378 +                       raw_spin_unlock_irqrestore(&n->list_lock, flags);
23379                         n = NULL;
23380                 }
23381                 prior = page->freelist;
23382 @@ -2831,7 +2893,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
23383                                  * Otherwise the list_lock will synchronize with
23384                                  * other processors updating the list of slabs.
23385                                  */
23386 -                               spin_lock_irqsave(&n->list_lock, flags);
23387 +                               raw_spin_lock_irqsave(&n->list_lock, flags);
23388  
23389                         }
23390                 }
23391 @@ -2873,7 +2935,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
23392                 add_partial(n, page, DEACTIVATE_TO_TAIL);
23393                 stat(s, FREE_ADD_PARTIAL);
23394         }
23395 -       spin_unlock_irqrestore(&n->list_lock, flags);
23396 +       raw_spin_unlock_irqrestore(&n->list_lock, flags);
23397         return;
23398  
23399  slab_empty:
23400 @@ -2888,7 +2950,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
23401                 remove_full(s, n, page);
23402         }
23403  
23404 -       spin_unlock_irqrestore(&n->list_lock, flags);
23405 +       raw_spin_unlock_irqrestore(&n->list_lock, flags);
23406         stat(s, FREE_SLAB);
23407         discard_slab(s, page);
23408  }
23409 @@ -3093,6 +3155,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
23410                           void **p)
23411  {
23412         struct kmem_cache_cpu *c;
23413 +       LIST_HEAD(to_free);
23414         int i;
23415  
23416         /* memcg and kmem_cache debug support */
23417 @@ -3116,7 +3179,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
23418                          * of re-populating per CPU c->freelist
23419                          */
23420                         p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
23421 -                                           _RET_IP_, c);
23422 +                                           _RET_IP_, c, &to_free);
23423                         if (unlikely(!p[i]))
23424                                 goto error;
23425  
23426 @@ -3128,6 +3191,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
23427         }
23428         c->tid = next_tid(c->tid);
23429         local_irq_enable();
23430 +       free_delayed(&to_free);
23431  
23432         /* Clear memory outside IRQ disabled fastpath loop */
23433         if (unlikely(flags & __GFP_ZERO)) {
23434 @@ -3275,7 +3339,7 @@ static void
23435  init_kmem_cache_node(struct kmem_cache_node *n)
23436  {
23437         n->nr_partial = 0;
23438 -       spin_lock_init(&n->list_lock);
23439 +       raw_spin_lock_init(&n->list_lock);
23440         INIT_LIST_HEAD(&n->partial);
23441  #ifdef CONFIG_SLUB_DEBUG
23442         atomic_long_set(&n->nr_slabs, 0);
23443 @@ -3619,6 +3683,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
23444                                                         const char *text)
23445  {
23446  #ifdef CONFIG_SLUB_DEBUG
23447 +#ifdef CONFIG_PREEMPT_RT_BASE
23448 +       /* XXX move out of irq-off section */
23449 +       slab_err(s, page, text, s->name);
23450 +#else
23451         void *addr = page_address(page);
23452         void *p;
23453         unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
23454 @@ -3639,6 +3707,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
23455         slab_unlock(page);
23456         kfree(map);
23457  #endif
23458 +#endif
23459  }
23460  
23461  /*
23462 @@ -3652,7 +3721,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
23463         struct page *page, *h;
23464  
23465         BUG_ON(irqs_disabled());
23466 -       spin_lock_irq(&n->list_lock);
23467 +       raw_spin_lock_irq(&n->list_lock);
23468         list_for_each_entry_safe(page, h, &n->partial, lru) {
23469                 if (!page->inuse) {
23470                         remove_partial(n, page);
23471 @@ -3662,7 +3731,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
23472                         "Objects remaining in %s on __kmem_cache_shutdown()");
23473                 }
23474         }
23475 -       spin_unlock_irq(&n->list_lock);
23476 +       raw_spin_unlock_irq(&n->list_lock);
23477  
23478         list_for_each_entry_safe(page, h, &discard, lru)
23479                 discard_slab(s, page);
23480 @@ -3920,7 +3989,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
23481                 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
23482                         INIT_LIST_HEAD(promote + i);
23483  
23484 -               spin_lock_irqsave(&n->list_lock, flags);
23485 +               raw_spin_lock_irqsave(&n->list_lock, flags);
23486  
23487                 /*
23488                  * Build lists of slabs to discard or promote.
23489 @@ -3951,7 +4020,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
23490                 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
23491                         list_splice(promote + i, &n->partial);
23492  
23493 -               spin_unlock_irqrestore(&n->list_lock, flags);
23494 +               raw_spin_unlock_irqrestore(&n->list_lock, flags);
23495  
23496                 /* Release empty slabs */
23497                 list_for_each_entry_safe(page, t, &discard, lru)
23498 @@ -4127,6 +4196,12 @@ void __init kmem_cache_init(void)
23499  {
23500         static __initdata struct kmem_cache boot_kmem_cache,
23501                 boot_kmem_cache_node;
23502 +       int cpu;
23503 +
23504 +       for_each_possible_cpu(cpu) {
23505 +               raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
23506 +               INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
23507 +       }
23508  
23509         if (debug_guardpage_minorder())
23510                 slub_max_order = 0;
23511 @@ -4335,7 +4410,7 @@ static int validate_slab_node(struct kmem_cache *s,
23512         struct page *page;
23513         unsigned long flags;
23514  
23515 -       spin_lock_irqsave(&n->list_lock, flags);
23516 +       raw_spin_lock_irqsave(&n->list_lock, flags);
23517  
23518         list_for_each_entry(page, &n->partial, lru) {
23519                 validate_slab_slab(s, page, map);
23520 @@ -4357,7 +4432,7 @@ static int validate_slab_node(struct kmem_cache *s,
23521                        s->name, count, atomic_long_read(&n->nr_slabs));
23522  
23523  out:
23524 -       spin_unlock_irqrestore(&n->list_lock, flags);
23525 +       raw_spin_unlock_irqrestore(&n->list_lock, flags);
23526         return count;
23527  }
23528  
23529 @@ -4545,12 +4620,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
23530                 if (!atomic_long_read(&n->nr_slabs))
23531                         continue;
23532  
23533 -               spin_lock_irqsave(&n->list_lock, flags);
23534 +               raw_spin_lock_irqsave(&n->list_lock, flags);
23535                 list_for_each_entry(page, &n->partial, lru)
23536                         process_slab(&t, s, page, alloc, map);
23537                 list_for_each_entry(page, &n->full, lru)
23538                         process_slab(&t, s, page, alloc, map);
23539 -               spin_unlock_irqrestore(&n->list_lock, flags);
23540 +               raw_spin_unlock_irqrestore(&n->list_lock, flags);
23541         }
23542  
23543         for (i = 0; i < t.count; i++) {
23544 diff --git a/mm/swap.c b/mm/swap.c
23545 index 4dcf852e1e6d..69c3a5b24060 100644
23546 --- a/mm/swap.c
23547 +++ b/mm/swap.c
23548 @@ -32,6 +32,7 @@
23549  #include <linux/memcontrol.h>
23550  #include <linux/gfp.h>
23551  #include <linux/uio.h>
23552 +#include <linux/locallock.h>
23553  #include <linux/hugetlb.h>
23554  #include <linux/page_idle.h>
23555  
23556 @@ -50,6 +51,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
23557  #ifdef CONFIG_SMP
23558  static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
23559  #endif
23560 +static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
23561 +DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
23562  
23563  /*
23564   * This path almost never happens for VM activity - pages are normally
23565 @@ -240,11 +243,11 @@ void rotate_reclaimable_page(struct page *page)
23566                 unsigned long flags;
23567  
23568                 get_page(page);
23569 -               local_irq_save(flags);
23570 +               local_lock_irqsave(rotate_lock, flags);
23571                 pvec = this_cpu_ptr(&lru_rotate_pvecs);
23572                 if (!pagevec_add(pvec, page) || PageCompound(page))
23573                         pagevec_move_tail(pvec);
23574 -               local_irq_restore(flags);
23575 +               local_unlock_irqrestore(rotate_lock, flags);
23576         }
23577  }
23578  
23579 @@ -294,12 +297,13 @@ void activate_page(struct page *page)
23580  {
23581         page = compound_head(page);
23582         if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
23583 -               struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
23584 +               struct pagevec *pvec = &get_locked_var(swapvec_lock,
23585 +                                                      activate_page_pvecs);
23586  
23587                 get_page(page);
23588                 if (!pagevec_add(pvec, page) || PageCompound(page))
23589                         pagevec_lru_move_fn(pvec, __activate_page, NULL);
23590 -               put_cpu_var(activate_page_pvecs);
23591 +               put_locked_var(swapvec_lock, activate_page_pvecs);
23592         }
23593  }
23594  
23595 @@ -326,7 +330,7 @@ void activate_page(struct page *page)
23596  
23597  static void __lru_cache_activate_page(struct page *page)
23598  {
23599 -       struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
23600 +       struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
23601         int i;
23602  
23603         /*
23604 @@ -348,7 +352,7 @@ static void __lru_cache_activate_page(struct page *page)
23605                 }
23606         }
23607  
23608 -       put_cpu_var(lru_add_pvec);
23609 +       put_locked_var(swapvec_lock, lru_add_pvec);
23610  }
23611  
23612  /*
23613 @@ -390,12 +394,12 @@ EXPORT_SYMBOL(mark_page_accessed);
23614  
23615  static void __lru_cache_add(struct page *page)
23616  {
23617 -       struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
23618 +       struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
23619  
23620         get_page(page);
23621         if (!pagevec_add(pvec, page) || PageCompound(page))
23622                 __pagevec_lru_add(pvec);
23623 -       put_cpu_var(lru_add_pvec);
23624 +       put_locked_var(swapvec_lock, lru_add_pvec);
23625  }
23626  
23627  /**
23628 @@ -593,9 +597,15 @@ void lru_add_drain_cpu(int cpu)
23629                 unsigned long flags;
23630  
23631                 /* No harm done if a racing interrupt already did this */
23632 -               local_irq_save(flags);
23633 +#ifdef CONFIG_PREEMPT_RT_BASE
23634 +               local_lock_irqsave_on(rotate_lock, flags, cpu);
23635                 pagevec_move_tail(pvec);
23636 -               local_irq_restore(flags);
23637 +               local_unlock_irqrestore_on(rotate_lock, flags, cpu);
23638 +#else
23639 +               local_lock_irqsave(rotate_lock, flags);
23640 +               pagevec_move_tail(pvec);
23641 +               local_unlock_irqrestore(rotate_lock, flags);
23642 +#endif
23643         }
23644  
23645         pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
23646 @@ -627,11 +637,12 @@ void deactivate_file_page(struct page *page)
23647                 return;
23648  
23649         if (likely(get_page_unless_zero(page))) {
23650 -               struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
23651 +               struct pagevec *pvec = &get_locked_var(swapvec_lock,
23652 +                                                      lru_deactivate_file_pvecs);
23653  
23654                 if (!pagevec_add(pvec, page) || PageCompound(page))
23655                         pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
23656 -               put_cpu_var(lru_deactivate_file_pvecs);
23657 +               put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
23658         }
23659  }
23660  
23661 @@ -646,27 +657,31 @@ void deactivate_file_page(struct page *page)
23662  void deactivate_page(struct page *page)
23663  {
23664         if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
23665 -               struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
23666 +               struct pagevec *pvec = &get_locked_var(swapvec_lock,
23667 +                                                      lru_deactivate_pvecs);
23668  
23669                 get_page(page);
23670                 if (!pagevec_add(pvec, page) || PageCompound(page))
23671                         pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
23672 -               put_cpu_var(lru_deactivate_pvecs);
23673 +               put_locked_var(swapvec_lock, lru_deactivate_pvecs);
23674         }
23675  }
23676  
23677  void lru_add_drain(void)
23678  {
23679 -       lru_add_drain_cpu(get_cpu());
23680 -       put_cpu();
23681 +       lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
23682 +       local_unlock_cpu(swapvec_lock);
23683  }
23684  
23685 -static void lru_add_drain_per_cpu(struct work_struct *dummy)
23686 +#ifdef CONFIG_PREEMPT_RT_BASE
23687 +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
23688  {
23689 -       lru_add_drain();
23690 +       local_lock_on(swapvec_lock, cpu);
23691 +       lru_add_drain_cpu(cpu);
23692 +       local_unlock_on(swapvec_lock, cpu);
23693  }
23694  
23695 -static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
23696 +#else
23697  
23698  /*
23699   * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
23700 @@ -686,6 +701,22 @@ static int __init lru_init(void)
23701  }
23702  early_initcall(lru_init);
23703  
23704 +static void lru_add_drain_per_cpu(struct work_struct *dummy)
23705 +{
23706 +       lru_add_drain();
23707 +}
23708 +
23709 +static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
23710 +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
23711 +{
23712 +       struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
23713 +
23714 +       INIT_WORK(work, lru_add_drain_per_cpu);
23715 +       queue_work_on(cpu, lru_add_drain_wq, work);
23716 +       cpumask_set_cpu(cpu, has_work);
23717 +}
23718 +#endif
23719 +
23720  void lru_add_drain_all(void)
23721  {
23722         static DEFINE_MUTEX(lock);
23723 @@ -697,21 +728,18 @@ void lru_add_drain_all(void)
23724         cpumask_clear(&has_work);
23725  
23726         for_each_online_cpu(cpu) {
23727 -               struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
23728 -
23729                 if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
23730                     pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
23731                     pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
23732                     pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
23733 -                   need_activate_page_drain(cpu)) {
23734 -                       INIT_WORK(work, lru_add_drain_per_cpu);
23735 -                       queue_work_on(cpu, lru_add_drain_wq, work);
23736 -                       cpumask_set_cpu(cpu, &has_work);
23737 -               }
23738 +                   need_activate_page_drain(cpu))
23739 +                       remote_lru_add_drain(cpu, &has_work);
23740         }
23741  
23742 +#ifndef CONFIG_PREEMPT_RT_BASE
23743         for_each_cpu(cpu, &has_work)
23744                 flush_work(&per_cpu(lru_add_drain_work, cpu));
23745 +#endif
23746  
23747         put_online_cpus();
23748         mutex_unlock(&lock);
23749 diff --git a/mm/truncate.c b/mm/truncate.c
23750 index 8d8c62d89e6d..5bf1bd25d077 100644
23751 --- a/mm/truncate.c
23752 +++ b/mm/truncate.c
23753 @@ -62,9 +62,12 @@ static void clear_exceptional_entry(struct address_space *mapping,
23754          * protected by mapping->tree_lock.
23755          */
23756         if (!workingset_node_shadows(node) &&
23757 -           !list_empty(&node->private_list))
23758 -               list_lru_del(&workingset_shadow_nodes,
23759 +           !list_empty(&node->private_list)) {
23760 +               local_lock(workingset_shadow_lock);
23761 +               list_lru_del(&__workingset_shadow_nodes,
23762                                 &node->private_list);
23763 +               local_unlock(workingset_shadow_lock);
23764 +       }
23765         __radix_tree_delete_node(&mapping->page_tree, node);
23766  unlock:
23767         spin_unlock_irq(&mapping->tree_lock);
23768 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
23769 index f2481cb4e6b2..db4de08fa97c 100644
23770 --- a/mm/vmalloc.c
23771 +++ b/mm/vmalloc.c
23772 @@ -845,7 +845,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
23773         struct vmap_block *vb;
23774         struct vmap_area *va;
23775         unsigned long vb_idx;
23776 -       int node, err;
23777 +       int node, err, cpu;
23778         void *vaddr;
23779  
23780         node = numa_node_id();
23781 @@ -888,11 +888,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
23782         BUG_ON(err);
23783         radix_tree_preload_end();
23784  
23785 -       vbq = &get_cpu_var(vmap_block_queue);
23786 +       cpu = get_cpu_light();
23787 +       vbq = this_cpu_ptr(&vmap_block_queue);
23788         spin_lock(&vbq->lock);
23789         list_add_tail_rcu(&vb->free_list, &vbq->free);
23790         spin_unlock(&vbq->lock);
23791 -       put_cpu_var(vmap_block_queue);
23792 +       put_cpu_light();
23793  
23794         return vaddr;
23795  }
23796 @@ -961,6 +962,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
23797         struct vmap_block *vb;
23798         void *vaddr = NULL;
23799         unsigned int order;
23800 +       int cpu;
23801  
23802         BUG_ON(offset_in_page(size));
23803         BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
23804 @@ -975,7 +977,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
23805         order = get_order(size);
23806  
23807         rcu_read_lock();
23808 -       vbq = &get_cpu_var(vmap_block_queue);
23809 +       cpu = get_cpu_light();
23810 +       vbq = this_cpu_ptr(&vmap_block_queue);
23811         list_for_each_entry_rcu(vb, &vbq->free, free_list) {
23812                 unsigned long pages_off;
23813  
23814 @@ -998,7 +1001,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
23815                 break;
23816         }
23817  
23818 -       put_cpu_var(vmap_block_queue);
23819 +       put_cpu_light();
23820         rcu_read_unlock();
23821  
23822         /* Allocate new block if nothing was found */
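
In the mm/vmalloc.c hunks the per-CPU vmap_block_queue already carries its own spinlock, so the code only needs to stay on one CPU while it dereferences the per-CPU pointer. get_cpu_light()/put_cpu_light(), introduced elsewhere in this patch, provide that without disabling preemption on RT (on !RT they are plain get_cpu()/put_cpu()). A hedged sketch of the shape; struct my_queue and my_queues are placeholders:

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

struct my_queue {
        spinlock_t lock;                /* protects @free */
        struct list_head free;
};
static DEFINE_PER_CPU(struct my_queue, my_queues);   /* per-CPU init omitted */

static void add_to_this_cpu_queue(struct list_head *entry)
{
        struct my_queue *q;

        get_cpu_light();                /* stay on this CPU; preemptible on RT */
        q = this_cpu_ptr(&my_queues);
        spin_lock(&q->lock);            /* the per-CPU data has its own lock */
        list_add_tail(entry, &q->free);
        spin_unlock(&q->lock);
        put_cpu_light();
}
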
23823 diff --git a/mm/vmstat.c b/mm/vmstat.c
23824 index 604f26a4f696..312006d2db50 100644
23825 --- a/mm/vmstat.c
23826 +++ b/mm/vmstat.c
23827 @@ -245,6 +245,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
23828         long x;
23829         long t;
23830  
23831 +       preempt_disable_rt();
23832         x = delta + __this_cpu_read(*p);
23833  
23834         t = __this_cpu_read(pcp->stat_threshold);
23835 @@ -254,6 +255,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
23836                 x = 0;
23837         }
23838         __this_cpu_write(*p, x);
23839 +       preempt_enable_rt();
23840  }
23841  EXPORT_SYMBOL(__mod_zone_page_state);
23842  
23843 @@ -265,6 +267,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
23844         long x;
23845         long t;
23846  
23847 +       preempt_disable_rt();
23848         x = delta + __this_cpu_read(*p);
23849  
23850         t = __this_cpu_read(pcp->stat_threshold);
23851 @@ -274,6 +277,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
23852                 x = 0;
23853         }
23854         __this_cpu_write(*p, x);
23855 +       preempt_enable_rt();
23856  }
23857  EXPORT_SYMBOL(__mod_node_page_state);
23858  
23859 @@ -306,6 +310,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
23860         s8 __percpu *p = pcp->vm_stat_diff + item;
23861         s8 v, t;
23862  
23863 +       preempt_disable_rt();
23864         v = __this_cpu_inc_return(*p);
23865         t = __this_cpu_read(pcp->stat_threshold);
23866         if (unlikely(v > t)) {
23867 @@ -314,6 +319,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
23868                 zone_page_state_add(v + overstep, zone, item);
23869                 __this_cpu_write(*p, -overstep);
23870         }
23871 +       preempt_enable_rt();
23872  }
23873  
23874  void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
23875 @@ -322,6 +328,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
23876         s8 __percpu *p = pcp->vm_node_stat_diff + item;
23877         s8 v, t;
23878  
23879 +       preempt_disable_rt();
23880         v = __this_cpu_inc_return(*p);
23881         t = __this_cpu_read(pcp->stat_threshold);
23882         if (unlikely(v > t)) {
23883 @@ -330,6 +337,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
23884                 node_page_state_add(v + overstep, pgdat, item);
23885                 __this_cpu_write(*p, -overstep);
23886         }
23887 +       preempt_enable_rt();
23888  }
23889  
23890  void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
23891 @@ -350,6 +358,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
23892         s8 __percpu *p = pcp->vm_stat_diff + item;
23893         s8 v, t;
23894  
23895 +       preempt_disable_rt();
23896         v = __this_cpu_dec_return(*p);
23897         t = __this_cpu_read(pcp->stat_threshold);
23898         if (unlikely(v < - t)) {
23899 @@ -358,6 +367,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
23900                 zone_page_state_add(v - overstep, zone, item);
23901                 __this_cpu_write(*p, overstep);
23902         }
23903 +       preempt_enable_rt();
23904  }
23905  
23906  void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
23907 @@ -366,6 +376,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
23908         s8 __percpu *p = pcp->vm_node_stat_diff + item;
23909         s8 v, t;
23910  
23911 +       preempt_disable_rt();
23912         v = __this_cpu_dec_return(*p);
23913         t = __this_cpu_read(pcp->stat_threshold);
23914         if (unlikely(v < - t)) {
23915 @@ -374,6 +385,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
23916                 node_page_state_add(v - overstep, pgdat, item);
23917                 __this_cpu_write(*p, overstep);
23918         }
23919 +       preempt_enable_rt();
23920  }
23921  
23922  void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
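
The mm/vmstat.c counters above are updated with non-atomic __this_cpu_*() sequences, which assume the updater cannot be preempted mid-update on its CPU. Contexts that provide that guarantee implicitly on mainline (hard-irq-off sections, non-preemptible softirqs) become preemptible on RT, so the patch brackets each update with preempt_disable_rt()/preempt_enable_rt(), which compile away on !RT. A minimal illustration with a placeholder counter:

#include <linux/percpu.h>

static DEFINE_PER_CPU(long, my_stat);

static void my_stat_add(long delta)
{
        /* no-op on !RT; a real preempt_disable() on PREEMPT_RT */
        preempt_disable_rt();
        __this_cpu_add(my_stat, delta);
        preempt_enable_rt();
}
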
23923 diff --git a/mm/workingset.c b/mm/workingset.c
23924 index fb1f9183d89a..7e6ef1a48cd3 100644
23925 --- a/mm/workingset.c
23926 +++ b/mm/workingset.c
23927 @@ -334,7 +334,8 @@ void workingset_activation(struct page *page)
23928   * point where they would still be useful.
23929   */
23930  
23931 -struct list_lru workingset_shadow_nodes;
23932 +struct list_lru __workingset_shadow_nodes;
23933 +DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
23934  
23935  static unsigned long count_shadow_nodes(struct shrinker *shrinker,
23936                                         struct shrink_control *sc)
23937 @@ -344,9 +345,9 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
23938         unsigned long pages;
23939  
23940         /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
23941 -       local_irq_disable();
23942 -       shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
23943 -       local_irq_enable();
23944 +       local_lock_irq(workingset_shadow_lock);
23945 +       shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc);
23946 +       local_unlock_irq(workingset_shadow_lock);
23947  
23948         if (sc->memcg) {
23949                 pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
23950 @@ -438,9 +439,9 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
23951         spin_unlock(&mapping->tree_lock);
23952         ret = LRU_REMOVED_RETRY;
23953  out:
23954 -       local_irq_enable();
23955 +       local_unlock_irq(workingset_shadow_lock);
23956         cond_resched();
23957 -       local_irq_disable();
23958 +       local_lock_irq(workingset_shadow_lock);
23959         spin_lock(lru_lock);
23960         return ret;
23961  }
23962 @@ -451,10 +452,10 @@ static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
23963         unsigned long ret;
23964  
23965         /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
23966 -       local_irq_disable();
23967 -       ret =  list_lru_shrink_walk(&workingset_shadow_nodes, sc,
23968 +       local_lock_irq(workingset_shadow_lock);
23969 +       ret =  list_lru_shrink_walk(&__workingset_shadow_nodes, sc,
23970                                     shadow_lru_isolate, NULL);
23971 -       local_irq_enable();
23972 +       local_unlock_irq(workingset_shadow_lock);
23973         return ret;
23974  }
23975  
23976 @@ -492,7 +493,7 @@ static int __init workingset_init(void)
23977         pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
23978                timestamp_bits, max_order, bucket_order);
23979  
23980 -       ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
23981 +       ret = list_lru_init_key(&__workingset_shadow_nodes, &shadow_nodes_key);
23982         if (ret)
23983                 goto err;
23984         ret = register_shrinker(&workingset_shadow_shrinker);
23985 @@ -500,7 +501,7 @@ static int __init workingset_init(void)
23986                 goto err_list_lru;
23987         return 0;
23988  err_list_lru:
23989 -       list_lru_destroy(&workingset_shadow_nodes);
23990 +       list_lru_destroy(&__workingset_shadow_nodes);
23991  err:
23992         return ret;
23993  }
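
The mm/truncate.c and mm/workingset.c hunks above wrap every access to the shadow-node list_lru in the new workingset_shadow_lock instead of bare local_irq_disable()/local_irq_enable(), and rename the list to __workingset_shadow_nodes so any user that bypasses the locked accessors has to be updated. A sketch of the conversion pattern with placeholder names, assuming this patch's DEFINE_LOCAL_IRQ_LOCK API:

#include <linux/locallock.h>

static DEFINE_LOCAL_IRQ_LOCK(my_shadow_lock);

static void walk_shadow_nodes(void)
{
        /* irqs off on !RT; a per-CPU sleeping lock with irqs left on, on RT */
        local_lock_irq(my_shadow_lock);
        /* ... list_lru operations that nest inside mapping->tree_lock ... */
        local_unlock_irq(my_shadow_lock);
}
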
23994 diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
23995 index b0bc023d25c5..5af6426fbcbe 100644
23996 --- a/mm/zsmalloc.c
23997 +++ b/mm/zsmalloc.c
23998 @@ -53,6 +53,7 @@
23999  #include <linux/mount.h>
24000  #include <linux/migrate.h>
24001  #include <linux/pagemap.h>
24002 +#include <linux/locallock.h>
24003  
24004  #define ZSPAGE_MAGIC   0x58
24005  
24006 @@ -70,9 +71,22 @@
24007   */
24008  #define ZS_MAX_ZSPAGE_ORDER 2
24009  #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
24010 -
24011  #define ZS_HANDLE_SIZE (sizeof(unsigned long))
24012  
24013 +#ifdef CONFIG_PREEMPT_RT_FULL
24014 +
24015 +struct zsmalloc_handle {
24016 +       unsigned long addr;
24017 +       struct mutex lock;
24018 +};
24019 +
24020 +#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
24021 +
24022 +#else
24023 +
24024 +#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long))
24025 +#endif
24026 +
24027  /*
24028   * Object location (<PFN>, <obj_idx>) is encoded as
24029   * as single (unsigned long) handle value.
24030 @@ -327,7 +341,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
24031  
24032  static int create_cache(struct zs_pool *pool)
24033  {
24034 -       pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
24035 +       pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE,
24036                                         0, 0, NULL);
24037         if (!pool->handle_cachep)
24038                 return 1;
24039 @@ -351,10 +365,27 @@ static void destroy_cache(struct zs_pool *pool)
24040  
24041  static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
24042  {
24043 -       return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
24044 -                       gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
24045 +       void *p;
24046 +
24047 +       p = kmem_cache_alloc(pool->handle_cachep,
24048 +                            gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
24049 +#ifdef CONFIG_PREEMPT_RT_FULL
24050 +       if (p) {
24051 +               struct zsmalloc_handle *zh = p;
24052 +
24053 +               mutex_init(&zh->lock);
24054 +       }
24055 +#endif
24056 +       return (unsigned long)p;
24057  }
24058  
24059 +#ifdef CONFIG_PREEMPT_RT_FULL
24060 +static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
24061 +{
24062 +       return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1));
24063 +}
24064 +#endif
24065 +
24066  static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
24067  {
24068         kmem_cache_free(pool->handle_cachep, (void *)handle);
24069 @@ -373,12 +404,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
24070  
24071  static void record_obj(unsigned long handle, unsigned long obj)
24072  {
24073 +#ifdef CONFIG_PREEMPT_RT_FULL
24074 +       struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
24075 +
24076 +       WRITE_ONCE(zh->addr, obj);
24077 +#else
24078         /*
24079          * lsb of @obj represents handle lock while other bits
24080          * represent object value the handle is pointing so
24081          * updating shouldn't do store tearing.
24082          */
24083         WRITE_ONCE(*(unsigned long *)handle, obj);
24084 +#endif
24085  }
24086  
24087  /* zpool driver */
24088 @@ -467,6 +504,7 @@ MODULE_ALIAS("zpool-zsmalloc");
24089  
24090  /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
24091  static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
24092 +static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock);
24093  
24094  static bool is_zspage_isolated(struct zspage *zspage)
24095  {
24096 @@ -902,7 +940,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
24097  
24098  static unsigned long handle_to_obj(unsigned long handle)
24099  {
24100 +#ifdef CONFIG_PREEMPT_RT_FULL
24101 +       struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
24102 +
24103 +       return zh->addr;
24104 +#else
24105         return *(unsigned long *)handle;
24106 +#endif
24107  }
24108  
24109  static unsigned long obj_to_head(struct page *page, void *obj)
24110 @@ -916,22 +960,46 @@ static unsigned long obj_to_head(struct page *page, void *obj)
24111  
24112  static inline int testpin_tag(unsigned long handle)
24113  {
24114 +#ifdef CONFIG_PREEMPT_RT_FULL
24115 +       struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
24116 +
24117 +       return mutex_is_locked(&zh->lock);
24118 +#else
24119         return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
24120 +#endif
24121  }
24122  
24123  static inline int trypin_tag(unsigned long handle)
24124  {
24125 +#ifdef CONFIG_PREEMPT_RT_FULL
24126 +       struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
24127 +
24128 +       return mutex_trylock(&zh->lock);
24129 +#else
24130         return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
24131 +#endif
24132  }
24133  
24134  static void pin_tag(unsigned long handle)
24135  {
24136 +#ifdef CONFIG_PREEMPT_RT_FULL
24137 +       struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
24138 +
24139 +       return mutex_lock(&zh->lock);
24140 +#else
24141         bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
24142 +#endif
24143  }
24144  
24145  static void unpin_tag(unsigned long handle)
24146  {
24147 +#ifdef CONFIG_PREEMPT_RT_FULL
24148 +       struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
24149 +
24150 +       return mutex_unlock(&zh->lock);
24151 +#else
24152         bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
24153 +#endif
24154  }
24155  
24156  static void reset_page(struct page *page)
24157 @@ -1423,7 +1491,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
24158         class = pool->size_class[class_idx];
24159         off = (class->size * obj_idx) & ~PAGE_MASK;
24160  
24161 -       area = &get_cpu_var(zs_map_area);
24162 +       area = &get_locked_var(zs_map_area_lock, zs_map_area);
24163         area->vm_mm = mm;
24164         if (off + class->size <= PAGE_SIZE) {
24165                 /* this object is contained entirely within a page */
24166 @@ -1477,7 +1545,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
24167  
24168                 __zs_unmap_object(area, pages, off, class->size);
24169         }
24170 -       put_cpu_var(zs_map_area);
24171 +       put_locked_var(zs_map_area_lock, zs_map_area);
24172  
24173         migrate_read_unlock(zspage);
24174         unpin_tag(handle);
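
In mm/zsmalloc.c the handle is a kmem_cache object whose low bit doubles as a bit spinlock (HANDLE_PIN_BIT). Spinning with preemption disabled is not acceptable on PREEMPT_RT_FULL, so the patch grows the handle into a small struct that carries a real mutex and routes pin_tag()/unpin_tag()/trypin_tag() through it. The generic substitution, sketched with a placeholder type:

#include <linux/mutex.h>

struct pinned_obj {
        unsigned long val;      /* the word that used to carry the lock bit */
        struct mutex lock;      /* replaces the bit spinlock */
};

static void pin(struct pinned_obj *o)    { mutex_lock(&o->lock); }
static void unpin(struct pinned_obj *o)  { mutex_unlock(&o->lock); }
static int  trypin(struct pinned_obj *o) { return mutex_trylock(&o->lock); }
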
24175 diff --git a/net/core/dev.c b/net/core/dev.c
24176 index 60b0a6049e72..660ca3b9c60b 100644
24177 --- a/net/core/dev.c
24178 +++ b/net/core/dev.c
24179 @@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPUS;
24180  static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
24181  
24182  static seqcount_t devnet_rename_seq;
24183 +static DEFINE_MUTEX(devnet_rename_mutex);
24184  
24185  static inline void dev_base_seq_inc(struct net *net)
24186  {
24187 @@ -211,14 +212,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
24188  static inline void rps_lock(struct softnet_data *sd)
24189  {
24190  #ifdef CONFIG_RPS
24191 -       spin_lock(&sd->input_pkt_queue.lock);
24192 +       raw_spin_lock(&sd->input_pkt_queue.raw_lock);
24193  #endif
24194  }
24195  
24196  static inline void rps_unlock(struct softnet_data *sd)
24197  {
24198  #ifdef CONFIG_RPS
24199 -       spin_unlock(&sd->input_pkt_queue.lock);
24200 +       raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
24201  #endif
24202  }
24203  
24204 @@ -888,7 +889,8 @@ int netdev_get_name(struct net *net, char *name, int ifindex)
24205         strcpy(name, dev->name);
24206         rcu_read_unlock();
24207         if (read_seqcount_retry(&devnet_rename_seq, seq)) {
24208 -               cond_resched();
24209 +               mutex_lock(&devnet_rename_mutex);
24210 +               mutex_unlock(&devnet_rename_mutex);
24211                 goto retry;
24212         }
24213  
24214 @@ -1157,20 +1159,17 @@ int dev_change_name(struct net_device *dev, const char *newname)
24215         if (dev->flags & IFF_UP)
24216                 return -EBUSY;
24217  
24218 -       write_seqcount_begin(&devnet_rename_seq);
24219 +       mutex_lock(&devnet_rename_mutex);
24220 +       __raw_write_seqcount_begin(&devnet_rename_seq);
24221  
24222 -       if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
24223 -               write_seqcount_end(&devnet_rename_seq);
24224 -               return 0;
24225 -       }
24226 +       if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
24227 +               goto outunlock;
24228  
24229         memcpy(oldname, dev->name, IFNAMSIZ);
24230  
24231         err = dev_get_valid_name(net, dev, newname);
24232 -       if (err < 0) {
24233 -               write_seqcount_end(&devnet_rename_seq);
24234 -               return err;
24235 -       }
24236 +       if (err < 0)
24237 +               goto outunlock;
24238  
24239         if (oldname[0] && !strchr(oldname, '%'))
24240                 netdev_info(dev, "renamed from %s\n", oldname);
24241 @@ -1183,11 +1182,12 @@ int dev_change_name(struct net_device *dev, const char *newname)
24242         if (ret) {
24243                 memcpy(dev->name, oldname, IFNAMSIZ);
24244                 dev->name_assign_type = old_assign_type;
24245 -               write_seqcount_end(&devnet_rename_seq);
24246 -               return ret;
24247 +               err = ret;
24248 +               goto outunlock;
24249         }
24250  
24251 -       write_seqcount_end(&devnet_rename_seq);
24252 +       __raw_write_seqcount_end(&devnet_rename_seq);
24253 +       mutex_unlock(&devnet_rename_mutex);
24254  
24255         netdev_adjacent_rename_links(dev, oldname);
24256  
24257 @@ -1208,7 +1208,8 @@ int dev_change_name(struct net_device *dev, const char *newname)
24258                 /* err >= 0 after dev_alloc_name() or stores the first errno */
24259                 if (err >= 0) {
24260                         err = ret;
24261 -                       write_seqcount_begin(&devnet_rename_seq);
24262 +                       mutex_lock(&devnet_rename_mutex);
24263 +                       __raw_write_seqcount_begin(&devnet_rename_seq);
24264                         memcpy(dev->name, oldname, IFNAMSIZ);
24265                         memcpy(oldname, newname, IFNAMSIZ);
24266                         dev->name_assign_type = old_assign_type;
24267 @@ -1221,6 +1222,11 @@ int dev_change_name(struct net_device *dev, const char *newname)
24268         }
24269  
24270         return err;
24271 +
24272 +outunlock:
24273 +       __raw_write_seqcount_end(&devnet_rename_seq);
24274 +       mutex_unlock(&devnet_rename_mutex);
24275 +       return err;
24276  }
24277  
24278  /**
24279 @@ -2258,6 +2264,7 @@ static void __netif_reschedule(struct Qdisc *q)
24280         sd->output_queue_tailp = &q->next_sched;
24281         raise_softirq_irqoff(NET_TX_SOFTIRQ);
24282         local_irq_restore(flags);
24283 +       preempt_check_resched_rt();
24284  }
24285  
24286  void __netif_schedule(struct Qdisc *q)
24287 @@ -2339,6 +2346,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
24288         __this_cpu_write(softnet_data.completion_queue, skb);
24289         raise_softirq_irqoff(NET_TX_SOFTIRQ);
24290         local_irq_restore(flags);
24291 +       preempt_check_resched_rt();
24292  }
24293  EXPORT_SYMBOL(__dev_kfree_skb_irq);
24294  
24295 @@ -3073,7 +3081,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
24296          * This permits qdisc->running owner to get the lock more
24297          * often and dequeue packets faster.
24298          */
24299 +#ifdef CONFIG_PREEMPT_RT_FULL
24300 +       contended = true;
24301 +#else
24302         contended = qdisc_is_running(q);
24303 +#endif
24304         if (unlikely(contended))
24305                 spin_lock(&q->busylock);
24306  
24307 @@ -3136,8 +3148,10 @@ static void skb_update_prio(struct sk_buff *skb)
24308  #define skb_update_prio(skb)
24309  #endif
24310  
24311 +#ifndef CONFIG_PREEMPT_RT_FULL
24312  DEFINE_PER_CPU(int, xmit_recursion);
24313  EXPORT_SYMBOL(xmit_recursion);
24314 +#endif
24315  
24316  /**
24317   *     dev_loopback_xmit - loop back @skb
24318 @@ -3371,8 +3385,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
24319                 int cpu = smp_processor_id(); /* ok because BHs are off */
24320  
24321                 if (txq->xmit_lock_owner != cpu) {
24322 -                       if (unlikely(__this_cpu_read(xmit_recursion) >
24323 -                                    XMIT_RECURSION_LIMIT))
24324 +                       if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
24325                                 goto recursion_alert;
24326  
24327                         skb = validate_xmit_skb(skb, dev);
24328 @@ -3382,9 +3395,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
24329                         HARD_TX_LOCK(dev, txq, cpu);
24330  
24331                         if (!netif_xmit_stopped(txq)) {
24332 -                               __this_cpu_inc(xmit_recursion);
24333 +                               xmit_rec_inc();
24334                                 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
24335 -                               __this_cpu_dec(xmit_recursion);
24336 +                               xmit_rec_dec();
24337                                 if (dev_xmit_complete(rc)) {
24338                                         HARD_TX_UNLOCK(dev, txq);
24339                                         goto out;
24340 @@ -3758,6 +3771,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
24341         rps_unlock(sd);
24342  
24343         local_irq_restore(flags);
24344 +       preempt_check_resched_rt();
24345  
24346         atomic_long_inc(&skb->dev->rx_dropped);
24347         kfree_skb(skb);
24348 @@ -3776,7 +3790,7 @@ static int netif_rx_internal(struct sk_buff *skb)
24349                 struct rps_dev_flow voidflow, *rflow = &voidflow;
24350                 int cpu;
24351  
24352 -               preempt_disable();
24353 +               migrate_disable();
24354                 rcu_read_lock();
24355  
24356                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
24357 @@ -3786,13 +3800,13 @@ static int netif_rx_internal(struct sk_buff *skb)
24358                 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
24359  
24360                 rcu_read_unlock();
24361 -               preempt_enable();
24362 +               migrate_enable();
24363         } else
24364  #endif
24365         {
24366                 unsigned int qtail;
24367 -               ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
24368 -               put_cpu();
24369 +               ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
24370 +               put_cpu_light();
24371         }
24372         return ret;
24373  }
24374 @@ -3826,11 +3840,9 @@ int netif_rx_ni(struct sk_buff *skb)
24375  
24376         trace_netif_rx_ni_entry(skb);
24377  
24378 -       preempt_disable();
24379 +       local_bh_disable();
24380         err = netif_rx_internal(skb);
24381 -       if (local_softirq_pending())
24382 -               do_softirq();
24383 -       preempt_enable();
24384 +       local_bh_enable();
24385  
24386         return err;
24387  }
24388 @@ -4309,7 +4321,7 @@ static void flush_backlog(struct work_struct *work)
24389         skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
24390                 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
24391                         __skb_unlink(skb, &sd->input_pkt_queue);
24392 -                       kfree_skb(skb);
24393 +                       __skb_queue_tail(&sd->tofree_queue, skb);
24394                         input_queue_head_incr(sd);
24395                 }
24396         }
24397 @@ -4319,11 +4331,14 @@ static void flush_backlog(struct work_struct *work)
24398         skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
24399                 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
24400                         __skb_unlink(skb, &sd->process_queue);
24401 -                       kfree_skb(skb);
24402 +                       __skb_queue_tail(&sd->tofree_queue, skb);
24403                         input_queue_head_incr(sd);
24404                 }
24405         }
24406 +       if (!skb_queue_empty(&sd->tofree_queue))
24407 +               raise_softirq_irqoff(NET_RX_SOFTIRQ);
24408         local_bh_enable();
24409 +
24410  }
24411  
24412  static void flush_all_backlogs(void)
24413 @@ -4804,6 +4819,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
24414                 sd->rps_ipi_list = NULL;
24415  
24416                 local_irq_enable();
24417 +               preempt_check_resched_rt();
24418  
24419                 /* Send pending IPI's to kick RPS processing on remote cpus. */
24420                 while (remsd) {
24421 @@ -4817,6 +4833,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
24422         } else
24423  #endif
24424                 local_irq_enable();
24425 +       preempt_check_resched_rt();
24426  }
24427  
24428  static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
24429 @@ -4846,7 +4863,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
24430         while (again) {
24431                 struct sk_buff *skb;
24432  
24433 +               local_irq_disable();
24434                 while ((skb = __skb_dequeue(&sd->process_queue))) {
24435 +                       local_irq_enable();
24436                         rcu_read_lock();
24437                         __netif_receive_skb(skb);
24438                         rcu_read_unlock();
24439 @@ -4854,9 +4873,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
24440                         if (++work >= quota)
24441                                 return work;
24442  
24443 +                       local_irq_disable();
24444                 }
24445  
24446 -               local_irq_disable();
24447                 rps_lock(sd);
24448                 if (skb_queue_empty(&sd->input_pkt_queue)) {
24449                         /*
24450 @@ -4894,9 +4913,11 @@ void __napi_schedule(struct napi_struct *n)
24451         local_irq_save(flags);
24452         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
24453         local_irq_restore(flags);
24454 +       preempt_check_resched_rt();
24455  }
24456  EXPORT_SYMBOL(__napi_schedule);
24457  
24458 +#ifndef CONFIG_PREEMPT_RT_FULL
24459  /**
24460   * __napi_schedule_irqoff - schedule for receive
24461   * @n: entry to schedule
24462 @@ -4908,6 +4929,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
24463         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
24464  }
24465  EXPORT_SYMBOL(__napi_schedule_irqoff);
24466 +#endif
24467  
24468  void __napi_complete(struct napi_struct *n)
24469  {
24470 @@ -5197,13 +5219,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
24471         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
24472         unsigned long time_limit = jiffies + 2;
24473         int budget = netdev_budget;
24474 +       struct sk_buff_head tofree_q;
24475 +       struct sk_buff *skb;
24476         LIST_HEAD(list);
24477         LIST_HEAD(repoll);
24478  
24479 +       __skb_queue_head_init(&tofree_q);
24480 +
24481         local_irq_disable();
24482 +       skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
24483         list_splice_init(&sd->poll_list, &list);
24484         local_irq_enable();
24485  
24486 +       while ((skb = __skb_dequeue(&tofree_q)))
24487 +               kfree_skb(skb);
24488 +
24489         for (;;) {
24490                 struct napi_struct *n;
24491  
24492 @@ -5234,7 +5264,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
24493         list_splice_tail(&repoll, &list);
24494         list_splice(&list, &sd->poll_list);
24495         if (!list_empty(&sd->poll_list))
24496 -               __raise_softirq_irqoff(NET_RX_SOFTIRQ);
24497 +               __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
24498  
24499         net_rps_action_and_irq_enable(sd);
24500  }
24501 @@ -7995,16 +8025,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
24502  
24503         raise_softirq_irqoff(NET_TX_SOFTIRQ);
24504         local_irq_enable();
24505 +       preempt_check_resched_rt();
24506  
24507         /* Process offline CPU's input_pkt_queue */
24508         while ((skb = __skb_dequeue(&oldsd->process_queue))) {
24509                 netif_rx_ni(skb);
24510                 input_queue_head_incr(oldsd);
24511         }
24512 -       while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
24513 +       while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
24514                 netif_rx_ni(skb);
24515                 input_queue_head_incr(oldsd);
24516         }
24517 +       while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
24518 +               kfree_skb(skb);
24519 +       }
24520  
24521         return NOTIFY_OK;
24522  }
24523 @@ -8309,8 +8343,9 @@ static int __init net_dev_init(void)
24524  
24525                 INIT_WORK(flush, flush_backlog);
24526  
24527 -               skb_queue_head_init(&sd->input_pkt_queue);
24528 -               skb_queue_head_init(&sd->process_queue);
24529 +               skb_queue_head_init_raw(&sd->input_pkt_queue);
24530 +               skb_queue_head_init_raw(&sd->process_queue);
24531 +               skb_queue_head_init_raw(&sd->tofree_queue);
24532                 INIT_LIST_HEAD(&sd->poll_list);
24533                 sd->output_queue_tailp = &sd->output_queue;
24534  #ifdef CONFIG_RPS
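
The net/core/dev.c changes above follow several recurring RT themes: rps_lock() now operates on the raw lock inside the backlog queues (which are initialized with skb_queue_head_init_raw()), skbs dropped while flushing the backlog are parked on the new tofree_queue and freed from net_rx_action(), the xmit_recursion counter is hidden behind the xmit_rec_*() helpers introduced elsewhere in this patch, and preempt_check_resched_rt() follows local_irq_restore()/enable() so a reschedule that became pending while interrupts were off is not delayed. The devnet_rename_seq seqcount additionally gains a companion mutex so readers never busy-wait against a preempted writer; a sketch of that writer/reader shape with placeholder names, using the __raw_write_seqcount_begin()/end() calls seen in the hunk above:

#include <linux/mutex.h>
#include <linux/seqlock.h>

static seqcount_t my_seq = SEQCNT_ZERO(my_seq);
static DEFINE_MUTEX(my_seq_mutex);

static void writer_update(void)
{
        mutex_lock(&my_seq_mutex);              /* serializes writers */
        __raw_write_seqcount_begin(&my_seq);
        /* ... update the data guarded by my_seq ... */
        __raw_write_seqcount_end(&my_seq);
        mutex_unlock(&my_seq_mutex);
}

static void reader_wait_for_writer(void)
{
        /* instead of cond_resched(): block on the writer's mutex (which
         * can priority-boost a preempted writer on RT), then retry. */
        mutex_lock(&my_seq_mutex);
        mutex_unlock(&my_seq_mutex);
}
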
24535 diff --git a/net/core/filter.c b/net/core/filter.c
24536 index b391209838ef..b86e9681a88e 100644
24537 --- a/net/core/filter.c
24538 +++ b/net/core/filter.c
24539 @@ -1645,7 +1645,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
24540  {
24541         int ret;
24542  
24543 -       if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
24544 +       if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) {
24545                 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
24546                 kfree_skb(skb);
24547                 return -ENETDOWN;
24548 @@ -1653,9 +1653,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
24549  
24550         skb->dev = dev;
24551  
24552 -       __this_cpu_inc(xmit_recursion);
24553 +       xmit_rec_inc();
24554         ret = dev_queue_xmit(skb);
24555 -       __this_cpu_dec(xmit_recursion);
24556 +       xmit_rec_dec();
24557  
24558         return ret;
24559  }
24560 diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
24561 index cad8e791f28e..2a9364fe62a5 100644
24562 --- a/net/core/gen_estimator.c
24563 +++ b/net/core/gen_estimator.c
24564 @@ -84,7 +84,7 @@ struct gen_estimator
24565         struct gnet_stats_basic_packed  *bstats;
24566         struct gnet_stats_rate_est64    *rate_est;
24567         spinlock_t              *stats_lock;
24568 -       seqcount_t              *running;
24569 +       net_seqlock_t           *running;
24570         int                     ewma_log;
24571         u32                     last_packets;
24572         unsigned long           avpps;
24573 @@ -213,7 +213,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
24574                       struct gnet_stats_basic_cpu __percpu *cpu_bstats,
24575                       struct gnet_stats_rate_est64 *rate_est,
24576                       spinlock_t *stats_lock,
24577 -                     seqcount_t *running,
24578 +                     net_seqlock_t *running,
24579                       struct nlattr *opt)
24580  {
24581         struct gen_estimator *est;
24582 @@ -309,7 +309,7 @@ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
24583                           struct gnet_stats_basic_cpu __percpu *cpu_bstats,
24584                           struct gnet_stats_rate_est64 *rate_est,
24585                           spinlock_t *stats_lock,
24586 -                         seqcount_t *running, struct nlattr *opt)
24587 +                         net_seqlock_t *running, struct nlattr *opt)
24588  {
24589         gen_kill_estimator(bstats, rate_est);
24590         return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
24591 diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
24592 index 508e051304fb..bc3b17b78c94 100644
24593 --- a/net/core/gen_stats.c
24594 +++ b/net/core/gen_stats.c
24595 @@ -130,7 +130,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
24596  }
24597  
24598  void
24599 -__gnet_stats_copy_basic(const seqcount_t *running,
24600 +__gnet_stats_copy_basic(net_seqlock_t *running,
24601                         struct gnet_stats_basic_packed *bstats,
24602                         struct gnet_stats_basic_cpu __percpu *cpu,
24603                         struct gnet_stats_basic_packed *b)
24604 @@ -143,10 +143,10 @@ __gnet_stats_copy_basic(const seqcount_t *running,
24605         }
24606         do {
24607                 if (running)
24608 -                       seq = read_seqcount_begin(running);
24609 +                       seq = net_seq_begin(running);
24610                 bstats->bytes = b->bytes;
24611                 bstats->packets = b->packets;
24612 -       } while (running && read_seqcount_retry(running, seq));
24613 +       } while (running && net_seq_retry(running, seq));
24614  }
24615  EXPORT_SYMBOL(__gnet_stats_copy_basic);
24616  
24617 @@ -164,7 +164,7 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
24618   * if the room in the socket buffer was not sufficient.
24619   */
24620  int
24621 -gnet_stats_copy_basic(const seqcount_t *running,
24622 +gnet_stats_copy_basic(net_seqlock_t *running,
24623                       struct gnet_dump *d,
24624                       struct gnet_stats_basic_cpu __percpu *cpu,
24625                       struct gnet_stats_basic_packed *b)
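
net/core/gen_estimator.c and gen_stats.c above (together with the net/sched hunks further down) change the qdisc "running" protection from a bare seqcount_t to net_seqlock_t and read it through net_seq_begin()/net_seq_retry(). Those helpers are introduced elsewhere in this patch; as the sch_generic.c hunk below shows, the type is a full seqlock on PREEMPT_RT_BASE and a plain seqcount otherwise, so the writer side is backed by a real lock and a reader no longer spins indefinitely against a preempted writer. A sketch of the reader loop shape, with placeholder names:

#include <linux/types.h>

/* assumes this patch's net_seqlock_t, net_seq_begin() and net_seq_retry()
 * are in scope (seqlock on RT, plain seqcount otherwise) */
static u64 read_stable(net_seqlock_t *running, const u64 *counter)
{
        unsigned int seq;
        u64 val;

        do {
                seq = net_seq_begin(running);
                val = *counter;
        } while (net_seq_retry(running, seq));

        return val;
}
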
24626 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
24627 index 1e3e0087245b..1077b39db717 100644
24628 --- a/net/core/skbuff.c
24629 +++ b/net/core/skbuff.c
24630 @@ -64,6 +64,7 @@
24631  #include <linux/errqueue.h>
24632  #include <linux/prefetch.h>
24633  #include <linux/if_vlan.h>
24634 +#include <linux/locallock.h>
24635  
24636  #include <net/protocol.h>
24637  #include <net/dst.h>
24638 @@ -360,6 +361,8 @@ struct napi_alloc_cache {
24639  
24640  static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
24641  static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
24642 +static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
24643 +static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
24644  
24645  static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
24646  {
24647 @@ -367,10 +370,10 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
24648         unsigned long flags;
24649         void *data;
24650  
24651 -       local_irq_save(flags);
24652 +       local_lock_irqsave(netdev_alloc_lock, flags);
24653         nc = this_cpu_ptr(&netdev_alloc_cache);
24654         data = __alloc_page_frag(nc, fragsz, gfp_mask);
24655 -       local_irq_restore(flags);
24656 +       local_unlock_irqrestore(netdev_alloc_lock, flags);
24657         return data;
24658  }
24659  
24660 @@ -389,9 +392,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
24661  
24662  static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
24663  {
24664 -       struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
24665 +       struct napi_alloc_cache *nc;
24666 +       void *data;
24667  
24668 -       return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
24669 +       nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
24670 +       data = __alloc_page_frag(&nc->page, fragsz, gfp_mask);
24671 +       put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
24672 +       return data;
24673  }
24674  
24675  void *napi_alloc_frag(unsigned int fragsz)
24676 @@ -438,13 +445,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
24677         if (sk_memalloc_socks())
24678                 gfp_mask |= __GFP_MEMALLOC;
24679  
24680 -       local_irq_save(flags);
24681 +       local_lock_irqsave(netdev_alloc_lock, flags);
24682  
24683         nc = this_cpu_ptr(&netdev_alloc_cache);
24684         data = __alloc_page_frag(nc, len, gfp_mask);
24685         pfmemalloc = nc->pfmemalloc;
24686  
24687 -       local_irq_restore(flags);
24688 +       local_unlock_irqrestore(netdev_alloc_lock, flags);
24689  
24690         if (unlikely(!data))
24691                 return NULL;
24692 @@ -485,9 +492,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
24693  struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
24694                                  gfp_t gfp_mask)
24695  {
24696 -       struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
24697 +       struct napi_alloc_cache *nc;
24698         struct sk_buff *skb;
24699         void *data;
24700 +       bool pfmemalloc;
24701  
24702         len += NET_SKB_PAD + NET_IP_ALIGN;
24703  
24704 @@ -505,7 +513,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
24705         if (sk_memalloc_socks())
24706                 gfp_mask |= __GFP_MEMALLOC;
24707  
24708 +       nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
24709         data = __alloc_page_frag(&nc->page, len, gfp_mask);
24710 +       pfmemalloc = nc->page.pfmemalloc;
24711 +       put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
24712         if (unlikely(!data))
24713                 return NULL;
24714  
24715 @@ -516,7 +527,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
24716         }
24717  
24718         /* use OR instead of assignment to avoid clearing of bits in mask */
24719 -       if (nc->page.pfmemalloc)
24720 +       if (pfmemalloc)
24721                 skb->pfmemalloc = 1;
24722         skb->head_frag = 1;
24723  
24724 @@ -760,23 +771,26 @@ EXPORT_SYMBOL(consume_skb);
24725  
24726  void __kfree_skb_flush(void)
24727  {
24728 -       struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
24729 +       struct napi_alloc_cache *nc;
24730  
24731 +       nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
24732         /* flush skb_cache if containing objects */
24733         if (nc->skb_count) {
24734                 kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
24735                                      nc->skb_cache);
24736                 nc->skb_count = 0;
24737         }
24738 +       put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
24739  }
24740  
24741  static inline void _kfree_skb_defer(struct sk_buff *skb)
24742  {
24743 -       struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
24744 +       struct napi_alloc_cache *nc;
24745  
24746         /* drop skb->head and call any destructors for packet */
24747         skb_release_all(skb);
24748  
24749 +       nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
24750         /* record skb to CPU local list */
24751         nc->skb_cache[nc->skb_count++] = skb;
24752  
24753 @@ -791,6 +805,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb)
24754                                      nc->skb_cache);
24755                 nc->skb_count = 0;
24756         }
24757 +       put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
24758  }
24759  void __kfree_skb_defer(struct sk_buff *skb)
24760  {
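
net/core/skbuff.c above puts the two per-CPU allocator caches behind their own local locks: the netdev_alloc_cache paths swap local_irq_save()/restore() for local_lock_irqsave()/local_unlock_irqrestore(), and the napi_alloc_cache users go through get_locked_var(). Note that __napi_alloc_skb() now snapshots nc->page.pfmemalloc into a local variable while the lock is still held, since the per-CPU cache can change once the lock is dropped. A sketch of the irqsave variant; my_cache_lock and the allocation step are placeholders:

#include <linux/locallock.h>
#include <linux/types.h>

static DEFINE_LOCAL_IRQ_LOCK(my_cache_lock);

static void *alloc_frag_example(unsigned int sz, gfp_t gfp)
{
        unsigned long flags;
        void *data = NULL;

        /* irqs off on !RT; per-CPU sleeping lock with irqs left on, on RT */
        local_lock_irqsave(my_cache_lock, flags);
        /* ... carve @sz bytes out of the per-CPU page_frag cache ... */
        local_unlock_irqrestore(my_cache_lock, flags);
        return data;
}
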
24761 diff --git a/net/core/sock.c b/net/core/sock.c
24762 index bc6543f7de36..2c32ee79620f 100644
24763 --- a/net/core/sock.c
24764 +++ b/net/core/sock.c
24765 @@ -2488,12 +2488,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
24766         if (sk->sk_lock.owned)
24767                 __lock_sock(sk);
24768         sk->sk_lock.owned = 1;
24769 -       spin_unlock(&sk->sk_lock.slock);
24770 +       spin_unlock_bh(&sk->sk_lock.slock);
24771         /*
24772          * The sk_lock has mutex_lock() semantics here:
24773          */
24774         mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
24775 -       local_bh_enable();
24776  }
24777  EXPORT_SYMBOL(lock_sock_nested);
24778  
24779 diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
24780 index 48734ee6293f..e6864ff11352 100644
24781 --- a/net/ipv4/icmp.c
24782 +++ b/net/ipv4/icmp.c
24783 @@ -69,6 +69,7 @@
24784  #include <linux/jiffies.h>
24785  #include <linux/kernel.h>
24786  #include <linux/fcntl.h>
24787 +#include <linux/sysrq.h>
24788  #include <linux/socket.h>
24789  #include <linux/in.h>
24790  #include <linux/inet.h>
24791 @@ -77,6 +78,7 @@
24792  #include <linux/string.h>
24793  #include <linux/netfilter_ipv4.h>
24794  #include <linux/slab.h>
24795 +#include <linux/locallock.h>
24796  #include <net/snmp.h>
24797  #include <net/ip.h>
24798  #include <net/route.h>
24799 @@ -204,6 +206,8 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
24800   *
24801   *     On SMP we have one ICMP socket per-cpu.
24802   */
24803 +static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock);
24804 +
24805  static struct sock *icmp_sk(struct net *net)
24806  {
24807         return *this_cpu_ptr(net->ipv4.icmp_sk);
24808 @@ -215,12 +219,14 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
24809  
24810         local_bh_disable();
24811  
24812 +       local_lock(icmp_sk_lock);
24813         sk = icmp_sk(net);
24814  
24815         if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
24816                 /* This can happen if the output path signals a
24817                  * dst_link_failure() for an outgoing ICMP packet.
24818                  */
24819 +               local_unlock(icmp_sk_lock);
24820                 local_bh_enable();
24821                 return NULL;
24822         }
24823 @@ -230,6 +236,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
24824  static inline void icmp_xmit_unlock(struct sock *sk)
24825  {
24826         spin_unlock_bh(&sk->sk_lock.slock);
24827 +       local_unlock(icmp_sk_lock);
24828  }
24829  
24830  int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
24831 @@ -358,6 +365,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
24832         struct sock *sk;
24833         struct sk_buff *skb;
24834  
24835 +       local_lock(icmp_sk_lock);
24836         sk = icmp_sk(dev_net((*rt)->dst.dev));
24837         if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
24838                            icmp_param->data_len+icmp_param->head_len,
24839 @@ -380,6 +388,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
24840                 skb->ip_summed = CHECKSUM_NONE;
24841                 ip_push_pending_frames(sk, fl4);
24842         }
24843 +       local_unlock(icmp_sk_lock);
24844  }
24845  
24846  /*
24847 @@ -891,6 +900,30 @@ static bool icmp_redirect(struct sk_buff *skb)
24848  }
24849  
24850  /*
24851 + * 32bit and 64bit have different timestamp length, so we check for
24852 + * the cookie at offset 20 and verify it is repeated at offset 50
24853 + */
24854 +#define CO_POS0                20
24855 +#define CO_POS1                50
24856 +#define CO_SIZE                sizeof(int)
24857 +#define ICMP_SYSRQ_SIZE        57
24858 +
24859 +/*
24860 + * We got an ICMP_SYSRQ_SIZE sized ping request. Check for the cookie
24861 + * pattern and if it matches send the next byte as a trigger to sysrq.
24862 + */
24863 +static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
24864 +{
24865 +       int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
24866 +       char *p = skb->data;
24867 +
24868 +       if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) &&
24869 +           !memcmp(&cookie, p + CO_POS1, CO_SIZE) &&
24870 +           p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE])
24871 +               handle_sysrq(p[CO_POS0 + CO_SIZE]);
24872 +}
24873 +
24874 +/*
24875   *     Handle ICMP_ECHO ("ping") requests.
24876   *
24877   *     RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
24878 @@ -917,6 +950,11 @@ static bool icmp_echo(struct sk_buff *skb)
24879                 icmp_param.data_len        = skb->len;
24880                 icmp_param.head_len        = sizeof(struct icmphdr);
24881                 icmp_reply(&icmp_param, skb);
24882 +
24883 +               if (skb->len == ICMP_SYSRQ_SIZE &&
24884 +                   net->ipv4.sysctl_icmp_echo_sysrq) {
24885 +                       icmp_check_sysrq(net, skb);
24886 +               }
24887         }
24888         /* should there be an ICMP stat for ignored echos? */
24889         return true;
24890 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
24891 index 80bc36b25de2..215b90adfb05 100644
24892 --- a/net/ipv4/sysctl_net_ipv4.c
24893 +++ b/net/ipv4/sysctl_net_ipv4.c
24894 @@ -681,6 +681,13 @@ static struct ctl_table ipv4_net_table[] = {
24895                 .proc_handler   = proc_dointvec
24896         },
24897         {
24898 +               .procname       = "icmp_echo_sysrq",
24899 +               .data           = &init_net.ipv4.sysctl_icmp_echo_sysrq,
24900 +               .maxlen         = sizeof(int),
24901 +               .mode           = 0644,
24902 +               .proc_handler   = proc_dointvec
24903 +       },
24904 +       {
24905                 .procname       = "icmp_ignore_bogus_error_responses",
24906                 .data           = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
24907                 .maxlen         = sizeof(int),
24908 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
24909 index 2259114c7242..829e60985a81 100644
24910 --- a/net/ipv4/tcp_ipv4.c
24911 +++ b/net/ipv4/tcp_ipv4.c
24912 @@ -62,6 +62,7 @@
24913  #include <linux/init.h>
24914  #include <linux/times.h>
24915  #include <linux/slab.h>
24916 +#include <linux/locallock.h>
24917  
24918  #include <net/net_namespace.h>
24919  #include <net/icmp.h>
24920 @@ -564,6 +565,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
24921  }
24922  EXPORT_SYMBOL(tcp_v4_send_check);
24923  
24924 +static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock);
24925  /*
24926   *     This routine will send an RST to the other tcp.
24927   *
24928 @@ -691,6 +693,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
24929                      offsetof(struct inet_timewait_sock, tw_bound_dev_if));
24930  
24931         arg.tos = ip_hdr(skb)->tos;
24932 +
24933 +       local_lock(tcp_sk_lock);
24934         local_bh_disable();
24935         ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
24936                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
24937 @@ -700,6 +704,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
24938         __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
24939         __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
24940         local_bh_enable();
24941 +       local_unlock(tcp_sk_lock);
24942  
24943  #ifdef CONFIG_TCP_MD5SIG
24944  out:
24945 @@ -775,6 +780,7 @@ static void tcp_v4_send_ack(struct net *net,
24946         if (oif)
24947                 arg.bound_dev_if = oif;
24948         arg.tos = tos;
24949 +       local_lock(tcp_sk_lock);
24950         local_bh_disable();
24951         ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
24952                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
24953 @@ -783,6 +789,7 @@ static void tcp_v4_send_ack(struct net *net,
24954  
24955         __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
24956         local_bh_enable();
24957 +       local_unlock(tcp_sk_lock);
24958  }
24959  
24960  static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
24961 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
24962 index 2384b4aae064..bf7ab51d7035 100644
24963 --- a/net/mac80211/rx.c
24964 +++ b/net/mac80211/rx.c
24965 @@ -4166,7 +4166,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
24966         struct ieee80211_supported_band *sband;
24967         struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
24968  
24969 -       WARN_ON_ONCE(softirq_count() == 0);
24970 +       WARN_ON_ONCE_NONRT(softirq_count() == 0);
24971  
24972         if (WARN_ON(status->band >= NUM_NL80211_BANDS))
24973                 goto drop;
24974 diff --git a/net/netfilter/core.c b/net/netfilter/core.c
24975 index 004af030ef1a..b64f751bda45 100644
24976 --- a/net/netfilter/core.c
24977 +++ b/net/netfilter/core.c
24978 @@ -22,12 +22,18 @@
24979  #include <linux/proc_fs.h>
24980  #include <linux/mutex.h>
24981  #include <linux/slab.h>
24982 +#include <linux/locallock.h>
24983  #include <linux/rcupdate.h>
24984  #include <net/net_namespace.h>
24985  #include <net/sock.h>
24986  
24987  #include "nf_internals.h"
24988  
24989 +#ifdef CONFIG_PREEMPT_RT_BASE
24990 +DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
24991 +EXPORT_PER_CPU_SYMBOL(xt_write_lock);
24992 +#endif
24993 +
24994  static DEFINE_MUTEX(afinfo_mutex);
24995  
24996  const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
24997 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
24998 index 34de326b4f09..fe9597af0840 100644
24999 --- a/net/packet/af_packet.c
25000 +++ b/net/packet/af_packet.c
25001 @@ -63,6 +63,7 @@
25002  #include <linux/if_packet.h>
25003  #include <linux/wireless.h>
25004  #include <linux/kernel.h>
25005 +#include <linux/delay.h>
25006  #include <linux/kmod.h>
25007  #include <linux/slab.h>
25008  #include <linux/vmalloc.h>
25009 @@ -694,7 +695,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
25010         if (BLOCK_NUM_PKTS(pbd)) {
25011                 while (atomic_read(&pkc->blk_fill_in_prog)) {
25012                         /* Waiting for skb_copy_bits to finish... */
25013 -                       cpu_relax();
25014 +                       cpu_chill();
25015                 }
25016         }
25017  
25018 @@ -956,7 +957,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
25019                 if (!(status & TP_STATUS_BLK_TMO)) {
25020                         while (atomic_read(&pkc->blk_fill_in_prog)) {
25021                                 /* Waiting for skb_copy_bits to finish... */
25022 -                               cpu_relax();
25023 +                               cpu_chill();
25024                         }
25025                 }
25026                 prb_close_block(pkc, pbd, po, status);
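
net/packet/af_packet.c (and net/rds/ib_rdma.c just below) replace cpu_relax() with cpu_chill() in loops that wait for another task to clear a flag. On RT a high-priority waiter spinning with cpu_relax() can keep the flag owner from ever running; cpu_chill(), introduced elsewhere in this patch and declared in linux/delay.h (hence the added include), sleeps briefly on RT and is plain cpu_relax() on !RT, so the owner gets CPU time to make progress. Minimal sketch:

#include <linux/atomic.h>
#include <linux/delay.h>

static void wait_for_completion_flag(atomic_t *in_progress)
{
        while (atomic_read(in_progress))
                cpu_chill();    /* sleeps on RT, cpu_relax() otherwise */
}
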
25027 diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
25028 index 977f69886c00..f3e7a36b0396 100644
25029 --- a/net/rds/ib_rdma.c
25030 +++ b/net/rds/ib_rdma.c
25031 @@ -34,6 +34,7 @@
25032  #include <linux/slab.h>
25033  #include <linux/rculist.h>
25034  #include <linux/llist.h>
25035 +#include <linux/delay.h>
25036  
25037  #include "rds_single_path.h"
25038  #include "ib_mr.h"
25039 @@ -210,7 +211,7 @@ static inline void wait_clean_list_grace(void)
25040         for_each_online_cpu(cpu) {
25041                 flag = &per_cpu(clean_list_grace, cpu);
25042                 while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
25043 -                       cpu_relax();
25044 +                       cpu_chill();
25045         }
25046  }
25047  
25048 diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
25049 index 7d921e56e715..13df56a738e5 100644
25050 --- a/net/rxrpc/security.c
25051 +++ b/net/rxrpc/security.c
25052 @@ -19,9 +19,6 @@
25053  #include <keys/rxrpc-type.h>
25054  #include "ar-internal.h"
25055  
25056 -static LIST_HEAD(rxrpc_security_methods);
25057 -static DECLARE_RWSEM(rxrpc_security_sem);
25058 -
25059  static const struct rxrpc_security *rxrpc_security_types[] = {
25060         [RXRPC_SECURITY_NONE]   = &rxrpc_no_security,
25061  #ifdef CONFIG_RXKAD
25062 diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
25063 index 206dc24add3a..00ea9bde5bb3 100644
25064 --- a/net/sched/sch_api.c
25065 +++ b/net/sched/sch_api.c
25066 @@ -981,7 +981,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
25067                         rcu_assign_pointer(sch->stab, stab);
25068                 }
25069                 if (tca[TCA_RATE]) {
25070 -                       seqcount_t *running;
25071 +                       net_seqlock_t *running;
25072  
25073                         err = -EOPNOTSUPP;
25074                         if (sch->flags & TCQ_F_MQROOT)
25075 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
25076 index 6cfb6e9038c2..20727e1347de 100644
25077 --- a/net/sched/sch_generic.c
25078 +++ b/net/sched/sch_generic.c
25079 @@ -425,7 +425,11 @@ struct Qdisc noop_qdisc = {
25080         .ops            =       &noop_qdisc_ops,
25081         .q.lock         =       __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
25082         .dev_queue      =       &noop_netdev_queue,
25083 +#ifdef CONFIG_PREEMPT_RT_BASE
25084 +       .running        =       __SEQLOCK_UNLOCKED(noop_qdisc.running),
25085 +#else
25086         .running        =       SEQCNT_ZERO(noop_qdisc.running),
25087 +#endif
25088         .busylock       =       __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
25089  };
25090  EXPORT_SYMBOL(noop_qdisc);
25091 @@ -624,9 +628,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
25092         lockdep_set_class(&sch->busylock,
25093                           dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
25094  
25095 +#ifdef CONFIG_PREEMPT_RT_BASE
25096 +       seqlock_init(&sch->running);
25097 +       lockdep_set_class(&sch->running.seqcount,
25098 +                         dev->qdisc_running_key ?: &qdisc_running_key);
25099 +       lockdep_set_class(&sch->running.lock,
25100 +                         dev->qdisc_running_key ?: &qdisc_running_key);
25101 +#else
25102         seqcount_init(&sch->running);
25103         lockdep_set_class(&sch->running,
25104                           dev->qdisc_running_key ?: &qdisc_running_key);
25105 +#endif
25106  
25107         sch->ops = ops;
25108         sch->enqueue = ops->enqueue;
25109 @@ -925,7 +937,7 @@ void dev_deactivate_many(struct list_head *head)
25110         /* Wait for outstanding qdisc_run calls. */
25111         list_for_each_entry(dev, head, close_list)
25112                 while (some_qdisc_is_busy(dev))
25113 -                       yield();
25114 +                       msleep(1);
25115  }
25116  
25117  void dev_deactivate(struct net_device *dev)
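
net/sched/sch_generic.c rounds out the net_seqlock_t conversion: on PREEMPT_RT_BASE the qdisc "running" member is initialized as a seqlock (with lockdep classes set on both its seqcount and its lock) instead of a seqcount, __dev_xmit_skb() always treats the qdisc as contended, and dev_deactivate_many() waits for busy qdiscs with msleep(1) rather than yield(), since yield() from a high-priority task on RT may never let the lower-priority qdisc owner run. Sketch of the wait-loop change, with a placeholder predicate:

#include <linux/delay.h>
#include <linux/types.h>

static void wait_until_quiescent(bool (*still_busy)(void))
{
        while (still_busy())
                msleep(1);      /* sleep a tick instead of yield() so a
                                 * lower-priority owner can actually run */
}
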
25118 diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
25119 index 9c9db55a0c1e..e6583b018a72 100644
25120 --- a/net/sunrpc/svc_xprt.c
25121 +++ b/net/sunrpc/svc_xprt.c
25122 @@ -396,7 +396,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
25123                 goto out;
25124         }
25125  
25126 -       cpu = get_cpu();
25127 +       cpu = get_cpu_light();
25128         pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
25129  
25130         atomic_long_inc(&pool->sp_stats.packets);
25131 @@ -432,7 +432,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
25132  
25133                 atomic_long_inc(&pool->sp_stats.threads_woken);
25134                 wake_up_process(rqstp->rq_task);
25135 -               put_cpu();
25136 +               put_cpu_light();
25137                 goto out;
25138         }
25139         rcu_read_unlock();
25140 @@ -453,7 +453,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
25141                 goto redo_search;
25142         }
25143         rqstp = NULL;
25144 -       put_cpu();
25145 +       put_cpu_light();
25146  out:
25147         trace_svc_xprt_do_enqueue(xprt, rqstp);
25148  }
25149 diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
25150 index 6fdc97ef6023..523e0420d7f0 100755
25151 --- a/scripts/mkcompile_h
25152 +++ b/scripts/mkcompile_h
25153 @@ -4,7 +4,8 @@ TARGET=$1
25154  ARCH=$2
25155  SMP=$3
25156  PREEMPT=$4
25157 -CC=$5
25158 +RT=$5
25159 +CC=$6
25160  
25161  vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
25162  
25163 @@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
25164  CONFIG_FLAGS=""
25165  if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
25166  if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
25167 +if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
25168  UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
25169  
25170  # Truncate to maximum length
25171 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
25172 index 9d33c1e85c79..3d307bda86f9 100644
25173 --- a/sound/core/pcm_native.c
25174 +++ b/sound/core/pcm_native.c
25175 @@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
25176  void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
25177  {
25178         if (!substream->pcm->nonatomic)
25179 -               local_irq_disable();
25180 +               local_irq_disable_nort();
25181         snd_pcm_stream_lock(substream);
25182  }
25183  EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
25184 @@ -150,7 +150,7 @@ void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
25185  {
25186         snd_pcm_stream_unlock(substream);
25187         if (!substream->pcm->nonatomic)
25188 -               local_irq_enable();
25189 +               local_irq_enable_nort();
25190  }
25191  EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
25192  
25193 @@ -158,7 +158,7 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
25194  {
25195         unsigned long flags = 0;
25196         if (!substream->pcm->nonatomic)
25197 -               local_irq_save(flags);
25198 +               local_irq_save_nort(flags);
25199         snd_pcm_stream_lock(substream);
25200         return flags;
25201  }
25202 @@ -176,7 +176,7 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
25203  {
25204         snd_pcm_stream_unlock(substream);
25205         if (!substream->pcm->nonatomic)
25206 -               local_irq_restore(flags);
25207 +               local_irq_restore_nort(flags);
25208  }
25209  EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
25210  
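
The sound/core/pcm_native.c hunks switch the stream-lock helpers to the _nort() variants of the local-irq operations, which are the ordinary local_irq_*() calls on !RT and no-ops on RT, where the spinlock taken right afterwards is a sleeping lock and must not be acquired with interrupts hard-disabled. Sketch of the shape, with a placeholder stream type (the _nort() helpers come from this patch):

#include <linux/spinlock.h>
#include <linux/types.h>

struct my_stream {
        bool nonatomic;
        spinlock_t lock;
};

static void my_stream_lock_irq(struct my_stream *s)
{
        if (!s->nonatomic)
                local_irq_disable_nort();   /* real irq-off only on !RT */
        spin_lock(&s->lock);
}

static void my_stream_unlock_irq(struct my_stream *s)
{
        spin_unlock(&s->lock);
        if (!s->nonatomic)
                local_irq_enable_nort();
}
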