-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/alpha/include/asm/spinlock_types.h linux-4.14/arch/alpha/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/alpha/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/alpha/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef _ALPHA_SPINLOCK_TYPES_H
- #define _ALPHA_SPINLOCK_TYPES_H
+diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
+index 2cc08d4a326e..e28f7f29f2b3 100644
+--- a/Documentation/trace/events.txt
++++ b/Documentation/trace/events.txt
+@@ -517,1550 +517,4 @@ The following commands are supported:
+ totals derived from one or more trace event format fields and/or
+ event counts (hitcount).
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
+- The format of a hist trigger is as follows:
-
- typedef struct {
- volatile unsigned int lock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/include/asm/irq.h linux-4.14/arch/arm/include/asm/irq.h
---- linux-4.14.orig/arch/arm/include/asm/irq.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/include/asm/irq.h 2018-09-05 11:05:07.000000000 +0200
-@@ -23,6 +23,8 @@
- #endif
-
- #ifndef __ASSEMBLY__
-+#include <linux/cpumask.h>
-+
- struct irqaction;
- struct pt_regs;
- extern void migrate_irqs(void);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/include/asm/spinlock_types.h linux-4.14/arch/arm/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/arm/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
+- hist:keys=<field1[,field2,...]>[:values=<field1[,field2,...]>]
+- [:sort=<field1[,field2,...]>][:size=#entries][:pause][:continue]
+- [:clear][:name=histname1] [if <filter>]
-
- #define TICKET_SHIFT 16
-
- typedef struct {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/include/asm/switch_to.h linux-4.14/arch/arm/include/asm/switch_to.h
---- linux-4.14.orig/arch/arm/include/asm/switch_to.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/include/asm/switch_to.h 2018-09-05 11:05:07.000000000 +0200
-@@ -4,6 +4,13 @@
-
- #include <linux/thread_info.h>
-
-+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
-+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
-+#else
-+static inline void
-+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
-+#endif
-+
- /*
- * For v7 SMP cores running a preemptible kernel we may be pre-empted
- * during a TLB maintenance operation, so execute an inner-shareable dsb
-@@ -26,6 +33,7 @@
- #define switch_to(prev,next,last) \
- do { \
- __complete_pending_tlbi(); \
-+ switch_kmaps(prev, next); \
- last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
- } while (0)
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/include/asm/thread_info.h linux-4.14/arch/arm/include/asm/thread_info.h
---- linux-4.14.orig/arch/arm/include/asm/thread_info.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/include/asm/thread_info.h 2018-09-05 11:05:07.000000000 +0200
-@@ -49,6 +49,7 @@
- struct thread_info {
- unsigned long flags; /* low level flags */
- int preempt_count; /* 0 => preemptable, <0 => bug */
-+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
- mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
- __u32 cpu; /* cpu */
-@@ -142,7 +143,8 @@
- #define TIF_SYSCALL_TRACE 4 /* syscall trace active */
- #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
- #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
--#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
-+#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
-+#define TIF_NEED_RESCHED_LAZY 7
-
- #define TIF_NOHZ 12 /* in adaptive nohz mode */
- #define TIF_USING_IWMMXT 17
-@@ -152,6 +154,7 @@
- #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
- #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
- #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_UPROBE (1 << TIF_UPROBE)
- #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
- #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-@@ -167,7 +170,8 @@
- * Change these and you break ASM code in entry-common.S
- */
- #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
-+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-+ _TIF_NEED_RESCHED_LAZY)
-
- #endif /* __KERNEL__ */
- #endif /* __ASM_ARM_THREAD_INFO_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/Kconfig linux-4.14/arch/arm/Kconfig
---- linux-4.14.orig/arch/arm/Kconfig 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -45,7 +45,7 @@
- select HARDIRQS_SW_RESEND
- select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
- select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
-- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
-+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
- select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
- select HAVE_ARCH_MMAP_RND_BITS if MMU
- select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
-@@ -85,6 +85,7 @@
- select HAVE_PERF_EVENTS
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
-+ select HAVE_PREEMPT_LAZY
- select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_SYSCALL_TRACEPOINTS
-@@ -2164,7 +2165,7 @@
-
- config KERNEL_MODE_NEON
- bool "Support for NEON in kernel mode"
-- depends on NEON && AEABI
-+ depends on NEON && AEABI && !PREEMPT_RT_BASE
- help
- Say Y to include support for NEON in kernel mode.
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/asm-offsets.c linux-4.14/arch/arm/kernel/asm-offsets.c
---- linux-4.14.orig/arch/arm/kernel/asm-offsets.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/asm-offsets.c 2018-09-05 11:05:07.000000000 +0200
-@@ -65,6 +65,7 @@
- BLANK();
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
- DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/entry-armv.S linux-4.14/arch/arm/kernel/entry-armv.S
---- linux-4.14.orig/arch/arm/kernel/entry-armv.S 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/entry-armv.S 2018-09-05 11:05:07.000000000 +0200
-@@ -220,11 +220,18 @@
-
- #ifdef CONFIG_PREEMPT
- ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
-- ldr r0, [tsk, #TI_FLAGS] @ get flags
- teq r8, #0 @ if preempt count != 0
-+ bne 1f @ return from exeption
-+ ldr r0, [tsk, #TI_FLAGS] @ get flags
-+ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
-+ blne svc_preempt @ preempt!
-+
-+ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
-+ teq r8, #0 @ if preempt lazy count != 0
- movne r0, #0 @ force flags to 0
-- tst r0, #_TIF_NEED_RESCHED
-+ tst r0, #_TIF_NEED_RESCHED_LAZY
- blne svc_preempt
-+1:
- #endif
-
- svc_exit r5, irq = 1 @ return from exception
-@@ -239,8 +246,14 @@
- 1: bl preempt_schedule_irq @ irq en/disable is done inside
- ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
- tst r0, #_TIF_NEED_RESCHED
-+ bne 1b
-+ tst r0, #_TIF_NEED_RESCHED_LAZY
- reteq r8 @ go again
-- b 1b
-+ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
-+ teq r0, #0 @ if preempt lazy count != 0
-+ beq 1b
-+ ret r8 @ go again
-+
- #endif
-
- __und_fault:
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/entry-common.S linux-4.14/arch/arm/kernel/entry-common.S
---- linux-4.14.orig/arch/arm/kernel/entry-common.S 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/entry-common.S 2018-09-05 11:05:07.000000000 +0200
-@@ -53,7 +53,9 @@
- cmp r2, #TASK_SIZE
- blne addr_limit_check_failed
- ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
-- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
-+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
-+ bne fast_work_pending
-+ tst r1, #_TIF_SECCOMP
- bne fast_work_pending
-
-
-@@ -83,8 +85,11 @@
- cmp r2, #TASK_SIZE
- blne addr_limit_check_failed
- ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
-- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
-+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
-+ bne do_slower_path
-+ tst r1, #_TIF_SECCOMP
- beq no_work_pending
-+do_slower_path:
- UNWIND(.fnend )
- ENDPROC(ret_fast_syscall)
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/patch.c linux-4.14/arch/arm/kernel/patch.c
---- linux-4.14.orig/arch/arm/kernel/patch.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/patch.c 2018-09-05 11:05:07.000000000 +0200
-@@ -16,7 +16,7 @@
- unsigned int insn;
- };
-
--static DEFINE_SPINLOCK(patch_lock);
-+static DEFINE_RAW_SPINLOCK(patch_lock);
-
- static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
- __acquires(&patch_lock)
-@@ -33,7 +33,7 @@
- return addr;
-
- if (flags)
-- spin_lock_irqsave(&patch_lock, *flags);
-+ raw_spin_lock_irqsave(&patch_lock, *flags);
- else
- __acquire(&patch_lock);
-
-@@ -48,7 +48,7 @@
- clear_fixmap(fixmap);
-
- if (flags)
-- spin_unlock_irqrestore(&patch_lock, *flags);
-+ raw_spin_unlock_irqrestore(&patch_lock, *flags);
- else
- __release(&patch_lock);
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/process.c linux-4.14/arch/arm/kernel/process.c
---- linux-4.14.orig/arch/arm/kernel/process.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/process.c 2018-09-05 11:05:07.000000000 +0200
-@@ -325,6 +325,30 @@
- }
-
- #ifdef CONFIG_MMU
-+/*
-+ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
-+ * initialized by pgtable_page_ctor() then a coredump of the vector page will
-+ * fail.
-+ */
-+static int __init vectors_user_mapping_init_page(void)
-+{
-+ struct page *page;
-+ unsigned long addr = 0xffff0000;
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ page = pmd_page(*(pmd));
-+
-+ pgtable_page_ctor(page);
-+
-+ return 0;
-+}
-+late_initcall(vectors_user_mapping_init_page);
-+
- #ifdef CONFIG_KUSER_HELPERS
- /*
- * The vectors page is always readable from user space for the
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/signal.c linux-4.14/arch/arm/kernel/signal.c
---- linux-4.14.orig/arch/arm/kernel/signal.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/signal.c 2018-09-05 11:05:07.000000000 +0200
-@@ -615,7 +615,8 @@
- */
- trace_hardirqs_off();
- do {
-- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
-+ if (likely(thread_flags & (_TIF_NEED_RESCHED |
-+ _TIF_NEED_RESCHED_LAZY))) {
- schedule();
- } else {
- if (unlikely(!user_mode(regs)))
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/smp.c linux-4.14/arch/arm/kernel/smp.c
---- linux-4.14.orig/arch/arm/kernel/smp.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/smp.c 2018-09-05 11:05:07.000000000 +0200
-@@ -236,8 +236,6 @@
- flush_cache_louis();
- local_flush_tlb_all();
-
-- clear_tasks_mm_cpumask(cpu);
--
- return 0;
- }
-
-@@ -255,6 +253,7 @@
- }
- pr_debug("CPU%u: shutdown\n", cpu);
-
-+ clear_tasks_mm_cpumask(cpu);
- /*
- * platform_cpu_kill() is generally expected to do the powering off
- * and/or cutting of clocks to the dying CPU. Optionally, this may
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/unwind.c linux-4.14/arch/arm/kernel/unwind.c
---- linux-4.14.orig/arch/arm/kernel/unwind.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/unwind.c 2018-09-05 11:05:07.000000000 +0200
-@@ -93,7 +93,7 @@
- static const struct unwind_idx *__origin_unwind_idx;
- extern const struct unwind_idx __stop_unwind_idx[];
-
--static DEFINE_SPINLOCK(unwind_lock);
-+static DEFINE_RAW_SPINLOCK(unwind_lock);
- static LIST_HEAD(unwind_tables);
-
- /* Convert a prel31 symbol to an absolute address */
-@@ -201,7 +201,7 @@
- /* module unwind tables */
- struct unwind_table *table;
-
-- spin_lock_irqsave(&unwind_lock, flags);
-+ raw_spin_lock_irqsave(&unwind_lock, flags);
- list_for_each_entry(table, &unwind_tables, list) {
- if (addr >= table->begin_addr &&
- addr < table->end_addr) {
-@@ -213,7 +213,7 @@
- break;
- }
- }
-- spin_unlock_irqrestore(&unwind_lock, flags);
-+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
- }
-
- pr_debug("%s: idx = %p\n", __func__, idx);
-@@ -529,9 +529,9 @@
- tab->begin_addr = text_addr;
- tab->end_addr = text_addr + text_size;
-
-- spin_lock_irqsave(&unwind_lock, flags);
-+ raw_spin_lock_irqsave(&unwind_lock, flags);
- list_add_tail(&tab->list, &unwind_tables);
-- spin_unlock_irqrestore(&unwind_lock, flags);
-+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
-
- return tab;
- }
-@@ -543,9 +543,9 @@
- if (!tab)
- return;
-
-- spin_lock_irqsave(&unwind_lock, flags);
-+ raw_spin_lock_irqsave(&unwind_lock, flags);
- list_del(&tab->list);
-- spin_unlock_irqrestore(&unwind_lock, flags);
-+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
-
- kfree(tab);
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-exynos/platsmp.c linux-4.14/arch/arm/mach-exynos/platsmp.c
---- linux-4.14.orig/arch/arm/mach-exynos/platsmp.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mach-exynos/platsmp.c 2018-09-05 11:05:07.000000000 +0200
-@@ -229,7 +229,7 @@
- return (void __iomem *)(S5P_VA_SCU);
- }
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void exynos_secondary_init(unsigned int cpu)
- {
-@@ -242,8 +242,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
-@@ -307,7 +307,7 @@
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
-@@ -334,7 +334,7 @@
-
- if (timeout == 0) {
- printk(KERN_ERR "cpu1 power enable failed");
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- return -ETIMEDOUT;
- }
- }
-@@ -380,7 +380,7 @@
- * calibrations, then wait for it to finish
- */
- fail:
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? ret : 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-hisi/platmcpm.c linux-4.14/arch/arm/mach-hisi/platmcpm.c
---- linux-4.14.orig/arch/arm/mach-hisi/platmcpm.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mach-hisi/platmcpm.c 2018-09-05 11:05:07.000000000 +0200
-@@ -61,7 +61,7 @@
-
- static void __iomem *sysctrl, *fabric;
- static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
- static u32 fabric_phys_addr;
- /*
- * [0]: bootwrapper physical address
-@@ -113,7 +113,7 @@
- if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
- return -EINVAL;
-
-- spin_lock_irq(&boot_lock);
-+ raw_spin_lock_irq(&boot_lock);
-
- if (hip04_cpu_table[cluster][cpu])
- goto out;
-@@ -147,7 +147,7 @@
-
- out:
- hip04_cpu_table[cluster][cpu]++;
-- spin_unlock_irq(&boot_lock);
-+ raw_spin_unlock_irq(&boot_lock);
-
- return 0;
- }
-@@ -162,11 +162,11 @@
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
- hip04_cpu_table[cluster][cpu]--;
- if (hip04_cpu_table[cluster][cpu] == 1) {
- /* A power_up request went ahead of us. */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- return;
- } else if (hip04_cpu_table[cluster][cpu] > 1) {
- pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
-@@ -174,7 +174,7 @@
- }
-
- last_man = hip04_cluster_is_down(cluster);
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- if (last_man) {
- /* Since it's Cortex A15, disable L2 prefetching. */
- asm volatile(
-@@ -203,7 +203,7 @@
- cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
-
- count = TIMEOUT_MSEC / POLL_MSEC;
-- spin_lock_irq(&boot_lock);
-+ raw_spin_lock_irq(&boot_lock);
- for (tries = 0; tries < count; tries++) {
- if (hip04_cpu_table[cluster][cpu])
- goto err;
-@@ -211,10 +211,10 @@
- data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
- if (data & CORE_WFI_STATUS(cpu))
- break;
-- spin_unlock_irq(&boot_lock);
-+ raw_spin_unlock_irq(&boot_lock);
- /* Wait for clean L2 when the whole cluster is down. */
- msleep(POLL_MSEC);
-- spin_lock_irq(&boot_lock);
-+ raw_spin_lock_irq(&boot_lock);
- }
- if (tries >= count)
- goto err;
-@@ -231,10 +231,10 @@
- goto err;
- if (hip04_cluster_is_down(cluster))
- hip04_set_snoop_filter(cluster, 0);
-- spin_unlock_irq(&boot_lock);
-+ raw_spin_unlock_irq(&boot_lock);
- return 1;
- err:
-- spin_unlock_irq(&boot_lock);
-+ raw_spin_unlock_irq(&boot_lock);
- return 0;
- }
- #endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-omap2/omap-smp.c linux-4.14/arch/arm/mach-omap2/omap-smp.c
---- linux-4.14.orig/arch/arm/mach-omap2/omap-smp.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm/mach-omap2/omap-smp.c 2018-09-05 11:05:07.000000000 +0200
-@@ -69,7 +69,7 @@
- .startup_addr = omap5_secondary_startup,
- };
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- void __iomem *omap4_get_scu_base(void)
- {
-@@ -177,8 +177,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -191,7 +191,7 @@
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * Update the AuxCoreBoot0 with boot state for secondary core.
-@@ -270,7 +270,7 @@
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-prima2/platsmp.c linux-4.14/arch/arm/mach-prima2/platsmp.c
---- linux-4.14.orig/arch/arm/mach-prima2/platsmp.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mach-prima2/platsmp.c 2018-09-05 11:05:07.000000000 +0200
-@@ -22,7 +22,7 @@
-
- static void __iomem *clk_base;
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void sirfsoc_secondary_init(unsigned int cpu)
- {
-@@ -36,8 +36,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static const struct of_device_id clk_ids[] = {
-@@ -75,7 +75,7 @@
- /* make sure write buffer is drained */
- mb();
-
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
-@@ -107,7 +107,7 @@
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-qcom/platsmp.c linux-4.14/arch/arm/mach-qcom/platsmp.c
---- linux-4.14.orig/arch/arm/mach-qcom/platsmp.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mach-qcom/platsmp.c 2018-09-05 11:05:07.000000000 +0200
-@@ -46,7 +46,7 @@
-
- extern void secondary_startup_arm(void);
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- #ifdef CONFIG_HOTPLUG_CPU
- static void qcom_cpu_die(unsigned int cpu)
-@@ -60,8 +60,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static int scss_release_secondary(unsigned int cpu)
-@@ -284,7 +284,7 @@
- * set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * Send the secondary CPU a soft interrupt, thereby causing
-@@ -297,7 +297,7 @@
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return ret;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-spear/platsmp.c linux-4.14/arch/arm/mach-spear/platsmp.c
---- linux-4.14.orig/arch/arm/mach-spear/platsmp.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mach-spear/platsmp.c 2018-09-05 11:05:07.000000000 +0200
-@@ -32,7 +32,7 @@
- sync_cache_w(&pen_release);
- }
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
-
-@@ -47,8 +47,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -59,7 +59,7 @@
- * set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
-@@ -84,7 +84,7 @@
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-sti/platsmp.c linux-4.14/arch/arm/mach-sti/platsmp.c
---- linux-4.14.orig/arch/arm/mach-sti/platsmp.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mach-sti/platsmp.c 2018-09-05 11:05:07.000000000 +0200
-@@ -35,7 +35,7 @@
- sync_cache_w(&pen_release);
- }
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void sti_secondary_init(unsigned int cpu)
- {
-@@ -48,8 +48,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -60,7 +60,7 @@
- * set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
-@@ -91,7 +91,7 @@
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mm/fault.c linux-4.14/arch/arm/mm/fault.c
---- linux-4.14.orig/arch/arm/mm/fault.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mm/fault.c 2018-09-05 11:05:07.000000000 +0200
-@@ -434,6 +434,9 @@
- if (addr < TASK_SIZE)
- return do_page_fault(addr, fsr, regs);
-
-+ if (interrupts_enabled(regs))
-+ local_irq_enable();
-+
- if (user_mode(regs))
- goto bad_area;
-
-@@ -501,6 +504,9 @@
- static int
- do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
- {
-+ if (interrupts_enabled(regs))
-+ local_irq_enable();
-+
- do_bad_area(addr, fsr, regs);
- return 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mm/highmem.c linux-4.14/arch/arm/mm/highmem.c
---- linux-4.14.orig/arch/arm/mm/highmem.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mm/highmem.c 2018-09-05 11:05:07.000000000 +0200
-@@ -34,6 +34,11 @@
- return *ptep;
- }
-
-+static unsigned int fixmap_idx(int type)
-+{
-+ return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-+}
-+
- void *kmap(struct page *page)
- {
- might_sleep();
-@@ -54,12 +59,13 @@
-
- void *kmap_atomic(struct page *page)
- {
-+ pte_t pte = mk_pte(page, kmap_prot);
- unsigned int idx;
- unsigned long vaddr;
- void *kmap;
- int type;
-
-- preempt_disable();
-+ preempt_disable_nort();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -79,7 +85,7 @@
-
- type = kmap_atomic_idx_push();
-
-- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-+ idx = fixmap_idx(type);
- vaddr = __fix_to_virt(idx);
- #ifdef CONFIG_DEBUG_HIGHMEM
- /*
-@@ -93,7 +99,10 @@
- * in place, so the contained TLB flush ensures the TLB is updated
- * with the new mapping.
- */
-- set_fixmap_pte(idx, mk_pte(page, kmap_prot));
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = pte;
-+#endif
-+ set_fixmap_pte(idx, pte);
-
- return (void *)vaddr;
- }
-@@ -106,44 +115,75 @@
-
- if (kvaddr >= (void *)FIXADDR_START) {
- type = kmap_atomic_idx();
-- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-+ idx = fixmap_idx(type);
-
- if (cache_is_vivt())
- __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = __pte(0);
-+#endif
- #ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(vaddr != __fix_to_virt(idx));
-- set_fixmap_pte(idx, __pte(0));
- #else
- (void) idx; /* to kill a warning */
- #endif
-+ set_fixmap_pte(idx, __pte(0));
- kmap_atomic_idx_pop();
- } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
- /* this address was obtained through kmap_high_get() */
- kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
- }
- pagefault_enable();
-- preempt_enable();
-+ preempt_enable_nort();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
- void *kmap_atomic_pfn(unsigned long pfn)
- {
-+ pte_t pte = pfn_pte(pfn, kmap_prot);
- unsigned long vaddr;
- int idx, type;
- struct page *page = pfn_to_page(pfn);
-
-- preempt_disable();
-+ preempt_disable_nort();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- type = kmap_atomic_idx_push();
-- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-+ idx = fixmap_idx(type);
- vaddr = __fix_to_virt(idx);
- #ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
- #endif
-- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = pte;
-+#endif
-+ set_fixmap_pte(idx, pte);
-
- return (void *)vaddr;
- }
-+#if defined CONFIG_PREEMPT_RT_FULL
-+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+ int i;
-+
-+ /*
-+ * Clear @prev's kmap_atomic mappings
-+ */
-+ for (i = 0; i < prev_p->kmap_idx; i++) {
-+ int idx = fixmap_idx(i);
-+
-+ set_fixmap_pte(idx, __pte(0));
-+ }
-+ /*
-+ * Restore @next_p's kmap_atomic mappings
-+ */
-+ for (i = 0; i < next_p->kmap_idx; i++) {
-+ int idx = fixmap_idx(i);
-+
-+ if (!pte_none(next_p->kmap_pte[i]))
-+ set_fixmap_pte(idx, next_p->kmap_pte[i]);
-+ }
-+}
-+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/plat-versatile/platsmp.c linux-4.14/arch/arm/plat-versatile/platsmp.c
---- linux-4.14.orig/arch/arm/plat-versatile/platsmp.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/plat-versatile/platsmp.c 2018-09-05 11:05:07.000000000 +0200
-@@ -32,7 +32,7 @@
- sync_cache_w(&pen_release);
- }
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- void versatile_secondary_init(unsigned int cpu)
- {
-@@ -45,8 +45,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -57,7 +57,7 @@
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * This is really belt and braces; we hold unintended secondary
-@@ -87,7 +87,7 @@
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/crypto/crc32-ce-glue.c linux-4.14/arch/arm64/crypto/crc32-ce-glue.c
---- linux-4.14.orig/arch/arm64/crypto/crc32-ce-glue.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm64/crypto/crc32-ce-glue.c 2018-09-05 11:05:07.000000000 +0200
-@@ -208,7 +208,8 @@
-
- static int __init crc32_pmull_mod_init(void)
- {
-- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_PMULL)) {
-+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
-+ !IS_ENABLED(CONFIG_PREEMPT_RT_BASE) && (elf_hwcap & HWCAP_PMULL)) {
- crc32_pmull_algs[0].update = crc32_pmull_update;
- crc32_pmull_algs[1].update = crc32c_pmull_update;
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/crypto/Kconfig linux-4.14/arch/arm64/crypto/Kconfig
---- linux-4.14.orig/arch/arm64/crypto/Kconfig 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm64/crypto/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -19,19 +19,19 @@
-
- config CRYPTO_SHA1_ARM64_CE
- tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
-- depends on KERNEL_MODE_NEON
-+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
- select CRYPTO_HASH
- select CRYPTO_SHA1
-
- config CRYPTO_SHA2_ARM64_CE
- tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
-- depends on KERNEL_MODE_NEON
-+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
- select CRYPTO_HASH
- select CRYPTO_SHA256_ARM64
-
- config CRYPTO_GHASH_ARM64_CE
- tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
-- depends on KERNEL_MODE_NEON
-+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
- select CRYPTO_HASH
- select CRYPTO_GF128MUL
- select CRYPTO_AES
-@@ -39,7 +39,7 @@
-
- config CRYPTO_CRCT10DIF_ARM64_CE
- tristate "CRCT10DIF digest algorithm using PMULL instructions"
-- depends on KERNEL_MODE_NEON && CRC_T10DIF
-+ depends on KERNEL_MODE_NEON && CRC_T10DIF && !PREEMPT_RT_BASE
- select CRYPTO_HASH
-
- config CRYPTO_CRC32_ARM64_CE
-@@ -53,13 +53,13 @@
-
- config CRYPTO_AES_ARM64_CE
- tristate "AES core cipher using ARMv8 Crypto Extensions"
-- depends on ARM64 && KERNEL_MODE_NEON
-+ depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE
- select CRYPTO_ALGAPI
- select CRYPTO_AES_ARM64
-
- config CRYPTO_AES_ARM64_CE_CCM
- tristate "AES in CCM mode using ARMv8 Crypto Extensions"
-- depends on ARM64 && KERNEL_MODE_NEON
-+ depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE
- select CRYPTO_ALGAPI
- select CRYPTO_AES_ARM64_CE
- select CRYPTO_AES_ARM64
-@@ -67,7 +67,7 @@
-
- config CRYPTO_AES_ARM64_CE_BLK
- tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
-- depends on KERNEL_MODE_NEON
-+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
- select CRYPTO_BLKCIPHER
- select CRYPTO_AES_ARM64_CE
- select CRYPTO_AES_ARM64
-@@ -75,7 +75,7 @@
-
- config CRYPTO_AES_ARM64_NEON_BLK
- tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
-- depends on KERNEL_MODE_NEON
-+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
- select CRYPTO_BLKCIPHER
- select CRYPTO_AES_ARM64
- select CRYPTO_AES
-@@ -83,13 +83,13 @@
-
- config CRYPTO_CHACHA20_NEON
- tristate "NEON accelerated ChaCha20 symmetric cipher"
-- depends on KERNEL_MODE_NEON
-+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
- select CRYPTO_BLKCIPHER
- select CRYPTO_CHACHA20
-
- config CRYPTO_AES_ARM64_BS
- tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm"
-- depends on KERNEL_MODE_NEON
-+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
- select CRYPTO_BLKCIPHER
- select CRYPTO_AES_ARM64_NEON_BLK
- select CRYPTO_AES_ARM64
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/include/asm/spinlock_types.h linux-4.14/arch/arm64/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/arm64/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm64/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -16,10 +16,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
--# error "please don't include this file directly"
--#endif
--
- #include <linux/types.h>
-
- #define TICKET_SHIFT 16
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/include/asm/thread_info.h linux-4.14/arch/arm64/include/asm/thread_info.h
---- linux-4.14.orig/arch/arm64/include/asm/thread_info.h 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm64/include/asm/thread_info.h 2018-09-05 11:05:07.000000000 +0200
-@@ -43,6 +43,7 @@
- u64 ttbr0; /* saved TTBR0_EL1 */
- #endif
- int preempt_count; /* 0 => preemptable, <0 => bug */
-+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
- };
-
- #define INIT_THREAD_INFO(tsk) \
-@@ -82,6 +83,7 @@
- #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
- #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
- #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
-+#define TIF_NEED_RESCHED_LAZY 6
- #define TIF_NOHZ 7
- #define TIF_SYSCALL_TRACE 8
- #define TIF_SYSCALL_AUDIT 9
-@@ -98,6 +100,7 @@
- #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
- #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
- #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
-+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_NOHZ (1 << TIF_NOHZ)
- #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
- #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-@@ -109,8 +112,9 @@
-
- #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
-- _TIF_UPROBE | _TIF_FSCHECK)
-+ _TIF_UPROBE | _TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY)
-
-+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
- #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
- _TIF_NOHZ)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/Kconfig linux-4.14/arch/arm64/Kconfig
---- linux-4.14.orig/arch/arm64/Kconfig 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm64/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -103,6 +103,7 @@
- select HAVE_PERF_EVENTS
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
-+ select HAVE_PREEMPT_LAZY
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RCU_TABLE_FREE
- select HAVE_SYSCALL_TRACEPOINTS
-@@ -791,7 +792,7 @@
-
- config XEN
- bool "Xen guest support on ARM64"
-- depends on ARM64 && OF
-+ depends on ARM64 && OF && !PREEMPT_RT_FULL
- select SWIOTLB_XEN
- select PARAVIRT
- help
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/kernel/asm-offsets.c linux-4.14/arch/arm64/kernel/asm-offsets.c
---- linux-4.14.orig/arch/arm64/kernel/asm-offsets.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm64/kernel/asm-offsets.c 2018-09-05 11:05:07.000000000 +0200
-@@ -39,6 +39,7 @@
- BLANK();
- DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
- DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
-+ DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count));
- DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
- #ifdef CONFIG_ARM64_SW_TTBR0_PAN
- DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/kernel/entry.S linux-4.14/arch/arm64/kernel/entry.S
---- linux-4.14.orig/arch/arm64/kernel/entry.S 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm64/kernel/entry.S 2018-09-05 11:05:07.000000000 +0200
-@@ -637,11 +637,16 @@
-
- #ifdef CONFIG_PREEMPT
- ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
-- cbnz w24, 1f // preempt count != 0
-+ cbnz w24, 2f // preempt count != 0
- ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
-- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
-- bl el1_preempt
-+ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
-+
-+ ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count
-+ cbnz w24, 2f // preempt lazy count != 0
-+ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
- 1:
-+ bl el1_preempt
-+2:
- #endif
- #ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
-@@ -655,6 +660,7 @@
- 1: bl preempt_schedule_irq // irq en/disable is done inside
- ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
- tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
-+ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling?
- ret x24
- #endif
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/kernel/signal.c linux-4.14/arch/arm64/kernel/signal.c
---- linux-4.14.orig/arch/arm64/kernel/signal.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm64/kernel/signal.c 2018-09-05 11:05:07.000000000 +0200
-@@ -756,7 +756,7 @@
- /* Check valid user FS if needed */
- addr_limit_user_check();
-
-- if (thread_flags & _TIF_NEED_RESCHED) {
-+ if (thread_flags & _TIF_NEED_RESCHED_MASK) {
- schedule();
- } else {
- local_irq_enable();
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/blackfin/include/asm/spinlock_types.h linux-4.14/arch/blackfin/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/blackfin/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/blackfin/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -7,10 +7,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- #include <asm/rwlock.h>
-
- typedef struct {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/hexagon/include/asm/spinlock_types.h linux-4.14/arch/hexagon/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/hexagon/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/hexagon/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -21,10 +21,6 @@
- #ifndef _ASM_SPINLOCK_TYPES_H
- #define _ASM_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- volatile unsigned int lock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/ia64/include/asm/spinlock_types.h linux-4.14/arch/ia64/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/ia64/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/ia64/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef _ASM_IA64_SPINLOCK_TYPES_H
- #define _ASM_IA64_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- volatile unsigned int lock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/ia64/kernel/mca.c linux-4.14/arch/ia64/kernel/mca.c
---- linux-4.14.orig/arch/ia64/kernel/mca.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/ia64/kernel/mca.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1824,7 +1824,7 @@
- ti->cpu = cpu;
- p->stack = ti;
- p->state = TASK_UNINTERRUPTIBLE;
-- cpumask_set_cpu(cpu, &p->cpus_allowed);
-+ cpumask_set_cpu(cpu, &p->cpus_mask);
- INIT_LIST_HEAD(&p->tasks);
- p->parent = p->real_parent = p->group_leader = p;
- INIT_LIST_HEAD(&p->children);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/Kconfig linux-4.14/arch/Kconfig
---- linux-4.14.orig/arch/Kconfig 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -20,6 +20,7 @@
- tristate "OProfile system profiling"
- depends on PROFILING
- depends on HAVE_OPROFILE
-+ depends on !PREEMPT_RT_FULL
- select RING_BUFFER
- select RING_BUFFER_ALLOW_SWAP
- help
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/m32r/include/asm/spinlock_types.h linux-4.14/arch/m32r/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/m32r/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/m32r/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef _ASM_M32R_SPINLOCK_TYPES_H
- #define _ASM_M32R_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- volatile int slock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/metag/include/asm/spinlock_types.h linux-4.14/arch/metag/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/metag/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/metag/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef _ASM_METAG_SPINLOCK_TYPES_H
- #define _ASM_METAG_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- volatile unsigned int lock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mips/include/asm/switch_to.h linux-4.14/arch/mips/include/asm/switch_to.h
---- linux-4.14.orig/arch/mips/include/asm/switch_to.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/mips/include/asm/switch_to.h 2018-09-05 11:05:07.000000000 +0200
-@@ -42,7 +42,7 @@
- * inline to try to keep the overhead down. If we have been forced to run on
- * a "CPU" with an FPU because of a previous high level of FP computation,
- * but did not actually use the FPU during the most recent time-slice (CU1
-- * isn't set), we undo the restriction on cpus_allowed.
-+ * isn't set), we undo the restriction on cpus_mask.
- *
- * We're not calling set_cpus_allowed() here, because we have no need to
- * force prompt migration - we're already switching the current CPU to a
-@@ -57,7 +57,7 @@
- test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \
- (!(KSTK_STATUS(prev) & ST0_CU1))) { \
- clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \
-- prev->cpus_allowed = prev->thread.user_cpus_allowed; \
-+ prev->cpus_mask = prev->thread.user_cpus_allowed; \
- } \
- next->thread.emulated_fp = 0; \
- } while(0)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mips/Kconfig linux-4.14/arch/mips/Kconfig
---- linux-4.14.orig/arch/mips/Kconfig 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/mips/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -2519,7 +2519,7 @@
- #
- config HIGHMEM
- bool "High Memory Support"
-- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
-+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
-
- config CPU_SUPPORTS_HIGHMEM
- bool
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mips/kernel/mips-mt-fpaff.c linux-4.14/arch/mips/kernel/mips-mt-fpaff.c
---- linux-4.14.orig/arch/mips/kernel/mips-mt-fpaff.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/mips/kernel/mips-mt-fpaff.c 2018-09-05 11:05:07.000000000 +0200
-@@ -177,7 +177,7 @@
- if (retval)
- goto out_unlock;
-
-- cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
-+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
- cpumask_and(&mask, &allowed, cpu_active_mask);
-
- out_unlock:
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mips/kernel/traps.c linux-4.14/arch/mips/kernel/traps.c
---- linux-4.14.orig/arch/mips/kernel/traps.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/mips/kernel/traps.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1193,12 +1193,12 @@
- * restricted the allowed set to exclude any CPUs with FPUs,
- * we'll skip the procedure.
- */
-- if (cpumask_intersects(¤t->cpus_allowed, &mt_fpu_cpumask)) {
-+ if (cpumask_intersects(¤t->cpus_mask, &mt_fpu_cpumask)) {
- cpumask_t tmask;
-
- current->thread.user_cpus_allowed
-- = current->cpus_allowed;
-- cpumask_and(&tmask, ¤t->cpus_allowed,
-+ = current->cpus_mask;
-+ cpumask_and(&tmask, ¤t->cpus_mask,
- &mt_fpu_cpumask);
- set_cpus_allowed_ptr(current, &tmask);
- set_thread_flag(TIF_FPUBOUND);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mn10300/include/asm/spinlock_types.h linux-4.14/arch/mn10300/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/mn10300/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/mn10300/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef _ASM_SPINLOCK_TYPES_H
- #define _ASM_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct arch_spinlock {
- unsigned int slock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/include/asm/spinlock_types.h linux-4.14/arch/powerpc/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/powerpc/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/powerpc/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
- #define _ASM_POWERPC_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- volatile unsigned int slock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/include/asm/thread_info.h linux-4.14/arch/powerpc/include/asm/thread_info.h
---- linux-4.14.orig/arch/powerpc/include/asm/thread_info.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/powerpc/include/asm/thread_info.h 2018-09-05 11:05:07.000000000 +0200
-@@ -36,6 +36,8 @@
- int cpu; /* cpu we're on */
- int preempt_count; /* 0 => preemptable,
- <0 => BUG */
-+ int preempt_lazy_count; /* 0 => preemptable,
-+ <0 => BUG */
- unsigned long local_flags; /* private flags for thread */
- #ifdef CONFIG_LIVEPATCH
- unsigned long *livepatch_sp;
-@@ -81,8 +83,7 @@
- #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
- #define TIF_SIGPENDING 1 /* signal pending */
- #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
--#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
-- TIF_NEED_RESCHED */
-+#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */
- #define TIF_32BIT 4 /* 32 bit binary */
- #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
- #define TIF_PATCH_PENDING 6 /* pending live patching update */
-@@ -101,6 +102,8 @@
- #if defined(CONFIG_PPC64)
- #define TIF_ELF2ABI 18 /* function descriptors must die! */
- #endif
-+#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling
-+ TIF_NEED_RESCHED */
-
- /* as above, but as bit values */
- #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-@@ -120,14 +123,16 @@
- #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
- #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
- #define _TIF_NOHZ (1<<TIF_NOHZ)
-+#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
- #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
- _TIF_NOHZ)
-
- #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-- _TIF_RESTORE_TM | _TIF_PATCH_PENDING)
-+ _TIF_RESTORE_TM | _TIF_PATCH_PENDING | _TIF_NEED_RESCHED_LAZY)
- #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
-+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
-
- /* Bits in local_flags */
- /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/Kconfig linux-4.14/arch/powerpc/Kconfig
---- linux-4.14.orig/arch/powerpc/Kconfig 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/powerpc/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -111,10 +111,11 @@
-
- config RWSEM_GENERIC_SPINLOCK
- bool
-+ default y if PREEMPT_RT_FULL
-
- config RWSEM_XCHGADD_ALGORITHM
- bool
-- default y
-+ default y if !PREEMPT_RT_FULL
-
- config GENERIC_LOCKBREAK
- bool
-@@ -215,6 +216,7 @@
- select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
-+ select HAVE_PREEMPT_LAZY
- select HAVE_RCU_TABLE_FREE if SMP
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_SYSCALL_TRACEPOINTS
-@@ -390,7 +392,7 @@
-
- config HIGHMEM
- bool "High memory support"
-- depends on PPC32
-+ depends on PPC32 && !PREEMPT_RT_FULL
-
- source kernel/Kconfig.hz
- source kernel/Kconfig.preempt
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/asm-offsets.c linux-4.14/arch/powerpc/kernel/asm-offsets.c
---- linux-4.14.orig/arch/powerpc/kernel/asm-offsets.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/powerpc/kernel/asm-offsets.c 2018-09-05 11:05:07.000000000 +0200
-@@ -156,6 +156,7 @@
- OFFSET(TI_FLAGS, thread_info, flags);
- OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
- OFFSET(TI_PREEMPT, thread_info, preempt_count);
-+ OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
- OFFSET(TI_TASK, thread_info, task);
- OFFSET(TI_CPU, thread_info, cpu);
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/entry_32.S linux-4.14/arch/powerpc/kernel/entry_32.S
---- linux-4.14.orig/arch/powerpc/kernel/entry_32.S 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/powerpc/kernel/entry_32.S 2018-09-05 11:05:07.000000000 +0200
-@@ -866,7 +866,14 @@
- cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore
- andi. r8,r8,_TIF_NEED_RESCHED
-+ bne+ 1f
-+ lwz r0,TI_PREEMPT_LAZY(r9)
-+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
-+ bne restore
-+ lwz r0,TI_FLAGS(r9)
-+ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
- beq+ restore
-+1:
- lwz r3,_MSR(r1)
- andi. r0,r3,MSR_EE /* interrupts off? */
- beq restore /* don't schedule if so */
-@@ -877,11 +884,11 @@
- */
- bl trace_hardirqs_off
- #endif
--1: bl preempt_schedule_irq
-+2: bl preempt_schedule_irq
- CURRENT_THREAD_INFO(r9, r1)
- lwz r3,TI_FLAGS(r9)
-- andi. r0,r3,_TIF_NEED_RESCHED
-- bne- 1b
-+ andi. r0,r3,_TIF_NEED_RESCHED_MASK
-+ bne- 2b
- #ifdef CONFIG_TRACE_IRQFLAGS
- /* And now, to properly rebalance the above, we tell lockdep they
- * are being turned back on, which will happen when we return
-@@ -1204,7 +1211,7 @@
- #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
-
- do_work: /* r10 contains MSR_KERNEL here */
-- andi. r0,r9,_TIF_NEED_RESCHED
-+ andi. r0,r9,_TIF_NEED_RESCHED_MASK
- beq do_user_signal
-
- do_resched: /* r10 contains MSR_KERNEL here */
-@@ -1225,7 +1232,7 @@
- MTMSRD(r10) /* disable interrupts */
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_FLAGS(r9)
-- andi. r0,r9,_TIF_NEED_RESCHED
-+ andi. r0,r9,_TIF_NEED_RESCHED_MASK
- bne- do_resched
- andi. r0,r9,_TIF_USER_WORK_MASK
- beq restore_user
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/entry_64.S linux-4.14/arch/powerpc/kernel/entry_64.S
---- linux-4.14.orig/arch/powerpc/kernel/entry_64.S 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/powerpc/kernel/entry_64.S 2018-09-05 11:05:07.000000000 +0200
-@@ -690,7 +690,7 @@
- bl restore_math
- b restore
- #endif
--1: andi. r0,r4,_TIF_NEED_RESCHED
-+1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
- beq 2f
- bl restore_interrupts
- SCHEDULE_USER
-@@ -752,10 +752,18 @@
-
- #ifdef CONFIG_PREEMPT
- /* Check if we need to preempt */
-+ lwz r8,TI_PREEMPT(r9)
-+ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
-+ bne restore
- andi. r0,r4,_TIF_NEED_RESCHED
-+ bne+ check_count
-+
-+ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
- beq+ restore
-+ lwz r8,TI_PREEMPT_LAZY(r9)
-+
- /* Check that preempt_count() == 0 and interrupts are enabled */
-- lwz r8,TI_PREEMPT(r9)
-+check_count:
- cmpwi cr1,r8,0
- ld r0,SOFTE(r1)
- cmpdi r0,0
-@@ -772,7 +780,7 @@
- /* Re-test flags and eventually loop */
- CURRENT_THREAD_INFO(r9, r1)
- ld r4,TI_FLAGS(r9)
-- andi. r0,r4,_TIF_NEED_RESCHED
-+ andi. r0,r4,_TIF_NEED_RESCHED_MASK
- bne 1b
-
- /*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/irq.c linux-4.14/arch/powerpc/kernel/irq.c
---- linux-4.14.orig/arch/powerpc/kernel/irq.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/powerpc/kernel/irq.c 2018-09-05 11:05:07.000000000 +0200
-@@ -693,6 +693,7 @@
- }
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
- struct thread_info *curtp, *irqtp;
-@@ -710,6 +711,7 @@
- if (irqtp->flags)
- set_bits(irqtp->flags, &curtp->flags);
- }
-+#endif
-
- irq_hw_number_t virq_to_hw(unsigned int virq)
- {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/misc_32.S linux-4.14/arch/powerpc/kernel/misc_32.S
---- linux-4.14.orig/arch/powerpc/kernel/misc_32.S 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/powerpc/kernel/misc_32.S 2018-09-05 11:05:07.000000000 +0200
-@@ -41,6 +41,7 @@
- * We store the saved ksp_limit in the unused part
- * of the STACK_FRAME_OVERHEAD
- */
-+#ifndef CONFIG_PREEMPT_RT_FULL
- _GLOBAL(call_do_softirq)
- mflr r0
- stw r0,4(r1)
-@@ -57,6 +58,7 @@
- stw r10,THREAD+KSP_LIMIT(r2)
- mtlr r0
- blr
-+#endif
-
- /*
- * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/misc_64.S linux-4.14/arch/powerpc/kernel/misc_64.S
---- linux-4.14.orig/arch/powerpc/kernel/misc_64.S 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/powerpc/kernel/misc_64.S 2018-09-05 11:05:07.000000000 +0200
-@@ -31,6 +31,7 @@
-
- .text
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- _GLOBAL(call_do_softirq)
- mflr r0
- std r0,16(r1)
-@@ -41,6 +42,7 @@
- ld r0,16(r1)
- mtlr r0
- blr
-+#endif
-
- _GLOBAL(call_do_irq)
- mflr r0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kvm/Kconfig linux-4.14/arch/powerpc/kvm/Kconfig
---- linux-4.14.orig/arch/powerpc/kvm/Kconfig 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/powerpc/kvm/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -177,6 +177,7 @@
- config KVM_MPIC
- bool "KVM in-kernel MPIC emulation"
- depends on KVM && E500
-+ depends on !PREEMPT_RT_FULL
- select HAVE_KVM_IRQCHIP
- select HAVE_KVM_IRQFD
- select HAVE_KVM_IRQ_ROUTING
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/platforms/cell/spufs/sched.c linux-4.14/arch/powerpc/platforms/cell/spufs/sched.c
---- linux-4.14.orig/arch/powerpc/platforms/cell/spufs/sched.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/powerpc/platforms/cell/spufs/sched.c 2018-09-05 11:05:07.000000000 +0200
-@@ -141,7 +141,7 @@
- * runqueue. The context will be rescheduled on the proper node
- * if it is timesliced or preempted.
- */
-- cpumask_copy(&ctx->cpus_allowed, ¤t->cpus_allowed);
-+ cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);
-
- /* Save the current cpu id for spu interrupt routing. */
- ctx->last_ran = raw_smp_processor_id();
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/platforms/ps3/device-init.c linux-4.14/arch/powerpc/platforms/ps3/device-init.c
---- linux-4.14.orig/arch/powerpc/platforms/ps3/device-init.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/powerpc/platforms/ps3/device-init.c 2018-09-05 11:05:07.000000000 +0200
-@@ -752,7 +752,7 @@
- }
- pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
-
-- res = wait_event_interruptible(dev->done.wait,
-+ res = swait_event_interruptible(dev->done.wait,
- dev->done.done || kthread_should_stop());
- if (kthread_should_stop())
- res = -EINTR;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/s390/include/asm/spinlock_types.h linux-4.14/arch/s390/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/s390/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/s390/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- int lock;
- } __attribute__ ((aligned (4))) arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/sh/include/asm/spinlock_types.h linux-4.14/arch/sh/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/sh/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/sh/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef __ASM_SH_SPINLOCK_TYPES_H
- #define __ASM_SH_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- volatile unsigned int lock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/sh/kernel/irq.c linux-4.14/arch/sh/kernel/irq.c
---- linux-4.14.orig/arch/sh/kernel/irq.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/sh/kernel/irq.c 2018-09-05 11:05:07.000000000 +0200
-@@ -148,6 +148,7 @@
- hardirq_ctx[cpu] = NULL;
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
- struct thread_info *curctx;
-@@ -175,6 +176,7 @@
- "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
- );
- }
-+#endif
- #else
- static inline void handle_one_irq(unsigned int irq)
- {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/sparc/Kconfig linux-4.14/arch/sparc/Kconfig
---- linux-4.14.orig/arch/sparc/Kconfig 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/sparc/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -206,12 +206,10 @@
- source kernel/Kconfig.hz
-
- config RWSEM_GENERIC_SPINLOCK
-- bool
-- default y if SPARC32
-+ def_bool PREEMPT_RT_FULL
-
- config RWSEM_XCHGADD_ALGORITHM
-- bool
-- default y if SPARC64
-+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
-
- config GENERIC_HWEIGHT
- bool
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/sparc/kernel/irq_64.c linux-4.14/arch/sparc/kernel/irq_64.c
---- linux-4.14.orig/arch/sparc/kernel/irq_64.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/sparc/kernel/irq_64.c 2018-09-05 11:05:07.000000000 +0200
-@@ -855,6 +855,7 @@
- set_irq_regs(old_regs);
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
- void *orig_sp, *sp = softirq_stack[smp_processor_id()];
-@@ -869,6 +870,7 @@
- __asm__ __volatile__("mov %0, %%sp"
- : : "r" (orig_sp));
- }
-+#endif
-
- #ifdef CONFIG_HOTPLUG_CPU
- void fixup_irqs(void)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/tile/include/asm/setup.h linux-4.14/arch/tile/include/asm/setup.h
---- linux-4.14.orig/arch/tile/include/asm/setup.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/tile/include/asm/setup.h 2018-09-05 11:05:07.000000000 +0200
-@@ -49,7 +49,7 @@
-
- /* Hook hardwall code into changes in affinity. */
- #define arch_set_cpus_allowed(p, new_mask) do { \
-- if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
-+ if (!cpumask_equal(p->cpus_ptr, new_mask)) \
- hardwall_deactivate_all(p); \
- } while (0)
- #endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/tile/include/asm/spinlock_types.h linux-4.14/arch/tile/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/tile/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/tile/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -15,10 +15,6 @@
- #ifndef _ASM_TILE_SPINLOCK_TYPES_H
- #define _ASM_TILE_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- #ifdef __tilegx__
-
- /* Low 15 bits are "next"; high 15 bits are "current". */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/tile/kernel/hardwall.c linux-4.14/arch/tile/kernel/hardwall.c
---- linux-4.14.orig/arch/tile/kernel/hardwall.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/tile/kernel/hardwall.c 2018-09-05 11:05:07.000000000 +0200
-@@ -590,12 +590,12 @@
- * Get our affinity; if we're not bound to this tile uniquely,
- * we can't access the network registers.
- */
-- if (cpumask_weight(&p->cpus_allowed) != 1)
-+ if (p->nr_cpus_allowed != 1)
- return -EPERM;
-
- /* Make sure we are bound to a cpu assigned to this resource. */
- cpu = smp_processor_id();
-- BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
-+ BUG_ON(cpumask_first(p->cpus_ptr) != cpu);
- if (!cpumask_test_cpu(cpu, &info->cpumask))
- return -EINVAL;
-
-@@ -621,17 +621,17 @@
- * Deactivate a task's hardwall. Must hold lock for hardwall_type.
- * This method may be called from exit_thread(), so we don't want to
- * rely on too many fields of struct task_struct still being valid.
-- * We assume the cpus_allowed, pid, and comm fields are still valid.
-+ * We assume the nr_cpus_allowed, pid, and comm fields are still valid.
- */
- static void _hardwall_deactivate(struct hardwall_type *hwt,
- struct task_struct *task)
- {
- struct thread_struct *ts = &task->thread;
-
-- if (cpumask_weight(&task->cpus_allowed) != 1) {
-+ if (task->nr_cpus_allowed != 1) {
- pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
- task->pid, task->comm, hwt->name,
-- cpumask_weight(&task->cpus_allowed));
-+ task->nr_cpus_allowed);
- BUG();
- }
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.14/arch/x86/crypto/aesni-intel_glue.c
---- linux-4.14.orig/arch/x86/crypto/aesni-intel_glue.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/crypto/aesni-intel_glue.c 2018-09-05 11:05:07.000000000 +0200
-@@ -387,14 +387,14 @@
-
- err = skcipher_walk_virt(&walk, req, true);
-
-- kernel_fpu_begin();
- while ((nbytes = walk.nbytes)) {
-+ kernel_fpu_begin();
- aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK);
-+ kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = skcipher_walk_done(&walk, nbytes);
- }
-- kernel_fpu_end();
-
- return err;
- }
-@@ -409,14 +409,14 @@
-
- err = skcipher_walk_virt(&walk, req, true);
-
-- kernel_fpu_begin();
- while ((nbytes = walk.nbytes)) {
-+ kernel_fpu_begin();
- aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK);
-+ kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = skcipher_walk_done(&walk, nbytes);
- }
-- kernel_fpu_end();
-
- return err;
- }
-@@ -431,14 +431,14 @@
-
- err = skcipher_walk_virt(&walk, req, true);
-
-- kernel_fpu_begin();
- while ((nbytes = walk.nbytes)) {
-+ kernel_fpu_begin();
- aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK, walk.iv);
-+ kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = skcipher_walk_done(&walk, nbytes);
- }
-- kernel_fpu_end();
-
- return err;
- }
-@@ -453,14 +453,14 @@
-
- err = skcipher_walk_virt(&walk, req, true);
-
-- kernel_fpu_begin();
- while ((nbytes = walk.nbytes)) {
-+ kernel_fpu_begin();
- aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK, walk.iv);
-+ kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = skcipher_walk_done(&walk, nbytes);
- }
-- kernel_fpu_end();
-
- return err;
- }
-@@ -510,18 +510,20 @@
-
- err = skcipher_walk_virt(&walk, req, true);
-
-- kernel_fpu_begin();
- while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
-+ kernel_fpu_begin();
- aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK, walk.iv);
-+ kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = skcipher_walk_done(&walk, nbytes);
- }
- if (walk.nbytes) {
-+ kernel_fpu_begin();
- ctr_crypt_final(ctx, &walk);
-+ kernel_fpu_end();
- err = skcipher_walk_done(&walk, 0);
- }
-- kernel_fpu_end();
-
- return err;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/camellia_aesni_avx2_glue.c linux-4.14/arch/x86/crypto/camellia_aesni_avx2_glue.c
---- linux-4.14.orig/arch/x86/crypto/camellia_aesni_avx2_glue.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/camellia_aesni_avx2_glue.c 2018-09-05 11:05:07.000000000 +0200
-@@ -206,6 +206,20 @@
- bool fpu_enabled;
- };
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void camellia_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+ bool fpu_enabled = ctx->fpu_enabled;
-+
-+ if (!fpu_enabled)
-+ return;
-+ camellia_fpu_end(fpu_enabled);
-+ ctx->fpu_enabled = false;
-+}
-+#else
-+static void camellia_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
-@@ -221,16 +235,19 @@
- }
-
- if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
-+ kernel_fpu_resched();
- camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- }
-
- while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
-+ kernel_fpu_resched();
- camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
- }
-+ camellia_fpu_end_rt(ctx);
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_enc_blk(ctx->ctx, srcdst, srcdst);
-@@ -251,16 +268,19 @@
- }
-
- if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
-+ kernel_fpu_resched();
- camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- }
-
- while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
-+ kernel_fpu_resched();
- camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
- }
-+ camellia_fpu_end_rt(ctx);
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_dec_blk(ctx->ctx, srcdst, srcdst);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/camellia_aesni_avx_glue.c linux-4.14/arch/x86/crypto/camellia_aesni_avx_glue.c
---- linux-4.14.orig/arch/x86/crypto/camellia_aesni_avx_glue.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/camellia_aesni_avx_glue.c 2018-09-05 11:05:07.000000000 +0200
-@@ -210,6 +210,21 @@
- bool fpu_enabled;
- };
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void camellia_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+ bool fpu_enabled = ctx->fpu_enabled;
-+
-+ if (!fpu_enabled)
-+ return;
-+ camellia_fpu_end(fpu_enabled);
-+ ctx->fpu_enabled = false;
-+}
-+
-+#else
-+static void camellia_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
-@@ -225,10 +240,12 @@
- }
-
- while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
-+ kernel_fpu_resched();
- camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
- }
-+ camellia_fpu_end_rt(ctx);
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_enc_blk(ctx->ctx, srcdst, srcdst);
-@@ -249,10 +266,12 @@
- }
-
- while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
-+ kernel_fpu_resched();
- camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
- }
-+ camellia_fpu_end_rt(ctx);
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_dec_blk(ctx->ctx, srcdst, srcdst);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.14/arch/x86/crypto/cast5_avx_glue.c
---- linux-4.14.orig/arch/x86/crypto/cast5_avx_glue.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/crypto/cast5_avx_glue.c 2018-09-05 11:05:07.000000000 +0200
-@@ -59,7 +59,7 @@
- static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
- bool enc)
- {
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- const unsigned int bsize = CAST5_BLOCK_SIZE;
- unsigned int nbytes;
-@@ -73,7 +73,7 @@
- u8 *wsrc = walk->src.virt.addr;
- u8 *wdst = walk->dst.virt.addr;
-
-- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
-+ fpu_enabled = cast5_fpu_begin(false, nbytes);
-
- /* Process multi-block batch */
- if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
-@@ -102,10 +102,9 @@
- } while (nbytes >= bsize);
-
- done:
-+ cast5_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, walk, nbytes);
- }
--
-- cast5_fpu_end(fpu_enabled);
- return err;
- }
-
-@@ -226,7 +225,7 @@
- static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
- {
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
-@@ -235,12 +234,11 @@
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- while ((nbytes = walk.nbytes)) {
-- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
-+ fpu_enabled = cast5_fpu_begin(false, nbytes);
- nbytes = __cbc_decrypt(desc, &walk);
-+ cast5_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
--
-- cast5_fpu_end(fpu_enabled);
- return err;
- }
-
-@@ -309,7 +307,7 @@
- static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
- {
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
-@@ -318,13 +316,12 @@
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
-- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
-+ fpu_enabled = cast5_fpu_begin(false, nbytes);
- nbytes = __ctr_crypt(desc, &walk);
-+ cast5_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
-- cast5_fpu_end(fpu_enabled);
--
- if (walk.nbytes) {
- ctr_crypt_final(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/cast6_avx_glue.c linux-4.14/arch/x86/crypto/cast6_avx_glue.c
---- linux-4.14.orig/arch/x86/crypto/cast6_avx_glue.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/cast6_avx_glue.c 2018-09-05 11:05:07.000000000 +0200
-@@ -205,19 +205,33 @@
- bool fpu_enabled;
- };
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void cast6_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+ bool fpu_enabled = ctx->fpu_enabled;
-+
-+ if (!fpu_enabled)
-+ return;
-+ cast6_fpu_end(fpu_enabled);
-+ ctx->fpu_enabled = false;
-+}
-+
-+#else
-+static void cast6_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
- const unsigned int bsize = CAST6_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
-- ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
--
- if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
-+ ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
- cast6_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
-+ cast6_fpu_end_rt(ctx);
- return;
- }
--
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __cast6_encrypt(ctx->ctx, srcdst, srcdst);
- }
-@@ -228,10 +242,10 @@
- struct crypt_priv *ctx = priv;
- int i;
-
-- ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
--
- if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
-+ ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
- cast6_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
-+ cast6_fpu_end_rt(ctx);
- return;
- }
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/chacha20_glue.c linux-4.14/arch/x86/crypto/chacha20_glue.c
---- linux-4.14.orig/arch/x86/crypto/chacha20_glue.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/chacha20_glue.c 2018-09-05 11:05:07.000000000 +0200
-@@ -81,23 +81,24 @@
-
- crypto_chacha20_init(state, ctx, walk.iv);
-
-- kernel_fpu_begin();
--
- while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
-+ kernel_fpu_begin();
-+
- chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
- rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
-+ kernel_fpu_end();
- err = skcipher_walk_done(&walk,
- walk.nbytes % CHACHA20_BLOCK_SIZE);
- }
-
- if (walk.nbytes) {
-+ kernel_fpu_begin();
- chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes);
-+ kernel_fpu_end();
- err = skcipher_walk_done(&walk, 0);
- }
-
-- kernel_fpu_end();
--
- return err;
- }
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/glue_helper.c linux-4.14/arch/x86/crypto/glue_helper.c
---- linux-4.14.orig/arch/x86/crypto/glue_helper.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/glue_helper.c 2018-09-05 11:05:07.000000000 +0200
-@@ -40,7 +40,7 @@
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
- const unsigned int bsize = 128 / 8;
- unsigned int nbytes, i, func_bytes;
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- int err;
-
- err = blkcipher_walk_virt(desc, walk);
-@@ -50,7 +50,7 @@
- u8 *wdst = walk->dst.virt.addr;
-
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-- desc, fpu_enabled, nbytes);
-+ desc, false, nbytes);
-
- for (i = 0; i < gctx->num_funcs; i++) {
- func_bytes = bsize * gctx->funcs[i].num_blocks;
-@@ -72,10 +72,10 @@
- }
-
- done:
-+ glue_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, walk, nbytes);
- }
-
-- glue_fpu_end(fpu_enabled);
- return err;
- }
-
-@@ -192,7 +192,7 @@
- struct scatterlist *src, unsigned int nbytes)
- {
- const unsigned int bsize = 128 / 8;
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
-@@ -201,12 +201,12 @@
-
- while ((nbytes = walk.nbytes)) {
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-- desc, fpu_enabled, nbytes);
-+ desc, false, nbytes);
- nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
-+ glue_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
-- glue_fpu_end(fpu_enabled);
- return err;
- }
- EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
-@@ -275,7 +275,7 @@
- struct scatterlist *src, unsigned int nbytes)
- {
- const unsigned int bsize = 128 / 8;
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
-@@ -284,13 +284,12 @@
-
- while ((nbytes = walk.nbytes) >= bsize) {
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-- desc, fpu_enabled, nbytes);
-+ desc, false, nbytes);
- nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
-+ glue_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
-- glue_fpu_end(fpu_enabled);
--
- if (walk.nbytes) {
- glue_ctr_crypt_final_128bit(
- gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
-@@ -380,7 +379,7 @@
- void *tweak_ctx, void *crypt_ctx)
- {
- const unsigned int bsize = 128 / 8;
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
-@@ -393,21 +392,21 @@
-
- /* set minimum length to bsize, for tweak_fn */
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-- desc, fpu_enabled,
-+ desc, false,
- nbytes < bsize ? bsize : nbytes);
--
- /* calculate first value of T */
- tweak_fn(tweak_ctx, walk.iv, walk.iv);
-+ glue_fpu_end(fpu_enabled);
-
- while (nbytes) {
-+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-+ desc, false, nbytes);
- nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
-
-+ glue_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- nbytes = walk.nbytes;
- }
--
-- glue_fpu_end(fpu_enabled);
--
- return err;
- }
- EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/serpent_avx2_glue.c linux-4.14/arch/x86/crypto/serpent_avx2_glue.c
---- linux-4.14.orig/arch/x86/crypto/serpent_avx2_glue.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/serpent_avx2_glue.c 2018-09-05 11:05:07.000000000 +0200
-@@ -184,6 +184,21 @@
- bool fpu_enabled;
- };
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void serpent_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+ bool fpu_enabled = ctx->fpu_enabled;
-+
-+ if (!fpu_enabled)
-+ return;
-+ serpent_fpu_end(fpu_enabled);
-+ ctx->fpu_enabled = false;
-+}
-+
-+#else
-+static void serpent_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
-@@ -199,10 +214,12 @@
- }
-
- while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
-+ kernel_fpu_resched();
- serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
- nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
- }
-+ serpent_fpu_end_rt(ctx);
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_encrypt(ctx->ctx, srcdst, srcdst);
-@@ -223,10 +240,12 @@
- }
-
- while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
-+ kernel_fpu_resched();
- serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
- nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
- }
-+ serpent_fpu_end_rt(ctx);
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_decrypt(ctx->ctx, srcdst, srcdst);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/serpent_avx_glue.c linux-4.14/arch/x86/crypto/serpent_avx_glue.c
---- linux-4.14.orig/arch/x86/crypto/serpent_avx_glue.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/serpent_avx_glue.c 2018-09-05 11:05:07.000000000 +0200
-@@ -218,16 +218,31 @@
- bool fpu_enabled;
- };
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void serpent_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+ bool fpu_enabled = ctx->fpu_enabled;
-+
-+ if (!fpu_enabled)
-+ return;
-+ serpent_fpu_end(fpu_enabled);
-+ ctx->fpu_enabled = false;
-+}
-+
-+#else
-+static void serpent_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
-- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
--
- if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
-+ ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
- serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
-+ serpent_fpu_end_rt(ctx);
- return;
- }
-
-@@ -241,10 +256,10 @@
- struct crypt_priv *ctx = priv;
- int i;
-
-- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
--
- if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
-+ ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
- serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
-+ serpent_fpu_end_rt(ctx);
- return;
- }
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/serpent_sse2_glue.c linux-4.14/arch/x86/crypto/serpent_sse2_glue.c
---- linux-4.14.orig/arch/x86/crypto/serpent_sse2_glue.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/serpent_sse2_glue.c 2018-09-05 11:05:07.000000000 +0200
-@@ -187,16 +187,31 @@
- bool fpu_enabled;
- };
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void serpent_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+ bool fpu_enabled = ctx->fpu_enabled;
-+
-+ if (!fpu_enabled)
-+ return;
-+ serpent_fpu_end(fpu_enabled);
-+ ctx->fpu_enabled = false;
-+}
-+
-+#else
-+static void serpent_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
-- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
--
- if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
-+ ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
- serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
-+ serpent_fpu_end_rt(ctx);
- return;
- }
-
-@@ -210,10 +225,10 @@
- struct crypt_priv *ctx = priv;
- int i;
-
-- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
--
- if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
-+ ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
- serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
-+ serpent_fpu_end_rt(ctx);
- return;
- }
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/twofish_avx_glue.c linux-4.14/arch/x86/crypto/twofish_avx_glue.c
---- linux-4.14.orig/arch/x86/crypto/twofish_avx_glue.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/twofish_avx_glue.c 2018-09-05 11:05:07.000000000 +0200
-@@ -218,6 +218,21 @@
- bool fpu_enabled;
- };
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void twofish_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+ bool fpu_enabled = ctx->fpu_enabled;
-+
-+ if (!fpu_enabled)
-+ return;
-+ twofish_fpu_end(fpu_enabled);
-+ ctx->fpu_enabled = false;
-+}
-+
-+#else
-+static void twofish_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
- const unsigned int bsize = TF_BLOCK_SIZE;
-@@ -228,12 +243,16 @@
-
- if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
- twofish_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
-+ twofish_fpu_end_rt(ctx);
- return;
- }
-
-- for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
-+ for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3) {
-+ kernel_fpu_resched();
- twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst);
-+ }
-
-+ twofish_fpu_end_rt(ctx);
- nbytes %= bsize * 3;
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
-@@ -250,11 +269,15 @@
-
- if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
- twofish_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
-+ twofish_fpu_end_rt(ctx);
- return;
- }
-
-- for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
-+ for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3) {
-+ kernel_fpu_resched();
- twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst);
-+ }
-+ twofish_fpu_end_rt(ctx);
-
- nbytes %= bsize * 3;
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/entry/common.c linux-4.14/arch/x86/entry/common.c
---- linux-4.14.orig/arch/x86/entry/common.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/entry/common.c 2018-09-05 11:05:07.000000000 +0200
-@@ -133,7 +133,7 @@
-
- #define EXIT_TO_USERMODE_LOOP_FLAGS \
- (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
-+ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
-
- static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
- {
-@@ -148,9 +148,16 @@
- /* We have work to do. */
- local_irq_enable();
-
-- if (cached_flags & _TIF_NEED_RESCHED)
-+ if (cached_flags & _TIF_NEED_RESCHED_MASK)
- schedule();
-
-+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
-+ if (unlikely(current->forced_info.si_signo)) {
-+ struct task_struct *t = current;
-+ force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
-+ t->forced_info.si_signo = 0;
-+ }
-+#endif
- if (cached_flags & _TIF_UPROBE)
- uprobe_notify_resume(regs);
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/entry/entry_32.S linux-4.14/arch/x86/entry/entry_32.S
---- linux-4.14.orig/arch/x86/entry/entry_32.S 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/entry/entry_32.S 2018-09-05 11:05:07.000000000 +0200
-@@ -350,8 +350,25 @@
- ENTRY(resume_kernel)
- DISABLE_INTERRUPTS(CLBR_ANY)
- .Lneed_resched:
-+ # preempt count == 0 + NEED_RS set?
- cmpl $0, PER_CPU_VAR(__preempt_count)
-+#ifndef CONFIG_PREEMPT_LAZY
- jnz restore_all
-+#else
-+ jz test_int_off
-+
-+ # atleast preempt count == 0 ?
-+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
-+ jne restore_all
-+
-+ movl PER_CPU_VAR(current_task), %ebp
-+ cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
-+ jnz restore_all
-+
-+ testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
-+ jz restore_all
-+test_int_off:
-+#endif
- testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
- jz restore_all
- call preempt_schedule_irq
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/entry/entry_64.S linux-4.14/arch/x86/entry/entry_64.S
---- linux-4.14.orig/arch/x86/entry/entry_64.S 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/entry/entry_64.S 2018-09-05 11:05:07.000000000 +0200
-@@ -633,7 +633,23 @@
- bt $9, EFLAGS(%rsp) /* were interrupts off? */
- jnc 1f
- 0: cmpl $0, PER_CPU_VAR(__preempt_count)
-+#ifndef CONFIG_PREEMPT_LAZY
-+ jnz 1f
-+#else
-+ jz do_preempt_schedule_irq
-+
-+ # atleast preempt count == 0 ?
-+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
-+ jnz 1f
-+
-+ movq PER_CPU_VAR(current_task), %rcx
-+ cmpl $0, TASK_TI_preempt_lazy_count(%rcx)
- jnz 1f
-+
-+ bt $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
-+ jnc 1f
-+do_preempt_schedule_irq:
-+#endif
- call preempt_schedule_irq
- jmp 0b
- 1:
-@@ -988,6 +1004,7 @@
- jmp 2b
- .previous
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /* Call softirq on interrupt stack. Interrupts are off. */
- ENTRY(do_softirq_own_stack)
- pushq %rbp
-@@ -998,6 +1015,7 @@
- leaveq
- ret
- ENDPROC(do_softirq_own_stack)
-+#endif
-
- #ifdef CONFIG_XEN
- idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/fpu/api.h linux-4.14/arch/x86/include/asm/fpu/api.h
---- linux-4.14.orig/arch/x86/include/asm/fpu/api.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/include/asm/fpu/api.h 2018-09-05 11:05:07.000000000 +0200
-@@ -25,6 +25,7 @@
- extern void __kernel_fpu_end(void);
- extern void kernel_fpu_begin(void);
- extern void kernel_fpu_end(void);
-+extern void kernel_fpu_resched(void);
- extern bool irq_fpu_usable(void);
-
- /*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/preempt.h linux-4.14/arch/x86/include/asm/preempt.h
---- linux-4.14.orig/arch/x86/include/asm/preempt.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/include/asm/preempt.h 2018-09-05 11:05:07.000000000 +0200
-@@ -86,17 +86,46 @@
- * a decrement which hits zero means we have no preempt_count and should
- * reschedule.
- */
--static __always_inline bool __preempt_count_dec_and_test(void)
-+static __always_inline bool ____preempt_count_dec_and_test(void)
- {
- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
- }
-
-+static __always_inline bool __preempt_count_dec_and_test(void)
-+{
-+ if (____preempt_count_dec_and_test())
-+ return true;
-+#ifdef CONFIG_PREEMPT_LAZY
-+ if (current_thread_info()->preempt_lazy_count)
-+ return false;
-+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+#else
-+ return false;
-+#endif
-+}
-+
- /*
- * Returns true when we need to resched and can (barring IRQ state).
- */
- static __always_inline bool should_resched(int preempt_offset)
- {
-+#ifdef CONFIG_PREEMPT_LAZY
-+ u32 tmp;
-+
-+ tmp = raw_cpu_read_4(__preempt_count);
-+ if (tmp == preempt_offset)
-+ return true;
-+
-+ /* preempt count == 0 ? */
-+ tmp &= ~PREEMPT_NEED_RESCHED;
-+ if (tmp)
-+ return false;
-+ if (current_thread_info()->preempt_lazy_count)
-+ return false;
-+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+#else
- return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
-+#endif
- }
-
- #ifdef CONFIG_PREEMPT
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/signal.h linux-4.14/arch/x86/include/asm/signal.h
---- linux-4.14.orig/arch/x86/include/asm/signal.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/include/asm/signal.h 2018-09-05 11:05:07.000000000 +0200
-@@ -28,6 +28,19 @@
- #define SA_IA32_ABI 0x02000000u
- #define SA_X32_ABI 0x01000000u
-
-+/*
-+ * Because some traps use the IST stack, we must keep preemption
-+ * disabled while calling do_trap(), but do_trap() may call
-+ * force_sig_info() which will grab the signal spin_locks for the
-+ * task, which in PREEMPT_RT_FULL are mutexes. By defining
-+ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
-+ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
-+ * trap.
-+ */
-+#if defined(CONFIG_PREEMPT_RT_FULL)
-+#define ARCH_RT_DELAYS_SIGNAL_SEND
-+#endif
-+
- #ifndef CONFIG_COMPAT
- typedef sigset_t compat_sigset_t;
- #endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/stackprotector.h linux-4.14/arch/x86/include/asm/stackprotector.h
---- linux-4.14.orig/arch/x86/include/asm/stackprotector.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/include/asm/stackprotector.h 2018-09-05 11:05:07.000000000 +0200
-@@ -60,7 +60,7 @@
- */
- static __always_inline void boot_init_stack_canary(void)
- {
-- u64 canary;
-+ u64 uninitialized_var(canary);
- u64 tsc;
-
- #ifdef CONFIG_X86_64
-@@ -71,8 +71,14 @@
- * of randomness. The TSC only matters for very early init,
- * there it already has some randomness on most systems. Later
- * on during the bootup the random pool has true entropy too.
-+ * For preempt-rt we need to weaken the randomness a bit, as
-+ * we can't call into the random generator from atomic context
-+ * due to locking constraints. We just leave canary
-+ * uninitialized and use the TSC based randomness on top of it.
- */
-+#ifndef CONFIG_PREEMPT_RT_FULL
- get_random_bytes(&canary, sizeof(canary));
-+#endif
- tsc = rdtsc();
- canary += tsc + (tsc << 32UL);
- canary &= CANARY_MASK;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/thread_info.h linux-4.14/arch/x86/include/asm/thread_info.h
---- linux-4.14.orig/arch/x86/include/asm/thread_info.h 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/include/asm/thread_info.h 2018-09-05 11:05:07.000000000 +0200
-@@ -56,11 +56,14 @@
- struct thread_info {
- unsigned long flags; /* low level flags */
- u32 status; /* thread synchronous flags */
-+ int preempt_lazy_count; /* 0 => lazy preemptable
-+ <0 => BUG */
- };
-
- #define INIT_THREAD_INFO(tsk) \
- { \
- .flags = 0, \
-+ .preempt_lazy_count = 0, \
- }
-
- #define init_stack (init_thread_union.stack)
-@@ -69,6 +72,10 @@
-
- #include <asm/asm-offsets.h>
-
-+#define GET_THREAD_INFO(reg) \
-+ _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
-+ _ASM_SUB $(THREAD_SIZE),reg ;
-+
- #endif
-
- /*
-@@ -85,6 +92,7 @@
- #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
- #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
- #define TIF_SECCOMP 8 /* secure computing */
-+#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
- #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
- #define TIF_UPROBE 12 /* breakpointed or singlestepping */
- #define TIF_PATCH_PENDING 13 /* pending live patching update */
-@@ -112,6 +120,7 @@
- #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
- #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
- #define _TIF_SECCOMP (1 << TIF_SECCOMP)
-+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
- #define _TIF_UPROBE (1 << TIF_UPROBE)
- #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
-@@ -153,6 +162,8 @@
- #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
- #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
-
-+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
-+
- #define STACK_WARN (THREAD_SIZE/8)
-
- /*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.14/arch/x86/include/asm/uv/uv_bau.h
---- linux-4.14.orig/arch/x86/include/asm/uv/uv_bau.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/include/asm/uv/uv_bau.h 2018-09-05 11:05:07.000000000 +0200
-@@ -643,9 +643,9 @@
- cycles_t send_message;
- cycles_t period_end;
- cycles_t period_time;
-- spinlock_t uvhub_lock;
-- spinlock_t queue_lock;
-- spinlock_t disable_lock;
-+ raw_spinlock_t uvhub_lock;
-+ raw_spinlock_t queue_lock;
-+ raw_spinlock_t disable_lock;
- /* tunables */
- int max_concurr;
- int max_concurr_const;
-@@ -847,15 +847,15 @@
- * to be lowered below the current 'v'. atomic_add_unless can only stop
- * on equal.
- */
--static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
-+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
- {
-- spin_lock(lock);
-+ raw_spin_lock(lock);
- if (atomic_read(v) >= u) {
-- spin_unlock(lock);
-+ raw_spin_unlock(lock);
- return 0;
- }
- atomic_inc(v);
-- spin_unlock(lock);
-+ raw_spin_unlock(lock);
- return 1;
- }
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/Kconfig linux-4.14/arch/x86/Kconfig
---- linux-4.14.orig/arch/x86/Kconfig 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -169,6 +169,7 @@
- select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
-+ select HAVE_PREEMPT_LAZY
- select HAVE_RCU_TABLE_FREE
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
-@@ -256,8 +257,11 @@
- def_bool y
- depends on ISA_DMA_API
-
-+config RWSEM_GENERIC_SPINLOCK
-+ def_bool PREEMPT_RT_FULL
-+
- config RWSEM_XCHGADD_ALGORITHM
-- def_bool y
-+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
-
- config GENERIC_CALIBRATE_DELAY
- def_bool y
-@@ -932,7 +936,7 @@
- config MAXSMP
- bool "Enable Maximum number of SMP Processors and NUMA Nodes"
- depends on X86_64 && SMP && DEBUG_KERNEL
-- select CPUMASK_OFFSTACK
-+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
- ---help---
- Enable maximum number of CPUS and NUMA Nodes for this architecture.
- If unsure, say N.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/apic/io_apic.c linux-4.14/arch/x86/kernel/apic/io_apic.c
---- linux-4.14.orig/arch/x86/kernel/apic/io_apic.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kernel/apic/io_apic.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1691,7 +1691,8 @@
- static inline bool ioapic_irqd_mask(struct irq_data *data)
- {
- /* If we are moving the irq we need to mask it */
-- if (unlikely(irqd_is_setaffinity_pending(data))) {
-+ if (unlikely(irqd_is_setaffinity_pending(data) &&
-+ !irqd_irq_inprogress(data))) {
- mask_ioapic_irq(data);
- return true;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/asm-offsets.c linux-4.14/arch/x86/kernel/asm-offsets.c
---- linux-4.14.orig/arch/x86/kernel/asm-offsets.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kernel/asm-offsets.c 2018-09-05 11:05:07.000000000 +0200
-@@ -38,6 +38,7 @@
-
- BLANK();
- OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
-+ OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count);
- OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
-
- BLANK();
-@@ -94,6 +95,7 @@
-
- BLANK();
- DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
-+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
-
- /* TLB state for the entry code */
- OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/cpu/mcheck/dev-mcelog.c linux-4.14/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
---- linux-4.14.orig/arch/x86/kernel/cpu/mcheck/dev-mcelog.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/kernel/cpu/mcheck/dev-mcelog.c 2018-09-05 11:05:07.000000000 +0200
-@@ -14,6 +14,7 @@
- #include <linux/slab.h>
- #include <linux/kmod.h>
- #include <linux/poll.h>
-+#include <linux/swork.h>
-
- #include "mce-internal.h"
-
-@@ -86,13 +87,43 @@
-
- static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
-
--
--void mce_work_trigger(void)
-+static void __mce_work_trigger(struct swork_event *event)
- {
- if (mce_helper[0])
- schedule_work(&mce_trigger_work);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static bool notify_work_ready __read_mostly;
-+static struct swork_event notify_work;
-+
-+static int mce_notify_work_init(void)
-+{
-+ int err;
-+
-+ err = swork_get();
-+ if (err)
-+ return err;
-+
-+ INIT_SWORK(¬ify_work, __mce_work_trigger);
-+ notify_work_ready = true;
-+ return 0;
-+}
-+
-+void mce_work_trigger(void)
-+{
-+ if (notify_work_ready)
-+ swork_queue(¬ify_work);
-+}
-+
-+#else
-+void mce_work_trigger(void)
-+{
-+ __mce_work_trigger(NULL);
-+}
-+static inline int mce_notify_work_init(void) { return 0; }
-+#endif
-+
- static ssize_t
- show_trigger(struct device *s, struct device_attribute *attr, char *buf)
- {
-@@ -356,7 +387,7 @@
-
- return err;
- }
--
-+ mce_notify_work_init();
- mce_register_decode_chain(&dev_mcelog_nb);
- return 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.14/arch/x86/kernel/cpu/mcheck/mce.c
---- linux-4.14.orig/arch/x86/kernel/cpu/mcheck/mce.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kernel/cpu/mcheck/mce.c 2018-09-05 11:05:07.000000000 +0200
-@@ -42,6 +42,7 @@
- #include <linux/debugfs.h>
- #include <linux/irq_work.h>
- #include <linux/export.h>
-+#include <linux/jiffies.h>
- #include <linux/jump_label.h>
-
- #include <asm/intel-family.h>
-@@ -1365,7 +1366,7 @@
- static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
-
- static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
--static DEFINE_PER_CPU(struct timer_list, mce_timer);
-+static DEFINE_PER_CPU(struct hrtimer, mce_timer);
-
- static unsigned long mce_adjust_timer_default(unsigned long interval)
- {
-@@ -1374,27 +1375,19 @@
-
- static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
-
--static void __start_timer(struct timer_list *t, unsigned long interval)
-+static void __start_timer(struct hrtimer *t, unsigned long iv)
- {
-- unsigned long when = jiffies + interval;
-- unsigned long flags;
--
-- local_irq_save(flags);
--
-- if (!timer_pending(t) || time_before(when, t->expires))
-- mod_timer(t, round_jiffies(when));
-+ if (!iv)
-+ return;
-
-- local_irq_restore(flags);
-+ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
-+ 0, HRTIMER_MODE_REL_PINNED);
- }
-
--static void mce_timer_fn(unsigned long data)
-+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
- {
-- struct timer_list *t = this_cpu_ptr(&mce_timer);
-- int cpu = smp_processor_id();
- unsigned long iv;
-
-- WARN_ON(cpu != data);
--
- iv = __this_cpu_read(mce_next_interval);
-
- if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1417,7 +1410,11 @@
-
- done:
- __this_cpu_write(mce_next_interval, iv);
-- __start_timer(t, iv);
-+ if (!iv)
-+ return HRTIMER_NORESTART;
-+
-+ hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(iv)));
-+ return HRTIMER_RESTART;
- }
-
- /*
-@@ -1425,7 +1422,7 @@
- */
- void mce_timer_kick(unsigned long interval)
- {
-- struct timer_list *t = this_cpu_ptr(&mce_timer);
-+ struct hrtimer *t = this_cpu_ptr(&mce_timer);
- unsigned long iv = __this_cpu_read(mce_next_interval);
-
- __start_timer(t, interval);
-@@ -1440,7 +1437,7 @@
- int cpu;
-
- for_each_online_cpu(cpu)
-- del_timer_sync(&per_cpu(mce_timer, cpu));
-+ hrtimer_cancel(&per_cpu(mce_timer, cpu));
- }
-
- /*
-@@ -1769,7 +1766,7 @@
- }
- }
-
--static void mce_start_timer(struct timer_list *t)
-+static void mce_start_timer(struct hrtimer *t)
- {
- unsigned long iv = check_interval * HZ;
-
-@@ -1782,18 +1779,19 @@
-
- static void __mcheck_cpu_setup_timer(void)
- {
-- struct timer_list *t = this_cpu_ptr(&mce_timer);
-- unsigned int cpu = smp_processor_id();
-+ struct hrtimer *t = this_cpu_ptr(&mce_timer);
-
-- setup_pinned_timer(t, mce_timer_fn, cpu);
-+ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ t->function = mce_timer_fn;
- }
-
- static void __mcheck_cpu_init_timer(void)
- {
-- struct timer_list *t = this_cpu_ptr(&mce_timer);
-- unsigned int cpu = smp_processor_id();
-+ struct hrtimer *t = this_cpu_ptr(&mce_timer);
-+
-+ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ t->function = mce_timer_fn;
-
-- setup_pinned_timer(t, mce_timer_fn, cpu);
- mce_start_timer(t);
- }
-
-@@ -2309,7 +2307,7 @@
-
- static int mce_cpu_online(unsigned int cpu)
- {
-- struct timer_list *t = this_cpu_ptr(&mce_timer);
-+ struct hrtimer *t = this_cpu_ptr(&mce_timer);
- int ret;
-
- mce_device_create(cpu);
-@@ -2326,10 +2324,10 @@
-
- static int mce_cpu_pre_down(unsigned int cpu)
- {
-- struct timer_list *t = this_cpu_ptr(&mce_timer);
-+ struct hrtimer *t = this_cpu_ptr(&mce_timer);
-
- mce_disable_cpu();
-- del_timer_sync(t);
-+ hrtimer_cancel(t);
- mce_threshold_remove_device(cpu);
- mce_device_remove(cpu);
- return 0;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/fpu/core.c linux-4.14/arch/x86/kernel/fpu/core.c
---- linux-4.14.orig/arch/x86/kernel/fpu/core.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kernel/fpu/core.c 2018-09-05 11:05:07.000000000 +0200
-@@ -138,6 +138,18 @@
- }
- EXPORT_SYMBOL_GPL(kernel_fpu_end);
-
-+void kernel_fpu_resched(void)
-+{
-+ WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
-+
-+ if (should_resched(PREEMPT_OFFSET)) {
-+ kernel_fpu_end();
-+ cond_resched();
-+ kernel_fpu_begin();
-+ }
-+}
-+EXPORT_SYMBOL_GPL(kernel_fpu_resched);
-+
- /*
- * Save the FPU state (mark it for reload if necessary):
- *
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/irq_32.c linux-4.14/arch/x86/kernel/irq_32.c
---- linux-4.14.orig/arch/x86/kernel/irq_32.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kernel/irq_32.c 2018-09-05 11:05:07.000000000 +0200
-@@ -130,6 +130,7 @@
- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
- struct irq_stack *irqstk;
-@@ -146,6 +147,7 @@
-
- call_on_stack(__do_softirq, isp);
- }
-+#endif
-
- bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
- {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/process_32.c linux-4.14/arch/x86/kernel/process_32.c
---- linux-4.14.orig/arch/x86/kernel/process_32.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kernel/process_32.c 2018-09-05 11:05:07.000000000 +0200
-@@ -38,6 +38,7 @@
- #include <linux/io.h>
- #include <linux/kdebug.h>
- #include <linux/syscalls.h>
-+#include <linux/highmem.h>
-
- #include <asm/pgtable.h>
- #include <asm/ldt.h>
-@@ -198,6 +199,35 @@
- }
- EXPORT_SYMBOL_GPL(start_thread);
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+ int i;
-+
-+ /*
-+ * Clear @prev's kmap_atomic mappings
-+ */
-+ for (i = 0; i < prev_p->kmap_idx; i++) {
-+ int idx = i + KM_TYPE_NR * smp_processor_id();
-+ pte_t *ptep = kmap_pte - idx;
-+
-+ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
-+ }
-+ /*
-+ * Restore @next_p's kmap_atomic mappings
-+ */
-+ for (i = 0; i < next_p->kmap_idx; i++) {
-+ int idx = i + KM_TYPE_NR * smp_processor_id();
-+
-+ if (!pte_none(next_p->kmap_pte[i]))
-+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
-+ }
-+}
-+#else
-+static inline void
-+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
-+#endif
-+
-
- /*
- * switch_to(x,y) should switch tasks from x to y.
-@@ -273,6 +303,8 @@
- task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
- __switch_to_xtra(prev_p, next_p, tss);
-
-+ switch_kmaps(prev_p, next_p);
-+
- /*
- * Leave lazy mode, flushing any hypercalls made here.
- * This must be done before restoring TLS segments so
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kvm/lapic.c linux-4.14/arch/x86/kvm/lapic.c
---- linux-4.14.orig/arch/x86/kvm/lapic.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kvm/lapic.c 2018-09-05 11:05:07.000000000 +0200
-@@ -2120,7 +2120,7 @@
- apic->vcpu = vcpu;
-
- hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
-- HRTIMER_MODE_ABS_PINNED);
-+ HRTIMER_MODE_ABS_PINNED_HARD);
- apic->lapic_timer.timer.function = apic_timer_fn;
-
- /*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kvm/x86.c linux-4.14/arch/x86/kvm/x86.c
---- linux-4.14.orig/arch/x86/kvm/x86.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kvm/x86.c 2018-09-05 11:05:07.000000000 +0200
-@@ -6285,6 +6285,13 @@
- goto out;
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
-+ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
-+ return -EOPNOTSUPP;
-+ }
-+#endif
-+
- r = kvm_mmu_module_init();
- if (r)
- goto out_free_percpu;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/mm/highmem_32.c linux-4.14/arch/x86/mm/highmem_32.c
---- linux-4.14.orig/arch/x86/mm/highmem_32.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/mm/highmem_32.c 2018-09-05 11:05:07.000000000 +0200
-@@ -32,10 +32,11 @@
- */
- void *kmap_atomic_prot(struct page *page, pgprot_t prot)
- {
-+ pte_t pte = mk_pte(page, prot);
- unsigned long vaddr;
- int idx, type;
-
-- preempt_disable();
-+ preempt_disable_nort();
- pagefault_disable();
-
- if (!PageHighMem(page))
-@@ -45,7 +46,10 @@
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-- set_pte(kmap_pte-idx, mk_pte(page, prot));
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = pte;
-+#endif
-+ set_pte(kmap_pte-idx, pte);
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-@@ -88,6 +92,9 @@
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = __pte(0);
-+#endif
- kpte_clear_flush(kmap_pte-idx, vaddr);
- kmap_atomic_idx_pop();
- arch_flush_lazy_mmu_mode();
-@@ -100,7 +107,7 @@
- #endif
-
- pagefault_enable();
-- preempt_enable();
-+ preempt_enable_nort();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/mm/iomap_32.c linux-4.14/arch/x86/mm/iomap_32.c
---- linux-4.14.orig/arch/x86/mm/iomap_32.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/mm/iomap_32.c 2018-09-05 11:05:07.000000000 +0200
-@@ -56,6 +56,7 @@
-
- void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
- {
-+ pte_t pte = pfn_pte(pfn, prot);
- unsigned long vaddr;
- int idx, type;
-
-@@ -65,7 +66,12 @@
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
-+ WARN_ON(!pte_none(*(kmap_pte - idx)));
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = pte;
-+#endif
-+ set_pte(kmap_pte - idx, pte);
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-@@ -113,6 +119,9 @@
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = __pte(0);
-+#endif
- kpte_clear_flush(kmap_pte-idx, vaddr);
- kmap_atomic_idx_pop();
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/platform/uv/tlb_uv.c linux-4.14/arch/x86/platform/uv/tlb_uv.c
---- linux-4.14.orig/arch/x86/platform/uv/tlb_uv.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/platform/uv/tlb_uv.c 2018-09-05 11:05:07.000000000 +0200
-@@ -740,9 +740,9 @@
-
- quiesce_local_uvhub(hmaster);
-
-- spin_lock(&hmaster->queue_lock);
-+ raw_spin_lock(&hmaster->queue_lock);
- reset_with_ipi(&bau_desc->distribution, bcp);
-- spin_unlock(&hmaster->queue_lock);
-+ raw_spin_unlock(&hmaster->queue_lock);
-
- end_uvhub_quiesce(hmaster);
-
-@@ -762,9 +762,9 @@
-
- quiesce_local_uvhub(hmaster);
-
-- spin_lock(&hmaster->queue_lock);
-+ raw_spin_lock(&hmaster->queue_lock);
- reset_with_ipi(&bau_desc->distribution, bcp);
-- spin_unlock(&hmaster->queue_lock);
-+ raw_spin_unlock(&hmaster->queue_lock);
-
- end_uvhub_quiesce(hmaster);
-
-@@ -785,7 +785,7 @@
- cycles_t tm1;
-
- hmaster = bcp->uvhub_master;
-- spin_lock(&hmaster->disable_lock);
-+ raw_spin_lock(&hmaster->disable_lock);
- if (!bcp->baudisabled) {
- stat->s_bau_disabled++;
- tm1 = get_cycles();
-@@ -798,7 +798,7 @@
- }
- }
- }
-- spin_unlock(&hmaster->disable_lock);
-+ raw_spin_unlock(&hmaster->disable_lock);
- }
-
- static void count_max_concurr(int stat, struct bau_control *bcp,
-@@ -861,7 +861,7 @@
- */
- static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
- {
-- spinlock_t *lock = &hmaster->uvhub_lock;
-+ raw_spinlock_t *lock = &hmaster->uvhub_lock;
- atomic_t *v;
-
- v = &hmaster->active_descriptor_count;
-@@ -995,7 +995,7 @@
- struct bau_control *hmaster;
-
- hmaster = bcp->uvhub_master;
-- spin_lock(&hmaster->disable_lock);
-+ raw_spin_lock(&hmaster->disable_lock);
- if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
- stat->s_bau_reenabled++;
- for_each_present_cpu(tcpu) {
-@@ -1007,10 +1007,10 @@
- tbcp->period_giveups = 0;
- }
- }
-- spin_unlock(&hmaster->disable_lock);
-+ raw_spin_unlock(&hmaster->disable_lock);
- return 0;
- }
-- spin_unlock(&hmaster->disable_lock);
-+ raw_spin_unlock(&hmaster->disable_lock);
- return -1;
- }
-
-@@ -1942,9 +1942,9 @@
- bcp->cong_reps = congested_reps;
- bcp->disabled_period = sec_2_cycles(disabled_period);
- bcp->giveup_limit = giveup_limit;
-- spin_lock_init(&bcp->queue_lock);
-- spin_lock_init(&bcp->uvhub_lock);
-- spin_lock_init(&bcp->disable_lock);
-+ raw_spin_lock_init(&bcp->queue_lock);
-+ raw_spin_lock_init(&bcp->uvhub_lock);
-+ raw_spin_lock_init(&bcp->disable_lock);
- }
- }
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/platform/uv/uv_time.c linux-4.14/arch/x86/platform/uv/uv_time.c
---- linux-4.14.orig/arch/x86/platform/uv/uv_time.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/platform/uv/uv_time.c 2018-09-05 11:05:07.000000000 +0200
-@@ -57,7 +57,7 @@
-
- /* There is one of these allocated per node */
- struct uv_rtc_timer_head {
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- /* next cpu waiting for timer, local node relative: */
- int next_cpu;
- /* number of cpus on this node: */
-@@ -177,7 +177,7 @@
- uv_rtc_deallocate_timers();
- return -ENOMEM;
- }
-- spin_lock_init(&head->lock);
-+ raw_spin_lock_init(&head->lock);
- head->ncpus = uv_blade_nr_possible_cpus(bid);
- head->next_cpu = -1;
- blade_info[bid] = head;
-@@ -231,7 +231,7 @@
- unsigned long flags;
- int next_cpu;
-
-- spin_lock_irqsave(&head->lock, flags);
-+ raw_spin_lock_irqsave(&head->lock, flags);
-
- next_cpu = head->next_cpu;
- *t = expires;
-@@ -243,12 +243,12 @@
- if (uv_setup_intr(cpu, expires)) {
- *t = ULLONG_MAX;
- uv_rtc_find_next_timer(head, pnode);
-- spin_unlock_irqrestore(&head->lock, flags);
-+ raw_spin_unlock_irqrestore(&head->lock, flags);
- return -ETIME;
- }
- }
-
-- spin_unlock_irqrestore(&head->lock, flags);
-+ raw_spin_unlock_irqrestore(&head->lock, flags);
- return 0;
- }
-
-@@ -267,7 +267,7 @@
- unsigned long flags;
- int rc = 0;
-
-- spin_lock_irqsave(&head->lock, flags);
-+ raw_spin_lock_irqsave(&head->lock, flags);
-
- if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
- rc = 1;
-@@ -279,7 +279,7 @@
- uv_rtc_find_next_timer(head, pnode);
- }
-
-- spin_unlock_irqrestore(&head->lock, flags);
-+ raw_spin_unlock_irqrestore(&head->lock, flags);
-
- return rc;
- }
-@@ -299,13 +299,17 @@
- static u64 uv_read_rtc(struct clocksource *cs)
- {
- unsigned long offset;
-+ u64 cycles;
-
-+ preempt_disable();
- if (uv_get_min_hub_revision_id() == 1)
- offset = 0;
- else
- offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
-
-- return (u64)uv_read_local_mmr(UVH_RTC | offset);
-+ cycles = (u64)uv_read_local_mmr(UVH_RTC | offset);
-+ preempt_enable();
-+ return cycles;
- }
-
- /*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/xtensa/include/asm/spinlock_types.h linux-4.14/arch/xtensa/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/xtensa/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/xtensa/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- volatile unsigned int slock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/block/blk-core.c linux-4.14/block/blk-core.c
---- linux-4.14.orig/block/blk-core.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/block/blk-core.c 2018-09-05 11:05:07.000000000 +0200
-@@ -116,6 +116,9 @@
-
- INIT_LIST_HEAD(&rq->queuelist);
- INIT_LIST_HEAD(&rq->timeout_list);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
-+#endif
- rq->cpu = -1;
- rq->q = q;
- rq->__sector = (sector_t) -1;
-@@ -280,7 +283,7 @@
- void blk_start_queue(struct request_queue *q)
- {
- lockdep_assert_held(q->queue_lock);
-- WARN_ON(!in_interrupt() && !irqs_disabled());
-+ WARN_ON_NONRT(!in_interrupt() && !irqs_disabled());
- WARN_ON_ONCE(q->mq_ops);
-
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-@@ -808,12 +811,21 @@
- percpu_ref_put(&q->q_usage_counter);
- }
-
-+static void blk_queue_usage_counter_release_swork(struct swork_event *sev)
-+{
-+ struct request_queue *q =
-+ container_of(sev, struct request_queue, mq_pcpu_wake);
-+
-+ wake_up_all(&q->mq_freeze_wq);
-+}
-+
- static void blk_queue_usage_counter_release(struct percpu_ref *ref)
- {
- struct request_queue *q =
- container_of(ref, struct request_queue, q_usage_counter);
-
-- wake_up_all(&q->mq_freeze_wq);
-+ if (wq_has_sleeper(&q->mq_freeze_wq))
-+ swork_queue(&q->mq_pcpu_wake);
- }
-
- static void blk_rq_timed_out_timer(unsigned long data)
-@@ -890,6 +902,7 @@
- __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
-
- init_waitqueue_head(&q->mq_freeze_wq);
-+ INIT_SWORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_swork);
-
- /*
- * Init percpu_ref in atomic mode so that it's faster to shutdown.
-@@ -3308,7 +3321,7 @@
- blk_run_queue_async(q);
- else
- __blk_run_queue(q);
-- spin_unlock(q->queue_lock);
-+ spin_unlock_irq(q->queue_lock);
- }
-
- static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3356,7 +3369,6 @@
- void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
- {
- struct request_queue *q;
-- unsigned long flags;
- struct request *rq;
- LIST_HEAD(list);
- unsigned int depth;
-@@ -3376,11 +3388,6 @@
- q = NULL;
- depth = 0;
-
-- /*
-- * Save and disable interrupts here, to avoid doing it for every
-- * queue lock we have to take.
-- */
-- local_irq_save(flags);
- while (!list_empty(&list)) {
- rq = list_entry_rq(list.next);
- list_del_init(&rq->queuelist);
-@@ -3393,7 +3400,7 @@
- queue_unplugged(q, depth, from_schedule);
- q = rq->q;
- depth = 0;
-- spin_lock(q->queue_lock);
-+ spin_lock_irq(q->queue_lock);
- }
-
- /*
-@@ -3420,8 +3427,6 @@
- */
- if (q)
- queue_unplugged(q, depth, from_schedule);
--
-- local_irq_restore(flags);
- }
-
- void blk_finish_plug(struct blk_plug *plug)
-@@ -3631,6 +3636,8 @@
- if (!kblockd_workqueue)
- panic("Failed to create kblockd\n");
-
-+ BUG_ON(swork_get());
-+
- request_cachep = kmem_cache_create("blkdev_requests",
- sizeof(struct request), 0, SLAB_PANIC, NULL);
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/block/blk-ioc.c linux-4.14/block/blk-ioc.c
---- linux-4.14.orig/block/blk-ioc.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/block/blk-ioc.c 2018-09-05 11:05:07.000000000 +0200
-@@ -9,6 +9,7 @@
- #include <linux/blkdev.h>
- #include <linux/slab.h>
- #include <linux/sched/task.h>
-+#include <linux/delay.h>
-
- #include "blk.h"
-
-@@ -118,7 +119,7 @@
- spin_unlock(q->queue_lock);
- } else {
- spin_unlock_irqrestore(&ioc->lock, flags);
-- cpu_relax();
-+ cpu_chill();
- spin_lock_irqsave_nested(&ioc->lock, flags, 1);
- }
- }
-@@ -202,7 +203,7 @@
- spin_unlock(icq->q->queue_lock);
- } else {
- spin_unlock_irqrestore(&ioc->lock, flags);
-- cpu_relax();
-+ cpu_chill();
- goto retry;
- }
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/block/blk-mq.c linux-4.14/block/blk-mq.c
---- linux-4.14.orig/block/blk-mq.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/block/blk-mq.c 2018-09-05 11:05:07.000000000 +0200
-@@ -339,6 +339,9 @@
- /* tag was already set */
- rq->extra_len = 0;
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
-+#endif
- INIT_LIST_HEAD(&rq->timeout_list);
- rq->timeout = 0;
-
-@@ -533,12 +536,24 @@
- }
- EXPORT_SYMBOL(blk_mq_end_request);
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+
-+void __blk_mq_complete_request_remote_work(struct work_struct *work)
-+{
-+ struct request *rq = container_of(work, struct request, work);
-+
-+ rq->q->softirq_done_fn(rq);
-+}
-+
-+#else
-+
- static void __blk_mq_complete_request_remote(void *data)
- {
- struct request *rq = data;
-
- rq->q->softirq_done_fn(rq);
- }
-+#endif
-
- static void __blk_mq_complete_request(struct request *rq)
- {
-@@ -558,19 +573,27 @@
- return;
- }
-
-- cpu = get_cpu();
-+ cpu = get_cpu_light();
- if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
- shared = cpus_share_cache(cpu, ctx->cpu);
-
- if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ /*
-+ * We could force QUEUE_FLAG_SAME_FORCE then we would not get in
-+ * here. But we could try to invoke it one the CPU like this.
-+ */
-+ schedule_work_on(ctx->cpu, &rq->work);
-+#else
- rq->csd.func = __blk_mq_complete_request_remote;
- rq->csd.info = rq;
- rq->csd.flags = 0;
- smp_call_function_single_async(ctx->cpu, &rq->csd);
-+#endif
- } else {
- rq->q->softirq_done_fn(rq);
- }
-- put_cpu();
-+ put_cpu_light();
- }
-
- /**
-@@ -1238,14 +1261,14 @@
- return;
-
- if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
-- int cpu = get_cpu();
-+ int cpu = get_cpu_light();
- if (cpumask_test_cpu(cpu, hctx->cpumask)) {
- __blk_mq_run_hw_queue(hctx);
-- put_cpu();
-+ put_cpu_light();
- return;
- }
-
-- put_cpu();
-+ put_cpu_light();
- }
-
- kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-@@ -2863,10 +2886,9 @@
- kt = nsecs;
-
- mode = HRTIMER_MODE_REL;
-- hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
-+ hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode, current);
- hrtimer_set_expires(&hs.timer, kt);
-
-- hrtimer_init_sleeper(&hs, current);
- do {
- if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
- break;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/block/blk-mq.h linux-4.14/block/blk-mq.h
---- linux-4.14.orig/block/blk-mq.h 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/block/blk-mq.h 2018-09-05 11:05:07.000000000 +0200
-@@ -98,12 +98,12 @@
- */
- static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
- {
-- return __blk_mq_get_ctx(q, get_cpu());
-+ return __blk_mq_get_ctx(q, get_cpu_light());
- }
-
- static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
- {
-- put_cpu();
-+ put_cpu_light();
- }
-
- struct blk_mq_alloc_data {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/block/blk-softirq.c linux-4.14/block/blk-softirq.c
---- linux-4.14.orig/block/blk-softirq.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/block/blk-softirq.c 2018-09-05 11:05:07.000000000 +0200
-@@ -53,6 +53,7 @@
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
-
- local_irq_restore(flags);
-+ preempt_check_resched_rt();
- }
-
- /*
-@@ -91,6 +92,7 @@
- this_cpu_ptr(&blk_cpu_done));
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
- local_irq_enable();
-+ preempt_check_resched_rt();
-
- return 0;
- }
-@@ -143,6 +145,7 @@
- goto do_local;
-
- local_irq_restore(flags);
-+ preempt_check_resched_rt();
- }
-
- /**
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/block/bounce.c linux-4.14/block/bounce.c
---- linux-4.14.orig/block/bounce.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/block/bounce.c 2018-09-05 11:05:07.000000000 +0200
-@@ -66,11 +66,11 @@
- unsigned long flags;
- unsigned char *vto;
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- vto = kmap_atomic(to->bv_page);
- memcpy(vto + to->bv_offset, vfrom, to->bv_len);
- kunmap_atomic(vto);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
-
- #else /* CONFIG_HIGHMEM */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/crypto/algapi.c linux-4.14/crypto/algapi.c
---- linux-4.14.orig/crypto/algapi.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/crypto/algapi.c 2018-09-05 11:05:07.000000000 +0200
-@@ -731,13 +731,13 @@
-
- int crypto_register_notifier(struct notifier_block *nb)
- {
-- return blocking_notifier_chain_register(&crypto_chain, nb);
-+ return srcu_notifier_chain_register(&crypto_chain, nb);
- }
- EXPORT_SYMBOL_GPL(crypto_register_notifier);
-
- int crypto_unregister_notifier(struct notifier_block *nb)
- {
-- return blocking_notifier_chain_unregister(&crypto_chain, nb);
-+ return srcu_notifier_chain_unregister(&crypto_chain, nb);
- }
- EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/crypto/api.c linux-4.14/crypto/api.c
---- linux-4.14.orig/crypto/api.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/crypto/api.c 2018-09-05 11:05:07.000000000 +0200
-@@ -31,7 +31,7 @@
- DECLARE_RWSEM(crypto_alg_sem);
- EXPORT_SYMBOL_GPL(crypto_alg_sem);
-
--BLOCKING_NOTIFIER_HEAD(crypto_chain);
-+SRCU_NOTIFIER_HEAD(crypto_chain);
- EXPORT_SYMBOL_GPL(crypto_chain);
-
- static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
-@@ -236,10 +236,10 @@
- {
- int ok;
-
-- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
-+ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
- if (ok == NOTIFY_DONE) {
- request_module("cryptomgr");
-- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
-+ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
- }
-
- return ok;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/crypto/internal.h linux-4.14/crypto/internal.h
---- linux-4.14.orig/crypto/internal.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/crypto/internal.h 2018-09-05 11:05:07.000000000 +0200
-@@ -47,7 +47,7 @@
-
- extern struct list_head crypto_alg_list;
- extern struct rw_semaphore crypto_alg_sem;
--extern struct blocking_notifier_head crypto_chain;
-+extern struct srcu_notifier_head crypto_chain;
-
- #ifdef CONFIG_PROC_FS
- void __init crypto_init_proc(void);
-@@ -143,7 +143,7 @@
-
- static inline void crypto_notify(unsigned long val, void *v)
- {
-- blocking_notifier_call_chain(&crypto_chain, val, v);
-+ srcu_notifier_call_chain(&crypto_chain, val, v);
- }
-
- #endif /* _CRYPTO_INTERNAL_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/Documentation/trace/events.txt linux-4.14/Documentation/trace/events.txt
---- linux-4.14.orig/Documentation/trace/events.txt 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/Documentation/trace/events.txt 2018-09-05 11:05:07.000000000 +0200
-@@ -517,1550 +517,4 @@
- totals derived from one or more trace event format fields and/or
- event counts (hitcount).
-
-- The format of a hist trigger is as follows:
--
-- hist:keys=<field1[,field2,...]>[:values=<field1[,field2,...]>]
-- [:sort=<field1[,field2,...]>][:size=#entries][:pause][:continue]
-- [:clear][:name=histname1] [if <filter>]
--
-- When a matching event is hit, an entry is added to a hash table
-- using the key(s) and value(s) named. Keys and values correspond to
-- fields in the event's format description. Values must correspond to
-- numeric fields - on an event hit, the value(s) will be added to a
-- sum kept for that field. The special string 'hitcount' can be used
-- in place of an explicit value field - this is simply a count of
-- event hits. If 'values' isn't specified, an implicit 'hitcount'
-- value will be automatically created and used as the only value.
-- Keys can be any field, or the special string 'stacktrace', which
-- will use the event's kernel stacktrace as the key. The keywords
-- 'keys' or 'key' can be used to specify keys, and the keywords
-- 'values', 'vals', or 'val' can be used to specify values. Compound
-- keys consisting of up to two fields can be specified by the 'keys'
-- keyword. Hashing a compound key produces a unique entry in the
-- table for each unique combination of component keys, and can be
-- useful for providing more fine-grained summaries of event data.
-- Additionally, sort keys consisting of up to two fields can be
-- specified by the 'sort' keyword. If more than one field is
-- specified, the result will be a 'sort within a sort': the first key
-- is taken to be the primary sort key and the second the secondary
-- key. If a hist trigger is given a name using the 'name' parameter,
-- its histogram data will be shared with other triggers of the same
-- name, and trigger hits will update this common data. Only triggers
-- with 'compatible' fields can be combined in this way; triggers are
-- 'compatible' if the fields named in the trigger share the same
-- number and type of fields and those fields also have the same names.
-- Note that any two events always share the compatible 'hitcount' and
-- 'stacktrace' fields and can therefore be combined using those
-- fields, however pointless that may be.
+- When a matching event is hit, an entry is added to a hash table
+- using the key(s) and value(s) named. Keys and values correspond to
+- fields in the event's format description. Values must correspond to
+- numeric fields - on an event hit, the value(s) will be added to a
+- sum kept for that field. The special string 'hitcount' can be used
+- in place of an explicit value field - this is simply a count of
+- event hits. If 'values' isn't specified, an implicit 'hitcount'
+- value will be automatically created and used as the only value.
+- Keys can be any field, or the special string 'stacktrace', which
+- will use the event's kernel stacktrace as the key. The keywords
+- 'keys' or 'key' can be used to specify keys, and the keywords
+- 'values', 'vals', or 'val' can be used to specify values. Compound
+- keys consisting of up to two fields can be specified by the 'keys'
+- keyword. Hashing a compound key produces a unique entry in the
+- table for each unique combination of component keys, and can be
+- useful for providing more fine-grained summaries of event data.
+- Additionally, sort keys consisting of up to two fields can be
+- specified by the 'sort' keyword. If more than one field is
+- specified, the result will be a 'sort within a sort': the first key
+- is taken to be the primary sort key and the second the secondary
+- key. If a hist trigger is given a name using the 'name' parameter,
+- its histogram data will be shared with other triggers of the same
+- name, and trigger hits will update this common data. Only triggers
+- with 'compatible' fields can be combined in this way; triggers are
+- 'compatible' if the fields named in the trigger share the same
+- number and type of fields and those fields also have the same names.
+- Note that any two events always share the compatible 'hitcount' and
+- 'stacktrace' fields and can therefore be combined using those
+- fields, however pointless that may be.
-
- 'hist' triggers add a 'hist' file to each event's subdirectory.
- Reading the 'hist' file for the event will dump the hash table in
- Entries: 7
- Dropped: 0
+ See Documentation/trace/histogram.txt for details and examples.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/Documentation/trace/ftrace.txt linux-4.14/Documentation/trace/ftrace.txt
---- linux-4.14.orig/Documentation/trace/ftrace.txt 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/Documentation/trace/ftrace.txt 2018-09-05 11:05:07.000000000 +0200
-@@ -539,6 +539,30 @@
+diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
+index d4601df6e72e..54213e5c23f6 100644
+--- a/Documentation/trace/ftrace.txt
++++ b/Documentation/trace/ftrace.txt
+@@ -539,6 +539,30 @@ of ftrace. Here is a list of some of the key files:
See events.txt for more information.
hwlat_detector:
Directory for the Hardware Latency Detector.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/Documentation/trace/histogram.txt linux-4.14/Documentation/trace/histogram.txt
---- linux-4.14.orig/Documentation/trace/histogram.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/Documentation/trace/histogram.txt 2018-09-05 11:05:07.000000000 +0200
+diff --git a/Documentation/trace/histogram.txt b/Documentation/trace/histogram.txt
+new file mode 100644
+index 000000000000..6e05510afc28
+--- /dev/null
++++ b/Documentation/trace/histogram.txt
@@ -0,0 +1,1995 @@
+ Event Histograms
+
+ entry_SYSCALL_64_fastpath+0x12/0x6a
+ } hitcount: 244
+
-+ Totals:
-+ Hits: 489
-+ Entries: 7
-+ Dropped: 0
++ Totals:
++ Hits: 489
++ Entries: 7
++ Dropped: 0
++
++
++2.2 Inter-event hist triggers
++-----------------------------
++
++Inter-event hist triggers are hist triggers that combine values from
++one or more other events and create a histogram using that data. Data
++from an inter-event histogram can in turn become the source for
++further combined histograms, thus providing a chain of related
++histograms, which is important for some applications.
++
++The most important example of an inter-event quantity that can be used
++in this manner is latency, which is simply a difference in timestamps
++between two events. Although latency is the most important
++inter-event quantity, note that because the support is completely
++general across the trace event subsystem, any event field can be used
++in an inter-event quantity.
++
++An example of a histogram that combines data from other histograms
++into a useful chain would be a 'wakeupswitch latency' histogram that
++combines a 'wakeup latency' histogram and a 'switch latency'
++histogram.
++
++Normally, a hist trigger specification consists of a (possibly
++compound) key along with one or more numeric values, which are
++continually updated sums associated with that key. A histogram
++specification in this case consists of individual key and value
++specifications that refer to trace event fields associated with a
++single event type.
++
++The inter-event hist trigger extension allows fields from multiple
++events to be referenced and combined into a multi-event histogram
++specification. In support of this overall goal, a few enabling
++features have been added to the hist trigger support:
++
++ - In order to compute an inter-event quantity, a value from one
++ event needs to be saved and then referenced from another event. This
++ requires the introduction of support for histogram 'variables'.
++
++ - The computation of inter-event quantities and their combination
++ require some minimal amount of support for applying simple
++ expressions to variables (+ and -).
++
++ - A histogram consisting of inter-event quantities isn't logically a
++ histogram on either event (so having the 'hist' file for either
++ event host the histogram output doesn't really make sense). To
++ address the idea that the histogram is associated with a
++ combination of events, support is added allowing the creation of
++ 'synthetic' events that are events derived from other events.
++ These synthetic events are full-fledged events just like any other
++ and can be used as such, as for instance to create the
++ 'combination' histograms mentioned previously.
++
++ - A set of 'actions' can be associated with histogram entries -
++ these can be used to generate the previously mentioned synthetic
++ events, but can also be used for other purposes, such as for
++ example saving context when a 'max' latency has been hit.
++
++ - Trace events don't have a 'timestamp' associated with them, but
++ there is an implicit timestamp saved along with an event in the
++ underlying ftrace ring buffer. This timestamp is now exposed as a
++ a synthetic field named 'common_timestamp' which can be used in
++ histograms as if it were any other event field; it isn't an actual
++ field in the trace format but rather is a synthesized value that
++ nonetheless can be used as if it were an actual field. By default
++ it is in units of nanoseconds; appending '.usecs' to a
++ common_timestamp field changes the units to microseconds.
++
++A note on inter-event timestamps: If common_timestamp is used in a
++histogram, the trace buffer is automatically switched over to using
++absolute timestamps and the "global" trace clock, in order to avoid
++bogus timestamp differences with other clocks that aren't coherent
++across CPUs. This can be overridden by specifying one of the other
++trace clocks instead, using the "clock=XXX" hist trigger attribute,
++where XXX is any of the clocks listed in the tracing/trace_clock
++pseudo-file.
++
++These features are described in more detail in the following sections.
++
++2.2.1 Histogram Variables
++-------------------------
++
++Variables are simply named locations used for saving and retrieving
++values between matching events. A 'matching' event is defined as an
++event that has a matching key - if a variable is saved for a histogram
++entry corresponding to that key, any subsequent event with a matching
++key can access that variable.
++
++A variable's value is normally available to any subsequent event until
++it is set to something else by a subsequent event. The one exception
++to that rule is that any variable used in an expression is essentially
++'read-once' - once it's used by an expression in a subsequent event,
++it's reset to its 'unset' state, which means it can't be used again
++unless it's set again. This ensures not only that an event doesn't
++use an uninitialized variable in a calculation, but that that variable
++is used only once and not for any unrelated subsequent match.
++
++The basic syntax for saving a variable is to simply prefix a unique
++variable name not corresponding to any keyword along with an '=' sign
++to any event field.
++
++Either keys or values can be saved and retrieved in this way. This
++creates a variable named 'ts0' for a histogram entry with the key
++'next_pid':
++
++ # echo 'hist:keys=next_pid:vals=$ts0:ts0=common_timestamp ... >> \
++ event/trigger
++
++The ts0 variable can be accessed by any subsequent event having the
++same pid as 'next_pid'.
++
++Variable references are formed by prepending the variable name with
++the '$' sign. Thus for example, the ts0 variable above would be
++referenced as '$ts0' in expressions.
++
++Because 'vals=' is used, the common_timestamp variable value above
++will also be summed as a normal histogram value would (though for a
++timestamp it makes little sense).
++
++The below shows that a key value can also be saved in the same way:
++
++ # echo 'hist:timer_pid=common_pid:key=timer_pid ...' >> event/trigger
++
++If a variable isn't a key variable or prefixed with 'vals=', the
++associated event field will be saved in a variable but won't be summed
++as a value:
++
++ # echo 'hist:keys=next_pid:ts1=common_timestamp ... >> event/trigger
++
++Multiple variables can be assigned at the same time. The below would
++result in both ts0 and b being created as variables, with both
++common_timestamp and field1 additionally being summed as values:
++
++ # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ... >> \
++ event/trigger
++
++Note that variable assignments can appear either preceding or
++following their use. The command below behaves identically to the
++command above:
++
++ # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ... >> \
++ event/trigger
++
++Any number of variables not bound to a 'vals=' prefix can also be
++assigned by simply separating them with colons. Below is the same
++thing but without the values being summed in the histogram:
++
++ # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ... >> event/trigger
++
++Variables set as above can be referenced and used in expressions on
++another event.
++
++For example, here's how a latency can be calculated:
++
++ # echo 'hist:keys=pid,prio:ts0=common_timestamp ... >> event1/trigger
++ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ... >> event2/trigger
++
++In the first line above, the event's timestamp is saved into the
++variable ts0. In the next line, ts0 is subtracted from the second
++event's timestamp to produce the latency, which is then assigned into
++yet another variable, 'wakeup_lat'. The hist trigger below in turn
++makes use of the wakeup_lat variable to compute a combined latency
++using the same key and variable from yet another event:
++
++ # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ... >> event3/trigger
++
++2.2.2 Synthetic Events
++----------------------
++
++Synthetic events are user-defined events generated from hist trigger
++variables or fields associated with one or more other events. Their
++purpose is to provide a mechanism for displaying data spanning
++multiple events consistent with the existing and already familiar
++usage for normal events.
++
++To define a synthetic event, the user writes a simple specification
++consisting of the name of the new event along with one or more
++variables and their types, which can be any valid field type,
++separated by semicolons, to the tracing/synthetic_events file.
++
++For instance, the following creates a new event named 'wakeup_latency'
++with 3 fields: lat, pid, and prio. Each of those fields is simply a
++variable reference to a variable on another event:
++
++ # echo 'wakeup_latency \
++ u64 lat; \
++ pid_t pid; \
++ int prio' >> \
++ /sys/kernel/debug/tracing/synthetic_events
++
++Reading the tracing/synthetic_events file lists all the currently
++defined synthetic events, in this case the event defined above:
++
++ # cat /sys/kernel/debug/tracing/synthetic_events
++ wakeup_latency u64 lat; pid_t pid; int prio
++
++An existing synthetic event definition can be removed by prepending
++the command that defined it with a '!':
++
++ # echo '!wakeup_latency u64 lat pid_t pid int prio' >> \
++ /sys/kernel/debug/tracing/synthetic_events
++
++At this point, there isn't yet an actual 'wakeup_latency' event
++instantiated in the event subsystem - for this to happen, a 'hist
++trigger action' needs to be instantiated and bound to actual fields
++ and variables defined on other events (see Section 2.2.3 below).
++
++Once that is done, an event instance is created, and a histogram can
++be defined using it:
++
++ # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
++ /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
++
++The new event is created under the tracing/events/synthetic/ directory
++and looks and behaves just like any other event:
++
++ # ls /sys/kernel/debug/tracing/events/synthetic/wakeup_latency
++ enable filter format hist id trigger
++
++Like any other event, once a histogram is enabled for the event, the
++output can be displayed by reading the event's 'hist' file.
++
++2.2.3 Hist trigger 'actions'
++----------------------------
++
++A hist trigger 'action' is a function that's executed whenever a
++histogram entry is added or updated.
++
++The default 'action' if no special function is explicitly specified is
++as it always has been, to simply update the set of values associated
++with an entry. Some applications, however, may want to perform
++additional actions at that point, such as generate another event, or
++compare and save a maximum.
++
++The following additional actions are available. To specify an action
++for a given event, simply specify the action between colons in the
++hist trigger specification.
++
++ - onmatch(matching.event).<synthetic_event_name>(param list)
++
++ The 'onmatch(matching.event).<synthetic_event_name>(params)' hist
++ trigger action is invoked whenever an event matches and the
++ histogram entry would be added or updated. It causes the named
++ synthetic event to be generated with the values given in the
++ 'param list'. The result is the generation of a synthetic event
++ that consists of the values contained in those variables at the
++ time the invoking event was hit.
++
++ The 'param list' consists of one or more parameters which may be
++ either variables or fields defined on either the 'matching.event'
++ or the target event. The variables or fields specified in the
++ param list may be either fully-qualified or unqualified. If a
++ variable is specified as unqualified, it must be unique between
++ the two events. A field name used as a param can be unqualified
++ if it refers to the target event, but must be fully qualified if
++ it refers to the matching event. A fully-qualified name is of the
++ form 'system.event_name.$var_name' or 'system.event_name.field'.
++
++ The 'matching.event' specification is simply the fully qualified
++ event name of the event that matches the target event for the
++ onmatch() functionality, in the form 'system.event_name'.
++
++ Finally, the number and type of variables/fields in the 'param
++ list' must match the number and types of the fields in the
++ synthetic event being generated.
++
++ As an example the below defines a simple synthetic event and uses
++ a variable defined on the sched_wakeup_new event as a parameter
++ when invoking the synthetic event. Here we define the synthetic
++ event:
++
++ # echo 'wakeup_new_test pid_t pid' >> \
++ /sys/kernel/debug/tracing/synthetic_events
++
++ # cat /sys/kernel/debug/tracing/synthetic_events
++ wakeup_new_test pid_t pid
++
++ The following hist trigger both defines the missing testpid
++ variable and specifies an onmatch() action that generates a
++ wakeup_new_test synthetic event whenever a sched_wakeup_new event
++ occurs, which because of the 'if comm == "cyclictest"' filter only
++ happens when the executable is cyclictest:
++
++ # echo 'hist:keys=$testpid:testpid=pid:onmatch(sched.sched_wakeup_new).\
++ wakeup_new_test($testpid) if comm=="cyclictest"' >> \
++ /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/trigger
++
++ Creating and displaying a histogram based on those events is now
++ just a matter of using the fields and new synthetic event in the
++ tracing/events/synthetic directory, as usual:
++
++ # echo 'hist:keys=pid:sort=pid' >> \
++ /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/trigger
++
++ Running 'cyclictest' should cause wakeup_new events to generate
++ wakeup_new_test synthetic events which should result in histogram
++ output in the wakeup_new_test event's hist file:
++
++ # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/hist
++
++ A more typical usage would be to use two events to calculate a
++ latency. The following example uses a set of hist triggers to
++ produce a 'wakeup_latency' histogram:
++
++ First, we define a 'wakeup_latency' synthetic event:
++
++ # echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> \
++ /sys/kernel/debug/tracing/synthetic_events
+
++ Next, we specify that whenever we see a sched_waking event for a
++ cyclictest thread, save the timestamp in a 'ts0' variable:
+
-+2.2 Inter-event hist triggers
-+-----------------------------
++ # echo 'hist:keys=$saved_pid:saved_pid=pid:ts0=common_timestamp.usecs \
++ if comm=="cyclictest"' >> \
++ /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+
-+Inter-event hist triggers are hist triggers that combine values from
-+one or more other events and create a histogram using that data. Data
-+from an inter-event histogram can in turn become the source for
-+further combined histograms, thus providing a chain of related
-+histograms, which is important for some applications.
++ Then, when the corresponding thread is actually scheduled onto the
++ CPU by a sched_switch event, calculate the latency and use that
++ along with another variable and an event field to generate a
++ wakeup_latency synthetic event:
+
-+The most important example of an inter-event quantity that can be used
-+in this manner is latency, which is simply a difference in timestamps
-+between two events. Although latency is the most important
-+inter-event quantity, note that because the support is completely
-+general across the trace event subsystem, any event field can be used
-+in an inter-event quantity.
++ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:\
++ onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,\
++ $saved_pid,next_prio) if next_comm=="cyclictest"' >> \
++ /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+
-+An example of a histogram that combines data from other histograms
-+into a useful chain would be a 'wakeupswitch latency' histogram that
-+combines a 'wakeup latency' histogram and a 'switch latency'
-+histogram.
++ We also need to create a histogram on the wakeup_latency synthetic
++ event in order to aggregate the generated synthetic event data:
+
-+Normally, a hist trigger specification consists of a (possibly
-+compound) key along with one or more numeric values, which are
-+continually updated sums associated with that key. A histogram
-+specification in this case consists of individual key and value
-+specifications that refer to trace event fields associated with a
-+single event type.
++ # echo 'hist:keys=pid,prio,lat:sort=pid,lat' >> \
++ /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
+
-+The inter-event hist trigger extension allows fields from multiple
-+events to be referenced and combined into a multi-event histogram
-+specification. In support of this overall goal, a few enabling
-+features have been added to the hist trigger support:
++ Finally, once we've run cyclictest to actually generate some
++ events, we can see the output by looking at the wakeup_latency
++ synthetic event's hist file:
+
-+ - In order to compute an inter-event quantity, a value from one
-+ event needs to saved and then referenced from another event. This
-+ requires the introduction of support for histogram 'variables'.
++ # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/hist
+
-+ - The computation of inter-event quantities and their combination
-+ require some minimal amount of support for applying simple
-+ expressions to variables (+ and -).
++ - onmax(var).save(field,...)
+
-+ - A histogram consisting of inter-event quantities isn't logically a
-+ histogram on either event (so having the 'hist' file for either
-+ event host the histogram output doesn't really make sense). To
-+ address the idea that the histogram is associated with a
-+ combination of events, support is added allowing the creation of
-+ 'synthetic' events that are events derived from other events.
-+ These synthetic events are full-fledged events just like any other
-+ and can be used as such, as for instance to create the
-+ 'combination' histograms mentioned previously.
++ The 'onmax(var).save(field,...)' hist trigger action is invoked
++ whenever the value of 'var' associated with a histogram entry
++ exceeds the current maximum contained in that variable.
+
-+ - A set of 'actions' can be associated with histogram entries -
-+ these can be used to generate the previously mentioned synthetic
-+ events, but can also be used for other purposes, such as for
-+ example saving context when a 'max' latency has been hit.
++ The end result is that the trace event fields specified as the
++ onmax.save() params will be saved if 'var' exceeds the current
++ maximum for that hist trigger entry. This allows context from the
++ event that exhibited the new maximum to be saved for later
++ reference. When the histogram is displayed, additional fields
++ displaying the saved values will be printed.
+
-+ - Trace events don't have a 'timestamp' associated with them, but
-+ there is an implicit timestamp saved along with an event in the
-+ underlying ftrace ring buffer. This timestamp is now exposed as a
-+ a synthetic field named 'common_timestamp' which can be used in
-+ histograms as if it were any other event field; it isn't an actual
-+ field in the trace format but rather is a synthesized value that
-+ nonetheless can be used as if it were an actual field. By default
-+ it is in units of nanoseconds; appending '.usecs' to a
-+ common_timestamp field changes the units to microseconds.
++ As an example the below defines a couple of hist triggers, one for
++ sched_waking and another for sched_switch, keyed on pid. Whenever
++ a sched_waking occurs, the timestamp is saved in the entry
++ corresponding to the current pid, and when the scheduler switches
++ back to that pid, the timestamp difference is calculated. If the
++ resulting latency, stored in wakeup_lat, exceeds the current
++ maximum latency, the values specified in the save() fields are
++ recorded:
+
-+A note on inter-event timestamps: If common_timestamp is used in a
-+histogram, the trace buffer is automatically switched over to using
-+absolute timestamps and the "global" trace clock, in order to avoid
-+bogus timestamp differences with other clocks that aren't coherent
-+across CPUs. This can be overridden by specifying one of the other
-+trace clocks instead, using the "clock=XXX" hist trigger attribute,
-+where XXX is any of the clocks listed in the tracing/trace_clock
-+pseudo-file.
++ # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
++ if comm=="cyclictest"' >> \
++ /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+
-+These features are described in more detail in the following sections.
++ # echo 'hist:keys=next_pid:\
++ wakeup_lat=common_timestamp.usecs-$ts0:\
++ onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm) \
++ if next_comm=="cyclictest"' >> \
++ /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+
-+2.2.1 Histogram Variables
-+-------------------------
++ When the histogram is displayed, the max value and the saved
++ values corresponding to the max are displayed following the rest
++ of the fields:
+
-+Variables are simply named locations used for saving and retrieving
-+values between matching events. A 'matching' event is defined as an
-+event that has a matching key - if a variable is saved for a histogram
-+entry corresponding to that key, any subsequent event with a matching
-+key can access that variable.
++ # cat /sys/kernel/debug/tracing/events/sched/sched_switch/hist
++ { next_pid: 2255 } hitcount: 239
++ common_timestamp-ts0: 0
++ max: 27
++ next_comm: cyclictest
++ prev_pid: 0 prev_prio: 120 prev_comm: swapper/1
+
-+A variable's value is normally available to any subsequent event until
-+it is set to something else by a subsequent event. The one exception
-+to that rule is that any variable used in an expression is essentially
-+'read-once' - once it's used by an expression in a subsequent event,
-+it's reset to its 'unset' state, which means it can't be used again
-+unless it's set again. This ensures not only that an event doesn't
-+use an uninitialized variable in a calculation, but that that variable
-+is used only once and not for any unrelated subsequent match.
++ { next_pid: 2256 } hitcount: 2355
++ common_timestamp-ts0: 0
++ max: 49 next_comm: cyclictest
++ prev_pid: 0 prev_prio: 120 prev_comm: swapper/0
+
-+The basic syntax for saving a variable is to simply prefix a unique
-+variable name not corresponding to any keyword along with an '=' sign
-+to any event field.
++ Totals:
++ Hits: 12970
++ Entries: 2
++ Dropped: 0
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 40dc31fea90c..7c6108479209 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -20,6 +20,7 @@ config OPROFILE
+ tristate "OProfile system profiling"
+ depends on PROFILING
+ depends on HAVE_OPROFILE
++ depends on !PREEMPT_RT_FULL
+ select RING_BUFFER
+ select RING_BUFFER_ALLOW_SWAP
+ help
+diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
+index 1d5716bc060b..6883bc952d22 100644
+--- a/arch/alpha/include/asm/spinlock_types.h
++++ b/arch/alpha/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef _ALPHA_SPINLOCK_TYPES_H
+ #define _ALPHA_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index d1346a160760..558b0995e94a 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -45,7 +45,7 @@ config ARM
+ select HARDIRQS_SW_RESEND
+ select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
+ select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
++ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
+ select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+@@ -85,6 +85,7 @@ config ARM
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
+@@ -2164,7 +2165,7 @@ config NEON
+
+ config KERNEL_MODE_NEON
+ bool "Support for NEON in kernel mode"
+- depends on NEON && AEABI
++ depends on NEON && AEABI && !PREEMPT_RT_BASE
+ help
+ Say Y to include support for NEON in kernel mode.
+
+diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
+index b6f319606e30..ad377ef73739 100644
+--- a/arch/arm/include/asm/irq.h
++++ b/arch/arm/include/asm/irq.h
+@@ -23,6 +23,8 @@
+ #endif
+
+ #ifndef __ASSEMBLY__
++#include <linux/cpumask.h>
+
-+Either keys or values can be saved and retrieved in this way. This
-+creates a variable named 'ts0' for a histogram entry with the key
-+'next_pid':
+ struct irqaction;
+ struct pt_regs;
+ extern void migrate_irqs(void);
+diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
+index 5976958647fe..a37c0803954b 100644
+--- a/arch/arm/include/asm/spinlock_types.h
++++ b/arch/arm/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ #define TICKET_SHIFT 16
+
+ typedef struct {
+diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
+index d3e937dcee4d..6ab96a2ce1f8 100644
+--- a/arch/arm/include/asm/switch_to.h
++++ b/arch/arm/include/asm/switch_to.h
+@@ -4,6 +4,13 @@
+
+ #include <linux/thread_info.h>
+
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
+
-+ # echo 'hist:keys=next_pid:vals=$ts0:ts0=common_timestamp ... >> \
-+ event/trigger
+ /*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+@@ -26,6 +33,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
+ #define switch_to(prev,next,last) \
+ do { \
+ __complete_pending_tlbi(); \
++ switch_kmaps(prev, next); \
+ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
+ } while (0)
+
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 776757d1604a..1f36a4eccc72 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -49,6 +49,7 @@ struct cpu_context_save {
+ struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ __u32 cpu; /* cpu */
+@@ -142,7 +143,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define TIF_SYSCALL_TRACE 4 /* syscall trace active */
+ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
++#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
++#define TIF_NEED_RESCHED_LAZY 7
+
+ #define TIF_NOHZ 12 /* in adaptive nohz mode */
+ #define TIF_USING_IWMMXT 17
+@@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+@@ -167,7 +170,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ * Change these and you break ASM code in entry-common.S
+ */
+ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
++ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
++ _TIF_NEED_RESCHED_LAZY)
+
+ #endif /* __KERNEL__ */
+ #endif /* __ASM_ARM_THREAD_INFO_H */
+diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
+index 608008229c7d..3866da3f7bb7 100644
+--- a/arch/arm/kernel/asm-offsets.c
++++ b/arch/arm/kernel/asm-offsets.c
+@@ -65,6 +65,7 @@ int main(void)
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index fbc707626b3e..b434c59d2b64 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -220,11 +220,18 @@ __irq_svc:
+
+ #ifdef CONFIG_PREEMPT
+ ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
+- ldr r0, [tsk, #TI_FLAGS] @ get flags
+ teq r8, #0 @ if preempt count != 0
++ bne 1f @ return from exeption
++ ldr r0, [tsk, #TI_FLAGS] @ get flags
++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
++ blne svc_preempt @ preempt!
+
-+The ts0 variable can be accessed by any subsequent event having the
-+same pid as 'next_pid'.
++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r8, #0 @ if preempt lazy count != 0
+ movne r0, #0 @ force flags to 0
+- tst r0, #_TIF_NEED_RESCHED
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ blne svc_preempt
++1:
+ #endif
+
+ svc_exit r5, irq = 1 @ return from exception
+@@ -239,8 +246,14 @@ svc_preempt:
+ 1: bl preempt_schedule_irq @ irq en/disable is done inside
+ ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
+ tst r0, #_TIF_NEED_RESCHED
++ bne 1b
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ reteq r8 @ go again
+- b 1b
++ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r0, #0 @ if preempt lazy count != 0
++ beq 1b
++ ret r8 @ go again
+
-+Variable references are formed by prepending the variable name with
-+the '$' sign. Thus for example, the ts0 variable above would be
-+referenced as '$ts0' in expressions.
+ #endif
+
+ __und_fault:
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 99c908226065..5ffd7188af2d 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -53,7 +53,9 @@ ret_fast_syscall:
+ cmp r2, #TASK_SIZE
+ blne addr_limit_check_failed
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++ bne fast_work_pending
++ tst r1, #_TIF_SECCOMP
+ bne fast_work_pending
+
+
+@@ -83,8 +85,11 @@ ret_fast_syscall:
+ cmp r2, #TASK_SIZE
+ blne addr_limit_check_failed
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++ bne do_slower_path
++ tst r1, #_TIF_SECCOMP
+ beq no_work_pending
++do_slower_path:
+ UNWIND(.fnend )
+ ENDPROC(ret_fast_syscall)
+
+diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
+index a50dc00d79a2..d0a05a3bdb96 100644
+--- a/arch/arm/kernel/patch.c
++++ b/arch/arm/kernel/patch.c
+@@ -16,7 +16,7 @@ struct patch {
+ unsigned int insn;
+ };
+
+-static DEFINE_SPINLOCK(patch_lock);
++static DEFINE_RAW_SPINLOCK(patch_lock);
+
+ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+ __acquires(&patch_lock)
+@@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+ return addr;
+
+ if (flags)
+- spin_lock_irqsave(&patch_lock, *flags);
++ raw_spin_lock_irqsave(&patch_lock, *flags);
+ else
+ __acquire(&patch_lock);
+
+@@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
+ clear_fixmap(fixmap);
+
+ if (flags)
+- spin_unlock_irqrestore(&patch_lock, *flags);
++ raw_spin_unlock_irqrestore(&patch_lock, *flags);
+ else
+ __release(&patch_lock);
+ }
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index d96714e1858c..cf4e1452d4b4 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -325,6 +325,30 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ }
+
+ #ifdef CONFIG_MMU
++/*
++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
++ * initialized by pgtable_page_ctor() then a coredump of the vector page will
++ * fail.
++ */
++static int __init vectors_user_mapping_init_page(void)
++{
++ struct page *page;
++ unsigned long addr = 0xffff0000;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
+
-+Because 'vals=' is used, the common_timestamp variable value above
-+will also be summed as a normal histogram value would (though for a
-+timestamp it makes little sense).
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ page = pmd_page(*(pmd));
+
-+The below shows that a key value can also be saved in the same way:
++ pgtable_page_ctor(page);
++
++ return 0;
++}
++late_initcall(vectors_user_mapping_init_page);
++
+ #ifdef CONFIG_KUSER_HELPERS
+ /*
+ * The vectors page is always readable from user space for the
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index b67ae12503f3..7039988510bb 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -615,7 +615,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ */
+ trace_hardirqs_off();
+ do {
+- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++ if (likely(thread_flags & (_TIF_NEED_RESCHED |
++ _TIF_NEED_RESCHED_LAZY))) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index c9a0a5299827..7e5122af96ca 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -236,8 +236,6 @@ int __cpu_disable(void)
+ flush_cache_louis();
+ local_flush_tlb_all();
+
+- clear_tasks_mm_cpumask(cpu);
+-
+ return 0;
+ }
+
+@@ -255,6 +253,7 @@ void __cpu_die(unsigned int cpu)
+ }
+ pr_debug("CPU%u: shutdown\n", cpu);
+
++ clear_tasks_mm_cpumask(cpu);
+ /*
+ * platform_cpu_kill() is generally expected to do the powering off
+ * and/or cutting of clocks to the dying CPU. Optionally, this may
+diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
+index 0bee233fef9a..314cfb232a63 100644
+--- a/arch/arm/kernel/unwind.c
++++ b/arch/arm/kernel/unwind.c
+@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
+ static const struct unwind_idx *__origin_unwind_idx;
+ extern const struct unwind_idx __stop_unwind_idx[];
+
+-static DEFINE_SPINLOCK(unwind_lock);
++static DEFINE_RAW_SPINLOCK(unwind_lock);
+ static LIST_HEAD(unwind_tables);
+
+ /* Convert a prel31 symbol to an absolute address */
+@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
+ /* module unwind tables */
+ struct unwind_table *table;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_for_each_entry(table, &unwind_tables, list) {
+ if (addr >= table->begin_addr &&
+ addr < table->end_addr) {
+@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
+ break;
+ }
+ }
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+ }
+
+ pr_debug("%s: idx = %p\n", __func__, idx);
+@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
+ tab->begin_addr = text_addr;
+ tab->end_addr = text_addr + text_size;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_add_tail(&tab->list, &unwind_tables);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ return tab;
+ }
+@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
+ if (!tab)
+ return;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_del(&tab->list);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ kfree(tab);
+ }
+diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
+index 5a03bffe7226..3080ea833d19 100644
+--- a/arch/arm/mach-exynos/platsmp.c
++++ b/arch/arm/mach-exynos/platsmp.c
+@@ -229,7 +229,7 @@ static void __iomem *scu_base_addr(void)
+ return (void __iomem *)(S5P_VA_SCU);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void exynos_secondary_init(unsigned int cpu)
+ {
+@@ -242,8 +242,8 @@ static void exynos_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
+@@ -307,7 +307,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -334,7 +334,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+
+ if (timeout == 0) {
+ printk(KERN_ERR "cpu1 power enable failed");
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ return -ETIMEDOUT;
+ }
+ }
+@@ -380,7 +380,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * calibrations, then wait for it to finish
+ */
+ fail:
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? ret : 0;
+ }
+diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c
+index f66815c3dd07..00524abd963f 100644
+--- a/arch/arm/mach-hisi/platmcpm.c
++++ b/arch/arm/mach-hisi/platmcpm.c
+@@ -61,7 +61,7 @@
+
+ static void __iomem *sysctrl, *fabric;
+ static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ static u32 fabric_phys_addr;
+ /*
+ * [0]: bootwrapper physical address
+@@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
+ if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
+ return -EINVAL;
+
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+
+ if (hip04_cpu_table[cluster][cpu])
+ goto out;
+@@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
+
+ out:
+ hip04_cpu_table[cluster][cpu]++;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+
+ return 0;
+ }
+@@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu)
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+ hip04_cpu_table[cluster][cpu]--;
+ if (hip04_cpu_table[cluster][cpu] == 1) {
+ /* A power_up request went ahead of us. */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ return;
+ } else if (hip04_cpu_table[cluster][cpu] > 1) {
+ pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
+@@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu)
+ }
+
+ last_man = hip04_cluster_is_down(cluster);
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ if (last_man) {
+ /* Since it's Cortex A15, disable L2 prefetching. */
+ asm volatile(
+@@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu)
+ cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
+
+ count = TIMEOUT_MSEC / POLL_MSEC;
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+ for (tries = 0; tries < count; tries++) {
+ if (hip04_cpu_table[cluster][cpu])
+ goto err;
+@@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
+ data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
+ if (data & CORE_WFI_STATUS(cpu))
+ break;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ /* Wait for clean L2 when the whole cluster is down. */
+ msleep(POLL_MSEC);
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+ }
+ if (tries >= count)
+ goto err;
+@@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
+ goto err;
+ if (hip04_cluster_is_down(cluster))
+ hip04_set_snoop_filter(cluster, 0);
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ return 1;
+ err:
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ return 0;
+ }
+ #endif
+diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
+index 1c73694c871a..ac4d2f030b87 100644
+--- a/arch/arm/mach-omap2/omap-smp.c
++++ b/arch/arm/mach-omap2/omap-smp.c
+@@ -69,7 +69,7 @@ static const struct omap_smp_config omap5_cfg __initconst = {
+ .startup_addr = omap5_secondary_startup,
+ };
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __iomem *omap4_get_scu_base(void)
+ {
+@@ -177,8 +177,8 @@ static void omap4_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -191,7 +191,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the AuxCoreBoot0 with boot state for secondary core.
+@@ -270,7 +270,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * Now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return 0;
+ }
+diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
+index 75ef5d4be554..c17c86e5d860 100644
+--- a/arch/arm/mach-prima2/platsmp.c
++++ b/arch/arm/mach-prima2/platsmp.c
+@@ -22,7 +22,7 @@
+
+ static void __iomem *clk_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void sirfsoc_secondary_init(unsigned int cpu)
+ {
+@@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static const struct of_device_id clk_ids[] = {
+@@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ /* make sure write buffer is drained */
+ mb();
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
+index 5494c9e0c909..e8ce157d3548 100644
+--- a/arch/arm/mach-qcom/platsmp.c
++++ b/arch/arm/mach-qcom/platsmp.c
+@@ -46,7 +46,7 @@
+
+ extern void secondary_startup_arm(void);
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ static void qcom_cpu_die(unsigned int cpu)
+@@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int scss_release_secondary(unsigned int cpu)
+@@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Send the secondary CPU a soft interrupt, thereby causing
+@@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return ret;
+ }
+diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
+index 39038a03836a..6da5c93872bf 100644
+--- a/arch/arm/mach-spear/platsmp.c
++++ b/arch/arm/mach-spear/platsmp.c
+@@ -32,7 +32,7 @@ static void write_pen_release(int val)
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+
+@@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
+index 231f19e17436..a3419b7003e6 100644
+--- a/arch/arm/mach-sti/platsmp.c
++++ b/arch/arm/mach-sti/platsmp.c
+@@ -35,7 +35,7 @@ static void write_pen_release(int val)
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void sti_secondary_init(unsigned int cpu)
+ {
+@@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 42f585379e19..93d2eccc8b60 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -434,6 +434,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
+ if (addr < TASK_SIZE)
+ return do_page_fault(addr, fsr, regs);
+
++ if (interrupts_enabled(regs))
++ local_irq_enable();
+
-+ # echo 'hist:timer_pid=common_pid:key=timer_pid ...' >> event/trigger
+ if (user_mode(regs))
+ goto bad_area;
+
+@@ -501,6 +504,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
+ static int
+ do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ {
++ if (interrupts_enabled(regs))
++ local_irq_enable();
+
-+If a variable isn't a key variable or prefixed with 'vals=', the
-+associated event field will be saved in a variable but won't be summed
-+as a value:
+ do_bad_area(addr, fsr, regs);
+ return 0;
+ }
+diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
+index d02f8187b1cc..542692dbd40a 100644
+--- a/arch/arm/mm/highmem.c
++++ b/arch/arm/mm/highmem.c
+@@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
+ return *ptep;
+ }
+
++static unsigned int fixmap_idx(int type)
++{
++ return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++}
+
-+ # echo 'hist:keys=next_pid:ts1=common_timestamp ... >> event/trigger
+ void *kmap(struct page *page)
+ {
+ might_sleep();
+@@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap);
+
+ void *kmap_atomic(struct page *page)
+ {
++ pte_t pte = mk_pte(page, kmap_prot);
+ unsigned int idx;
+ unsigned long vaddr;
+ void *kmap;
+ int type;
+
+- preempt_disable();
++ preempt_disable_nort();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+@@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page)
+
+ type = kmap_atomic_idx_push();
+
+- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++ idx = fixmap_idx(type);
+ vaddr = __fix_to_virt(idx);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ /*
+@@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page)
+ * in place, so the contained TLB flush ensures the TLB is updated
+ * with the new mapping.
+ */
+- set_fixmap_pte(idx, mk_pte(page, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_fixmap_pte(idx, pte);
+
+ return (void *)vaddr;
+ }
+@@ -106,44 +115,75 @@ void __kunmap_atomic(void *kvaddr)
+
+ if (kvaddr >= (void *)FIXADDR_START) {
+ type = kmap_atomic_idx();
+- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++ idx = fixmap_idx(type);
+
+ if (cache_is_vivt())
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = __pte(0);
++#endif
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(vaddr != __fix_to_virt(idx));
+- set_fixmap_pte(idx, __pte(0));
+ #else
+ (void) idx; /* to kill a warning */
+ #endif
++ set_fixmap_pte(idx, __pte(0));
+ kmap_atomic_idx_pop();
+ } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+ /* this address was obtained through kmap_high_get() */
+ kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+ }
+ pagefault_enable();
+- preempt_enable();
++ preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(__kunmap_atomic);
+
+ void *kmap_atomic_pfn(unsigned long pfn)
+ {
++ pte_t pte = pfn_pte(pfn, kmap_prot);
+ unsigned long vaddr;
+ int idx, type;
+ struct page *page = pfn_to_page(pfn);
+
+- preempt_disable();
++ preempt_disable_nort();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ type = kmap_atomic_idx_push();
+- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++ idx = fixmap_idx(type);
+ vaddr = __fix_to_virt(idx);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
+ #endif
+- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_fixmap_pte(idx, pte);
+
+ return (void *)vaddr;
+ }
++#if defined CONFIG_PREEMPT_RT_FULL
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ int i;
+
-+Multiple variables can be assigned at the same time. The below would
-+result in both ts0 and b being created as variables, with both
-+common_timestamp and field1 additionally being summed as values:
++ /*
++ * Clear @prev's kmap_atomic mappings
++ */
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = fixmap_idx(i);
+
-+ # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ... >> \
-+ event/trigger
++ set_fixmap_pte(idx, __pte(0));
++ }
++ /*
++ * Restore @next_p's kmap_atomic mappings
++ */
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = fixmap_idx(i);
+
-+Note that variable assignments can appear either preceding or
-+following their use. The command below behaves identically to the
-+command above:
++ if (!pte_none(next_p->kmap_pte[i]))
++ set_fixmap_pte(idx, next_p->kmap_pte[i]);
++ }
++}
++#endif
+diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
+index c2366510187a..6b60f582b738 100644
+--- a/arch/arm/plat-versatile/platsmp.c
++++ b/arch/arm/plat-versatile/platsmp.c
+@@ -32,7 +32,7 @@ static void write_pen_release(int val)
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void versatile_secondary_init(unsigned int cpu)
+ {
+@@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * This is really belt and braces; we hold unintended secondary
+@@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index c30cd78b6918..458d2033ffde 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -103,6 +103,7 @@ config ARM64
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RCU_TABLE_FREE
+ select HAVE_SYSCALL_TRACEPOINTS
+diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
+index 70c517aa4501..2a5f05b5a19a 100644
+--- a/arch/arm64/crypto/Kconfig
++++ b/arch/arm64/crypto/Kconfig
+@@ -19,19 +19,19 @@ config CRYPTO_SHA512_ARM64
+
+ config CRYPTO_SHA1_ARM64_CE
+ tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
+- depends on KERNEL_MODE_NEON
++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ select CRYPTO_HASH
+ select CRYPTO_SHA1
+
+ config CRYPTO_SHA2_ARM64_CE
+ tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
+- depends on KERNEL_MODE_NEON
++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ select CRYPTO_HASH
+ select CRYPTO_SHA256_ARM64
+
+ config CRYPTO_GHASH_ARM64_CE
+ tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
+- depends on KERNEL_MODE_NEON
++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ select CRYPTO_HASH
+ select CRYPTO_GF128MUL
+ select CRYPTO_AES
+@@ -39,7 +39,7 @@ config CRYPTO_GHASH_ARM64_CE
+
+ config CRYPTO_CRCT10DIF_ARM64_CE
+ tristate "CRCT10DIF digest algorithm using PMULL instructions"
+- depends on KERNEL_MODE_NEON && CRC_T10DIF
++ depends on KERNEL_MODE_NEON && CRC_T10DIF && !PREEMPT_RT_BASE
+ select CRYPTO_HASH
+
+ config CRYPTO_CRC32_ARM64_CE
+@@ -53,13 +53,13 @@ config CRYPTO_AES_ARM64
+
+ config CRYPTO_AES_ARM64_CE
+ tristate "AES core cipher using ARMv8 Crypto Extensions"
+- depends on ARM64 && KERNEL_MODE_NEON
++ depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES_ARM64
+
+ config CRYPTO_AES_ARM64_CE_CCM
+ tristate "AES in CCM mode using ARMv8 Crypto Extensions"
+- depends on ARM64 && KERNEL_MODE_NEON
++ depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES_ARM64_CE
+ select CRYPTO_AES_ARM64
+@@ -67,7 +67,7 @@ config CRYPTO_AES_ARM64_CE_CCM
+
+ config CRYPTO_AES_ARM64_CE_BLK
+ tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
+- depends on KERNEL_MODE_NEON
++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES_ARM64_CE
+ select CRYPTO_AES_ARM64
+@@ -75,7 +75,7 @@ config CRYPTO_AES_ARM64_CE_BLK
+
+ config CRYPTO_AES_ARM64_NEON_BLK
+ tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
+- depends on KERNEL_MODE_NEON
++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES_ARM64
+ select CRYPTO_AES
+@@ -83,13 +83,13 @@ config CRYPTO_AES_ARM64_NEON_BLK
+
+ config CRYPTO_CHACHA20_NEON
+ tristate "NEON accelerated ChaCha20 symmetric cipher"
+- depends on KERNEL_MODE_NEON
++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_CHACHA20
+
+ config CRYPTO_AES_ARM64_BS
+ tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm"
+- depends on KERNEL_MODE_NEON
++ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES_ARM64_NEON_BLK
+ select CRYPTO_AES_ARM64
+diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c
+index 34b4e3d46aab..ae055cdad8cf 100644
+--- a/arch/arm64/crypto/crc32-ce-glue.c
++++ b/arch/arm64/crypto/crc32-ce-glue.c
+@@ -208,7 +208,8 @@ static struct shash_alg crc32_pmull_algs[] = { {
+
+ static int __init crc32_pmull_mod_init(void)
+ {
+- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_PMULL)) {
++ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
++ !IS_ENABLED(CONFIG_PREEMPT_RT_BASE) && (elf_hwcap & HWCAP_PMULL)) {
+ crc32_pmull_algs[0].update = crc32_pmull_update;
+ crc32_pmull_algs[1].update = crc32c_pmull_update;
+
+diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
+index 55be59a35e3f..ba0cf1361f65 100644
+--- a/arch/arm64/include/asm/spinlock_types.h
++++ b/arch/arm64/include/asm/spinlock_types.h
+@@ -16,10 +16,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
+-# error "please don't include this file directly"
+-#endif
+-
+ #include <linux/types.h>
+
+ #define TICKET_SHIFT 16
+diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
+index fc786d344e46..b833258b7594 100644
+--- a/arch/arm64/include/asm/thread_info.h
++++ b/arch/arm64/include/asm/thread_info.h
+@@ -43,6 +43,7 @@ struct thread_info {
+ u64 ttbr0; /* saved TTBR0_EL1 */
+ #endif
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ };
+
+ #define INIT_THREAD_INFO(tsk) \
+@@ -82,6 +83,7 @@ void arch_setup_new_exec(void);
+ #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
+ #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
+ #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
++#define TIF_NEED_RESCHED_LAZY 6
+ #define TIF_NOHZ 7
+ #define TIF_SYSCALL_TRACE 8
+ #define TIF_SYSCALL_AUDIT 9
+@@ -98,6 +100,7 @@ void arch_setup_new_exec(void);
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+ #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_NOHZ (1 << TIF_NOHZ)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+@@ -109,8 +112,9 @@ void arch_setup_new_exec(void);
+
+ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+- _TIF_UPROBE | _TIF_FSCHECK)
++ _TIF_UPROBE | _TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY)
+
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+ _TIF_NOHZ)
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index b5e43b01b396..ae26a1664436 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -39,6 +39,7 @@ int main(void)
+ BLANK();
+ DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+ DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
++ DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count));
+ DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index c1ffa95c0ad2..c60ecb5a3916 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -637,11 +637,16 @@ el1_irq:
+
+ #ifdef CONFIG_PREEMPT
+ ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
+- cbnz w24, 1f // preempt count != 0
++ cbnz w24, 2f // preempt count != 0
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
+- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
+- bl el1_preempt
++ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
+
-+ # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ... >> \
-+ event/trigger
++ ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count
++ cbnz w24, 2f // preempt lazy count != 0
++ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
+ 1:
++ bl el1_preempt
++2:
+ #endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_on
+@@ -655,6 +660,7 @@ el1_preempt:
+ 1: bl preempt_schedule_irq // irq en/disable is done inside
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
+ tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
++ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling?
+ ret x24
+ #endif
+
+diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
+index 43442b3a463f..81bf9545a589 100644
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -756,7 +756,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
+ /* Check valid user FS if needed */
+ addr_limit_user_check();
+
+- if (thread_flags & _TIF_NEED_RESCHED) {
++ if (thread_flags & _TIF_NEED_RESCHED_MASK) {
+ schedule();
+ } else {
+ local_irq_enable();
+diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
+index 1a33608c958b..103b34d3dcf6 100644
+--- a/arch/blackfin/include/asm/spinlock_types.h
++++ b/arch/blackfin/include/asm/spinlock_types.h
+@@ -7,10 +7,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ #include <asm/rwlock.h>
+
+ typedef struct {
+diff --git a/arch/hexagon/include/asm/spinlock_types.h b/arch/hexagon/include/asm/spinlock_types.h
+index 7a906b5214a4..d8f596fec022 100644
+--- a/arch/hexagon/include/asm/spinlock_types.h
++++ b/arch/hexagon/include/asm/spinlock_types.h
+@@ -21,10 +21,6 @@
+ #ifndef _ASM_SPINLOCK_TYPES_H
+ #define _ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
+index 6e345fefcdca..681408d6816f 100644
+--- a/arch/ia64/include/asm/spinlock_types.h
++++ b/arch/ia64/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef _ASM_IA64_SPINLOCK_TYPES_H
+ #define _ASM_IA64_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
+index 555b11180156..6866201a7603 100644
+--- a/arch/ia64/kernel/mca.c
++++ b/arch/ia64/kernel/mca.c
+@@ -1824,7 +1824,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
+ ti->cpu = cpu;
+ p->stack = ti;
+ p->state = TASK_UNINTERRUPTIBLE;
+- cpumask_set_cpu(cpu, &p->cpus_allowed);
++ cpumask_set_cpu(cpu, &p->cpus_mask);
+ INIT_LIST_HEAD(&p->tasks);
+ p->parent = p->real_parent = p->group_leader = p;
+ INIT_LIST_HEAD(&p->children);
+diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h
+index bb0d17b64198..fc6afa42fe11 100644
+--- a/arch/m32r/include/asm/spinlock_types.h
++++ b/arch/m32r/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef _ASM_M32R_SPINLOCK_TYPES_H
+ #define _ASM_M32R_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile int slock;
+ } arch_spinlock_t;
+diff --git a/arch/metag/include/asm/spinlock_types.h b/arch/metag/include/asm/spinlock_types.h
+index cd197f1bed59..adc26e9797c5 100644
+--- a/arch/metag/include/asm/spinlock_types.h
++++ b/arch/metag/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef _ASM_METAG_SPINLOCK_TYPES_H
+ #define _ASM_METAG_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index c82457b0e733..7bb1838508de 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2519,7 +2519,7 @@ config MIPS_ASID_BITS_VARIABLE
+ #
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
+
+ config CPU_SUPPORTS_HIGHMEM
+ bool
+diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
+index e610473d61b8..1428b4febbc9 100644
+--- a/arch/mips/include/asm/switch_to.h
++++ b/arch/mips/include/asm/switch_to.h
+@@ -42,7 +42,7 @@ extern struct task_struct *ll_task;
+ * inline to try to keep the overhead down. If we have been forced to run on
+ * a "CPU" with an FPU because of a previous high level of FP computation,
+ * but did not actually use the FPU during the most recent time-slice (CU1
+- * isn't set), we undo the restriction on cpus_allowed.
++ * isn't set), we undo the restriction on cpus_mask.
+ *
+ * We're not calling set_cpus_allowed() here, because we have no need to
+ * force prompt migration - we're already switching the current CPU to a
+@@ -57,7 +57,7 @@ do { \
+ test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \
+ (!(KSTK_STATUS(prev) & ST0_CU1))) { \
+ clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \
+- prev->cpus_allowed = prev->thread.user_cpus_allowed; \
++ prev->cpus_mask = prev->thread.user_cpus_allowed; \
+ } \
+ next->thread.emulated_fp = 0; \
+ } while(0)
+diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
+index a7c0f97e4b0d..1a08428eedcf 100644
+--- a/arch/mips/kernel/mips-mt-fpaff.c
++++ b/arch/mips/kernel/mips-mt-fpaff.c
+@@ -177,7 +177,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+ if (retval)
+ goto out_unlock;
+
+- cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
++ cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
+ cpumask_and(&mask, &allowed, cpu_active_mask);
+
+ out_unlock:
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 583aed906933..24ad7aaca5eb 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -1193,12 +1193,12 @@ static void mt_ase_fp_affinity(void)
+ * restricted the allowed set to exclude any CPUs with FPUs,
+ * we'll skip the procedure.
+ */
+- if (cpumask_intersects(¤t->cpus_allowed, &mt_fpu_cpumask)) {
++ if (cpumask_intersects(¤t->cpus_mask, &mt_fpu_cpumask)) {
+ cpumask_t tmask;
+
+ current->thread.user_cpus_allowed
+- = current->cpus_allowed;
+- cpumask_and(&tmask, ¤t->cpus_allowed,
++ = current->cpus_mask;
++ cpumask_and(&tmask, ¤t->cpus_mask,
+ &mt_fpu_cpumask);
+ set_cpus_allowed_ptr(current, &tmask);
+ set_thread_flag(TIF_FPUBOUND);
+diff --git a/arch/mn10300/include/asm/spinlock_types.h b/arch/mn10300/include/asm/spinlock_types.h
+index 32abdc89bbc7..c45230a12d60 100644
+--- a/arch/mn10300/include/asm/spinlock_types.h
++++ b/arch/mn10300/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef _ASM_SPINLOCK_TYPES_H
+ #define _ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct arch_spinlock {
+ unsigned int slock;
+ } arch_spinlock_t;
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index fe418226df7f..b5658e925465 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -111,10 +111,11 @@ config LOCKDEP_SUPPORT
+
+ config RWSEM_GENERIC_SPINLOCK
+ bool
++ default y if PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+ bool
+- default y
++ default y if !PREEMPT_RT_FULL
+
+ config GENERIC_LOCKBREAK
+ bool
+@@ -215,6 +216,7 @@ config PPC
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_RCU_TABLE_FREE if SMP
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
+@@ -390,7 +392,7 @@ menu "Kernel options"
+
+ config HIGHMEM
+ bool "High memory support"
+- depends on PPC32
++ depends on PPC32 && !PREEMPT_RT_FULL
+
+ source kernel/Kconfig.hz
+ source kernel/Kconfig.preempt
+diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
+index 87adaf13b7e8..7305cb6a53e4 100644
+--- a/arch/powerpc/include/asm/spinlock_types.h
++++ b/arch/powerpc/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
+ #define _ASM_POWERPC_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int slock;
+ } arch_spinlock_t;
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index a264c3ad366b..020afb8329a1 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -36,6 +36,8 @@ struct thread_info {
+ int cpu; /* cpu we're on */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
++ int preempt_lazy_count; /* 0 => preemptable,
++ <0 => BUG */
+ unsigned long local_flags; /* private flags for thread */
+ #ifdef CONFIG_LIVEPATCH
+ unsigned long *livepatch_sp;
+@@ -81,8 +83,7 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+ #define TIF_SIGPENDING 1 /* signal pending */
+ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+-#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
+- TIF_NEED_RESCHED */
++#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */
+ #define TIF_32BIT 4 /* 32 bit binary */
+ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
+ #define TIF_PATCH_PENDING 6 /* pending live patching update */
+@@ -101,6 +102,8 @@ static inline struct thread_info *current_thread_info(void)
+ #if defined(CONFIG_PPC64)
+ #define TIF_ELF2ABI 18 /* function descriptors must die! */
+ #endif
++#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling
++ TIF_NEED_RESCHED */
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+@@ -120,14 +123,16 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+ #define _TIF_NOHZ (1<<TIF_NOHZ)
++#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
+ #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_NOHZ)
+
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+- _TIF_RESTORE_TM | _TIF_PATCH_PENDING)
++ _TIF_RESTORE_TM | _TIF_PATCH_PENDING | _TIF_NEED_RESCHED_LAZY)
+ #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
+ /* Bits in local_flags */
+ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 2e5ea300258a..a2cb40098d7c 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -156,6 +156,7 @@ int main(void)
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
+ OFFSET(TI_PREEMPT, thread_info, preempt_count);
++ OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_CPU, thread_info, cpu);
+
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index e780e1fbf6c2..dc7fe90ff6a9 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -866,7 +866,14 @@ resume_kernel:
+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
+ bne restore
+ andi. r8,r8,_TIF_NEED_RESCHED
++ bne+ 1f
++ lwz r0,TI_PREEMPT_LAZY(r9)
++ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
++ bne restore
++ lwz r0,TI_FLAGS(r9)
++ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
++1:
+ lwz r3,_MSR(r1)
+ andi. r0,r3,MSR_EE /* interrupts off? */
+ beq restore /* don't schedule if so */
+@@ -877,11 +884,11 @@ resume_kernel:
+ */
+ bl trace_hardirqs_off
+ #endif
+-1: bl preempt_schedule_irq
++2: bl preempt_schedule_irq
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r3,TI_FLAGS(r9)
+- andi. r0,r3,_TIF_NEED_RESCHED
+- bne- 1b
++ andi. r0,r3,_TIF_NEED_RESCHED_MASK
++ bne- 2b
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ /* And now, to properly rebalance the above, we tell lockdep they
+ * are being turned back on, which will happen when we return
+@@ -1204,7 +1211,7 @@ global_dbcr0:
+ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+
+ do_work: /* r10 contains MSR_KERNEL here */
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ beq do_user_signal
+
+ do_resched: /* r10 contains MSR_KERNEL here */
+@@ -1225,7 +1232,7 @@ recheck:
+ MTMSRD(r10) /* disable interrupts */
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r9,TI_FLAGS(r9)
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ bne- do_resched
+ andi. r0,r9,_TIF_USER_WORK_MASK
+ beq restore_user
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index c194f4c8e66b..117c1f6cab66 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -690,7 +690,7 @@ _GLOBAL(ret_from_except_lite)
+ bl restore_math
+ b restore
+ #endif
+-1: andi. r0,r4,_TIF_NEED_RESCHED
++1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ beq 2f
+ bl restore_interrupts
+ SCHEDULE_USER
+@@ -752,10 +752,18 @@ resume_kernel:
+
+ #ifdef CONFIG_PREEMPT
+ /* Check if we need to preempt */
++ lwz r8,TI_PREEMPT(r9)
++ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
++ bne restore
+ andi. r0,r4,_TIF_NEED_RESCHED
++ bne+ check_count
+
-+Any number of variables not bound to a 'vals=' prefix can also be
-+assigned by simply separating them with colons. Below is the same
-+thing but without the values being summed in the histogram:
++ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
++ lwz r8,TI_PREEMPT_LAZY(r9)
+
-+ # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ... >> event/trigger
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+- lwz r8,TI_PREEMPT(r9)
++check_count:
+ cmpwi cr1,r8,0
+ ld r0,SOFTE(r1)
+ cmpdi r0,0
+@@ -772,7 +780,7 @@ resume_kernel:
+ /* Re-test flags and eventually loop */
+ CURRENT_THREAD_INFO(r9, r1)
+ ld r4,TI_FLAGS(r9)
+- andi. r0,r4,_TIF_NEED_RESCHED
++ andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ bne 1b
+
+ /*
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 0ce8b0e5d7ba..375adb3048fc 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -693,6 +693,7 @@ void irq_ctx_init(void)
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ struct thread_info *curtp, *irqtp;
+@@ -710,6 +711,7 @@ void do_softirq_own_stack(void)
+ if (irqtp->flags)
+ set_bits(irqtp->flags, &curtp->flags);
+ }
++#endif
+
+ irq_hw_number_t virq_to_hw(unsigned int virq)
+ {
+diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
+index 3f7a9a2d2435..1795359d27b6 100644
+--- a/arch/powerpc/kernel/misc_32.S
++++ b/arch/powerpc/kernel/misc_32.S
+@@ -41,6 +41,7 @@
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ stw r0,4(r1)
+@@ -57,6 +58,7 @@ _GLOBAL(call_do_softirq)
+ stw r10,THREAD+KSP_LIMIT(r2)
+ mtlr r0
+ blr
++#endif
+
+ /*
+ * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index 3280953a82cf..dd2a80d190c4 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -31,6 +31,7 @@
+
+ .text
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ std r0,16(r1)
+@@ -41,6 +42,7 @@ _GLOBAL(call_do_softirq)
+ ld r0,16(r1)
+ mtlr r0
+ blr
++#endif
+
+ _GLOBAL(call_do_irq)
+ mflr r0
+diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
+index 648160334abf..9d24331fc9b4 100644
+--- a/arch/powerpc/kvm/Kconfig
++++ b/arch/powerpc/kvm/Kconfig
+@@ -177,6 +177,7 @@ config KVM_E500MC
+ config KVM_MPIC
+ bool "KVM in-kernel MPIC emulation"
+ depends on KVM && E500
++ depends on !PREEMPT_RT_FULL
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_IRQFD
+ select HAVE_KVM_IRQ_ROUTING
+diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
+index 1fbb5da17dd2..ca86366d5424 100644
+--- a/arch/powerpc/platforms/cell/spufs/sched.c
++++ b/arch/powerpc/platforms/cell/spufs/sched.c
+@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
+ * runqueue. The context will be rescheduled on the proper node
+ * if it is timesliced or preempted.
+ */
+- cpumask_copy(&ctx->cpus_allowed, ¤t->cpus_allowed);
++ cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);
+
+ /* Save the current cpu id for spu interrupt routing. */
+ ctx->last_ran = raw_smp_processor_id();
+diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
+index e48462447ff0..2670cee66064 100644
+--- a/arch/powerpc/platforms/ps3/device-init.c
++++ b/arch/powerpc/platforms/ps3/device-init.c
+@@ -752,7 +752,7 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev,
+ }
+ pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
+
+- res = wait_event_interruptible(dev->done.wait,
++ res = swait_event_interruptible(dev->done.wait,
+ dev->done.done || kthread_should_stop());
+ if (kthread_should_stop())
+ res = -EINTR;
+diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
+index 1861a0c5dd47..74092ebaca3c 100644
+--- a/arch/s390/include/asm/spinlock_types.h
++++ b/arch/s390/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ int lock;
+ } __attribute__ ((aligned (4))) arch_spinlock_t;
+diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
+index e82369f286a2..22ca9a98bbb8 100644
+--- a/arch/sh/include/asm/spinlock_types.h
++++ b/arch/sh/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef __ASM_SH_SPINLOCK_TYPES_H
+ #define __ASM_SH_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
+index 245dbeb20afe..e298c82d2a69 100644
+--- a/arch/sh/kernel/irq.c
++++ b/arch/sh/kernel/irq.c
+@@ -148,6 +148,7 @@ void irq_ctx_exit(int cpu)
+ hardirq_ctx[cpu] = NULL;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ struct thread_info *curctx;
+@@ -175,6 +176,7 @@ void do_softirq_own_stack(void)
+ "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+ );
+ }
++#endif
+ #else
+ static inline void handle_one_irq(unsigned int irq)
+ {
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 4e83f950713e..7f9d71523763 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -206,12 +206,10 @@ config NR_CPUS
+ source kernel/Kconfig.hz
+
+ config RWSEM_GENERIC_SPINLOCK
+- bool
+- default y if SPARC32
++ def_bool PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+- bool
+- default y if SPARC64
++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+
+ config GENERIC_HWEIGHT
+ bool
+diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
+index d66dde833f5e..f87b3f8f4d43 100644
+--- a/arch/sparc/kernel/irq_64.c
++++ b/arch/sparc/kernel/irq_64.c
+@@ -855,6 +855,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
+ set_irq_regs(old_regs);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+@@ -869,6 +870,7 @@ void do_softirq_own_stack(void)
+ __asm__ __volatile__("mov %0, %%sp"
+ : : "r" (orig_sp));
+ }
++#endif
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ void fixup_irqs(void)
+diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
+index 2a0347af0702..670fa2f4cfc3 100644
+--- a/arch/tile/include/asm/setup.h
++++ b/arch/tile/include/asm/setup.h
+@@ -49,7 +49,7 @@ int hardwall_ipi_valid(int cpu);
+
+ /* Hook hardwall code into changes in affinity. */
+ #define arch_set_cpus_allowed(p, new_mask) do { \
+- if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
++ if (!cpumask_equal(p->cpus_ptr, new_mask)) \
+ hardwall_deactivate_all(p); \
+ } while (0)
+ #endif
+diff --git a/arch/tile/include/asm/spinlock_types.h b/arch/tile/include/asm/spinlock_types.h
+index a71f59b49c50..9311c6ff2abc 100644
+--- a/arch/tile/include/asm/spinlock_types.h
++++ b/arch/tile/include/asm/spinlock_types.h
+@@ -15,10 +15,6 @@
+ #ifndef _ASM_TILE_SPINLOCK_TYPES_H
+ #define _ASM_TILE_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ #ifdef __tilegx__
+
+ /* Low 15 bits are "next"; high 15 bits are "current". */
+diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
+index 2fd1694ac1d0..98f4fb696289 100644
+--- a/arch/tile/kernel/hardwall.c
++++ b/arch/tile/kernel/hardwall.c
+@@ -590,12 +590,12 @@ static int hardwall_activate(struct hardwall_info *info)
+ * Get our affinity; if we're not bound to this tile uniquely,
+ * we can't access the network registers.
+ */
+- if (cpumask_weight(&p->cpus_allowed) != 1)
++ if (p->nr_cpus_allowed != 1)
+ return -EPERM;
+
+ /* Make sure we are bound to a cpu assigned to this resource. */
+ cpu = smp_processor_id();
+- BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
++ BUG_ON(cpumask_first(p->cpus_ptr) != cpu);
+ if (!cpumask_test_cpu(cpu, &info->cpumask))
+ return -EINVAL;
+
+@@ -621,17 +621,17 @@ static int hardwall_activate(struct hardwall_info *info)
+ * Deactivate a task's hardwall. Must hold lock for hardwall_type.
+ * This method may be called from exit_thread(), so we don't want to
+ * rely on too many fields of struct task_struct still being valid.
+- * We assume the cpus_allowed, pid, and comm fields are still valid.
++ * We assume the nr_cpus_allowed, pid, and comm fields are still valid.
+ */
+ static void _hardwall_deactivate(struct hardwall_type *hwt,
+ struct task_struct *task)
+ {
+ struct thread_struct *ts = &task->thread;
+
+- if (cpumask_weight(&task->cpus_allowed) != 1) {
++ if (task->nr_cpus_allowed != 1) {
+ pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
+ task->pid, task->comm, hwt->name,
+- cpumask_weight(&task->cpus_allowed));
++ task->nr_cpus_allowed);
+ BUG();
+ }
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 2af0af33362a..7764f936d6ab 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -169,6 +169,7 @@ config X86
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_RCU_TABLE_FREE
+ select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
+ select HAVE_REGS_AND_STACK_ACCESS_API
+@@ -257,8 +258,11 @@ config ARCH_MAY_HAVE_PC_FDC
+ def_bool y
+ depends on ISA_DMA_API
+
++config RWSEM_GENERIC_SPINLOCK
++ def_bool PREEMPT_RT_FULL
+
-+Variables set as above can be referenced and used in expressions on
-+another event.
+ config RWSEM_XCHGADD_ALGORITHM
+- def_bool y
++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+
+ config GENERIC_CALIBRATE_DELAY
+ def_bool y
+@@ -933,7 +937,7 @@ config IOMMU_HELPER
+ config MAXSMP
+ bool "Enable Maximum number of SMP Processors and NUMA Nodes"
+ depends on X86_64 && SMP && DEBUG_KERNEL
+- select CPUMASK_OFFSTACK
++ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
+ ---help---
+ Enable maximum number of CPUS and NUMA Nodes for this architecture.
+ If unsure, say N.
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index c690ddc78c03..7a3138d33e33 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -387,14 +387,14 @@ static int ecb_encrypt(struct skcipher_request *req)
+
+ err = skcipher_walk_virt(&walk, req, true);
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -409,14 +409,14 @@ static int ecb_decrypt(struct skcipher_request *req)
+
+ err = skcipher_walk_virt(&walk, req, true);
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -431,14 +431,14 @@ static int cbc_encrypt(struct skcipher_request *req)
+
+ err = skcipher_walk_virt(&walk, req, true);
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -453,14 +453,14 @@ static int cbc_decrypt(struct skcipher_request *req)
+
+ err = skcipher_walk_virt(&walk, req, true);
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -510,18 +510,20 @@ static int ctr_crypt(struct skcipher_request *req)
+
+ err = skcipher_walk_virt(&walk, req, true);
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
++ kernel_fpu_begin();
+ aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+ if (walk.nbytes) {
++ kernel_fpu_begin();
+ ctr_crypt_final(ctx, &walk);
++ kernel_fpu_end();
+ err = skcipher_walk_done(&walk, 0);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
+index 60907c139c4e..0902db7d326a 100644
+--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
++++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
+@@ -206,6 +206,20 @@ struct crypt_priv {
+ bool fpu_enabled;
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void camellia_fpu_end_rt(struct crypt_priv *ctx)
++{
++ bool fpu_enabled = ctx->fpu_enabled;
+
-+For example, here's how a latency can be calculated:
++ if (!fpu_enabled)
++ return;
++ camellia_fpu_end(fpu_enabled);
++ ctx->fpu_enabled = false;
++}
++#else
++static void camellia_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
+
-+ # echo 'hist:keys=pid,prio:ts0=common_timestamp ... >> event1/trigger
-+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ... >> event2/trigger
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
+@@ -221,16 +235,19 @@ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ }
+
+ if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
++ kernel_fpu_resched();
+ camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
+ srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
+ nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
+ }
+
+ while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
++ kernel_fpu_resched();
+ camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
+ srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
+ nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
+ }
++ camellia_fpu_end_rt(ctx);
+
+ for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ camellia_enc_blk(ctx->ctx, srcdst, srcdst);
+@@ -251,16 +268,19 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ }
+
+ if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
++ kernel_fpu_resched();
+ camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
+ srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
+ nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
+ }
+
+ while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
++ kernel_fpu_resched();
+ camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
+ srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
+ nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
+ }
++ camellia_fpu_end_rt(ctx);
+
+ for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ camellia_dec_blk(ctx->ctx, srcdst, srcdst);
+diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
+index d96429da88eb..3b8e91841039 100644
+--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
++++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
+@@ -210,6 +210,21 @@ struct crypt_priv {
+ bool fpu_enabled;
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void camellia_fpu_end_rt(struct crypt_priv *ctx)
++{
++ bool fpu_enabled = ctx->fpu_enabled;
+
-+In the first line above, the event's timetamp is saved into the
-+variable ts0. In the next line, ts0 is subtracted from the second
-+event's timestamp to produce the latency, which is then assigned into
-+yet another variable, 'wakeup_lat'. The hist trigger below in turn
-+makes use of the wakeup_lat variable to compute a combined latency
-+using the same key and variable from yet another event:
++ if (!fpu_enabled)
++ return;
++ camellia_fpu_end(fpu_enabled);
++ ctx->fpu_enabled = false;
++}
+
-+ # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ... >> event3/trigger
++#else
++static void camellia_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
+
-+2.2.2 Synthetic Events
-+----------------------
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
+@@ -225,10 +240,12 @@ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ }
+
+ while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
++ kernel_fpu_resched();
+ camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
+ srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
+ nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
+ }
++ camellia_fpu_end_rt(ctx);
+
+ for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ camellia_enc_blk(ctx->ctx, srcdst, srcdst);
+@@ -249,10 +266,12 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ }
+
+ while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
++ kernel_fpu_resched();
+ camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
+ srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
+ nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
+ }
++ camellia_fpu_end_rt(ctx);
+
+ for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ camellia_dec_blk(ctx->ctx, srcdst, srcdst);
+diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
+index 575292a33bdf..0a4b0a222b18 100644
+--- a/arch/x86/crypto/cast5_avx_glue.c
++++ b/arch/x86/crypto/cast5_avx_glue.c
+@@ -59,7 +59,7 @@ static inline void cast5_fpu_end(bool fpu_enabled)
+ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ bool enc)
+ {
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ const unsigned int bsize = CAST5_BLOCK_SIZE;
+ unsigned int nbytes;
+@@ -73,7 +73,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ u8 *wsrc = walk->src.virt.addr;
+ u8 *wdst = walk->dst.virt.addr;
+
+- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++ fpu_enabled = cast5_fpu_begin(false, nbytes);
+
+ /* Process multi-block batch */
+ if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
+@@ -102,10 +102,9 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ } while (nbytes >= bsize);
+
+ done:
++ cast5_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, walk, nbytes);
+ }
+-
+- cast5_fpu_end(fpu_enabled);
+ return err;
+ }
+
+@@ -226,7 +225,7 @@ static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
+ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+ {
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -235,12 +234,11 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ while ((nbytes = walk.nbytes)) {
+- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++ fpu_enabled = cast5_fpu_begin(false, nbytes);
+ nbytes = __cbc_decrypt(desc, &walk);
++ cast5_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+-
+- cast5_fpu_end(fpu_enabled);
+ return err;
+ }
+
+@@ -309,7 +307,7 @@ static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
+ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+ {
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -318,13 +316,12 @@ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
+- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++ fpu_enabled = cast5_fpu_begin(false, nbytes);
+ nbytes = __ctr_crypt(desc, &walk);
++ cast5_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+- cast5_fpu_end(fpu_enabled);
+-
+ if (walk.nbytes) {
+ ctr_crypt_final(desc, &walk);
+ err = blkcipher_walk_done(desc, &walk, 0);
+diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
+index 50e684768c55..8caf9ba8c1da 100644
+--- a/arch/x86/crypto/cast6_avx_glue.c
++++ b/arch/x86/crypto/cast6_avx_glue.c
+@@ -205,19 +205,33 @@ struct crypt_priv {
+ bool fpu_enabled;
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void cast6_fpu_end_rt(struct crypt_priv *ctx)
++{
++ bool fpu_enabled = ctx->fpu_enabled;
+
-+Synthetic events are user-defined events generated from hist trigger
-+variables or fields associated with one or more other events. Their
-+purpose is to provide a mechanism for displaying data spanning
-+multiple events consistent with the existing and already familiar
-+usage for normal events.
++ if (!fpu_enabled)
++ return;
++ cast6_fpu_end(fpu_enabled);
++ ctx->fpu_enabled = false;
++}
+
-+To define a synthetic event, the user writes a simple specification
-+consisting of the name of the new event along with one or more
-+variables and their types, which can be any valid field type,
-+separated by semicolons, to the tracing/synthetic_events file.
++#else
++static void cast6_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
+
-+For instance, the following creates a new event named 'wakeup_latency'
-+with 3 fields: lat, pid, and prio. Each of those fields is simply a
-+variable reference to a variable on another event:
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ const unsigned int bsize = CAST6_BLOCK_SIZE;
+ struct crypt_priv *ctx = priv;
+ int i;
+
+- ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
+-
+ if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
++ ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
+ cast6_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
++ cast6_fpu_end_rt(ctx);
+ return;
+ }
+-
+ for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ __cast6_encrypt(ctx->ctx, srcdst, srcdst);
+ }
+@@ -228,10 +242,10 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ struct crypt_priv *ctx = priv;
+ int i;
+
+- ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
+-
+ if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
++ ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
+ cast6_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
++ cast6_fpu_end_rt(ctx);
+ return;
+ }
+
+diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
+index 1e6af1b35f7b..e7809fd2a4fd 100644
+--- a/arch/x86/crypto/chacha20_glue.c
++++ b/arch/x86/crypto/chacha20_glue.c
+@@ -81,23 +81,24 @@ static int chacha20_simd(struct skcipher_request *req)
+
+ crypto_chacha20_init(state, ctx, walk.iv);
+
+- kernel_fpu_begin();
+-
+ while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
++ kernel_fpu_begin();
+
-+ # echo 'wakeup_latency \
-+ u64 lat; \
-+ pid_t pid; \
-+ int prio' >> \
-+ /sys/kernel/debug/tracing/synthetic_events
+ chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
+ rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
++ kernel_fpu_end();
+ err = skcipher_walk_done(&walk,
+ walk.nbytes % CHACHA20_BLOCK_SIZE);
+ }
+
+ if (walk.nbytes) {
++ kernel_fpu_begin();
+ chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes);
++ kernel_fpu_end();
+ err = skcipher_walk_done(&walk, 0);
+ }
+
+- kernel_fpu_end();
+-
+ return err;
+ }
+
+diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
+index d61e57960fe0..c67560d9718a 100644
+--- a/arch/x86/crypto/glue_helper.c
++++ b/arch/x86/crypto/glue_helper.c
+@@ -40,7 +40,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+ void *ctx = crypto_blkcipher_ctx(desc->tfm);
+ const unsigned int bsize = 128 / 8;
+ unsigned int nbytes, i, func_bytes;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ int err;
+
+ err = blkcipher_walk_virt(desc, walk);
+@@ -50,7 +50,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+ u8 *wdst = walk->dst.virt.addr;
+
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled, nbytes);
++ desc, false, nbytes);
+
+ for (i = 0; i < gctx->num_funcs; i++) {
+ func_bytes = bsize * gctx->funcs[i].num_blocks;
+@@ -72,10 +72,10 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+ }
+
+ done:
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, walk, nbytes);
+ }
+
+- glue_fpu_end(fpu_enabled);
+ return err;
+ }
+
+@@ -192,7 +192,7 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+ struct scatterlist *src, unsigned int nbytes)
+ {
+ const unsigned int bsize = 128 / 8;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -201,12 +201,12 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+
+ while ((nbytes = walk.nbytes)) {
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled, nbytes);
++ desc, false, nbytes);
+ nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+- glue_fpu_end(fpu_enabled);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
+@@ -275,7 +275,7 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+ struct scatterlist *src, unsigned int nbytes)
+ {
+ const unsigned int bsize = 128 / 8;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -284,13 +284,12 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+
+ while ((nbytes = walk.nbytes) >= bsize) {
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled, nbytes);
++ desc, false, nbytes);
+ nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+- glue_fpu_end(fpu_enabled);
+-
+ if (walk.nbytes) {
+ glue_ctr_crypt_final_128bit(
+ gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
+@@ -380,7 +379,7 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+ void *tweak_ctx, void *crypt_ctx)
+ {
+ const unsigned int bsize = 128 / 8;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -393,21 +392,21 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+
+ /* set minimum length to bsize, for tweak_fn */
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled,
++ desc, false,
+ nbytes < bsize ? bsize : nbytes);
+-
+ /* calculate first value of T */
+ tweak_fn(tweak_ctx, walk.iv, walk.iv);
++ glue_fpu_end(fpu_enabled);
+
+ while (nbytes) {
++ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
++ desc, false, nbytes);
+ nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
+
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = walk.nbytes;
+ }
+-
+- glue_fpu_end(fpu_enabled);
+-
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
+diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
+index 870f6d812a2d..5c806bf39f1d 100644
+--- a/arch/x86/crypto/serpent_avx2_glue.c
++++ b/arch/x86/crypto/serpent_avx2_glue.c
+@@ -184,6 +184,21 @@ struct crypt_priv {
+ bool fpu_enabled;
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void serpent_fpu_end_rt(struct crypt_priv *ctx)
++{
++ bool fpu_enabled = ctx->fpu_enabled;
+
-+Reading the tracing/synthetic_events file lists all the currently
-+defined synthetic events, in this case the event defined above:
++ if (!fpu_enabled)
++ return;
++ serpent_fpu_end(fpu_enabled);
++ ctx->fpu_enabled = false;
++}
+
-+ # cat /sys/kernel/debug/tracing/synthetic_events
-+ wakeup_latency u64 lat; pid_t pid; int prio
++#else
++static void serpent_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
+
-+An existing synthetic event definition can be removed by prepending
-+the command that defined it with a '!':
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ const unsigned int bsize = SERPENT_BLOCK_SIZE;
+@@ -199,10 +214,12 @@ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ }
+
+ while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
++ kernel_fpu_resched();
+ serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
+ srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
+ nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
+ }
++ serpent_fpu_end_rt(ctx);
+
+ for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ __serpent_encrypt(ctx->ctx, srcdst, srcdst);
+@@ -223,10 +240,12 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ }
+
+ while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
++ kernel_fpu_resched();
+ serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
+ srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
+ nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
+ }
++ serpent_fpu_end_rt(ctx);
+
+ for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ __serpent_decrypt(ctx->ctx, srcdst, srcdst);
+diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
+index 6f778d3daa22..46dcbdbd0518 100644
+--- a/arch/x86/crypto/serpent_avx_glue.c
++++ b/arch/x86/crypto/serpent_avx_glue.c
+@@ -218,16 +218,31 @@ struct crypt_priv {
+ bool fpu_enabled;
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void serpent_fpu_end_rt(struct crypt_priv *ctx)
++{
++ bool fpu_enabled = ctx->fpu_enabled;
+
-+ # echo '!wakeup_latency u64 lat pid_t pid int prio' >> \
-+ /sys/kernel/debug/tracing/synthetic_events
++ if (!fpu_enabled)
++ return;
++ serpent_fpu_end(fpu_enabled);
++ ctx->fpu_enabled = false;
++}
+
-+At this point, there isn't yet an actual 'wakeup_latency' event
-+instantiated in the event subsytem - for this to happen, a 'hist
-+trigger action' needs to be instantiated and bound to actual fields
-+and variables defined on other events (see Section 6.3.3 below).
++#else
++static void serpent_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
+
-+Once that is done, an event instance is created, and a histogram can
-+be defined using it:
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ const unsigned int bsize = SERPENT_BLOCK_SIZE;
+ struct crypt_priv *ctx = priv;
+ int i;
+
+- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+-
+ if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
++ ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+ serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
++ serpent_fpu_end_rt(ctx);
+ return;
+ }
+
+@@ -241,10 +256,10 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ struct crypt_priv *ctx = priv;
+ int i;
+
+- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+-
+ if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
++ ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+ serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
++ serpent_fpu_end_rt(ctx);
+ return;
+ }
+
+diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
+index ac0e831943f5..d35f607d067f 100644
+--- a/arch/x86/crypto/serpent_sse2_glue.c
++++ b/arch/x86/crypto/serpent_sse2_glue.c
+@@ -187,16 +187,31 @@ struct crypt_priv {
+ bool fpu_enabled;
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void serpent_fpu_end_rt(struct crypt_priv *ctx)
++{
++ bool fpu_enabled = ctx->fpu_enabled;
+
-+ # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
-+ /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
++ if (!fpu_enabled)
++ return;
++ serpent_fpu_end(fpu_enabled);
++ ctx->fpu_enabled = false;
++}
+
-+The new event is created under the tracing/events/synthetic/ directory
-+and looks and behaves just like any other event:
++#else
++static void serpent_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
+
-+ # ls /sys/kernel/debug/tracing/events/synthetic/wakeup_latency
-+ enable filter format hist id trigger
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ const unsigned int bsize = SERPENT_BLOCK_SIZE;
+ struct crypt_priv *ctx = priv;
+ int i;
+
+- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+-
+ if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
++ ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+ serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
++ serpent_fpu_end_rt(ctx);
+ return;
+ }
+
+@@ -210,10 +225,10 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ struct crypt_priv *ctx = priv;
+ int i;
+
+- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+-
+ if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
++ ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+ serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
++ serpent_fpu_end_rt(ctx);
+ return;
+ }
+
+diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
+index b7a3904b953c..de00fe24927e 100644
+--- a/arch/x86/crypto/twofish_avx_glue.c
++++ b/arch/x86/crypto/twofish_avx_glue.c
+@@ -218,6 +218,21 @@ struct crypt_priv {
+ bool fpu_enabled;
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void twofish_fpu_end_rt(struct crypt_priv *ctx)
++{
++ bool fpu_enabled = ctx->fpu_enabled;
+
-+Like any other event, once a histogram is enabled for the event, the
-+output can be displayed by reading the event's 'hist' file.
++ if (!fpu_enabled)
++ return;
++ twofish_fpu_end(fpu_enabled);
++ ctx->fpu_enabled = false;
++}
+
-+2.2.3 Hist trigger 'actions'
-+----------------------------
++#else
++static void twofish_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
+
-+A hist trigger 'action' is a function that's executed whenever a
-+histogram entry is added or updated.
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ const unsigned int bsize = TF_BLOCK_SIZE;
+@@ -228,12 +243,16 @@ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+
+ if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
+ twofish_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
++ twofish_fpu_end_rt(ctx);
+ return;
+ }
+
+- for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
++ for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3) {
++ kernel_fpu_resched();
+ twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst);
++ }
+
++ twofish_fpu_end_rt(ctx);
+ nbytes %= bsize * 3;
+
+ for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+@@ -250,11 +269,15 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+
+ if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
+ twofish_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
++ twofish_fpu_end_rt(ctx);
+ return;
+ }
+
+- for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
++ for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3) {
++ kernel_fpu_resched();
+ twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst);
++ }
++ twofish_fpu_end_rt(ctx);
+
+ nbytes %= bsize * 3;
+
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 60e21ccfb6d6..0e27f35febe7 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -133,7 +133,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
+
+ #define EXIT_TO_USERMODE_LOOP_FLAGS \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
++ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
+
+ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
+ {
+@@ -148,9 +148,16 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
+ /* We have work to do. */
+ local_irq_enable();
+
+- if (cached_flags & _TIF_NEED_RESCHED)
++ if (cached_flags & _TIF_NEED_RESCHED_MASK)
+ schedule();
+
++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
++ if (unlikely(current->forced_info.si_signo)) {
++ struct task_struct *t = current;
++ force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
++ t->forced_info.si_signo = 0;
++ }
++#endif
+ if (cached_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 60c4c342316c..cd0c7c56e2dd 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -350,8 +350,25 @@ END(ret_from_exception)
+ ENTRY(resume_kernel)
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ .Lneed_resched:
++ # preempt count == 0 + NEED_RS set?
+ cmpl $0, PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz restore_all
++#else
++ jz test_int_off
+
-+The default 'action' if no special function is explicity specified is
-+as it always has been, to simply update the set of values associated
-+with an entry. Some applications, however, may want to perform
-+additional actions at that point, such as generate another event, or
-+compare and save a maximum.
++ # atleast preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jne restore_all
+
-+The following additional actions are available. To specify an action
-+for a given event, simply specify the action between colons in the
-+hist trigger specification.
++ movl PER_CPU_VAR(current_task), %ebp
++ cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
++ jnz restore_all
+
-+ - onmatch(matching.event).<synthetic_event_name>(param list)
++ testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
++ jz restore_all
++test_int_off:
++#endif
+ testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
+ jz restore_all
+ call preempt_schedule_irq
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 0fae7096ae23..0ed8dbf6210d 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -633,7 +633,23 @@ retint_kernel:
+ bt $9, EFLAGS(%rsp) /* were interrupts off? */
+ jnc 1f
+ 0: cmpl $0, PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz 1f
++#else
++ jz do_preempt_schedule_irq
+
-+ The 'onmatch(matching.event).<synthetic_event_name>(params)' hist
-+ trigger action is invoked whenever an event matches and the
-+ histogram entry would be added or updated. It causes the named
-+ synthetic event to be generated with the values given in the
-+ 'param list'. The result is the generation of a synthetic event
-+ that consists of the values contained in those variables at the
-+ time the invoking event was hit.
++ # atleast preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jnz 1f
+
-+ The 'param list' consists of one or more parameters which may be
-+ either variables or fields defined on either the 'matching.event'
-+ or the target event. The variables or fields specified in the
-+ param list may be either fully-qualified or unqualified. If a
-+ variable is specified as unqualified, it must be unique between
-+ the two events. A field name used as a param can be unqualified
-+ if it refers to the target event, but must be fully qualified if
-+ it refers to the matching event. A fully-qualified name is of the
-+ form 'system.event_name.$var_name' or 'system.event_name.field'.
++ movq PER_CPU_VAR(current_task), %rcx
++ cmpl $0, TASK_TI_preempt_lazy_count(%rcx)
++ jnz 1f
+
-+ The 'matching.event' specification is simply the fully qualified
-+ event name of the event that matches the target event for the
-+ onmatch() functionality, in the form 'system.event_name'.
++ bt $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
++ jnc 1f
++do_preempt_schedule_irq:
++#endif
+ call preempt_schedule_irq
+ jmp 0b
+ 1:
+@@ -988,6 +1004,7 @@ bad_gs:
+ jmp 2b
+ .previous
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /* Call softirq on interrupt stack. Interrupts are off. */
+ ENTRY(do_softirq_own_stack)
+ pushq %rbp
+@@ -998,6 +1015,7 @@ ENTRY(do_softirq_own_stack)
+ leaveq
+ ret
+ ENDPROC(do_softirq_own_stack)
++#endif
+
+ #ifdef CONFIG_XEN
+ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
+diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
+index a9caac9d4a72..18b31f22ca5d 100644
+--- a/arch/x86/include/asm/fpu/api.h
++++ b/arch/x86/include/asm/fpu/api.h
+@@ -25,6 +25,7 @@ extern void __kernel_fpu_begin(void);
+ extern void __kernel_fpu_end(void);
+ extern void kernel_fpu_begin(void);
+ extern void kernel_fpu_end(void);
++extern void kernel_fpu_resched(void);
+ extern bool irq_fpu_usable(void);
+
+ /*
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index 7f2dbd91fc74..22992c837795 100644
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -86,17 +86,46 @@ static __always_inline void __preempt_count_sub(int val)
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
+-static __always_inline bool __preempt_count_dec_and_test(void)
++static __always_inline bool ____preempt_count_dec_and_test(void)
+ {
+ GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
+ }
+
++static __always_inline bool __preempt_count_dec_and_test(void)
++{
++ if (____preempt_count_dec_and_test())
++ return true;
++#ifdef CONFIG_PREEMPT_LAZY
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
++ return false;
++#endif
++}
+
-+ Finally, the number and type of variables/fields in the 'param
-+ list' must match the number and types of the fields in the
-+ synthetic event being generated.
+ /*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+ static __always_inline bool should_resched(int preempt_offset)
+ {
++#ifdef CONFIG_PREEMPT_LAZY
++ u32 tmp;
+
-+ As an example the below defines a simple synthetic event and uses
-+ a variable defined on the sched_wakeup_new event as a parameter
-+ when invoking the synthetic event. Here we define the synthetic
-+ event:
++ tmp = raw_cpu_read_4(__preempt_count);
++ if (tmp == preempt_offset)
++ return true;
+
-+ # echo 'wakeup_new_test pid_t pid' >> \
-+ /sys/kernel/debug/tracing/synthetic_events
++ /* preempt count == 0 ? */
++ tmp &= ~PREEMPT_NEED_RESCHED;
++ if (tmp)
++ return false;
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
++#endif
+ }
+
+ #ifdef CONFIG_PREEMPT
+diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
+index 5f9012ff52ed..39117e57caf2 100644
+--- a/arch/x86/include/asm/signal.h
++++ b/arch/x86/include/asm/signal.h
+@@ -28,6 +28,19 @@ typedef struct {
+ #define SA_IA32_ABI 0x02000000u
+ #define SA_X32_ABI 0x01000000u
+
++/*
++ * Because some traps use the IST stack, we must keep preemption
++ * disabled while calling do_trap(), but do_trap() may call
++ * force_sig_info() which will grab the signal spin_locks for the
++ * task, which in PREEMPT_RT_FULL are mutexes. By defining
++ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
++ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
++ * trap.
++ */
++#if defined(CONFIG_PREEMPT_RT_FULL)
++#define ARCH_RT_DELAYS_SIGNAL_SEND
++#endif
+
-+ # cat /sys/kernel/debug/tracing/synthetic_events
-+ wakeup_new_test pid_t pid
+ #ifndef CONFIG_COMPAT
+ typedef sigset_t compat_sigset_t;
+ #endif
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 371b3a4af000..06613a805b25 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -60,7 +60,7 @@
+ */
+ static __always_inline void boot_init_stack_canary(void)
+ {
+- u64 canary;
++ u64 uninitialized_var(canary);
+ u64 tsc;
+
+ #ifdef CONFIG_X86_64
+@@ -71,8 +71,14 @@ static __always_inline void boot_init_stack_canary(void)
+ * of randomness. The TSC only matters for very early init,
+ * there it already has some randomness on most systems. Later
+ * on during the bootup the random pool has true entropy too.
++ * For preempt-rt we need to weaken the randomness a bit, as
++ * we can't call into the random generator from atomic context
++ * due to locking constraints. We just leave canary
++ * uninitialized and use the TSC based randomness on top of it.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ get_random_bytes(&canary, sizeof(canary));
++#endif
+ tsc = rdtsc();
+ canary += tsc + (tsc << 32UL);
+ canary &= CANARY_MASK;
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index 95ff2d7f553f..b1c9129f64fc 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -56,11 +56,14 @@ struct task_struct;
+ struct thread_info {
+ unsigned long flags; /* low level flags */
+ u32 status; /* thread synchronous flags */
++ int preempt_lazy_count; /* 0 => lazy preemptable
++ <0 => BUG */
+ };
+
+ #define INIT_THREAD_INFO(tsk) \
+ { \
+ .flags = 0, \
++ .preempt_lazy_count = 0, \
+ }
+
+ #define init_stack (init_thread_union.stack)
+@@ -69,6 +72,10 @@ struct thread_info {
+
+ #include <asm/asm-offsets.h>
+
++#define GET_THREAD_INFO(reg) \
++ _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
++ _ASM_SUB $(THREAD_SIZE),reg ;
+
-+ The following hist trigger both defines the missing testpid
-+ variable and specifies an onmatch() action that generates a
-+ wakeup_new_test synthetic event whenever a sched_wakeup_new event
-+ occurs, which because of the 'if comm == "cyclictest"' filter only
-+ happens when the executable is cyclictest:
+ #endif
+
+ /*
+@@ -85,6 +92,7 @@ struct thread_info {
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
++#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
+ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
+ #define TIF_UPROBE 12 /* breakpointed or singlestepping */
+ #define TIF_PATCH_PENDING 13 /* pending live patching update */
+@@ -112,6 +120,7 @@ struct thread_info {
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+ #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
+@@ -153,6 +162,8 @@ struct thread_info {
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
-+ # echo 'hist:keys=$testpid:testpid=pid:onmatch(sched.sched_wakeup_new).\
-+ wakeup_new_test($testpid) if comm=="cyclictest"' >> \
-+ /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/trigger
+ #define STACK_WARN (THREAD_SIZE/8)
+
+ /*
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 96a8a68f9c79..c9af5afebc4a 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1688,19 +1688,20 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
+ return false;
+ }
+
+-static inline bool ioapic_irqd_mask(struct irq_data *data)
++static inline bool ioapic_prepare_move(struct irq_data *data)
+ {
+ /* If we are moving the irq we need to mask it */
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
+- mask_ioapic_irq(data);
++ if (!irqd_irq_masked(data))
++ mask_ioapic_irq(data);
+ return true;
+ }
+ return false;
+ }
+
+-static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
++static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
+ {
+- if (unlikely(masked)) {
++ if (unlikely(moveit)) {
+ /* Only migrate the irq if the ack has been received.
+ *
+ * On rare occasions the broadcast level triggered ack gets
+@@ -1729,15 +1730,17 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
+ */
+ if (!io_apic_level_ack_pending(data->chip_data))
+ irq_move_masked_irq(data);
+- unmask_ioapic_irq(data);
++ /* If the irq is masked in the core, leave it */
++ if (!irqd_irq_masked(data))
++ unmask_ioapic_irq(data);
+ }
+ }
+ #else
+-static inline bool ioapic_irqd_mask(struct irq_data *data)
++static inline bool ioapic_prepare_move(struct irq_data *data)
+ {
+ return false;
+ }
+-static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
++static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
+ {
+ }
+ #endif
+@@ -1746,11 +1749,11 @@ static void ioapic_ack_level(struct irq_data *irq_data)
+ {
+ struct irq_cfg *cfg = irqd_cfg(irq_data);
+ unsigned long v;
+- bool masked;
++ bool moveit;
+ int i;
+
+ irq_complete_move(cfg);
+- masked = ioapic_irqd_mask(irq_data);
++ moveit = ioapic_prepare_move(irq_data);
+
+ /*
+ * It appears there is an erratum which affects at least version 0x11
+@@ -1805,7 +1808,7 @@ static void ioapic_ack_level(struct irq_data *irq_data)
+ eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
+ }
+
+- ioapic_irqd_unmask(irq_data, masked);
++ ioapic_finish_move(irq_data, moveit);
+ }
+
+ static void ioapic_ir_ack_level(struct irq_data *irq_data)
+diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
+index 76417a9aab73..62c3e27c8e1c 100644
+--- a/arch/x86/kernel/asm-offsets.c
++++ b/arch/x86/kernel/asm-offsets.c
+@@ -38,6 +38,7 @@ void common(void) {
+
+ BLANK();
+ OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
++ OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count);
+ OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
+
+ BLANK();
+@@ -94,6 +95,7 @@ void common(void) {
+
+ BLANK();
+ DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
++ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
+
+ /* TLB state for the entry code */
+ OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);
+diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+index 7f85b76f43bc..9e74b805070f 100644
+--- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
++++ b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+@@ -14,6 +14,7 @@
+ #include <linux/slab.h>
+ #include <linux/kmod.h>
+ #include <linux/poll.h>
++#include <linux/swork.h>
+
+ #include "mce-internal.h"
+
+@@ -86,13 +87,43 @@ static void mce_do_trigger(struct work_struct *work)
+
+ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+
+-
+-void mce_work_trigger(void)
++static void __mce_work_trigger(struct swork_event *event)
+ {
+ if (mce_helper[0])
+ schedule_work(&mce_trigger_work);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static bool notify_work_ready __read_mostly;
++static struct swork_event notify_work;
+
-+ Creating and displaying a histogram based on those events is now
-+ just a matter of using the fields and new synthetic event in the
-+ tracing/events/synthetic directory, as usual:
++static int mce_notify_work_init(void)
++{
++ int err;
+
-+ # echo 'hist:keys=pid:sort=pid' >> \
-+ /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/trigger
++ err = swork_get();
++ if (err)
++ return err;
+
-+ Running 'cyclictest' should cause wakeup_new events to generate
-+ wakeup_new_test synthetic events which should result in histogram
-+ output in the wakeup_new_test event's hist file:
++ INIT_SWORK(¬ify_work, __mce_work_trigger);
++ notify_work_ready = true;
++ return 0;
++}
+
-+ # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/hist
++void mce_work_trigger(void)
++{
++ if (notify_work_ready)
++ swork_queue(¬ify_work);
++}
+
-+ A more typical usage would be to use two events to calculate a
-+ latency. The following example uses a set of hist triggers to
-+ produce a 'wakeup_latency' histogram:
++#else
++void mce_work_trigger(void)
++{
++ __mce_work_trigger(NULL);
++}
++static inline int mce_notify_work_init(void) { return 0; }
++#endif
+
-+ First, we define a 'wakeup_latency' synthetic event:
+ static ssize_t
+ show_trigger(struct device *s, struct device_attribute *attr, char *buf)
+ {
+@@ -356,7 +387,7 @@ static __init int dev_mcelog_init_device(void)
+
+ return err;
+ }
+-
++ mce_notify_work_init();
+ mce_register_decode_chain(&dev_mcelog_nb);
+ return 0;
+ }
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 98e4e4dc4a3b..5cce2ee3b9f6 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -42,6 +42,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/irq_work.h>
+ #include <linux/export.h>
++#include <linux/jiffies.h>
+ #include <linux/jump_label.h>
+
+ #include <asm/intel-family.h>
+@@ -1365,7 +1366,7 @@ int memory_failure(unsigned long pfn, int vector, int flags)
+ static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
+
+ static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
+-static DEFINE_PER_CPU(struct timer_list, mce_timer);
++static DEFINE_PER_CPU(struct hrtimer, mce_timer);
+
+ static unsigned long mce_adjust_timer_default(unsigned long interval)
+ {
+@@ -1374,27 +1375,19 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
+
+ static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
+
+-static void __start_timer(struct timer_list *t, unsigned long interval)
++static void __start_timer(struct hrtimer *t, unsigned long iv)
+ {
+- unsigned long when = jiffies + interval;
+- unsigned long flags;
+-
+- local_irq_save(flags);
+-
+- if (!timer_pending(t) || time_before(when, t->expires))
+- mod_timer(t, round_jiffies(when));
++ if (!iv)
++ return;
+
+- local_irq_restore(flags);
++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
++ 0, HRTIMER_MODE_REL_PINNED);
+ }
+
+-static void mce_timer_fn(unsigned long data)
++static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
+- int cpu = smp_processor_id();
+ unsigned long iv;
+
+- WARN_ON(cpu != data);
+-
+ iv = __this_cpu_read(mce_next_interval);
+
+ if (mce_available(this_cpu_ptr(&cpu_info))) {
+@@ -1417,7 +1410,11 @@ static void mce_timer_fn(unsigned long data)
+
+ done:
+ __this_cpu_write(mce_next_interval, iv);
+- __start_timer(t, iv);
++ if (!iv)
++ return HRTIMER_NORESTART;
+
-+ # echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> \
-+ /sys/kernel/debug/tracing/synthetic_events
++ hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(iv)));
++ return HRTIMER_RESTART;
+ }
+
+ /*
+@@ -1425,7 +1422,7 @@ static void mce_timer_fn(unsigned long data)
+ */
+ void mce_timer_kick(unsigned long interval)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
++ struct hrtimer *t = this_cpu_ptr(&mce_timer);
+ unsigned long iv = __this_cpu_read(mce_next_interval);
+
+ __start_timer(t, interval);
+@@ -1440,7 +1437,7 @@ static void mce_timer_delete_all(void)
+ int cpu;
+
+ for_each_online_cpu(cpu)
+- del_timer_sync(&per_cpu(mce_timer, cpu));
++ hrtimer_cancel(&per_cpu(mce_timer, cpu));
+ }
+
+ /*
+@@ -1769,7 +1766,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
+ }
+ }
+
+-static void mce_start_timer(struct timer_list *t)
++static void mce_start_timer(struct hrtimer *t)
+ {
+ unsigned long iv = check_interval * HZ;
+
+@@ -1782,18 +1779,19 @@ static void mce_start_timer(struct timer_list *t)
+
+ static void __mcheck_cpu_setup_timer(void)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
+- unsigned int cpu = smp_processor_id();
++ struct hrtimer *t = this_cpu_ptr(&mce_timer);
+
+- setup_pinned_timer(t, mce_timer_fn, cpu);
++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ t->function = mce_timer_fn;
+ }
+
+ static void __mcheck_cpu_init_timer(void)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
+- unsigned int cpu = smp_processor_id();
++ struct hrtimer *t = this_cpu_ptr(&mce_timer);
+
-+ Next, we specify that whenever we see a sched_waking event for a
-+ cyclictest thread, save the timestamp in a 'ts0' variable:
++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ t->function = mce_timer_fn;
+
+- setup_pinned_timer(t, mce_timer_fn, cpu);
+ mce_start_timer(t);
+ }
+
+@@ -2309,7 +2307,7 @@ static int mce_cpu_dead(unsigned int cpu)
+
+ static int mce_cpu_online(unsigned int cpu)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
++ struct hrtimer *t = this_cpu_ptr(&mce_timer);
+ int ret;
+
+ mce_device_create(cpu);
+@@ -2326,10 +2324,10 @@ static int mce_cpu_online(unsigned int cpu)
+
+ static int mce_cpu_pre_down(unsigned int cpu)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
++ struct hrtimer *t = this_cpu_ptr(&mce_timer);
+
+ mce_disable_cpu();
+- del_timer_sync(t);
++ hrtimer_cancel(t);
+ mce_threshold_remove_device(cpu);
+ mce_device_remove(cpu);
+ return 0;
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index 2ea85b32421a..6914dc569d1e 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -138,6 +138,18 @@ void kernel_fpu_end(void)
+ }
+ EXPORT_SYMBOL_GPL(kernel_fpu_end);
+
++void kernel_fpu_resched(void)
++{
++ WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
+
-+ # echo 'hist:keys=$saved_pid:saved_pid=pid:ts0=common_timestamp.usecs \
-+ if comm=="cyclictest"' >> \
-+ /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
++ if (should_resched(PREEMPT_OFFSET)) {
++ kernel_fpu_end();
++ cond_resched();
++ kernel_fpu_begin();
++ }
++}
++EXPORT_SYMBOL_GPL(kernel_fpu_resched);
+
-+ Then, when the corresponding thread is actually scheduled onto the
-+ CPU by a sched_switch event, calculate the latency and use that
-+ along with another variable and an event field to generate a
-+ wakeup_latency synthetic event:
+ /*
+ * Save the FPU state (mark it for reload if necessary):
+ *
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index 95600a99ae93..9192d76085ba 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -130,6 +130,7 @@ void irq_ctx_init(int cpu)
+ cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ struct irq_stack *irqstk;
+@@ -146,6 +147,7 @@ void do_softirq_own_stack(void)
+
+ call_on_stack(__do_softirq, isp);
+ }
++#endif
+
+ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+ {
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 5224c6099184..9b2b1f0409c5 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -38,6 +38,7 @@
+ #include <linux/io.h>
+ #include <linux/kdebug.h>
+ #include <linux/syscalls.h>
++#include <linux/highmem.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/ldt.h>
+@@ -198,6 +199,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+ }
+ EXPORT_SYMBOL_GPL(start_thread);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ int i;
+
-+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:\
-+ onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,\
-+ $saved_pid,next_prio) if next_comm=="cyclictest"' >> \
-+ /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
++ /*
++ * Clear @prev's kmap_atomic mappings
++ */
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++ pte_t *ptep = kmap_pte - idx;
+
-+ We also need to create a histogram on the wakeup_latency synthetic
-+ event in order to aggregate the generated synthetic event data:
++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
++ }
++ /*
++ * Restore @next_p's kmap_atomic mappings
++ */
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
+
-+ # echo 'hist:keys=pid,prio,lat:sort=pid,lat' >> \
-+ /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
++ if (!pte_none(next_p->kmap_pte[i]))
++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
++ }
++}
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
+
-+ Finally, once we've run cyclictest to actually generate some
-+ events, we can see the output by looking at the wakeup_latency
-+ synthetic event's hist file:
+
+ /*
+ * switch_to(x,y) should switch tasks from x to y.
+@@ -273,6 +303,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ __switch_to_xtra(prev_p, next_p, tss);
+
++ switch_kmaps(prev_p, next_p);
+
-+ # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/hist
+ /*
+ * Leave lazy mode, flushing any hypercalls made here.
+ * This must be done before restoring TLS segments so
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 6d0fbff71d7a..92f13ac70ad4 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2120,7 +2120,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
+ apic->vcpu = vcpu;
+
+ hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
+- HRTIMER_MODE_ABS_PINNED);
++ HRTIMER_MODE_ABS_PINNED_HARD);
+ apic->lapic_timer.timer.function = apic_timer_fn;
+
+ /*
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3856828ee1dc..407658146ae1 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6287,6 +6287,13 @@ int kvm_arch_init(void *opaque)
+ goto out;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
++ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
++ return -EOPNOTSUPP;
++ }
++#endif
+
-+ - onmax(var).save(field,.. .)
+ r = kvm_mmu_module_init();
+ if (r)
+ goto out_free_percpu;
+diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
+index 6d18b70ed5a9..f752724c22e8 100644
+--- a/arch/x86/mm/highmem_32.c
++++ b/arch/x86/mm/highmem_32.c
+@@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap);
+ */
+ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
++ pte_t pte = mk_pte(page, prot);
+ unsigned long vaddr;
+ int idx, type;
+
+- preempt_disable();
++ preempt_disable_nort();
+ pagefault_disable();
+
+ if (!PageHighMem(page))
+@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
+- set_pte(kmap_pte-idx, mk_pte(page, prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_pte(kmap_pte-idx, pte);
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr)
+ * is a bad idea also, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = __pte(0);
++#endif
+ kpte_clear_flush(kmap_pte-idx, vaddr);
+ kmap_atomic_idx_pop();
+ arch_flush_lazy_mmu_mode();
+@@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr)
+ #endif
+
+ pagefault_enable();
+- preempt_enable();
++ preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(__kunmap_atomic);
+
+diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
+index ada98b39b8ad..585f6829653b 100644
+--- a/arch/x86/mm/iomap_32.c
++++ b/arch/x86/mm/iomap_32.c
+@@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free);
+
+ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+ {
++ pte_t pte = pfn_pte(pfn, prot);
+ unsigned long vaddr;
+ int idx, type;
+
+@@ -65,7 +66,12 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++ WARN_ON(!pte_none(*(kmap_pte - idx)));
+
-+ The 'onmax(var).save(field,...)' hist trigger action is invoked
-+ whenever the value of 'var' associated with a histogram entry
-+ exceeds the current maximum contained in that variable.
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_pte(kmap_pte - idx, pte);
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+@@ -113,6 +119,9 @@ iounmap_atomic(void __iomem *kvaddr)
+ * is a bad idea also, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = __pte(0);
++#endif
+ kpte_clear_flush(kmap_pte-idx, vaddr);
+ kmap_atomic_idx_pop();
+ }
+diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h
+index bb1fe6c1816e..8a22f1e7b6c9 100644
+--- a/arch/xtensa/include/asm/spinlock_types.h
++++ b/arch/xtensa/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int slock;
+ } arch_spinlock_t;
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 1d27e2a152e0..a29ca4dfad77 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -116,6 +116,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
+
+ INIT_LIST_HEAD(&rq->queuelist);
+ INIT_LIST_HEAD(&rq->timeout_list);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
++#endif
+ rq->cpu = -1;
+ rq->q = q;
+ rq->__sector = (sector_t) -1;
+@@ -280,7 +283,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
+ void blk_start_queue(struct request_queue *q)
+ {
+ lockdep_assert_held(q->queue_lock);
+- WARN_ON(!in_interrupt() && !irqs_disabled());
++ WARN_ON_NONRT(!in_interrupt() && !irqs_disabled());
+ WARN_ON_ONCE(q->mq_ops);
+
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+@@ -808,12 +811,21 @@ void blk_queue_exit(struct request_queue *q)
+ percpu_ref_put(&q->q_usage_counter);
+ }
+
++static void blk_queue_usage_counter_release_swork(struct swork_event *sev)
++{
++ struct request_queue *q =
++ container_of(sev, struct request_queue, mq_pcpu_wake);
+
-+ The end result is that the trace event fields specified as the
-+ onmax.save() params will be saved if 'var' exceeds the current
-+ maximum for that hist trigger entry. This allows context from the
-+ event that exhibited the new maximum to be saved for later
-+ reference. When the histogram is displayed, additional fields
-+ displaying the saved values will be printed.
++ wake_up_all(&q->mq_freeze_wq);
++}
+
-+ As an example the below defines a couple of hist triggers, one for
-+ sched_waking and another for sched_switch, keyed on pid. Whenever
-+ a sched_waking occurs, the timestamp is saved in the entry
-+ corresponding to the current pid, and when the scheduler switches
-+ back to that pid, the timestamp difference is calculated. If the
-+ resulting latency, stored in wakeup_lat, exceeds the current
-+ maximum latency, the values specified in the save() fields are
-+ recoreded:
+ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+ {
+ struct request_queue *q =
+ container_of(ref, struct request_queue, q_usage_counter);
+
+- wake_up_all(&q->mq_freeze_wq);
++ if (wq_has_sleeper(&q->mq_freeze_wq))
++ swork_queue(&q->mq_pcpu_wake);
+ }
+
+ static void blk_rq_timed_out_timer(unsigned long data)
+@@ -890,6 +902,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+ __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+ init_waitqueue_head(&q->mq_freeze_wq);
++ INIT_SWORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_swork);
+
+ /*
+ * Init percpu_ref in atomic mode so that it's faster to shutdown.
+@@ -3309,7 +3322,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
+ blk_run_queue_async(q);
+ else
+ __blk_run_queue(q);
+- spin_unlock(q->queue_lock);
++ spin_unlock_irq(q->queue_lock);
+ }
+
+ static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
+@@ -3357,7 +3370,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ {
+ struct request_queue *q;
+- unsigned long flags;
+ struct request *rq;
+ LIST_HEAD(list);
+ unsigned int depth;
+@@ -3377,11 +3389,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ q = NULL;
+ depth = 0;
+
+- /*
+- * Save and disable interrupts here, to avoid doing it for every
+- * queue lock we have to take.
+- */
+- local_irq_save(flags);
+ while (!list_empty(&list)) {
+ rq = list_entry_rq(list.next);
+ list_del_init(&rq->queuelist);
+@@ -3394,7 +3401,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ queue_unplugged(q, depth, from_schedule);
+ q = rq->q;
+ depth = 0;
+- spin_lock(q->queue_lock);
++ spin_lock_irq(q->queue_lock);
+ }
+
+ /*
+@@ -3421,8 +3428,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ */
+ if (q)
+ queue_unplugged(q, depth, from_schedule);
+-
+- local_irq_restore(flags);
+ }
+
+ void blk_finish_plug(struct blk_plug *plug)
+@@ -3634,6 +3639,8 @@ int __init blk_dev_init(void)
+ if (!kblockd_workqueue)
+ panic("Failed to create kblockd\n");
+
++ BUG_ON(swork_get());
+
-+ # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
-+ if comm=="cyclictest"' >> \
-+ /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+ request_cachep = kmem_cache_create("blkdev_requests",
+ sizeof(struct request), 0, SLAB_PANIC, NULL);
+
+diff --git a/block/blk-ioc.c b/block/blk-ioc.c
+index f23311e4b201..ca9ea624f159 100644
+--- a/block/blk-ioc.c
++++ b/block/blk-ioc.c
+@@ -9,6 +9,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/slab.h>
+ #include <linux/sched/task.h>
++#include <linux/delay.h>
+
+ #include "blk.h"
+
+@@ -118,7 +119,7 @@ static void ioc_release_fn(struct work_struct *work)
+ spin_unlock(q->queue_lock);
+ } else {
+ spin_unlock_irqrestore(&ioc->lock, flags);
+- cpu_relax();
++ cpu_chill();
+ spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+ }
+ }
+@@ -202,7 +203,7 @@ void put_io_context_active(struct io_context *ioc)
+ spin_unlock(icq->q->queue_lock);
+ } else {
+ spin_unlock_irqrestore(&ioc->lock, flags);
+- cpu_relax();
++ cpu_chill();
+ goto retry;
+ }
+ }
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 49979c095f31..0815a6599ab3 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -339,6 +339,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
+ /* tag was already set */
+ rq->extra_len = 0;
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
++#endif
+ INIT_LIST_HEAD(&rq->timeout_list);
+ rq->timeout = 0;
+
+@@ -533,12 +536,24 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
+ }
+ EXPORT_SYMBOL(blk_mq_end_request);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
+
-+ # echo 'hist:keys=next_pid:\
-+ wakeup_lat=common_timestamp.usecs-$ts0:\
-+ onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm) \
-+ if next_comm=="cyclictest"' >> \
-+ /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
++void __blk_mq_complete_request_remote_work(struct work_struct *work)
++{
++ struct request *rq = container_of(work, struct request, work);
+
-+ When the histogram is displayed, the max value and the saved
-+ values corresponding to the max are displayed following the rest
-+ of the fields:
++ rq->q->softirq_done_fn(rq);
++}
+
-+ # cat /sys/kernel/debug/tracing/events/sched/sched_switch/hist
-+ { next_pid: 2255 } hitcount: 239
-+ common_timestamp-ts0: 0
-+ max: 27
-+ next_comm: cyclictest
-+ prev_pid: 0 prev_prio: 120 prev_comm: swapper/1
++#else
+
-+ { next_pid: 2256 } hitcount: 2355
-+ common_timestamp-ts0: 0
-+ max: 49 next_comm: cyclictest
-+ prev_pid: 0 prev_prio: 120 prev_comm: swapper/0
+ static void __blk_mq_complete_request_remote(void *data)
+ {
+ struct request *rq = data;
+
+ rq->q->softirq_done_fn(rq);
+ }
++#endif
+
+ static void __blk_mq_complete_request(struct request *rq)
+ {
+@@ -558,19 +573,27 @@ static void __blk_mq_complete_request(struct request *rq)
+ return;
+ }
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
+ shared = cpus_share_cache(cpu, ctx->cpu);
+
+ if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++ * We could force QUEUE_FLAG_SAME_FORCE then we would not get in
++ * here. But we could try to invoke it on the CPU like this.
++ */
++ schedule_work_on(ctx->cpu, &rq->work);
++#else
+ rq->csd.func = __blk_mq_complete_request_remote;
+ rq->csd.info = rq;
+ rq->csd.flags = 0;
+ smp_call_function_single_async(ctx->cpu, &rq->csd);
++#endif
+ } else {
+ rq->q->softirq_done_fn(rq);
+ }
+- put_cpu();
++ put_cpu_light();
+ }
+
+ /**
+@@ -1238,14 +1261,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+ return;
+
+ if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
+- int cpu = get_cpu();
++ int cpu = get_cpu_light();
+ if (cpumask_test_cpu(cpu, hctx->cpumask)) {
+ __blk_mq_run_hw_queue(hctx);
+- put_cpu();
++ put_cpu_light();
+ return;
+ }
+
+- put_cpu();
++ put_cpu_light();
+ }
+
+ kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+@@ -2863,10 +2886,9 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
+ kt = nsecs;
+
+ mode = HRTIMER_MODE_REL;
+- hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
++ hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode, current);
+ hrtimer_set_expires(&hs.timer, kt);
+
+- hrtimer_init_sleeper(&hs, current);
+ do {
+ if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
+ break;
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index 877237e09083..d944750bade0 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -98,12 +98,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+ */
+ static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
+ {
+- return __blk_mq_get_ctx(q, get_cpu());
++ return __blk_mq_get_ctx(q, get_cpu_light());
+ }
+
+ static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+ {
+- put_cpu();
++ put_cpu_light();
+ }
+
+ struct blk_mq_alloc_data {
+diff --git a/block/blk-softirq.c b/block/blk-softirq.c
+index 01e2b353a2b9..e8c0d4945f5a 100644
+--- a/block/blk-softirq.c
++++ b/block/blk-softirq.c
+@@ -53,6 +53,7 @@ static void trigger_softirq(void *data)
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ /*
+@@ -91,6 +92,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
+ this_cpu_ptr(&blk_cpu_done));
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ return 0;
+ }
+@@ -143,6 +145,7 @@ void __blk_complete_request(struct request *req)
+ goto do_local;
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ /**
+diff --git a/block/bounce.c b/block/bounce.c
+index 1d05c422c932..0101ffefddc4 100644
+--- a/block/bounce.c
++++ b/block/bounce.c
+@@ -66,11 +66,11 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
+ unsigned long flags;
+ unsigned char *vto;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ vto = kmap_atomic(to->bv_page);
+ memcpy(vto + to->bv_offset, vfrom, to->bv_len);
+ kunmap_atomic(vto);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ #else /* CONFIG_HIGHMEM */
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 50eb828db767..7bce92a6599a 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -731,13 +731,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
+
+ int crypto_register_notifier(struct notifier_block *nb)
+ {
+- return blocking_notifier_chain_register(&crypto_chain, nb);
++ return srcu_notifier_chain_register(&crypto_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(crypto_register_notifier);
+
+ int crypto_unregister_notifier(struct notifier_block *nb)
+ {
+- return blocking_notifier_chain_unregister(&crypto_chain, nb);
++ return srcu_notifier_chain_unregister(&crypto_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
+
+diff --git a/crypto/api.c b/crypto/api.c
+index 941cd4c6c7ec..2b1cf0c1dcea 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list);
+ DECLARE_RWSEM(crypto_alg_sem);
+ EXPORT_SYMBOL_GPL(crypto_alg_sem);
+
+-BLOCKING_NOTIFIER_HEAD(crypto_chain);
++SRCU_NOTIFIER_HEAD(crypto_chain);
+ EXPORT_SYMBOL_GPL(crypto_chain);
+
+ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
+@@ -236,10 +236,10 @@ int crypto_probing_notify(unsigned long val, void *v)
+ {
+ int ok;
+
+- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
++ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+ if (ok == NOTIFY_DONE) {
+ request_module("cryptomgr");
+- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
++ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+ }
+
+ return ok;
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 248f6ba41688..54b7985c8caa 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -37,6 +37,7 @@
+ struct cryptd_cpu_queue {
+ struct crypto_queue queue;
+ struct work_struct work;
++ spinlock_t qlock;
+ };
+
+ struct cryptd_queue {
+@@ -115,6 +116,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
+ cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+ INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
++ spin_lock_init(&cpu_queue->qlock);
+ }
+ return 0;
+ }
+@@ -139,8 +141,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
+ atomic_t *refcnt;
+ bool may_backlog;
+
+- cpu = get_cpu();
+- cpu_queue = this_cpu_ptr(queue->cpu_queue);
++ cpu_queue = raw_cpu_ptr(queue->cpu_queue);
++ spin_lock_bh(&cpu_queue->qlock);
++ cpu = smp_processor_id();
+
-+ Totals:
-+ Hits: 12970
-+ Entries: 2
-+ Dropped: 0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/acpi/acpica/acglobal.h linux-4.14/drivers/acpi/acpica/acglobal.h
---- linux-4.14.orig/drivers/acpi/acpica/acglobal.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/acpi/acpica/acglobal.h 2018-09-05 11:05:07.000000000 +0200
-@@ -116,7 +116,7 @@
+ err = crypto_enqueue_request(&cpu_queue->queue, request);
+
+ refcnt = crypto_tfm_ctx(request->tfm);
+@@ -157,7 +161,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
+ atomic_inc(refcnt);
+
+ out_put_cpu:
+- put_cpu();
++ spin_unlock_bh(&cpu_queue->qlock);
+
+ return err;
+ }
+@@ -173,16 +177,11 @@ static void cryptd_queue_worker(struct work_struct *work)
+ cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+ /*
+ * Only handle one request at a time to avoid hogging crypto workqueue.
+- * preempt_disable/enable is used to prevent being preempted by
+- * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
+- * cryptd_enqueue_request() being accessed from software interrupts.
+ */
+- local_bh_disable();
+- preempt_disable();
++ spin_lock_bh(&cpu_queue->qlock);
+ backlog = crypto_get_backlog(&cpu_queue->queue);
+ req = crypto_dequeue_request(&cpu_queue->queue);
+- preempt_enable();
+- local_bh_enable();
++ spin_unlock_bh(&cpu_queue->qlock);
+
+ if (!req)
+ return;
+diff --git a/crypto/internal.h b/crypto/internal.h
+index f07320423191..333d985088fe 100644
+--- a/crypto/internal.h
++++ b/crypto/internal.h
+@@ -47,7 +47,7 @@ struct crypto_larval {
+
+ extern struct list_head crypto_alg_list;
+ extern struct rw_semaphore crypto_alg_sem;
+-extern struct blocking_notifier_head crypto_chain;
++extern struct srcu_notifier_head crypto_chain;
+
+ #ifdef CONFIG_PROC_FS
+ void __init crypto_init_proc(void);
+@@ -143,7 +143,7 @@ static inline int crypto_is_moribund(struct crypto_alg *alg)
+
+ static inline void crypto_notify(unsigned long val, void *v)
+ {
+- blocking_notifier_call_chain(&crypto_chain, val, v);
++ srcu_notifier_call_chain(&crypto_chain, val, v);
+ }
+
+ #endif /* _CRYPTO_INTERNAL_H */
+diff --git a/crypto/scompress.c b/crypto/scompress.c
+index 2075e2c4e7df..c6b4e265c6bf 100644
+--- a/crypto/scompress.c
++++ b/crypto/scompress.c
+@@ -24,6 +24,7 @@
+ #include <linux/cryptouser.h>
+ #include <net/netlink.h>
+ #include <linux/scatterlist.h>
++#include <linux/locallock.h>
+ #include <crypto/scatterwalk.h>
+ #include <crypto/internal/acompress.h>
+ #include <crypto/internal/scompress.h>
+@@ -34,6 +35,7 @@ static void * __percpu *scomp_src_scratches;
+ static void * __percpu *scomp_dst_scratches;
+ static int scomp_scratch_users;
+ static DEFINE_MUTEX(scomp_lock);
++static DEFINE_LOCAL_IRQ_LOCK(scomp_scratches_lock);
+
+ #ifdef CONFIG_NET
+ static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+@@ -193,7 +195,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ void **tfm_ctx = acomp_tfm_ctx(tfm);
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void **ctx = acomp_request_ctx(req);
+- const int cpu = get_cpu();
++ const int cpu = local_lock_cpu(scomp_scratches_lock);
+ u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
+ u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
+ int ret;
+@@ -228,7 +230,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ 1);
+ }
+ out:
+- put_cpu();
++ local_unlock_cpu(scomp_scratches_lock);
+ return ret;
+ }
+
+diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
+index 95eed442703f..50bc5b61d899 100644
+--- a/drivers/acpi/acpica/acglobal.h
++++ b/drivers/acpi/acpica/acglobal.h
+@@ -116,7 +116,7 @@ ACPI_GLOBAL(u8, acpi_gbl_global_lock_pending);
* interrupt level
*/
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
/* Mutex for _OSI support */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/acpi/acpica/hwregs.c linux-4.14/drivers/acpi/acpica/hwregs.c
---- linux-4.14.orig/drivers/acpi/acpica/hwregs.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/acpi/acpica/hwregs.c 2018-09-05 11:05:07.000000000 +0200
-@@ -428,14 +428,14 @@
+diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
+index acb417b58bbb..ea49e08c263f 100644
+--- a/drivers/acpi/acpica/hwregs.c
++++ b/drivers/acpi/acpica/hwregs.c
+@@ -428,14 +428,14 @@ acpi_status acpi_hw_clear_acpi_status(void)
ACPI_BITMASK_ALL_FIXED_STATUS,
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
if (ACPI_FAILURE(status)) {
goto exit;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/acpi/acpica/hwxface.c linux-4.14/drivers/acpi/acpica/hwxface.c
---- linux-4.14.orig/drivers/acpi/acpica/hwxface.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/acpi/acpica/hwxface.c 2018-09-05 11:05:07.000000000 +0200
-@@ -373,7 +373,7 @@
+diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
+index 34684ae89981..fb84983e1839 100644
+--- a/drivers/acpi/acpica/hwxface.c
++++ b/drivers/acpi/acpica/hwxface.c
+@@ -373,7 +373,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/*
* At this point, we know that the parent register is one of the
-@@ -434,7 +434,7 @@
+@@ -434,7 +434,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
unlock_and_exit:
return_ACPI_STATUS(status);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/acpi/acpica/utmutex.c linux-4.14/drivers/acpi/acpica/utmutex.c
---- linux-4.14.orig/drivers/acpi/acpica/utmutex.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/acpi/acpica/utmutex.c 2018-09-05 11:05:07.000000000 +0200
-@@ -88,7 +88,7 @@
+diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
+index 586354788018..3a3c2a86437f 100644
+--- a/drivers/acpi/acpica/utmutex.c
++++ b/drivers/acpi/acpica/utmutex.c
+@@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(void)
return_ACPI_STATUS (status);
}
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
-@@ -145,7 +145,7 @@
+@@ -145,7 +145,7 @@ void acpi_ut_mutex_terminate(void)
/* Delete the spinlocks */
acpi_os_delete_lock(acpi_gbl_gpe_lock);
acpi_os_delete_lock(acpi_gbl_reference_count_lock);
/* Delete the reader/writer lock */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/ata/libata-sff.c linux-4.14/drivers/ata/libata-sff.c
---- linux-4.14.orig/drivers/ata/libata-sff.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/ata/libata-sff.c 2018-09-05 11:05:07.000000000 +0200
-@@ -679,9 +679,9 @@
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index cc2f2e35f4c2..0f0bc86e02df 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -679,9 +679,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_queued_cmd *qc, unsigned char *b
unsigned long flags;
unsigned int consumed;
return consumed;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/block/brd.c linux-4.14/drivers/block/brd.c
---- linux-4.14.orig/drivers/block/brd.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/block/brd.c 2018-09-05 11:05:07.000000000 +0200
-@@ -60,7 +60,6 @@
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index cdd6f256da59..2269d379c92f 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -52,7 +52,7 @@ static void split_counters(unsigned int *cnt, unsigned int *inpr)
+ /* A preserved old value of the events counter. */
+ static unsigned int saved_count;
+
+-static DEFINE_SPINLOCK(events_lock);
++static DEFINE_RAW_SPINLOCK(events_lock);
+
+ static void pm_wakeup_timer_fn(unsigned long data);
+
+@@ -180,9 +180,9 @@ void wakeup_source_add(struct wakeup_source *ws)
+ ws->active = false;
+ ws->last_time = ktime_get();
+
+- spin_lock_irqsave(&events_lock, flags);
++ raw_spin_lock_irqsave(&events_lock, flags);
+ list_add_rcu(&ws->entry, &wakeup_sources);
+- spin_unlock_irqrestore(&events_lock, flags);
++ raw_spin_unlock_irqrestore(&events_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(wakeup_source_add);
+
+@@ -197,9 +197,9 @@ void wakeup_source_remove(struct wakeup_source *ws)
+ if (WARN_ON(!ws))
+ return;
+
+- spin_lock_irqsave(&events_lock, flags);
++ raw_spin_lock_irqsave(&events_lock, flags);
+ list_del_rcu(&ws->entry);
+- spin_unlock_irqrestore(&events_lock, flags);
++ raw_spin_unlock_irqrestore(&events_lock, flags);
+ synchronize_srcu(&wakeup_srcu);
+ }
+ EXPORT_SYMBOL_GPL(wakeup_source_remove);
+@@ -844,7 +844,7 @@ bool pm_wakeup_pending(void)
+ unsigned long flags;
+ bool ret = false;
+
+- spin_lock_irqsave(&events_lock, flags);
++ raw_spin_lock_irqsave(&events_lock, flags);
+ if (events_check_enabled) {
+ unsigned int cnt, inpr;
+
+@@ -852,7 +852,7 @@ bool pm_wakeup_pending(void)
+ ret = (cnt != saved_count || inpr > 0);
+ events_check_enabled = !ret;
+ }
+- spin_unlock_irqrestore(&events_lock, flags);
++ raw_spin_unlock_irqrestore(&events_lock, flags);
+
+ if (ret) {
+ pr_info("PM: Wakeup pending, aborting suspend\n");
+@@ -941,13 +941,13 @@ bool pm_save_wakeup_count(unsigned int count)
+ unsigned long flags;
+
+ events_check_enabled = false;
+- spin_lock_irqsave(&events_lock, flags);
++ raw_spin_lock_irqsave(&events_lock, flags);
+ split_counters(&cnt, &inpr);
+ if (cnt == count && inpr == 0) {
+ saved_count = count;
+ events_check_enabled = true;
+ }
+- spin_unlock_irqrestore(&events_lock, flags);
++ raw_spin_unlock_irqrestore(&events_lock, flags);
+ return events_check_enabled;
+ }
+
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index 2d7178f7754e..c1cf87718c2e 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -60,7 +60,6 @@ struct brd_device {
/*
* Look up and return a brd's page for a given sector.
*/
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
pgoff_t idx;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/block/zram/zcomp.c linux-4.14/drivers/block/zram/zcomp.c
---- linux-4.14.orig/drivers/block/zram/zcomp.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/block/zram/zcomp.c 2018-09-05 11:05:07.000000000 +0200
-@@ -116,12 +116,20 @@
+diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
+index 5b8992beffec..40345483a022 100644
+--- a/drivers/block/zram/zcomp.c
++++ b/drivers/block/zram/zcomp.c
+@@ -116,12 +116,20 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
}
int zcomp_compress(struct zcomp_strm *zstrm,
-@@ -171,6 +179,7 @@
+@@ -171,6 +179,7 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
pr_err("Can't allocate a compression stream\n");
return -ENOMEM;
}
*per_cpu_ptr(comp->stream, cpu) = zstrm;
return 0;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/block/zram/zcomp.h linux-4.14/drivers/block/zram/zcomp.h
---- linux-4.14.orig/drivers/block/zram/zcomp.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/block/zram/zcomp.h 2018-09-05 11:05:07.000000000 +0200
-@@ -14,6 +14,7 @@
+diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
+index 41c1002a7d7d..d424eafcbf8e 100644
+--- a/drivers/block/zram/zcomp.h
++++ b/drivers/block/zram/zcomp.h
+@@ -14,6 +14,7 @@ struct zcomp_strm {
/* compression/decompression buffer */
void *buffer;
struct crypto_comp *tfm;
};
/* dynamic per-device compression frontend */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/block/zram/zram_drv.c linux-4.14/drivers/block/zram/zram_drv.c
---- linux-4.14.orig/drivers/block/zram/zram_drv.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/block/zram/zram_drv.c 2018-09-05 11:05:07.000000000 +0200
-@@ -756,6 +756,30 @@
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 1e2648e4c286..c5d61209eb05 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -761,6 +761,30 @@ static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
static void zram_slot_lock(struct zram *zram, u32 index)
{
bit_spin_lock(ZRAM_ACCESS, &zram->table[index].value);
-@@ -765,6 +789,7 @@
+@@ -770,6 +794,7 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
{
bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
}
static void zram_meta_free(struct zram *zram, u64 disksize)
{
-@@ -794,6 +819,7 @@
+@@ -799,6 +824,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
return false;
}
return true;
}
-@@ -845,6 +871,7 @@
+@@ -850,6 +876,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
unsigned long handle;
unsigned int size;
void *src, *dst;
if (zram_wb_enabled(zram)) {
zram_slot_lock(zram, index);
-@@ -879,6 +906,7 @@
+@@ -884,6 +911,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
size = zram_get_obj_size(zram, index);
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_atomic(page);
-@@ -886,14 +914,13 @@
+@@ -891,14 +919,13 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
kunmap_atomic(dst);
ret = 0;
} else {
zram_slot_unlock(zram, index);
/* Should NEVER happen. Return bio error if it does. */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/block/zram/zram_drv.h linux-4.14/drivers/block/zram/zram_drv.h
---- linux-4.14.orig/drivers/block/zram/zram_drv.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/block/zram/zram_drv.h 2018-09-05 11:05:07.000000000 +0200
-@@ -77,6 +77,9 @@
+diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
+index 31762db861e3..a417c96b8f3f 100644
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -77,6 +77,9 @@ struct zram_table_entry {
unsigned long element;
};
unsigned long value;
};
struct zram_stats {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/char/random.c linux-4.14/drivers/char/random.c
---- linux-4.14.orig/drivers/char/random.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/drivers/char/random.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index ea4dbfa30657..c72a7f0b4494 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
@@ -265,6 +265,7 @@
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <crypto/chacha20.h>
#include <asm/processor.h>
-@@ -856,7 +857,7 @@
+@@ -856,7 +857,7 @@ static int crng_fast_load(const char *cp, size_t len)
invalidate_batched_entropy();
crng_init = 1;
wake_up_interruptible(&crng_init_wait);
}
return 1;
}
-@@ -941,17 +942,21 @@
+@@ -941,17 +942,21 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
crng_init = 2;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
urandom_warning.missed = 0;
}
}
-@@ -1122,8 +1127,6 @@
+@@ -1122,8 +1127,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
} sample;
long delta, delta2, delta3;
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
-@@ -1164,7 +1167,6 @@
+@@ -1164,7 +1167,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
}
void add_input_randomness(unsigned int type, unsigned int code,
-@@ -1221,28 +1223,27 @@
+@@ -1221,28 +1223,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
return *ptr;
}
fast_mix(fast_pool);
add_interrupt_bench(cycles);
-@@ -2200,6 +2201,7 @@
+@@ -2200,6 +2201,7 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
* at any point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
u64 get_random_u64(void)
{
u64 ret;
-@@ -2220,7 +2222,7 @@
+@@ -2220,7 +2222,7 @@ u64 get_random_u64(void)
warn_unseeded_randomness(&previous);
use_lock = READ_ONCE(crng_init) < 2;
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
-@@ -2230,12 +2232,13 @@
+@@ -2230,12 +2232,13 @@ u64 get_random_u64(void)
ret = batch->entropy_u64[batch->position++];
if (use_lock)
read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
u32 get_random_u32(void)
{
u32 ret;
-@@ -2250,7 +2253,7 @@
+@@ -2250,7 +2253,7 @@ u32 get_random_u32(void)
warn_unseeded_randomness(&previous);
use_lock = READ_ONCE(crng_init) < 2;
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
-@@ -2260,7 +2263,7 @@
+@@ -2260,7 +2263,7 @@ u32 get_random_u32(void)
ret = batch->entropy_u32[batch->position++];
if (use_lock)
read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
return ret;
}
EXPORT_SYMBOL(get_random_u32);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/char/tpm/tpm_tis.c linux-4.14/drivers/char/tpm/tpm_tis.c
---- linux-4.14.orig/drivers/char/tpm/tpm_tis.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/drivers/char/tpm/tpm_tis.c 2018-09-05 11:05:07.000000000 +0200
-@@ -52,6 +52,31 @@
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 50b59a69dc33..cbdb0a6c5337 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -52,6 +52,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
return container_of(data, struct tpm_tis_tcg_phy, priv);
}
static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
-@@ -149,7 +174,7 @@
+@@ -149,7 +174,7 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
while (len--)
return 0;
}
-@@ -176,7 +201,7 @@
+@@ -176,7 +201,7 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
return 0;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/clocksource/tcb_clksrc.c linux-4.14/drivers/clocksource/tcb_clksrc.c
---- linux-4.14.orig/drivers/clocksource/tcb_clksrc.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/clocksource/tcb_clksrc.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
+index 9de47d4d2d9e..05f4b88bb955 100644
+--- a/drivers/clocksource/tcb_clksrc.c
++++ b/drivers/clocksource/tcb_clksrc.c
@@ -25,8 +25,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
*
* A boot clocksource and clockevent source are also currently needed,
* unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
-@@ -126,6 +125,8 @@
+@@ -126,6 +125,8 @@ static struct clocksource clksrc = {
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
void __iomem *regs;
};
-@@ -134,15 +135,26 @@
+@@ -134,15 +135,26 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}
static int tc_shutdown(struct clock_event_device *d)
{
struct tc_clkevt_device *tcd = to_tc_clkevt(d);
-@@ -150,8 +162,14 @@
+@@ -150,8 +162,14 @@ static int tc_shutdown(struct clock_event_device *d)
writel(0xff, regs + ATMEL_TC_REG(2, IDR));
writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
return 0;
}
-@@ -164,9 +182,9 @@
+@@ -164,9 +182,9 @@ static int tc_set_oneshot(struct clock_event_device *d)
if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
tc_shutdown(d);
writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-@@ -186,12 +204,12 @@
+@@ -186,12 +204,12 @@ static int tc_set_periodic(struct clock_event_device *d)
/* By not making the gentime core emulate periodic mode on top
* of oneshot, we get lower overhead and improved accuracy.
*/
/* Enable clock and interrupts on RC compare */
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-@@ -218,9 +236,13 @@
+@@ -218,9 +236,13 @@ static struct tc_clkevt_device clkevt = {
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
/* Should be lower than at91rm9200's system timer */
.set_state_periodic = tc_set_periodic,
.set_state_oneshot = tc_set_oneshot,
},
-@@ -240,8 +262,9 @@
+@@ -240,8 +262,9 @@ static irqreturn_t ch2_irq(int irq, void *handle)
return IRQ_NONE;
}
int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
-@@ -262,7 +285,11 @@
+@@ -262,7 +285,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
clkevt.clkevt.cpumask = cpumask_of(0);
-@@ -273,7 +300,7 @@
+@@ -273,7 +300,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
return ret;
}
return ret;
}
-@@ -410,7 +437,11 @@
+@@ -410,7 +437,11 @@ static int __init tcb_clksrc_init(void)
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
if (ret)
goto err_unregister_clksrc;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/clocksource/timer-atmel-pit.c linux-4.14/drivers/clocksource/timer-atmel-pit.c
---- linux-4.14.orig/drivers/clocksource/timer-atmel-pit.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/clocksource/timer-atmel-pit.c 2018-09-05 11:05:07.000000000 +0200
-@@ -46,6 +46,7 @@
+diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
+index ec8a4376f74f..d813ea388562 100644
+--- a/drivers/clocksource/timer-atmel-pit.c
++++ b/drivers/clocksource/timer-atmel-pit.c
+@@ -46,6 +46,7 @@ struct pit_data {
u32 cycle;
u32 cnt;
unsigned int irq;
struct clk *mck;
};
-@@ -96,15 +97,29 @@
+@@ -96,15 +97,29 @@ static int pit_clkevt_shutdown(struct clock_event_device *dev)
/* disable irq, leaving the clocksource active */
pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN);
/* update clocksource counter */
data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
-@@ -230,15 +245,6 @@
+@@ -230,15 +245,6 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
return ret;
}
/* Set up and register clockevents */
data->clkevt.name = "pit";
data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/clocksource/timer-atmel-st.c linux-4.14/drivers/clocksource/timer-atmel-st.c
---- linux-4.14.orig/drivers/clocksource/timer-atmel-st.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/clocksource/timer-atmel-st.c 2018-09-05 11:05:07.000000000 +0200
-@@ -115,18 +115,29 @@
+diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
+index d2e660f475af..c63b96cfc23e 100644
+--- a/drivers/clocksource/timer-atmel-st.c
++++ b/drivers/clocksource/timer-atmel-st.c
+@@ -115,18 +115,29 @@ static void clkdev32k_disable_and_flush_irq(void)
last_crtr = read_CRTR();
}
/*
* ALM for oneshot irqs, set by next_event()
* before 32 seconds have passed.
-@@ -139,8 +150,16 @@
+@@ -139,8 +150,16 @@ static int clkevt32k_set_oneshot(struct clock_event_device *dev)
static int clkevt32k_set_periodic(struct clock_event_device *dev)
{
/* PIT for periodic irqs; fixed rate of 1/HZ */
irqmask = AT91_ST_PITS;
regmap_write(regmap_st, AT91_ST_PIMR, timer_latch);
-@@ -198,7 +217,7 @@
+@@ -198,7 +217,7 @@ static int __init atmel_st_timer_init(struct device_node *node)
{
struct clk *sclk;
unsigned int sclk_rate, val;
regmap_st = syscon_node_to_regmap(node);
if (IS_ERR(regmap_st)) {
-@@ -212,21 +231,12 @@
+@@ -212,21 +231,12 @@ static int __init atmel_st_timer_init(struct device_node *node)
regmap_read(regmap_st, AT91_ST_SR, &val);
/* Get the interrupts property */
sclk = of_clk_get(node, 0);
if (IS_ERR(sclk)) {
pr_err("Unable to get slow clock\n");
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/connector/cn_proc.c linux-4.14/drivers/connector/cn_proc.c
---- linux-4.14.orig/drivers/connector/cn_proc.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/connector/cn_proc.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
+index a782ce87715c..19d265948526 100644
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
@@ -32,6 +32,7 @@
#include <linux/pid_namespace.h>
/*
* Size of a cn_msg followed by a proc_event structure. Since the
-@@ -54,10 +55,11 @@
+@@ -54,10 +55,11 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
((struct proc_event *)msg->data)->cpu = smp_processor_id();
-@@ -70,7 +72,7 @@
+@@ -70,7 +72,7 @@ static inline void send_msg(struct cn_msg *msg)
*/
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
}
void proc_fork_connector(struct task_struct *task)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/cpufreq/Kconfig.x86 linux-4.14/drivers/cpufreq/Kconfig.x86
---- linux-4.14.orig/drivers/cpufreq/Kconfig.x86 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/cpufreq/Kconfig.x86 2018-09-05 11:05:07.000000000 +0200
-@@ -125,7 +125,7 @@
+diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
+index 35f71825b7f3..bb4a6160d0f7 100644
+--- a/drivers/cpufreq/Kconfig.x86
++++ b/drivers/cpufreq/Kconfig.x86
+@@ -125,7 +125,7 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
help
This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
Support for K10 and newer processors is now in acpi-cpufreq.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/gpu/drm/i915/i915_gem_timeline.c linux-4.14/drivers/gpu/drm/i915/i915_gem_timeline.c
---- linux-4.14.orig/drivers/gpu/drm/i915/i915_gem_timeline.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/gpu/drm/i915/i915_gem_timeline.c 2018-09-05 11:05:07.000000000 +0200
-@@ -33,11 +33,8 @@
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index c3eefa126e3b..47093745a53c 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -74,7 +74,7 @@ static unsigned long *efi_tables[] = {
+ &efi.mem_attr_table,
+ };
+
+-static bool disable_runtime;
++static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT_BASE);
+ static int __init setup_noefi(char *arg)
+ {
+ disable_runtime = true;
+@@ -100,6 +100,9 @@ static int __init parse_efi_cmdline(char *str)
+ if (parse_option_str(str, "noruntime"))
+ disable_runtime = true;
+
++ if (parse_option_str(str, "runtime"))
++ disable_runtime = false;
++
+ return 0;
+ }
+ early_param("efi", parse_efi_cmdline);
+diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
+index c597ce277a04..c1108d3921f8 100644
+--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
++++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
+@@ -33,11 +33,8 @@ static void __intel_timeline_init(struct intel_timeline *tl,
{
tl->fence_context = context;
tl->common = parent;
init_request_active(&tl->last_request, NULL);
INIT_LIST_HEAD(&tl->requests);
i915_syncmap_init(&tl->sync);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/gpu/drm/i915/i915_irq.c linux-4.14/drivers/gpu/drm/i915/i915_irq.c
---- linux-4.14.orig/drivers/gpu/drm/i915/i915_irq.c 2018-09-05 11:03:21.000000000 +0200
-+++ linux-4.14/drivers/gpu/drm/i915/i915_irq.c 2018-09-05 11:05:07.000000000 +0200
-@@ -867,6 +867,7 @@
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 20a471ad0ad2..5d34d48a8b7b 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -867,6 +867,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
/* Get optional system timestamp before query. */
if (stime)
-@@ -918,6 +919,7 @@
+@@ -918,6 +919,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/gpu/drm/i915/intel_sprite.c linux-4.14/drivers/gpu/drm/i915/intel_sprite.c
---- linux-4.14.orig/drivers/gpu/drm/i915/intel_sprite.c 2018-09-05 11:03:21.000000000 +0200
-+++ linux-4.14/drivers/gpu/drm/i915/intel_sprite.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
+index 41e31a454604..7e0cadf51b31 100644
+--- a/drivers/gpu/drm/i915/intel_sprite.c
++++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -36,6 +36,7 @@
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
-@@ -67,7 +68,7 @@
+@@ -67,7 +68,7 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
}
#define VBLANK_EVASION_TIME_US 100
/**
* intel_pipe_update_start() - start update of a set of display registers
* @crtc: the crtc of which the registers are going to be updated
-@@ -102,7 +103,7 @@
+@@ -102,7 +103,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
VBLANK_EVASION_TIME_US);
max = vblank_start - 1;
if (min <= 0 || max <= 0)
return;
-@@ -132,11 +133,11 @@
+@@ -132,11 +133,11 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
break;
}
}
finish_wait(wq, &wait);
-@@ -201,7 +202,7 @@
+@@ -201,7 +202,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc)
crtc->base.state->event = NULL;
}
if (intel_vgpu_active(dev_priv))
return;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/gpu/drm/radeon/radeon_display.c linux-4.14/drivers/gpu/drm/radeon/radeon_display.c
---- linux-4.14.orig/drivers/gpu/drm/radeon/radeon_display.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/gpu/drm/radeon/radeon_display.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1839,6 +1839,7 @@
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index ddfe91efa61e..3157bcf6428f 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1839,6 +1839,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
struct radeon_device *rdev = dev->dev_private;
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
/* Get optional system timestamp before query. */
if (stime)
-@@ -1931,6 +1932,7 @@
+@@ -1931,6 +1932,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
/* Decode into vertical and horizontal scanout position. */
*vpos = position & 0x1fff;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/hv/vmbus_drv.c linux-4.14/drivers/hv/vmbus_drv.c
---- linux-4.14.orig/drivers/hv/vmbus_drv.c 2018-09-05 11:03:21.000000000 +0200
-+++ linux-4.14/drivers/hv/vmbus_drv.c 2018-09-05 11:05:37.000000000 +0200
-@@ -39,6 +39,7 @@
- #include <asm/hyperv.h>
- #include <asm/hypervisor.h>
- #include <asm/mshyperv.h>
-+#include <asm/irq_regs.h>
- #include <linux/notifier.h>
- #include <linux/ptrace.h>
- #include <linux/screen_info.h>
-@@ -966,6 +967,8 @@
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index 49569f8fe038..a3608cd52805 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -30,6 +30,7 @@
+ #include <linux/atomic.h>
+ #include <linux/hyperv.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+
+ /*
+ * Timeout for services such as KVP and fcopy.
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 2cd134dd94d2..cedf225d4182 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -966,6 +966,8 @@ static void vmbus_isr(void)
void *page_addr = hv_cpu->synic_event_page;
struct hv_message *msg;
union hv_synic_event_flags *event;
bool handled = false;
if (unlikely(page_addr == NULL))
-@@ -1009,7 +1012,7 @@
+@@ -1009,7 +1011,7 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/ide/alim15x3.c linux-4.14/drivers/ide/alim15x3.c
---- linux-4.14.orig/drivers/ide/alim15x3.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/ide/alim15x3.c 2018-09-05 11:05:07.000000000 +0200
-@@ -234,7 +234,7 @@
+diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
+index 36f76e28a0bf..394f142f90c7 100644
+--- a/drivers/ide/alim15x3.c
++++ b/drivers/ide/alim15x3.c
+@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
if (m5229_revision < 0xC2) {
/*
-@@ -325,7 +325,7 @@
+@@ -325,7 +325,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)
}
pci_dev_put(north);
pci_dev_put(isa_dev);
return 0;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/ide/hpt366.c linux-4.14/drivers/ide/hpt366.c
---- linux-4.14.orig/drivers/ide/hpt366.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/ide/hpt366.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1236,7 +1236,7 @@
+diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
+index 4b5dc0162e67..590cc7d64622 100644
+--- a/drivers/ide/hpt366.c
++++ b/drivers/ide/hpt366.c
+@@ -1236,7 +1236,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
dma_old = inb(base + 2);
dma_new = dma_old;
pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
-@@ -1247,7 +1247,7 @@
+@@ -1247,7 +1247,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
if (dma_new != dma_old)
outb(dma_new, base + 2);
printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
hwif->name, base, base + 7);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/ide/ide-io.c linux-4.14/drivers/ide/ide-io.c
---- linux-4.14.orig/drivers/ide/ide-io.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/ide/ide-io.c 2018-09-05 11:05:07.000000000 +0200
-@@ -660,7 +660,7 @@
- /* disable_irq_nosync ?? */
- disable_irq(hwif->irq);
- /* local CPU only, as if we were handling an interrupt */
-- local_irq_disable();
-+ local_irq_disable_nort();
- if (hwif->polling) {
- startstop = handler(drive);
- } else if (drive_is_ready(drive)) {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/ide/ide-iops.c linux-4.14/drivers/ide/ide-iops.c
---- linux-4.14.orig/drivers/ide/ide-iops.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/ide/ide-iops.c 2018-09-05 11:05:07.000000000 +0200
-@@ -129,12 +129,12 @@
- if ((stat & ATA_BUSY) == 0)
- break;
-
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- *rstat = stat;
- return -EBUSY;
- }
- }
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
- /*
- * Allow status to settle, then read it again.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/ide/ide-io-std.c linux-4.14/drivers/ide/ide-io-std.c
---- linux-4.14.orig/drivers/ide/ide-io-std.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/ide/ide-io-std.c 2018-09-05 11:05:07.000000000 +0200
-@@ -175,7 +175,7 @@
+diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
+index 19763977568c..4169433faab5 100644
+--- a/drivers/ide/ide-io-std.c
++++ b/drivers/ide/ide-io-std.c
+@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
unsigned long uninitialized_var(flags);
if ((io_32bit & 2) && !mmio) {
ata_vlb_sync(io_ports->nsect_addr);
}
-@@ -186,7 +186,7 @@
+@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
insl(data_addr, buf, words);
if ((io_32bit & 2) && !mmio)
if (((len + 1) & 3) < 2)
return;
-@@ -219,7 +219,7 @@
+@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
unsigned long uninitialized_var(flags);
if ((io_32bit & 2) && !mmio) {
ata_vlb_sync(io_ports->nsect_addr);
}
-@@ -230,7 +230,7 @@
+@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
outsl(data_addr, buf, words);
if ((io_32bit & 2) && !mmio)
if (((len + 1) & 3) < 2)
return;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/ide/ide-probe.c linux-4.14/drivers/ide/ide-probe.c
---- linux-4.14.orig/drivers/ide/ide-probe.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/ide/ide-probe.c 2018-09-05 11:05:07.000000000 +0200
-@@ -196,10 +196,10 @@
+diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
+index 3a234701d92c..420e4e645856 100644
+--- a/drivers/ide/ide-io.c
++++ b/drivers/ide/ide-io.c
+@@ -660,7 +660,7 @@ void ide_timer_expiry (unsigned long data)
+ /* disable_irq_nosync ?? */
+ disable_irq(hwif->irq);
+ /* local CPU only, as if we were handling an interrupt */
+- local_irq_disable();
++ local_irq_disable_nort();
+ if (hwif->polling) {
+ startstop = handler(drive);
+ } else if (drive_is_ready(drive)) {
+diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
+index 210a0887dd29..7bf05b6147e8 100644
+--- a/drivers/ide/ide-iops.c
++++ b/drivers/ide/ide-iops.c
+@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
+ if ((stat & ATA_BUSY) == 0)
+ break;
+
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ *rstat = stat;
+ return -EBUSY;
+ }
+ }
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ /*
+ * Allow status to settle, then read it again.
+diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
+index eaf39e5db08b..be4c941eaa83 100644
+--- a/drivers/ide/ide-probe.c
++++ b/drivers/ide/ide-probe.c
+@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
int bswap = 1;
/* local CPU only; some systems need this */
drive->dev_flags |= IDE_DFLAG_ID_READ;
#ifdef DEBUG
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/ide/ide-taskfile.c linux-4.14/drivers/ide/ide-taskfile.c
---- linux-4.14.orig/drivers/ide/ide-taskfile.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/ide/ide-taskfile.c 2018-09-05 11:05:07.000000000 +0200
-@@ -251,7 +251,7 @@
+diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
+index 4efe4c6e956c..7eae3aa1def7 100644
+--- a/drivers/ide/ide-taskfile.c
++++ b/drivers/ide/ide-taskfile.c
+@@ -251,7 +251,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
page_is_high = PageHighMem(page);
if (page_is_high)
buf = kmap_atomic(page) + offset;
-@@ -272,7 +272,7 @@
+@@ -272,7 +272,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
kunmap_atomic(buf);
if (page_is_high)
len -= nr_bytes;
}
-@@ -415,7 +415,7 @@
+@@ -415,7 +415,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
}
if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/infiniband/hw/hfi1/affinity.c linux-4.14/drivers/infiniband/hw/hfi1/affinity.c
---- linux-4.14.orig/drivers/infiniband/hw/hfi1/affinity.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/infiniband/hw/hfi1/affinity.c 2018-09-05 11:05:07.000000000 +0200
-@@ -575,7 +575,7 @@
+diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
+index b197e925fe36..95ac319c8e69 100644
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -593,7 +593,7 @@ int hfi1_get_proc_affinity(int node)
struct hfi1_affinity_node *entry;
cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
const struct cpumask *node_mask,
struct hfi1_affinity_node_list *affinity = &node_affinity;
struct cpu_mask_set *set = &affinity->proc;
-@@ -583,7 +583,7 @@
+@@ -601,7 +601,7 @@ int hfi1_get_proc_affinity(int node)
* check whether process/context affinity has already
* been set
*/
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
-@@ -594,7 +594,7 @@
+@@ -612,7 +612,7 @@ int hfi1_get_proc_affinity(int node)
cpu = cpumask_first(proc_mask);
cpumask_set_cpu(cpu, &set->used);
goto done;
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/infiniband/hw/hfi1/sdma.c linux-4.14/drivers/infiniband/hw/hfi1/sdma.c
---- linux-4.14.orig/drivers/infiniband/hw/hfi1/sdma.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/infiniband/hw/hfi1/sdma.c 2018-09-05 11:05:07.000000000 +0200
-@@ -856,14 +856,13 @@
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index 6781bcdb10b3..d069ad261572 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -856,14 +856,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
{
struct sdma_rht_node *rht_node;
struct sdma_engine *sde = NULL;
goto out;
cpu_id = smp_processor_id();
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/infiniband/hw/qib/qib_file_ops.c linux-4.14/drivers/infiniband/hw/qib/qib_file_ops.c
---- linux-4.14.orig/drivers/infiniband/hw/qib/qib_file_ops.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/infiniband/hw/qib/qib_file_ops.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1167,7 +1167,7 @@
+diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
+index 40efc9151ec4..12924aad90cc 100644
+--- a/drivers/infiniband/hw/qib/qib_file_ops.c
++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
+@@ -1167,7 +1167,7 @@ static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
{
struct qib_filedata *fd = fp->private_data;
const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
int local_cpu;
-@@ -1648,9 +1648,8 @@
+@@ -1648,9 +1648,8 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
ret = find_free_ctxt(i_minor - 1, fp, uinfo);
else {
int unit;
if (weight == 1 && !test_bit(cpu, qib_cpulist))
if (!find_hca(cpu, &unit) && unit >= 0)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-4.14/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
---- linux-4.14.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2018-09-05 11:05:07.000000000 +0200
-@@ -898,7 +898,7 @@
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index 9b3f47ae2016..8327b598d909 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -898,7 +898,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "restarting multicast task\n");
netif_addr_lock(dev);
spin_lock(&priv->lock);
-@@ -980,7 +980,7 @@
+@@ -980,7 +980,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
spin_unlock(&priv->lock);
netif_addr_unlock(dev);
ipoib_mcast_remove_list(&remove_list);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/input/gameport/gameport.c linux-4.14/drivers/input/gameport/gameport.c
---- linux-4.14.orig/drivers/input/gameport/gameport.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/input/gameport/gameport.c 2018-09-05 11:05:07.000000000 +0200
-@@ -91,13 +91,13 @@
+diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
+index cedc665364cd..4a4fdef151aa 100644
+--- a/drivers/input/gameport/gameport.c
++++ b/drivers/input/gameport/gameport.c
+@@ -91,13 +91,13 @@ static int gameport_measure_speed(struct gameport *gameport)
tx = ~0;
for (i = 0; i < 50; i++) {
udelay(i * 10);
t = (t2 - t1) - (t3 - t2);
if (t < tx)
-@@ -124,12 +124,12 @@
+@@ -124,12 +124,12 @@ static int old_gameport_measure_speed(struct gameport *gameport)
tx = 1 << 30;
for(i = 0; i < 50; i++) {
udelay(i * 10);
if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
}
-@@ -148,11 +148,11 @@
+@@ -148,11 +148,11 @@ static int old_gameport_measure_speed(struct gameport *gameport)
tx = 1 << 30;
for(i = 0; i < 50; i++) {
udelay(i * 10);
if (t2 - t1 < tx) tx = t2 - t1;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/iommu/amd_iommu.c linux-4.14/drivers/iommu/amd_iommu.c
---- linux-4.14.orig/drivers/iommu/amd_iommu.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/iommu/amd_iommu.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 10190e361a13..b96b8c11a586 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
@@ -81,11 +81,12 @@
*/
#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
-@@ -204,40 +205,33 @@
+@@ -204,40 +205,33 @@ static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
struct iommu_dev_data *dev_data;
- unsigned long flags;
+ struct llist_node *node;
++
++ if (llist_empty(&dev_data_list))
++ return NULL;
- spin_lock_irqsave(&dev_data_list_lock, flags);
- list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
-+ if (llist_empty(&dev_data_list))
-+ return NULL;
-+
+ node = dev_data_list.first;
+ llist_for_each_entry(dev_data, node, dev_data_list) {
if (dev_data->devid == devid)
}
static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
-@@ -1056,9 +1050,9 @@
+@@ -1056,9 +1050,9 @@ static int iommu_queue_command_sync(struct amd_iommu *iommu,
unsigned long flags;
int ret;
return ret;
}
-@@ -1084,7 +1078,7 @@
+@@ -1084,7 +1078,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
iommu->cmd_sem = 0;
-@@ -1095,7 +1089,7 @@
+@@ -1095,7 +1089,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
ret = wait_on_sem(&iommu->cmd_sem);
out_unlock:
return ret;
}
-@@ -1604,29 +1598,26 @@
+@@ -1604,29 +1598,26 @@ static void del_domain_from_list(struct protection_domain *domain)
static u16 domain_id_alloc(void)
{
}
#define DEFINE_FREE_PT_FN(LVL, FN) \
-@@ -1946,10 +1937,10 @@
+@@ -1946,10 +1937,10 @@ static int __attach_device(struct iommu_dev_data *dev_data,
int ret;
/*
/* lock domain */
spin_lock(&domain->lock);
-@@ -2095,9 +2086,9 @@
+@@ -2095,9 +2086,9 @@ static int attach_device(struct device *dev,
}
skip_ats_check:
/*
* We might boot into a crash-kernel here. The crashed kernel
-@@ -2117,10 +2108,10 @@
+@@ -2117,10 +2108,10 @@ static void __detach_device(struct iommu_dev_data *dev_data)
struct protection_domain *domain;
/*
if (WARN_ON(!dev_data->domain))
return;
-@@ -2147,9 +2138,9 @@
+@@ -2147,9 +2138,9 @@ static void detach_device(struct device *dev)
domain = dev_data->domain;
/* lock device table */
if (!dev_is_pci(dev))
return;
-@@ -2813,7 +2804,7 @@
+@@ -2813,7 +2804,7 @@ static void cleanup_domain(struct protection_domain *domain)
struct iommu_dev_data *entry;
unsigned long flags;
while (!list_empty(&domain->dev_list)) {
entry = list_first_entry(&domain->dev_list,
-@@ -2821,7 +2812,7 @@
+@@ -2821,7 +2812,7 @@ static void cleanup_domain(struct protection_domain *domain)
__detach_device(entry);
}
}
static void protection_domain_free(struct protection_domain *domain)
-@@ -3588,14 +3579,62 @@
+@@ -3588,14 +3579,62 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
amd_iommu_dev_table[devid].data[2] = dte;
}
iommu = amd_iommu_rlookup_table[devid];
if (!iommu)
-@@ -3608,60 +3647,45 @@
+@@ -3608,60 +3647,45 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
alias = amd_iommu_alias_table[devid];
table = irq_lookup_table[alias];
if (table) {
- else
- memset(table->table, 0,
- (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
--
+
- if (ioapic) {
- int i;
-
+-
- for (i = 0; i < 32; ++i)
- iommu->irte_ops->set_allocated(table, i);
+ table = irq_lookup_table[alias];
return table;
}
-@@ -3675,11 +3699,11 @@
+@@ -3675,11 +3699,11 @@ static int alloc_irq_index(u16 devid, int count)
if (!iommu)
return -ENODEV;
/* Scan table for free entries */
for (c = 0, index = table->min_index;
-@@ -3702,7 +3726,7 @@
+@@ -3702,7 +3726,7 @@ static int alloc_irq_index(u16 devid, int count)
index = -ENOSPC;
out:
return index;
}
-@@ -3719,11 +3743,11 @@
+@@ -3719,11 +3743,11 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
if (iommu == NULL)
return -EINVAL;
entry = (struct irte_ga *)table->table;
entry = &entry[index];
-@@ -3734,7 +3758,7 @@
+@@ -3734,7 +3758,7 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
if (data)
data->ref = entry;
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
-@@ -3752,13 +3776,13 @@
+@@ -3752,13 +3776,13 @@ static int modify_irte(u16 devid, int index, union irte *irte)
if (iommu == NULL)
return -EINVAL;
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
-@@ -3776,13 +3800,13 @@
+@@ -3776,13 +3800,13 @@ static void free_irte(u16 devid, int index)
if (iommu == NULL)
return;
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
-@@ -3863,10 +3887,8 @@
+@@ -3863,10 +3887,8 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
struct irte_ga *irte = (struct irte_ga *) entry;
irte->hi.fields.vector = vector;
irte->lo.fields_remap.destination = dest_apicid;
modify_irte_ga(devid, index, irte, NULL);
-@@ -4072,7 +4094,7 @@
+@@ -4072,7 +4094,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct amd_ir_data *data = NULL;
struct irq_cfg *cfg;
int i, ret, devid;
if (!info)
return -EINVAL;
-@@ -4096,10 +4118,26 @@
+@@ -4096,10 +4118,26 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
return ret;
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
} else {
index = alloc_irq_index(devid, nr_irqs);
}
-@@ -4343,7 +4381,7 @@
+@@ -4343,7 +4381,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
{
unsigned long flags;
struct amd_iommu *iommu;
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
int devid = ir_data->irq_2_irte.devid;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
-@@ -4357,11 +4395,11 @@
+@@ -4357,11 +4395,11 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
if (!iommu)
return -ENODEV;
if (ref->lo.fields_vapic.guest_mode) {
if (cpu >= 0)
-@@ -4370,7 +4408,7 @@
+@@ -4370,7 +4408,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
barrier();
}
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/iommu/amd_iommu_init.c linux-4.14/drivers/iommu/amd_iommu_init.c
---- linux-4.14.orig/drivers/iommu/amd_iommu_init.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/iommu/amd_iommu_init.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1474,7 +1474,7 @@
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 6fe2d0346073..e3cd81b32a33 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -1474,7 +1474,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
int ret;
/* Add IOMMU to internal data structures */
list_add_tail(&iommu->list, &amd_iommu_list);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/iommu/amd_iommu_types.h linux-4.14/drivers/iommu/amd_iommu_types.h
---- linux-4.14.orig/drivers/iommu/amd_iommu_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/iommu/amd_iommu_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -406,7 +406,7 @@
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index f6b24c7d8b70..16b1404da58c 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -406,7 +406,7 @@ extern bool amd_iommu_iotlb_sup;
#define IRQ_TABLE_ALIGNMENT 128
struct irq_remap_table {
unsigned min_index;
u32 *table;
};
-@@ -488,7 +488,7 @@
+@@ -488,7 +488,7 @@ struct amd_iommu {
int index;
/* locks the accesses to the hardware */
/* Pointer to PCI device of this IOMMU */
struct pci_dev *dev;
-@@ -625,7 +625,7 @@
+@@ -625,7 +625,7 @@ struct devid_map {
*/
struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct protection_domain *domain; /* Domain the device is bound to */
u16 devid; /* PCI Device ID */
u16 alias; /* Alias Device ID */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/iommu/iova.c linux-4.14/drivers/iommu/iova.c
---- linux-4.14.orig/drivers/iommu/iova.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/iommu/iova.c 2018-09-05 11:05:07.000000000 +0200
-@@ -570,7 +570,7 @@
+diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
+index 33edfa794ae9..b30900025c62 100644
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -570,7 +570,7 @@ void queue_iova(struct iova_domain *iovad,
unsigned long pfn, unsigned long pages,
unsigned long data)
{
unsigned long flags;
unsigned idx;
-@@ -600,8 +600,6 @@
+@@ -600,8 +600,6 @@ void queue_iova(struct iova_domain *iovad,
if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
mod_timer(&iovad->fq_timer,
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
EXPORT_SYMBOL_GPL(queue_iova);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/leds/trigger/Kconfig linux-4.14/drivers/leds/trigger/Kconfig
---- linux-4.14.orig/drivers/leds/trigger/Kconfig 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/leds/trigger/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -69,7 +69,7 @@
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 2ea39a83737f..a3e23d0fc4af 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -148,7 +148,7 @@ static struct {
+ } vpe_proxy;
+
+ static LIST_HEAD(its_nodes);
+-static DEFINE_SPINLOCK(its_lock);
++static DEFINE_RAW_SPINLOCK(its_lock);
+ static struct rdists *gic_rdists;
+ static struct irq_domain *its_parent;
+
+@@ -165,6 +165,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);
+ static DEFINE_IDA(its_vpeid_ida);
+
+ #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
++#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
+ #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+ #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+
+@@ -1432,7 +1433,7 @@ static void its_free_prop_table(struct page *prop_page)
+ get_order(LPI_PROPBASE_SZ));
+ }
+
+-static int __init its_alloc_lpi_tables(void)
++static int __init its_alloc_lpi_prop_table(void)
+ {
+ phys_addr_t paddr;
+
+@@ -1758,30 +1759,47 @@ static void its_free_pending_table(struct page *pt)
+ get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+ }
+
+-static void its_cpu_init_lpis(void)
++static int __init allocate_lpi_tables(void)
+ {
+- void __iomem *rbase = gic_data_rdist_rd_base();
+- struct page *pend_page;
+- u64 val, tmp;
++ int err, cpu;
+
+- /* If we didn't allocate the pending table yet, do it now */
+- pend_page = gic_data_rdist()->pend_page;
+- if (!pend_page) {
+- phys_addr_t paddr;
++ err = its_alloc_lpi_prop_table();
++ if (err)
++ return err;
++
++ /*
++ * We allocate all the pending tables anyway, as we may have a
++ * mix of RDs that have had LPIs enabled, and some that
++ * don't. We'll free the unused ones as each CPU comes online.
++ */
++ for_each_possible_cpu(cpu) {
++ struct page *pend_page;
+
+ pend_page = its_allocate_pending_table(GFP_NOWAIT);
+ if (!pend_page) {
+- pr_err("Failed to allocate PENDBASE for CPU%d\n",
+- smp_processor_id());
+- return;
++ pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
++ return -ENOMEM;
+ }
+
+- paddr = page_to_phys(pend_page);
+- pr_info("CPU%d: using LPI pending table @%pa\n",
+- smp_processor_id(), &paddr);
+- gic_data_rdist()->pend_page = pend_page;
++ gic_data_rdist_cpu(cpu)->pend_page = pend_page;
+ }
+
++ return 0;
++}
++
++static void its_cpu_init_lpis(void)
++{
++ void __iomem *rbase = gic_data_rdist_rd_base();
++ struct page *pend_page;
++ phys_addr_t paddr;
++ u64 val, tmp;
++
++ if (gic_data_rdist()->lpi_enabled)
++ return;
++
++ pend_page = gic_data_rdist()->pend_page;
++ paddr = page_to_phys(pend_page);
++
+ /* Disable LPIs */
+ val = readl_relaxed(rbase + GICR_CTLR);
+ val &= ~GICR_CTLR_ENABLE_LPIS;
+@@ -1843,6 +1861,10 @@ static void its_cpu_init_lpis(void)
+
+ /* Make sure the GIC has seen the above */
+ dsb(sy);
++ gic_data_rdist()->lpi_enabled = true;
++ pr_info("GICv3: CPU%d: using LPI pending table @%pa\n",
++ smp_processor_id(),
++ &paddr);
+ }
+
+ static void its_cpu_init_collection(void)
+@@ -1850,7 +1872,7 @@ static void its_cpu_init_collection(void)
+ struct its_node *its;
+ int cpu;
+
+- spin_lock(&its_lock);
++ raw_spin_lock(&its_lock);
+ cpu = smp_processor_id();
+
+ list_for_each_entry(its, &its_nodes, entry) {
+@@ -1892,7 +1914,7 @@ static void its_cpu_init_collection(void)
+ its_send_invall(its, &its->collections[cpu]);
+ }
+
+- spin_unlock(&its_lock);
++ raw_spin_unlock(&its_lock);
+ }
+
+ static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
+@@ -3041,9 +3063,9 @@ static int __init its_probe_one(struct resource *res,
+ if (err)
+ goto out_free_tables;
+
+- spin_lock(&its_lock);
++ raw_spin_lock(&its_lock);
+ list_add(&its->entry, &its_nodes);
+- spin_unlock(&its_lock);
++ raw_spin_unlock(&its_lock);
+
+ return 0;
+
+@@ -3278,7 +3300,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
+ }
+
+ gic_rdists = rdists;
+- err = its_alloc_lpi_tables();
++
++ err = allocate_lpi_tables();
+ if (err)
+ return err;
+
+diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
+index 3f9ddb9fafa7..09da5b6b44a1 100644
+--- a/drivers/leds/trigger/Kconfig
++++ b/drivers/leds/trigger/Kconfig
+@@ -69,7 +69,7 @@ config LEDS_TRIGGER_BACKLIGHT
config LEDS_TRIGGER_CPU
bool "LED CPU Trigger"
help
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/md/bcache/Kconfig linux-4.14/drivers/md/bcache/Kconfig
---- linux-4.14.orig/drivers/md/bcache/Kconfig 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/md/bcache/Kconfig 2018-09-05 11:05:07.000000000 +0200
+diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
+index 4d200883c505..98b64ed5cb81 100644
+--- a/drivers/md/bcache/Kconfig
++++ b/drivers/md/bcache/Kconfig
@@ -1,6 +1,7 @@
config BCACHE
---help---
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/md/dm-rq.c linux-4.14/drivers/md/dm-rq.c
---- linux-4.14.orig/drivers/md/dm-rq.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/md/dm-rq.c 2018-09-05 11:05:07.000000000 +0200
-@@ -671,7 +671,7 @@
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index eadfcfd106ff..8824aeda85cf 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -671,7 +671,7 @@ static void dm_old_request_fn(struct request_queue *q)
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
kthread_queue_work(&md->kworker, &tio->work);
}
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/md/raid5.c linux-4.14/drivers/md/raid5.c
---- linux-4.14.orig/drivers/md/raid5.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/md/raid5.c 2018-09-05 11:05:07.000000000 +0200
-@@ -410,7 +410,7 @@
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 5018fb2352c2..84fadd93f3a0 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -410,7 +410,7 @@ void raid5_release_stripe(struct stripe_head *sh)
md_wakeup_thread(conf->mddev->thread);
return;
slow_path:
/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
INIT_LIST_HEAD(&list);
-@@ -419,7 +419,7 @@
+@@ -419,7 +419,7 @@ void raid5_release_stripe(struct stripe_head *sh)
spin_unlock(&conf->device_lock);
release_inactive_stripe_list(conf, &list, hash);
}
}
static inline void remove_hash(struct stripe_head *sh)
-@@ -2067,8 +2067,9 @@
+@@ -2067,8 +2067,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
struct raid5_percpu *percpu;
unsigned long cpu;
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
-@@ -2127,7 +2128,8 @@
+@@ -2127,7 +2128,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
}
static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
-@@ -6775,6 +6777,7 @@
+@@ -6781,6 +6783,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
__func__, cpu);
return -ENOMEM;
}
return 0;
}
-@@ -6785,7 +6788,6 @@
+@@ -6791,7 +6794,6 @@ static int raid5_alloc_percpu(struct r5conf *conf)
conf->percpu = alloc_percpu(struct raid5_percpu);
if (!conf->percpu)
return -ENOMEM;
err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
if (!err) {
conf->scribble_disks = max(conf->raid_disks,
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/md/raid5.h linux-4.14/drivers/md/raid5.h
---- linux-4.14.orig/drivers/md/raid5.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/md/raid5.h 2018-09-05 11:05:07.000000000 +0200
-@@ -624,6 +624,7 @@
+diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
+index 2e6123825095..37a6021418a2 100644
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -624,6 +624,7 @@ struct r5conf {
int recovery_disabled;
/* per cpu variables */
struct raid5_percpu {
struct page *spare_page; /* Used when checking P/Q in raid6 */
struct flex_array *scribble; /* space for constructing buffer
* lists and performing address
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/mfd/atmel-smc.c linux-4.14/drivers/mfd/atmel-smc.c
---- linux-4.14.orig/drivers/mfd/atmel-smc.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/mfd/atmel-smc.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c
+index 7d77948567d7..0adbd2e796fe 100644
+--- a/drivers/mfd/atmel-smc.c
++++ b/drivers/mfd/atmel-smc.c
@@ -12,6 +12,7 @@
*/
/**
* atmel_smc_cs_conf_init - initialize a SMC CS conf
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/misc/Kconfig linux-4.14/drivers/misc/Kconfig
---- linux-4.14.orig/drivers/misc/Kconfig 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/misc/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -54,6 +54,7 @@
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index 8136dc7e863d..86e83b9629d7 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -54,6 +54,7 @@ config AD525X_DPOT_SPI
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
depends on (AVR32 || ARCH_AT91)
help
Select this if you want a library to allocate the Timer/Counter
blocks found on many Atmel processors. This facilitates using
-@@ -69,8 +70,7 @@
+@@ -69,8 +70,7 @@ config ATMEL_TCB_CLKSRC
are combined to make a single 32-bit timer.
When GENERIC_CLOCKEVENTS is defined, the third timer channel
config ATMEL_TCB_CLKSRC_BLOCK
int
-@@ -84,6 +84,15 @@
+@@ -84,6 +84,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
TC can be used for other purposes, such as PWM generation and
interval timing.
config DUMMY_IRQ
tristate "Dummy IRQ handler"
default n
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/mmc/host/mmci.c linux-4.14/drivers/mmc/host/mmci.c
---- linux-4.14.orig/drivers/mmc/host/mmci.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/mmc/host/mmci.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1200,15 +1200,12 @@
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index f1f54a818489..ce102378df02 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1200,15 +1200,12 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
struct sg_mapping_iter *sg_miter = &host->sg_miter;
struct variant_data *variant = host->variant;
void __iomem *base = host->base;
do {
unsigned int remain, len;
char *buffer;
-@@ -1248,8 +1245,6 @@
+@@ -1248,8 +1245,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
sg_miter_stop(sg_miter);
/*
* If we have less than the fifo 'half-full' threshold to transfer,
* trigger a PIO interrupt as soon as any data is available.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/net/ethernet/3com/3c59x.c linux-4.14/drivers/net/ethernet/3com/3c59x.c
---- linux-4.14.orig/drivers/net/ethernet/3com/3c59x.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/net/ethernet/3com/3c59x.c 2018-09-05 11:05:07.000000000 +0200
-@@ -842,9 +842,9 @@
+diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
+index 402d9090ad29..9bc02563b853 100644
+--- a/drivers/net/ethernet/3com/3c59x.c
++++ b/drivers/net/ethernet/3com/3c59x.c
+@@ -842,9 +842,9 @@ static void poll_vortex(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
unsigned long flags;
}
#endif
-@@ -1908,12 +1908,12 @@
+@@ -1908,12 +1908,12 @@ static void vortex_tx_timeout(struct net_device *dev)
* Block interrupts because vortex_interrupt does a bare spin_lock()
*/
unsigned long flags;
}
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/net/ethernet/marvell/mvpp2.c linux-4.14/drivers/net/ethernet/marvell/mvpp2.c
---- linux-4.14.orig/drivers/net/ethernet/marvell/mvpp2.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/net/ethernet/marvell/mvpp2.c 2018-09-05 11:05:07.000000000 +0200
-@@ -830,9 +830,8 @@
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index 529be74f609d..b1d7378b131c 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -830,9 +830,8 @@ struct mvpp2_pcpu_stats {
/* Per-CPU port control */
struct mvpp2_port_pcpu {
struct hrtimer tx_done_timer;
};
struct mvpp2_queue_vector {
-@@ -5954,46 +5953,34 @@
+@@ -5954,46 +5953,34 @@ static void mvpp2_link_event(struct net_device *dev)
}
}
return HRTIMER_NORESTART;
}
-@@ -6482,7 +6469,12 @@
+@@ -6482,7 +6469,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
txq_pcpu->count > 0) {
struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
}
return NETDEV_TX_OK;
-@@ -6871,7 +6863,6 @@
+@@ -6871,7 +6863,6 @@ static int mvpp2_stop(struct net_device *dev)
hrtimer_cancel(&port_pcpu->tx_done_timer);
port_pcpu->timer_scheduled = false;
}
}
mvpp2_cleanup_rxqs(port);
-@@ -7644,13 +7635,10 @@
+@@ -7644,13 +7635,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port_pcpu = per_cpu_ptr(port->pcpu, cpu);
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
}
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/net/wireless/intersil/orinoco/orinoco_usb.c linux-4.14/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
---- linux-4.14.orig/drivers/net/wireless/intersil/orinoco/orinoco_usb.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/net/wireless/intersil/orinoco/orinoco_usb.c 2018-09-05 11:05:07.000000000 +0200
-@@ -697,7 +697,7 @@
+diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+index 56f6e3b71f48..a50350d01a80 100644
+--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
++++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+@@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
while (!ctx->done.done && msecs--)
udelay(1000);
} else {
ctx->done.done);
}
break;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/net/wireless/mac80211_hwsim.c linux-4.14/drivers/net/wireless/mac80211_hwsim.c
---- linux-4.14.orig/drivers/net/wireless/mac80211_hwsim.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/net/wireless/mac80211_hwsim.c 2018-09-05 11:05:07.000000000 +0200
-@@ -537,7 +537,7 @@
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index d686ba10fecc..4afcdee4f8e2 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -537,7 +537,7 @@ struct mac80211_hwsim_data {
unsigned int rx_filter;
bool started, idle, scanning;
struct mutex mutex;
enum ps_mode {
PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
} ps;
-@@ -1423,7 +1423,7 @@
+@@ -1423,7 +1423,7 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
data->started = false;
wiphy_debug(hw->wiphy, "%s\n", __func__);
}
-@@ -1546,14 +1546,12 @@
+@@ -1546,14 +1546,12 @@ static enum hrtimer_restart
mac80211_hwsim_beacon(struct hrtimer *timer)
{
struct mac80211_hwsim_data *data =
ieee80211_iterate_active_interfaces_atomic(
hw, IEEE80211_IFACE_ITER_NORMAL,
-@@ -1565,11 +1563,9 @@
+@@ -1565,11 +1563,9 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
data->bcn_delta = 0;
}
}
static const char * const hwsim_chanwidths[] = {
-@@ -1643,15 +1639,15 @@
+@@ -1643,15 +1639,15 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&data->mutex);
if (!data->started || !data->beacon_int)
}
return 0;
-@@ -1714,7 +1710,7 @@
+@@ -1714,7 +1710,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
info->enable_beacon, info->beacon_int);
vp->bcn_en = info->enable_beacon;
if (data->started &&
info->enable_beacon) {
u64 tsf, until_tbtt;
u32 bcn_int;
-@@ -1722,9 +1718,9 @@
+@@ -1722,9 +1718,9 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
tsf = mac80211_hwsim_get_tsf(hw, vif);
bcn_int = data->beacon_int;
until_tbtt = bcn_int - do_div(tsf, bcn_int);
} else if (!info->enable_beacon) {
unsigned int count = 0;
ieee80211_iterate_active_interfaces_atomic(
-@@ -1733,7 +1729,7 @@
+@@ -1733,7 +1729,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u",
count);
if (count == 0) {
data->beacon_int = 0;
}
}
-@@ -2725,9 +2721,9 @@
+@@ -2725,9 +2721,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
data->debugfs,
data, &hwsim_simulate_radar);
spin_lock_bh(&hwsim_radio_lock);
list_add_tail(&data->list, &hwsim_radios);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/pci/switch/switchtec.c linux-4.14/drivers/pci/switch/switchtec.c
---- linux-4.14.orig/drivers/pci/switch/switchtec.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/pci/switch/switchtec.c 2018-09-05 11:05:07.000000000 +0200
-@@ -306,10 +306,11 @@
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index 620f5b995a12..7fd1548a2905 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -308,10 +308,11 @@ struct switchtec_user {
enum mrpc_state state;
u32 cmd;
u32 status;
u32 return_code;
-@@ -331,7 +332,7 @@
+@@ -333,7 +334,7 @@ static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
stuser->stdev = stdev;
kref_init(&stuser->kref);
INIT_LIST_HEAD(&stuser->list);
stuser->event_cnt = atomic_read(&stdev->event_cnt);
dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
-@@ -414,7 +415,7 @@
+@@ -416,7 +417,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser)
kref_get(&stuser->kref);
stuser->read_len = sizeof(stuser->data);
stuser_set_state(stuser, MRPC_QUEUED);
list_add_tail(&stuser->list, &stdev->mrpc_queue);
mrpc_cmd_submit(stdev);
-@@ -451,7 +452,8 @@
+@@ -453,7 +454,8 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev)
stuser->read_len);
out:
list_del_init(&stuser->list);
stuser_put(stuser);
stdev->mrpc_busy = 0;
-@@ -721,10 +723,11 @@
+@@ -723,10 +725,11 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
mutex_unlock(&stdev->mrpc_mutex);
if (filp->f_flags & O_NONBLOCK) {
if (rc < 0)
return rc;
}
-@@ -772,7 +775,7 @@
+@@ -774,7 +777,7 @@ static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
struct switchtec_dev *stdev = stuser->stdev;
int ret = 0;
poll_wait(filp, &stdev->event_wq, wait);
if (lock_mutex_and_test_alive(stdev))
-@@ -780,7 +783,7 @@
+@@ -782,7 +785,7 @@ static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
mutex_unlock(&stdev->mrpc_mutex);
ret |= POLLIN | POLLRDNORM;
if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
-@@ -1255,7 +1258,8 @@
+@@ -1259,7 +1262,8 @@ static void stdev_kill(struct switchtec_dev *stdev)
/* Wake up and kill any users waiting on an MRPC request */
list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
list_del_init(&stuser->list);
stuser_put(stuser);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/scsi/fcoe/fcoe.c linux-4.14/drivers/scsi/fcoe/fcoe.c
---- linux-4.14.orig/drivers/scsi/fcoe/fcoe.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/scsi/fcoe/fcoe.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1464,11 +1464,11 @@
+diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
+index 85f9a3eba387..08ea05ddcd82 100644
+--- a/drivers/scsi/fcoe/fcoe.c
++++ b/drivers/scsi/fcoe/fcoe.c
+@@ -1464,11 +1464,11 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
struct fcoe_percpu_s *fps;
return rc;
}
-@@ -1655,11 +1655,11 @@
+@@ -1655,11 +1655,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
return 0;
}
return -EINVAL;
}
-@@ -1702,7 +1702,7 @@
+@@ -1702,7 +1702,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
*/
hp = (struct fcoe_hdr *) skb_network_header(skb);
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
if (stats->ErrorFrames < 5)
printk(KERN_WARNING "fcoe: FCoE version "
-@@ -1734,13 +1734,13 @@
+@@ -1734,13 +1734,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
goto drop;
if (!fcoe_filter_frames(lport, fp)) {
kfree_skb(skb);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-4.14/drivers/scsi/fcoe/fcoe_ctlr.c
---- linux-4.14.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/scsi/fcoe/fcoe_ctlr.c 2018-09-05 11:05:07.000000000 +0200
-@@ -835,7 +835,7 @@
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index 03019e07abb9..9ec11316bfe6 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -835,7 +835,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
INIT_LIST_HEAD(&del_list);
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
-@@ -871,7 +871,7 @@
+@@ -871,7 +871,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
sel_time = fcf->time;
}
}
list_for_each_entry_safe(fcf, next, &del_list, list) {
/* Removes fcf from current list */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/scsi/libfc/fc_exch.c linux-4.14/drivers/scsi/libfc/fc_exch.c
---- linux-4.14.orig/drivers/scsi/libfc/fc_exch.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/scsi/libfc/fc_exch.c 2018-09-05 11:05:07.000000000 +0200
-@@ -833,10 +833,10 @@
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 42bcf7f3a0f9..2ce045d6860c 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -833,10 +833,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
}
memset(ep, 0, sizeof(*ep));
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/scsi/libsas/sas_ata.c linux-4.14/drivers/scsi/libsas/sas_ata.c
---- linux-4.14.orig/drivers/scsi/libsas/sas_ata.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/scsi/libsas/sas_ata.c 2018-09-05 11:05:07.000000000 +0200
-@@ -190,7 +190,7 @@
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 70be4425ae0b..a23ef685deac 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -190,7 +190,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
/* TODO: audit callers to ensure they are ready for qc_issue to
* unconditionally re-enable interrupts
*/
spin_unlock(ap->lock);
/* If the device fell off, no sense in issuing commands */
-@@ -252,7 +252,7 @@
+@@ -252,7 +252,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
out:
spin_lock(ap->lock);
return ret;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/scsi/qla2xxx/qla_inline.h linux-4.14/drivers/scsi/qla2xxx/qla_inline.h
---- linux-4.14.orig/drivers/scsi/qla2xxx/qla_inline.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/scsi/qla2xxx/qla_inline.h 2018-09-05 11:05:07.000000000 +0200
-@@ -59,12 +59,12 @@
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index 3f5a0f0f8b62..c75783143dc1 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -59,12 +59,12 @@ qla2x00_poll(struct rsp_que *rsp)
{
unsigned long flags;
struct qla_hw_data *ha = rsp->hw;
}
static inline uint8_t *
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/staging/greybus/audio_manager.c linux-4.14/drivers/staging/greybus/audio_manager.c
---- linux-4.14.orig/drivers/staging/greybus/audio_manager.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/staging/greybus/audio_manager.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
+index aa6508b44fab..045696ce85c7 100644
+--- a/drivers/staging/greybus/audio_manager.c
++++ b/drivers/staging/greybus/audio_manager.c
@@ -10,7 +10,7 @@
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/idr.h>
#include "audio_manager.h"
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/target/target_core_tmr.c linux-4.14/drivers/target/target_core_tmr.c
---- linux-4.14.orig/drivers/target/target_core_tmr.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/target/target_core_tmr.c 2018-09-05 11:05:07.000000000 +0200
-@@ -114,8 +114,6 @@
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index 9c7bc1ca341a..3d35dad1de2c 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -114,8 +114,6 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
{
struct se_session *sess = se_cmd->se_sess;
/*
* If command already reached CMD_T_COMPLETE state within
* target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/target/target_core_transport.c linux-4.14/drivers/target/target_core_transport.c
---- linux-4.14.orig/drivers/target/target_core_transport.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/target/target_core_transport.c 2018-09-05 11:05:07.000000000 +0200
-@@ -2966,9 +2966,6 @@
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 0d0be7d8b9d6..f652e58e2988 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2967,9 +2967,6 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
__acquires(&cmd->t_state_lock)
{
if (fabric_stop)
cmd->transport_state |= CMD_T_FABRIC_STOP;
-@@ -3238,9 +3235,6 @@
+@@ -3239,9 +3236,6 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
int ret;
if (!(cmd->transport_state & CMD_T_ABORTED))
return 0;
/*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.14/drivers/thermal/x86_pkg_temp_thermal.c
---- linux-4.14.orig/drivers/thermal/x86_pkg_temp_thermal.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/thermal/x86_pkg_temp_thermal.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
+index d93eee2f101b..0287333b1f3c 100644
+--- a/drivers/thermal/x86_pkg_temp_thermal.c
++++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -29,6 +29,7 @@
#include <linux/pm.h>
#include <linux/thermal.h>
#include <asm/cpu_device_id.h>
#include <asm/mce.h>
-@@ -329,7 +330,7 @@
+@@ -329,7 +330,7 @@ static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
schedule_delayed_work_on(cpu, work, ms);
}
{
int cpu = smp_processor_id();
struct pkg_device *pkgdev;
-@@ -348,8 +349,46 @@
+@@ -348,9 +349,47 @@ static int pkg_thermal_notify(u64 msr_val)
}
spin_unlock_irqrestore(&pkg_temp_lock, flags);
+ return err;
+
+ INIT_SWORK(¬ify_work, pkg_thermal_notify_work);
-+ return 0;
-+}
-+
+ return 0;
+ }
+
+static void pkg_thermal_notify_work_cleanup(void)
+{
+ swork_put();
+static int pkg_thermal_notify(u64 msr_val)
+{
+ pkg_thermal_notify_work(NULL);
- return 0;
- }
++ return 0;
++}
+#endif /* CONFIG_PREEMPT_RT_FULL */
-
++
static int pkg_temp_thermal_device_add(unsigned int cpu)
{
-@@ -515,10 +554,15 @@
+ int pkgid = topology_logical_package_id(cpu);
+@@ -515,10 +554,15 @@ static int __init pkg_temp_thermal_init(void)
if (!x86_match_cpu(pkg_temp_thermal_ids))
return -ENODEV;
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online",
pkg_thermal_cpu_online, pkg_thermal_cpu_offline);
-@@ -536,6 +580,7 @@
+@@ -536,6 +580,7 @@ static int __init pkg_temp_thermal_init(void)
return 0;
err:
kfree(packages);
return ret;
}
-@@ -549,6 +594,7 @@
+@@ -549,6 +594,7 @@ static void __exit pkg_temp_thermal_exit(void)
cpuhp_remove_state(pkg_thermal_hp_state);
debugfs_remove_recursive(debugfs);
kfree(packages);
}
module_exit(pkg_temp_thermal_exit)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/tty/serial/8250/8250_core.c linux-4.14/drivers/tty/serial/8250/8250_core.c
---- linux-4.14.orig/drivers/tty/serial/8250/8250_core.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/tty/serial/8250/8250_core.c 2018-09-05 11:05:07.000000000 +0200
-@@ -58,7 +58,16 @@
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index d29b512a7d9f..bc8cbb995b29 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -58,7 +58,16 @@ static struct uart_driver serial8250_reg;
static unsigned int skip_txen_test; /* force skip of txen test at init time */
#include <asm/serial.h>
/*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/tty/serial/8250/8250_port.c linux-4.14/drivers/tty/serial/8250/8250_port.c
---- linux-4.14.orig/drivers/tty/serial/8250/8250_port.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/tty/serial/8250/8250_port.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index ecf3d631bc09..6e029f34f37f 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
@@ -35,6 +35,7 @@
#include <linux/nmi.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/ktime.h>
-@@ -3224,9 +3225,9 @@
+@@ -3224,9 +3225,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
serial8250_rpm_get(up);
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/tty/serial/amba-pl011.c linux-4.14/drivers/tty/serial/amba-pl011.c
---- linux-4.14.orig/drivers/tty/serial/amba-pl011.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/tty/serial/amba-pl011.c 2018-09-05 11:05:07.000000000 +0200
-@@ -2236,13 +2236,19 @@
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index c9f701aca677..81d6b15fb80a 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -2236,13 +2236,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
clk_enable(uap->clk);
/*
* First save the CR then disable the interrupts
-@@ -2268,8 +2274,7 @@
+@@ -2268,8 +2274,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
pl011_write(old_cr, uap, REG_CR);
if (locked)
clk_disable(uap->clk);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/tty/serial/omap-serial.c linux-4.14/drivers/tty/serial/omap-serial.c
---- linux-4.14.orig/drivers/tty/serial/omap-serial.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/tty/serial/omap-serial.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1311,13 +1311,10 @@
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 26a22b100df1..69117e355bcd 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -1311,13 +1311,10 @@ serial_omap_console_write(struct console *co, const char *s,
pm_runtime_get_sync(up->dev);
/*
* First save the IER then disable the interrupts
-@@ -1346,8 +1343,7 @@
+@@ -1346,8 +1343,7 @@ serial_omap_console_write(struct console *co, const char *s,
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
if (locked)
}
static int __init
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/usb/core/hcd.c linux-4.14/drivers/usb/core/hcd.c
---- linux-4.14.orig/drivers/usb/core/hcd.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/usb/core/hcd.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1775,9 +1775,9 @@
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index d0b2e0ed9bab..91f4f2bd55b0 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1775,9 +1775,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
* and no one may trigger the above deadlock situation when
* running complete() in tasklet.
*/
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/usb/gadget/function/f_fs.c linux-4.14/drivers/usb/gadget/function/f_fs.c
---- linux-4.14.orig/drivers/usb/gadget/function/f_fs.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/drivers/usb/gadget/function/f_fs.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1623,7 +1623,7 @@
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 17467545391b..42ec6f2db6a9 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -1623,7 +1623,7 @@ static void ffs_data_put(struct ffs_data *ffs)
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
waitqueue_active(&ffs->wait));
destroy_workqueue(ffs->io_completion_wq);
kfree(ffs->dev_name);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/usb/gadget/function/f_ncm.c linux-4.14/drivers/usb/gadget/function/f_ncm.c
---- linux-4.14.orig/drivers/usb/gadget/function/f_ncm.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/usb/gadget/function/f_ncm.c 2018-09-05 11:05:07.000000000 +0200
-@@ -77,9 +77,7 @@
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 45b334ceaf2e..5f24e6d3b6eb 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -77,9 +77,7 @@ struct f_ncm {
struct sk_buff *skb_tx_ndp;
u16 ndp_dgram_count;
bool timer_force_tx;
bool timer_stopping;
};
-@@ -1108,7 +1106,7 @@
+@@ -1108,7 +1106,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
/* Delay the timer. */
hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
/* Add the datagram position entries */
ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
-@@ -1152,17 +1150,15 @@
+@@ -1152,17 +1150,15 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
}
/*
ncm->timer_force_tx = true;
/* XXX This allowance of a NULL skb argument to ndo_start_xmit
-@@ -1175,16 +1171,6 @@
+@@ -1175,16 +1171,6 @@ static void ncm_tx_tasklet(unsigned long data)
ncm->timer_force_tx = false;
}
return HRTIMER_NORESTART;
}
-@@ -1517,8 +1503,7 @@
+@@ -1517,8 +1503,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm->port.open = ncm_open;
ncm->port.close = ncm_close;
ncm->task_timer.function = ncm_tx_timeout;
DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
-@@ -1627,7 +1612,6 @@
+@@ -1627,7 +1612,6 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(c->cdev, "ncm unbind\n");
hrtimer_cancel(&ncm->task_timer);
ncm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/drivers/usb/gadget/legacy/inode.c linux-4.14/drivers/usb/gadget/legacy/inode.c
---- linux-4.14.orig/drivers/usb/gadget/legacy/inode.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/drivers/usb/gadget/legacy/inode.c 2018-09-05 11:05:07.000000000 +0200
-@@ -347,7 +347,7 @@
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index 5c28bee327e1..ed49dba4704d 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -347,7 +347,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
spin_unlock_irq (&epdata->dev->lock);
if (likely (value == 0)) {
if (value != 0) {
spin_lock_irq (&epdata->dev->lock);
if (likely (epdata->ep != NULL)) {
-@@ -356,7 +356,7 @@
+@@ -356,7 +356,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
usb_ep_dequeue (epdata->ep, epdata->req);
spin_unlock_irq (&epdata->dev->lock);
if (epdata->status == -ECONNRESET)
epdata->status = -EINTR;
} else {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/aio.c linux-4.14/fs/aio.c
---- linux-4.14.orig/fs/aio.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/fs/aio.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/fs/aio.c b/fs/aio.c
+index 3a749c3a92e3..24c6ceadaae6 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
@@ -40,6 +40,7 @@
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <asm/kmap_types.h>
#include <linux/uaccess.h>
-@@ -117,6 +118,7 @@
+@@ -117,6 +118,7 @@ struct kioctx {
struct rcu_head free_rcu;
struct work_struct free_work; /* see free_ioctx() */
/*
* signals when all in-flight requests are done
-@@ -259,6 +261,7 @@
+@@ -259,6 +261,7 @@ static int __init aio_setup(void)
.mount = aio_mount,
.kill_sb = kill_anon_super,
};
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
-@@ -633,9 +636,9 @@
+@@ -633,9 +636,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
-@@ -653,6 +656,14 @@
+@@ -653,6 +656,14 @@ static void free_ioctx_users(struct percpu_ref *ref)
percpu_ref_put(&ctx->reqs);
}
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
unsigned i, new_nr;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/autofs4/autofs_i.h linux-4.14/fs/autofs4/autofs_i.h
---- linux-4.14.orig/fs/autofs4/autofs_i.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/fs/autofs4/autofs_i.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
+index ce696d6c4641..b120fbd41483 100644
+--- a/fs/autofs4/autofs_i.h
++++ b/fs/autofs4/autofs_i.h
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/autofs4/expire.c linux-4.14/fs/autofs4/expire.c
---- linux-4.14.orig/fs/autofs4/expire.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/fs/autofs4/expire.c 2018-09-05 11:05:07.000000000 +0200
-@@ -148,7 +148,7 @@
+diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
+index 57725d4a8c59..62220508bace 100644
+--- a/fs/autofs4/expire.c
++++ b/fs/autofs4/expire.c
+@@ -148,7 +148,7 @@ static struct dentry *get_next_positive_dentry(struct dentry *prev,
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
goto relock;
}
spin_unlock(&p->d_lock);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/buffer.c linux-4.14/fs/buffer.c
---- linux-4.14.orig/fs/buffer.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/fs/buffer.c 2018-09-05 11:05:07.000000000 +0200
-@@ -302,8 +302,7 @@
+diff --git a/fs/buffer.c b/fs/buffer.c
+index b96f3b98a6ef..4ca5f222537a 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -302,8 +302,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
* decide that the page is now completely done.
*/
first = page_buffers(page);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
-@@ -316,8 +315,7 @@
+@@ -316,8 +315,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
/*
* If none of the buffers had errors and they are all
-@@ -329,9 +327,7 @@
+@@ -329,9 +327,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
return;
still_busy:
}
/*
-@@ -358,8 +354,7 @@
+@@ -358,8 +354,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}
first = page_buffers(page);
clear_buffer_async_write(bh);
unlock_buffer(bh);
-@@ -371,15 +366,12 @@
+@@ -371,15 +366,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
}
}
EXPORT_SYMBOL(end_buffer_async_write);
-@@ -3417,6 +3409,7 @@
+@@ -3417,6 +3409,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/cifs/readdir.c linux-4.14/fs/cifs/readdir.c
---- linux-4.14.orig/fs/cifs/readdir.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/fs/cifs/readdir.c 2018-09-05 11:05:07.000000000 +0200
-@@ -80,7 +80,7 @@
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index a27fc8791551..791aecb7c1ac 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
struct inode *inode;
struct super_block *sb = parent->d_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/dcache.c linux-4.14/fs/dcache.c
---- linux-4.14.orig/fs/dcache.c 2018-09-05 11:03:29.000000000 +0200
-+++ linux-4.14/fs/dcache.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/fs/dcache.c b/fs/dcache.c
+index c1a7c174a905..26c798d79add 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
-@@ -793,6 +794,8 @@
+@@ -794,6 +795,8 @@ static inline bool fast_dput(struct dentry *dentry)
*/
void dput(struct dentry *dentry)
{
if (unlikely(!dentry))
return;
-@@ -829,9 +832,18 @@
+@@ -830,9 +833,18 @@ void dput(struct dentry *dentry)
return;
kill_it:
goto repeat;
}
}
-@@ -2394,7 +2406,7 @@
+@@ -2394,7 +2406,7 @@ void d_delete(struct dentry * dentry)
if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
-@@ -2439,9 +2451,10 @@
+@@ -2439,9 +2451,10 @@ EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{
return n;
cpu_relax();
}
-@@ -2449,26 +2462,30 @@
+@@ -2449,26 +2462,30 @@ static inline unsigned start_dir_add(struct inode *dir)
static inline void end_dir_add(struct inode *dir, unsigned n)
{
{
unsigned int hash = name->hash;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
-@@ -2482,7 +2499,7 @@
+@@ -2482,7 +2499,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
retry:
rcu_read_lock();
r_seq = read_seqbegin(&rename_lock);
dentry = __d_lookup_rcu(parent, name, &d_seq);
if (unlikely(dentry)) {
-@@ -2510,7 +2527,7 @@
+@@ -2510,7 +2527,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
}
hlist_bl_lock(b);
hlist_bl_unlock(b);
rcu_read_unlock();
goto retry;
-@@ -2583,7 +2600,7 @@
+@@ -2583,7 +2600,7 @@ void __d_lookup_done(struct dentry *dentry)
hlist_bl_lock(b);
dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
dentry->d_wait = NULL;
hlist_bl_unlock(b);
INIT_HLIST_NODE(&dentry->d_u.d_alias);
-@@ -3619,6 +3636,8 @@
+@@ -3618,6 +3635,8 @@ __setup("dhash_entries=", set_dhash_entries);
static void __init dcache_init_early(void)
{
/* If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available.
*/
-@@ -3635,10 +3654,14 @@
+@@ -3634,10 +3653,14 @@ static void __init dcache_init_early(void)
&d_hash_mask,
0,
0);
/*
* A constructor could be added for stable state like the lists,
* but it is probably not worth it because of the cache nature
-@@ -3661,6 +3684,10 @@
+@@ -3660,6 +3683,10 @@ static void __init dcache_init(void)
&d_hash_mask,
0,
0);
}
/* SLAB cache for __getname() consumers */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/eventpoll.c linux-4.14/fs/eventpoll.c
---- linux-4.14.orig/fs/eventpoll.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/fs/eventpoll.c 2018-09-05 11:05:07.000000000 +0200
-@@ -587,12 +587,12 @@
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 2fabd19cdeea..b768c32631eb 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -587,12 +587,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
}
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/exec.c linux-4.14/fs/exec.c
---- linux-4.14.orig/fs/exec.c 2018-09-05 11:03:29.000000000 +0200
-+++ linux-4.14/fs/exec.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1025,12 +1025,14 @@
+diff --git a/fs/exec.c b/fs/exec.c
+index 0da4d748b4e6..609aee4dbfa9 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1024,12 +1024,14 @@ static int exec_mmap(struct mm_struct *mm)
}
}
task_lock(tsk);
task_unlock(tsk);
if (old_mm) {
up_read(&old_mm->mmap_sem);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/ext4/page-io.c linux-4.14/fs/ext4/page-io.c
---- linux-4.14.orig/fs/ext4/page-io.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/fs/ext4/page-io.c 2018-09-05 11:05:07.000000000 +0200
-@@ -95,8 +95,7 @@
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index db7590178dfc..d76364124443 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -95,8 +95,7 @@ static void ext4_finish_bio(struct bio *bio)
* We check all buffers in the page under BH_Uptodate_Lock
* to avoid races with other end io clearing async_write flags
*/
do {
if (bh_offset(bh) < bio_start ||
bh_offset(bh) + bh->b_size > bio_end) {
-@@ -108,8 +107,7 @@
+@@ -108,8 +107,7 @@ static void ext4_finish_bio(struct bio *bio)
if (bio->bi_status)
buffer_io_error(bh);
} while ((bh = bh->b_this_page) != head);
if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if (data_page)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/fuse/dir.c linux-4.14/fs/fuse/dir.c
---- linux-4.14.orig/fs/fuse/dir.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/fs/fuse/dir.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1187,7 +1187,7 @@
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 29868c35c19a..76d354eee035 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1188,7 +1188,7 @@ static int fuse_direntplus_link(struct file *file,
struct inode *dir = d_inode(parent);
struct fuse_conn *fc;
struct inode *inode;
if (!o->nodeid) {
/*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/inode.c linux-4.14/fs/inode.c
---- linux-4.14.orig/fs/inode.c 2018-09-05 11:03:29.000000000 +0200
-+++ linux-4.14/fs/inode.c 2018-09-05 11:05:07.000000000 +0200
-@@ -154,7 +154,7 @@
+diff --git a/fs/inode.c b/fs/inode.c
+index cfc36d11bcb3..b77ce179798a 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -154,7 +154,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_bdev = NULL;
inode->i_cdev = NULL;
inode->i_link = NULL;
inode->i_rdev = 0;
inode->dirtied_when = 0;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/libfs.c linux-4.14/fs/libfs.c
---- linux-4.14.orig/fs/libfs.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/fs/libfs.c 2018-09-05 11:05:07.000000000 +0200
-@@ -90,7 +90,7 @@
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 3aabe553fc45..b5d63bf1ad8e 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -90,7 +90,7 @@ static struct dentry *next_positive(struct dentry *parent,
struct list_head *from,
int count)
{
struct dentry *res;
struct list_head *p;
bool skipped;
-@@ -123,8 +123,9 @@
+@@ -123,8 +123,9 @@ static struct dentry *next_positive(struct dentry *parent,
static void move_cursor(struct dentry *cursor, struct list_head *after)
{
struct dentry *parent = cursor->d_parent;
for (;;) {
n = *seq;
if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
-@@ -137,6 +138,7 @@
+@@ -137,6 +138,7 @@ static void move_cursor(struct dentry *cursor, struct list_head *after)
else
list_add_tail(&cursor->d_child, &parent->d_subdirs);
smp_store_release(seq, n + 2);
spin_unlock(&parent->d_lock);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/locks.c linux-4.14/fs/locks.c
---- linux-4.14.orig/fs/locks.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/fs/locks.c 2018-09-05 11:05:07.000000000 +0200
-@@ -945,7 +945,7 @@
+diff --git a/fs/locks.c b/fs/locks.c
+index 1bd71c4d663a..fef5f1e29a4f 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -945,7 +945,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
return -ENOMEM;
}
spin_lock(&ctx->flc_lock);
if (request->fl_flags & FL_ACCESS)
goto find_conflict;
-@@ -986,7 +986,7 @@
+@@ -986,7 +986,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
out:
spin_unlock(&ctx->flc_lock);
if (new_fl)
locks_free_lock(new_fl);
locks_dispose_list(&dispose);
-@@ -1023,7 +1023,7 @@
+@@ -1023,7 +1023,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
new_fl2 = locks_alloc_lock();
}
spin_lock(&ctx->flc_lock);
/*
* New lock request. Walk all POSIX locks and look for conflicts. If
-@@ -1195,7 +1195,7 @@
+@@ -1195,7 +1195,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
}
out:
spin_unlock(&ctx->flc_lock);
/*
* Free any unused locks.
*/
-@@ -1470,7 +1470,7 @@
+@@ -1470,7 +1470,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
return error;
}
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
-@@ -1522,13 +1522,13 @@
+@@ -1522,13 +1522,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
locks_insert_block(fl, new_fl);
trace_break_lease_block(inode, new_fl);
spin_unlock(&ctx->flc_lock);
spin_lock(&ctx->flc_lock);
trace_break_lease_unblock(inode, new_fl);
locks_delete_block(new_fl);
-@@ -1545,7 +1545,7 @@
+@@ -1545,7 +1545,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
}
out:
spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
locks_free_lock(new_fl);
return error;
-@@ -1619,7 +1619,7 @@
+@@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp)
ctx = smp_load_acquire(&inode->i_flctx);
if (ctx && !list_empty_careful(&ctx->flc_lease)) {
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
-@@ -1629,7 +1629,7 @@
+@@ -1629,7 +1629,7 @@ int fcntl_getlease(struct file *filp)
break;
}
spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
}
-@@ -1704,7 +1704,7 @@
+@@ -1704,7 +1704,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
return -EINVAL;
}
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
error = check_conflicting_open(dentry, arg, lease->fl_flags);
-@@ -1775,7 +1775,7 @@
+@@ -1775,7 +1775,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
lease->fl_lmops->lm_setup(lease, priv);
out:
spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
if (is_deleg)
inode_unlock(inode);
-@@ -1798,7 +1798,7 @@
+@@ -1798,7 +1798,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
return error;
}
spin_lock(&ctx->flc_lock);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (fl->fl_file == filp &&
-@@ -1811,7 +1811,7 @@
+@@ -1811,7 +1811,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
if (victim)
error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
return error;
}
-@@ -2535,13 +2535,13 @@
+@@ -2535,13 +2535,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
if (list_empty(&ctx->flc_lease))
return;
locks_dispose_list(&dispose);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/namei.c linux-4.14/fs/namei.c
---- linux-4.14.orig/fs/namei.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/fs/namei.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1627,7 +1627,7 @@
+diff --git a/fs/namei.c b/fs/namei.c
+index 0b46b858cd42..f5c6c2ec44ce 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1627,7 +1627,7 @@ static struct dentry *lookup_slow(const struct qstr *name,
{
struct dentry *dentry = ERR_PTR(-ENOENT), *old;
struct inode *inode = dir->d_inode;
inode_lock_shared(inode);
/* Don't go there if it's already dead */
-@@ -3100,7 +3100,7 @@
+@@ -3100,7 +3100,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
if (unlikely(IS_DEADDIR(dir_inode)))
return -ENOENT;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/namespace.c linux-4.14/fs/namespace.c
---- linux-4.14.orig/fs/namespace.c 2018-09-05 11:03:29.000000000 +0200
-+++ linux-4.14/fs/namespace.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 9dc146e7b5e0..85bfe5e55adf 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
@@ -14,6 +14,7 @@
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
-@@ -353,8 +354,11 @@
+@@ -353,8 +354,11 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/nfs/delegation.c linux-4.14/fs/nfs/delegation.c
---- linux-4.14.orig/fs/nfs/delegation.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/fs/nfs/delegation.c 2018-09-05 11:05:07.000000000 +0200
-@@ -150,11 +150,11 @@
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 606dd3871f66..fa41eb75b4d8 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -150,11 +150,11 @@ static int nfs_delegation_claim_opens(struct inode *inode,
sp = state->owner;
/* Block nfs4_proc_unlck */
mutex_lock(&sp->so_delegreturn_mutex);
err = -EAGAIN;
mutex_unlock(&sp->so_delegreturn_mutex);
put_nfs_open_context(ctx);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/nfs/dir.c linux-4.14/fs/nfs/dir.c
---- linux-4.14.orig/fs/nfs/dir.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/fs/nfs/dir.c 2018-09-05 11:05:07.000000000 +0200
-@@ -452,7 +452,7 @@
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index bf2c43635062..f43f5da4a8c3 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -452,7 +452,7 @@ static
void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
{
struct qstr filename = QSTR_INIT(entry->name, entry->len);
struct dentry *dentry;
struct dentry *alias;
struct inode *dir = d_inode(parent);
-@@ -1443,7 +1443,7 @@
+@@ -1443,7 +1443,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned open_flags,
umode_t mode, int *opened)
{
struct nfs_open_context *ctx;
struct dentry *res;
struct iattr attr = { .ia_valid = ATTR_OPEN };
-@@ -1763,7 +1763,11 @@
+@@ -1763,7 +1763,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
trace_nfs_rmdir_enter(dir, dentry);
if (d_really_is_positive(dentry)) {
error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
/* Ensure the VFS deletes this inode */
switch (error) {
-@@ -1773,7 +1777,11 @@
+@@ -1773,7 +1777,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
case -ENOENT:
nfs_dentry_handle_enoent(dentry);
}
} else
error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
trace_nfs_rmdir_exit(dir, dentry, error);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/nfs/inode.c linux-4.14/fs/nfs/inode.c
---- linux-4.14.orig/fs/nfs/inode.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/fs/nfs/inode.c 2018-09-05 11:05:07.000000000 +0200
-@@ -2014,7 +2014,11 @@
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 134d9f560240..ff64167f9811 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -2014,7 +2014,11 @@ static void init_once(void *foo)
atomic_long_set(&nfsi->nrequests, 0);
atomic_long_set(&nfsi->commit_info.ncommit, 0);
atomic_set(&nfsi->commit_info.rpcs_out, 0);
mutex_init(&nfsi->commit_mutex);
nfs4_init_once(nfsi);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/nfs/nfs4_fs.h linux-4.14/fs/nfs/nfs4_fs.h
---- linux-4.14.orig/fs/nfs/nfs4_fs.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/fs/nfs/nfs4_fs.h 2018-09-05 11:05:07.000000000 +0200
-@@ -112,7 +112,7 @@
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index a73144b3cb8c..0c403d280b96 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -112,7 +112,7 @@ struct nfs4_state_owner {
unsigned long so_flags;
struct list_head so_states;
struct nfs_seqid_counter so_seqid;
struct mutex so_delegreturn_mutex;
};
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/nfs/nfs4proc.c linux-4.14/fs/nfs/nfs4proc.c
---- linux-4.14.orig/fs/nfs/nfs4proc.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/fs/nfs/nfs4proc.c 2018-09-05 11:05:07.000000000 +0200
-@@ -2689,7 +2689,7 @@
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 77c7d29fcd3b..e0a9f811f0ef 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2696,7 +2696,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
unsigned int seq;
int ret;
ret = _nfs4_proc_open(opendata);
if (ret != 0)
-@@ -2727,7 +2727,7 @@
+@@ -2734,7 +2734,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
nfs4_schedule_stateid_recovery(server, state);
}
out:
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/nfs/nfs4state.c linux-4.14/fs/nfs/nfs4state.c
---- linux-4.14.orig/fs/nfs/nfs4state.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/fs/nfs/nfs4state.c 2018-09-05 11:05:07.000000000 +0200
-@@ -494,7 +494,7 @@
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 45873ed92057..c487a1ca7106 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -494,7 +494,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
nfs4_init_seqid_counter(&sp->so_seqid);
atomic_set(&sp->so_count, 1);
INIT_LIST_HEAD(&sp->so_lru);
mutex_init(&sp->so_delegreturn_mutex);
return sp;
}
-@@ -1519,8 +1519,12 @@
+@@ -1519,8 +1519,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
* recovering after a network partition or a reboot from a
* server that doesn't support a grace period.
*/
restart:
list_for_each_entry(state, &sp->so_states, open_states) {
if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
-@@ -1589,14 +1593,20 @@
+@@ -1589,14 +1593,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
spin_lock(&sp->so_lock);
goto restart;
}
return status;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/nfs/unlink.c linux-4.14/fs/nfs/unlink.c
---- linux-4.14.orig/fs/nfs/unlink.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/fs/nfs/unlink.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
+index 630b4a3c1a93..0dc1d3e6a62f 100644
+--- a/fs/nfs/unlink.c
++++ b/fs/nfs/unlink.c
@@ -13,7 +13,7 @@
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
-@@ -52,6 +52,29 @@
+@@ -52,6 +52,29 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
rpc_restart_call_prepare(task);
}
/**
* nfs_async_unlink_release - Release the sillydelete data.
* @task: rpc_task of the sillydelete
-@@ -65,7 +88,7 @@
+@@ -65,7 +88,7 @@ static void nfs_async_unlink_release(void *calldata)
struct dentry *dentry = data->dentry;
struct super_block *sb = dentry->d_sb;
d_lookup_done(dentry);
nfs_free_unlinkdata(data);
dput(dentry);
-@@ -118,10 +141,10 @@
+@@ -118,10 +141,10 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
struct inode *dir = d_inode(dentry->d_parent);
struct dentry *alias;
return 0;
}
if (!d_in_lookup(alias)) {
-@@ -143,7 +166,7 @@
+@@ -143,7 +166,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
ret = 0;
spin_unlock(&alias->d_lock);
dput(alias);
/*
* If we'd displaced old cached devname, free it. At that
* point dentry is definitely not a root, so we won't need
-@@ -183,7 +206,7 @@
+@@ -183,7 +206,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name)
goto out_free_name;
}
data->res.dir_attr = &data->dir_attr;
status = -EBUSY;
spin_lock(&dentry->d_lock);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/ntfs/aops.c linux-4.14/fs/ntfs/aops.c
---- linux-4.14.orig/fs/ntfs/aops.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/fs/ntfs/aops.c 2018-09-05 11:05:07.000000000 +0200
-@@ -93,13 +93,13 @@
+diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
+index cc91856b5e2d..a982d7c3ad91 100644
+--- a/fs/ntfs/aops.c
++++ b/fs/ntfs/aops.c
+@@ -93,13 +93,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
ofs = 0;
if (file_ofs < init_size)
ofs = init_size - file_ofs;
}
} else {
clear_buffer_uptodate(bh);
-@@ -108,8 +108,7 @@
+@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
-@@ -124,8 +123,7 @@
+@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
-@@ -146,13 +144,13 @@
+@@ -146,13 +144,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
recs = PAGE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
flush_dcache_page(page);
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
-@@ -160,9 +158,7 @@
+@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
unlock_page(page);
return;
still_busy:
}
/**
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/proc/array.c linux-4.14/fs/proc/array.c
---- linux-4.14.orig/fs/proc/array.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/fs/proc/array.c 2018-09-05 11:05:07.000000000 +0200
-@@ -386,9 +386,9 @@
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 4ac811e1a26c..9dcb40690cde 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -386,9 +386,9 @@ static inline void task_context_switch_counts(struct seq_file *m,
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
{
seq_printf(m, "Cpus_allowed:\t%*pb\n",
}
int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/proc/base.c linux-4.14/fs/proc/base.c
---- linux-4.14.orig/fs/proc/base.c 2018-09-05 11:03:28.000000000 +0200
-+++ linux-4.14/fs/proc/base.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1886,7 +1886,7 @@
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index c5c42f3e33d1..f5dcd63f37aa 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1886,7 +1886,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
child = d_hash_and_lookup(dir, &qname);
if (!child) {
child = d_alloc_parallel(dir, &qname, &wq);
if (IS_ERR(child))
goto end_instantiate;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/proc/proc_sysctl.c linux-4.14/fs/proc/proc_sysctl.c
---- linux-4.14.orig/fs/proc/proc_sysctl.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/fs/proc/proc_sysctl.c 2018-09-05 11:05:07.000000000 +0200
-@@ -679,7 +679,7 @@
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 82ac5f682b73..c35714621a38 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -679,7 +679,7 @@ static bool proc_sys_fill_cache(struct file *file,
child = d_lookup(dir, &qname);
if (!child) {
child = d_alloc_parallel(dir, &qname, &wq);
if (IS_ERR(child))
return false;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/timerfd.c linux-4.14/fs/timerfd.c
---- linux-4.14.orig/fs/timerfd.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/fs/timerfd.c 2018-09-05 11:05:07.000000000 +0200
-@@ -471,7 +471,10 @@
+diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c
+index 23a9c28ad8ea..6a73c4fa88e7 100644
+--- a/fs/squashfs/decompressor_multi_percpu.c
++++ b/fs/squashfs/decompressor_multi_percpu.c
+@@ -10,6 +10,7 @@
+ #include <linux/slab.h>
+ #include <linux/percpu.h>
+ #include <linux/buffer_head.h>
++#include <linux/locallock.h>
+
+ #include "squashfs_fs.h"
+ #include "squashfs_fs_sb.h"
+@@ -25,6 +26,8 @@ struct squashfs_stream {
+ void *stream;
+ };
+
++static DEFINE_LOCAL_IRQ_LOCK(stream_lock);
++
+ void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
+ void *comp_opts)
+ {
+@@ -79,10 +82,15 @@ int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh,
+ {
+ struct squashfs_stream __percpu *percpu =
+ (struct squashfs_stream __percpu *) msblk->stream;
+- struct squashfs_stream *stream = get_cpu_ptr(percpu);
+- int res = msblk->decompressor->decompress(msblk, stream->stream, bh, b,
+- offset, length, output);
+- put_cpu_ptr(stream);
++ struct squashfs_stream *stream;
++ int res;
++
++ stream = get_locked_ptr(stream_lock, percpu);
++
++ res = msblk->decompressor->decompress(msblk, stream->stream, bh, b,
++ offset, length, output);
++
++ put_locked_ptr(stream_lock, stream);
+
+ if (res < 0)
+ ERROR("%s decompression failed, data probably corrupt\n",
+diff --git a/fs/timerfd.c b/fs/timerfd.c
+index 040612ec9598..b3d9d435926c 100644
+--- a/fs/timerfd.c
++++ b/fs/timerfd.c
+@@ -471,7 +471,10 @@ static int do_timerfd_settime(int ufd, int flags,
break;
}
spin_unlock_irq(&ctx->wqh.lock);
}
/*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/fs/xfs/xfs_aops.c linux-4.14/fs/xfs/xfs_aops.c
---- linux-4.14.orig/fs/xfs/xfs_aops.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/fs/xfs/xfs_aops.c 2018-09-05 11:05:07.000000000 +0200
-@@ -120,8 +120,7 @@
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index b0cccf8a81a8..eaa4383defec 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -120,8 +120,7 @@ xfs_finish_page_writeback(
ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
do {
if (off >= bvec->bv_offset &&
off < bvec->bv_offset + bvec->bv_len) {
-@@ -143,8 +142,7 @@
+@@ -143,8 +142,7 @@ xfs_finish_page_writeback(
}
off += bh->b_size;
} while ((bh = bh->b_this_page) != head);
if (!busy)
end_page_writeback(bvec->bv_page);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/acpi/platform/aclinux.h linux-4.14/include/acpi/platform/aclinux.h
---- linux-4.14.orig/include/acpi/platform/aclinux.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/acpi/platform/aclinux.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
+index 1b473efd9eb6..89ee5e1dac48 100644
+--- a/include/acpi/platform/aclinux.h
++++ b/include/acpi/platform/aclinux.h
@@ -134,6 +134,7 @@
#define acpi_cache_t struct kmem_cache
/*
* OSL interfaces used by debugger/disassembler
*/
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/asm-generic/bug.h linux-4.14/include/asm-generic/bug.h
---- linux-4.14.orig/include/asm-generic/bug.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/asm-generic/bug.h 2018-09-05 11:05:07.000000000 +0200
-@@ -234,6 +234,20 @@
+diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
+index ae1a33aa8955..c6d04eca8345 100644
+--- a/include/asm-generic/bug.h
++++ b/include/asm-generic/bug.h
+@@ -234,6 +234,20 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
# define WARN_ON_SMP(x) ({0;})
#endif
#endif /* __ASSEMBLY__ */
#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/blkdev.h linux-4.14/include/linux/blkdev.h
---- linux-4.14.orig/include/linux/blkdev.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/blkdev.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index 994cbb0f7ffc..0d4b7e3489a9 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -226,7 +226,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
+ return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
+ }
+
+-
++void __blk_mq_complete_request_remote_work(struct work_struct *work);
+ int blk_mq_request_started(struct request *rq);
+ void blk_mq_start_request(struct request *rq);
+ void blk_mq_end_request(struct request *rq, blk_status_t error);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 4d4af0e94059..cbf9d5730dd3 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
@@ -27,6 +27,7 @@
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
struct module;
struct scsi_ioctl_command;
-@@ -134,6 +135,9 @@
+@@ -134,6 +135,9 @@ typedef __u32 __bitwise req_flags_t;
*/
struct request {
struct list_head queuelist;
union {
struct __call_single_data csd;
u64 fifo_time;
-@@ -596,6 +600,7 @@
+@@ -596,6 +600,7 @@ struct request_queue {
#endif
struct rcu_head rcu_head;
wait_queue_head_t mq_freeze_wq;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/blk-mq.h linux-4.14/include/linux/blk-mq.h
---- linux-4.14.orig/include/linux/blk-mq.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/blk-mq.h 2018-09-05 11:05:07.000000000 +0200
-@@ -226,7 +226,7 @@
- return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
- }
-
--
-+void __blk_mq_complete_request_remote_work(struct work_struct *work);
- int blk_mq_request_started(struct request *rq);
- void blk_mq_start_request(struct request *rq);
- void blk_mq_end_request(struct request *rq, blk_status_t error);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/bottom_half.h linux-4.14/include/linux/bottom_half.h
---- linux-4.14.orig/include/linux/bottom_half.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/bottom_half.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
+index a19519f4241d..40dd5ef9c154 100644
+--- a/include/linux/bottom_half.h
++++ b/include/linux/bottom_half.h
@@ -4,6 +4,39 @@
#include <linux/preempt.h>
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
-@@ -31,5 +64,6 @@
+@@ -31,5 +64,6 @@ static inline void local_bh_enable(void)
{
__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}
+#endif
#endif /* _LINUX_BH_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/buffer_head.h linux-4.14/include/linux/buffer_head.h
---- linux-4.14.orig/include/linux/buffer_head.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/buffer_head.h 2018-09-05 11:05:07.000000000 +0200
-@@ -76,8 +76,50 @@
+diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
+index afa37f807f12..48505fade7e1 100644
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -76,8 +76,50 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/cgroup-defs.h linux-4.14/include/linux/cgroup-defs.h
---- linux-4.14.orig/include/linux/cgroup-defs.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/cgroup-defs.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 4e8f77504a57..8155c26315b7 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
@@ -19,6 +19,7 @@
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
#ifdef CONFIG_CGROUPS
-@@ -152,6 +153,7 @@
+@@ -152,6 +153,7 @@ struct cgroup_subsys_state {
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
/*
* PI: the parent css. Placed here for cache proximity to following
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/completion.h linux-4.14/include/linux/completion.h
---- linux-4.14.orig/include/linux/completion.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/completion.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/completion.h b/include/linux/completion.h
+index 7828451e161a..f5838b10cf84 100644
+--- a/include/linux/completion.h
++++ b/include/linux/completion.h
@@ -9,7 +9,7 @@
* See kernel/sched/completion.c for details.
*/
#ifdef CONFIG_LOCKDEP_COMPLETIONS
struct lockdep_map_cross map;
#endif
-@@ -67,11 +67,11 @@
+@@ -67,11 +67,11 @@ static inline void complete_release_commit(struct completion *x) {}
#ifdef CONFIG_LOCKDEP_COMPLETIONS
#define COMPLETION_INITIALIZER(work) \
#endif
#define COMPLETION_INITIALIZER_ONSTACK(work) \
-@@ -117,7 +117,7 @@
+@@ -117,7 +117,7 @@ static inline void complete_release_commit(struct completion *x) {}
static inline void __init_completion(struct completion *x)
{
x->done = 0;
}
/**
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/cpu.h linux-4.14/include/linux/cpu.h
---- linux-4.14.orig/include/linux/cpu.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/cpu.h 2018-09-05 11:05:07.000000000 +0200
-@@ -120,6 +120,8 @@
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 2a378d261914..b418d3c5159d 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -120,6 +120,8 @@ extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
#else /* CONFIG_HOTPLUG_CPU */
-@@ -130,6 +132,9 @@
+@@ -130,6 +132,9 @@ static inline void cpus_read_unlock(void) { }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
#endif /* !CONFIG_HOTPLUG_CPU */
/* Wrappers which go away once all code is converted */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/dcache.h linux-4.14/include/linux/dcache.h
---- linux-4.14.orig/include/linux/dcache.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/dcache.h 2018-09-05 11:05:07.000000000 +0200
-@@ -107,7 +107,7 @@
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 006f4ccda5f5..d413993f7f17 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -107,7 +107,7 @@ struct dentry {
union {
struct list_head d_lru; /* LRU list */
};
struct list_head d_child; /* child of parent list */
struct list_head d_subdirs; /* our children */
-@@ -238,7 +238,7 @@
+@@ -238,7 +238,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/delay.h linux-4.14/include/linux/delay.h
---- linux-4.14.orig/include/linux/delay.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/delay.h 2018-09-05 11:05:07.000000000 +0200
-@@ -64,4 +64,10 @@
+diff --git a/include/linux/delay.h b/include/linux/delay.h
+index b78bab4395d8..7c4bc414a504 100644
+--- a/include/linux/delay.h
++++ b/include/linux/delay.h
+@@ -64,4 +64,10 @@ static inline void ssleep(unsigned int seconds)
msleep(seconds * 1000);
}
+#endif
+
#endif /* defined(_LINUX_DELAY_H) */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/fs.h linux-4.14/include/linux/fs.h
---- linux-4.14.orig/include/linux/fs.h 2018-09-05 11:03:29.000000000 +0200
-+++ linux-4.14/include/linux/fs.h 2018-09-05 11:05:07.000000000 +0200
-@@ -655,7 +655,7 @@
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index cc613f20e5a6..b806e2116f5c 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -655,7 +655,7 @@ struct inode {
struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
};
__u32 i_generation;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/highmem.h linux-4.14/include/linux/highmem.h
---- linux-4.14.orig/include/linux/highmem.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/highmem.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index 776f90f3a1cd..5f0bd7a3e6a7 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
@@ -8,6 +8,7 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
-@@ -66,7 +67,7 @@
+@@ -66,7 +67,7 @@ static inline void kunmap(struct page *page)
static inline void *kmap_atomic(struct page *page)
{
pagefault_disable();
return page_address(page);
}
-@@ -75,7 +76,7 @@
+@@ -75,7 +76,7 @@ static inline void *kmap_atomic(struct page *page)
static inline void __kunmap_atomic(void *addr)
{
pagefault_enable();
}
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
-@@ -87,32 +88,51 @@
+@@ -87,32 +88,51 @@ static inline void __kunmap_atomic(void *addr)
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
#endif
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/hrtimer.h linux-4.14/include/linux/hrtimer.h
---- linux-4.14.orig/include/linux/hrtimer.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/hrtimer.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index 012c37fdb688..3bd606859b0a 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
@@ -22,19 +22,42 @@
#include <linux/percpu.h>
#include <linux/timer.h>
};
/*
-@@ -87,6 +110,7 @@
+@@ -87,6 +110,7 @@ enum hrtimer_restart {
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @is_rel: Set if the timer was armed relative
*
* The hrtimer structure must be initialized by hrtimer_init()
*/
-@@ -97,6 +121,7 @@
+@@ -97,6 +121,7 @@ struct hrtimer {
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
};
/**
-@@ -112,9 +137,9 @@
+@@ -112,9 +137,9 @@ struct hrtimer_sleeper {
};
#ifdef CONFIG_64BIT
#endif
/**
-@@ -123,48 +148,57 @@
+@@ -123,48 +148,57 @@ struct hrtimer_sleeper {
* @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
* @clock_base: array of clock bases for this cpu
*
* Note: next_timer is just an optimization for __remove_hrtimer().
-@@ -173,31 +207,31 @@
+@@ -173,31 +207,31 @@ enum hrtimer_base_type {
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
timer->node.expires = time;
timer->_softexpires = time;
}
-@@ -266,16 +300,17 @@
+@@ -266,16 +300,17 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
return timer->base->get_time();
}
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
-@@ -298,11 +333,6 @@
+@@ -298,11 +333,6 @@ extern unsigned int hrtimer_resolution;
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
static inline void clock_was_set_delayed(void) { }
#endif
-@@ -344,10 +374,17 @@
+@@ -344,10 +374,17 @@ DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
/* Initialize timers: */
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
enum hrtimer_mode mode);
extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
-@@ -357,6 +394,15 @@
+@@ -357,6 +394,15 @@ static inline void hrtimer_init_on_stack(struct hrtimer *timer,
{
hrtimer_init(timer, which_clock, mode);
}
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
-@@ -365,11 +411,12 @@
+@@ -365,11 +411,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 range_ns, const enum hrtimer_mode mode);
/**
*/
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
-@@ -396,6 +443,13 @@
+@@ -396,6 +443,13 @@ static inline void hrtimer_restart(struct hrtimer *timer)
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
-@@ -420,9 +474,9 @@
+@@ -420,9 +474,9 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
* Helper function to check, whether the timer is running the callback
* function
*/
}
/* Forward a hrtimer so it expires after now: */
-@@ -458,15 +512,12 @@
+@@ -458,15 +512,12 @@ extern long hrtimer_nanosleep(const struct timespec64 *rqtp,
const enum hrtimer_mode mode,
const clockid_t clockid);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/idr.h linux-4.14/include/linux/idr.h
---- linux-4.14.orig/include/linux/idr.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/idr.h 2018-09-05 11:05:07.000000000 +0200
-@@ -167,10 +167,7 @@
+diff --git a/include/linux/idr.h b/include/linux/idr.h
+index 7c3a365f7e12..a922d984d9b6 100644
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -167,10 +167,7 @@ static inline bool idr_is_empty(const struct idr *idr)
* Each idr_preload() should be matched with an invocation of this
* function. See idr_preload() for details.
*/
/**
* idr_find - return pointer for given id
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/init_task.h linux-4.14/include/linux/init_task.h
---- linux-4.14.orig/include/linux/init_task.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/init_task.h 2018-09-05 11:05:07.000000000 +0200
-@@ -163,6 +163,12 @@
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index 8062e6cc607c..ee3ff961b84c 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -163,6 +163,12 @@ extern struct cred init_cred;
# define INIT_PERF_EVENTS(tsk)
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk) \
.vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount), \
-@@ -234,7 +240,8 @@
+@@ -234,7 +240,8 @@ extern struct cred init_cred;
.static_prio = MAX_PRIO-20, \
.normal_prio = MAX_PRIO-20, \
.policy = SCHED_NORMAL, \
.nr_cpus_allowed= NR_CPUS, \
.mm = NULL, \
.active_mm = &init_mm, \
-@@ -276,6 +283,7 @@
+@@ -276,6 +283,7 @@ extern struct cred init_cred;
INIT_CPU_TIMERS(tsk) \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/interrupt.h linux-4.14/include/linux/interrupt.h
---- linux-4.14.orig/include/linux/interrupt.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/interrupt.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 69c238210325..0f25fa19b2d8 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
@@ -15,6 +15,7 @@
#include <linux/hrtimer.h>
#include <linux/kref.h>
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
-@@ -207,7 +210,7 @@
+@@ -207,7 +210,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
#endif
extern void disable_irq_nosync(unsigned int irq);
-@@ -227,6 +230,7 @@
+@@ -227,6 +230,7 @@ extern void resume_device_irqs(void);
* struct irq_affinity_notify - context for notification of IRQ affinity changes
* @irq: Interrupt to which notification applies
* @kref: Reference count, for internal use
* @work: Work item, for internal use
* @notify: Function to be called on change. This will be
* called in process context.
-@@ -238,7 +242,11 @@
+@@ -238,7 +242,11 @@ extern void resume_device_irqs(void);
struct irq_affinity_notify {
unsigned int irq;
struct kref kref;
void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
void (*release)(struct kref *ref);
};
-@@ -429,9 +437,13 @@
+@@ -429,9 +437,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
-@@ -488,9 +500,10 @@
+@@ -488,9 +500,10 @@ struct softirq_action
void (*action)(struct softirq_action *);
};
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
-@@ -499,13 +512,25 @@
+@@ -499,13 +512,25 @@ static inline void do_softirq_own_stack(void)
__do_softirq();
}
#endif
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-@@ -527,8 +552,9 @@
+@@ -527,8 +552,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
-@@ -553,27 +579,36 @@
+@@ -553,27 +579,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
-@@ -607,41 +642,17 @@
+@@ -607,41 +642,17 @@ static inline void tasklet_disable(struct tasklet_struct *t)
smp_mb();
}
/*
* Autoprobing for irqs:
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/irqdesc.h linux-4.14/include/linux/irqdesc.h
---- linux-4.14.orig/include/linux/irqdesc.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/irqdesc.h 2018-09-05 11:05:07.000000000 +0200
-@@ -70,6 +70,7 @@
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index 0d53626405bf..ddd23c6e2e55 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -74,6 +74,7 @@ enum irqchip_irq_state;
+ * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
+ * it from the spurious interrupt detection
+ * mechanism and from core side polling.
++ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
+ * IRQ_DISABLE_UNLAZY - Disable lazy irq disable
+ */
+ enum {
+@@ -101,13 +102,14 @@ enum {
+ IRQ_PER_CPU_DEVID = (1 << 17),
+ IRQ_IS_POLLED = (1 << 18),
+ IRQ_DISABLE_UNLAZY = (1 << 19),
++ IRQ_NO_SOFTIRQ_CALL = (1 << 20),
+ };
+
+ #define IRQF_MODIFY_MASK \
+ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
+ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
+- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
++ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)
+
+ #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
+
+diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
+index 9270d73ea682..1e66fac6f1d2 100644
+--- a/include/linux/irq_work.h
++++ b/include/linux/irq_work.h
+@@ -17,6 +17,7 @@
+ #define IRQ_WORK_BUSY 2UL
+ #define IRQ_WORK_FLAGS 3UL
+ #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
++#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */
+
+ struct irq_work {
+ unsigned long flags;
+@@ -52,4 +53,10 @@ static inline bool irq_work_needs_cpu(void) { return false; }
+ static inline void irq_work_run(void) { }
+ #endif
+
++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
++void irq_work_tick_soft(void);
++#else
++static inline void irq_work_tick_soft(void) { }
++#endif
++
+ #endif /* _LINUX_IRQ_WORK_H */
+diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
+index bacb499c512c..688f2565294c 100644
+--- a/include/linux/irqchip/arm-gic-v3.h
++++ b/include/linux/irqchip/arm-gic-v3.h
+@@ -568,6 +568,7 @@ struct rdists {
+ void __iomem *rd_base;
+ struct page *pend_page;
+ phys_addr_t phys_base;
++ bool lpi_enabled;
+ } __percpu *rdist;
+ struct page *prop_page;
+ int id_bits;
+diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
+index b6084898d330..d334476cdca6 100644
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -70,6 +70,7 @@ struct irq_desc {
unsigned int irqs_unhandled;
atomic_t threads_handled;
int threads_handled_last;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
const struct cpumask *percpu_affinity;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/irqflags.h linux-4.14/include/linux/irqflags.h
---- linux-4.14.orig/include/linux/irqflags.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/irqflags.h 2018-09-05 11:05:07.000000000 +0200
-@@ -34,16 +34,6 @@
+diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
+index 46cb57d5eb13..2e023bfe45af 100644
+--- a/include/linux/irqflags.h
++++ b/include/linux/irqflags.h
+@@ -34,16 +34,6 @@ do { \
current->hardirq_context--; \
crossrelease_hist_end(XHLOCK_HARD); \
} while (0)
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
-@@ -56,9 +46,23 @@
+@@ -56,9 +46,23 @@ do { \
# define trace_softirqs_enabled(p) 0
# define trace_hardirq_enter() do { } while (0)
# define trace_hardirq_exit() do { } while (0)
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
-@@ -165,4 +169,23 @@
+@@ -165,4 +169,23 @@ do { \
#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
+#endif
+
#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/irq.h linux-4.14/include/linux/irq.h
---- linux-4.14.orig/include/linux/irq.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/irq.h 2018-09-05 11:05:07.000000000 +0200
-@@ -74,6 +74,7 @@
- * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
- * it from the spurious interrupt detection
- * mechanism and from core side polling.
-+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
- * IRQ_DISABLE_UNLAZY - Disable lazy irq disable
- */
- enum {
-@@ -101,13 +102,14 @@
- IRQ_PER_CPU_DEVID = (1 << 17),
- IRQ_IS_POLLED = (1 << 18),
- IRQ_DISABLE_UNLAZY = (1 << 19),
-+ IRQ_NO_SOFTIRQ_CALL = (1 << 20),
- };
-
- #define IRQF_MODIFY_MASK \
- (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
- IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
-+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)
-
- #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/irq_work.h linux-4.14/include/linux/irq_work.h
---- linux-4.14.orig/include/linux/irq_work.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/irq_work.h 2018-09-05 11:05:07.000000000 +0200
-@@ -17,6 +17,7 @@
- #define IRQ_WORK_BUSY 2UL
- #define IRQ_WORK_FLAGS 3UL
- #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
-+#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */
-
- struct irq_work {
- unsigned long flags;
-@@ -52,4 +53,10 @@
- static inline void irq_work_run(void) { }
- #endif
-
-+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
-+void irq_work_tick_soft(void);
-+#else
-+static inline void irq_work_tick_soft(void) { }
-+#endif
-+
- #endif /* _LINUX_IRQ_WORK_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/jbd2.h linux-4.14/include/linux/jbd2.h
---- linux-4.14.orig/include/linux/jbd2.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/jbd2.h 2018-09-05 11:05:07.000000000 +0200
-@@ -347,32 +347,56 @@
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 29290bfb94a8..32379bfab9f0 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
}
#define J_ASSERT(assert) BUG_ON(!(assert))
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/kdb.h linux-4.14/include/linux/kdb.h
---- linux-4.14.orig/include/linux/kdb.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/kdb.h 2018-09-05 11:05:07.000000000 +0200
-@@ -167,6 +167,7 @@
+diff --git a/include/linux/kdb.h b/include/linux/kdb.h
+index 68bd88223417..e033b25b0b72 100644
+--- a/include/linux/kdb.h
++++ b/include/linux/kdb.h
+@@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
extern __printf(1, 2) int kdb_printf(const char *, ...);
typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
extern void kdb_init(int level);
/* Access to kdb specific polling devices */
-@@ -201,6 +202,7 @@
+@@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
extern int kdb_unregister(char *);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/kernel.h linux-4.14/include/linux/kernel.h
---- linux-4.14.orig/include/linux/kernel.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/kernel.h 2018-09-05 11:05:07.000000000 +0200
-@@ -225,6 +225,9 @@
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index 4b484ab9e163..74feebf9d82c 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -225,6 +225,9 @@ extern int _cond_resched(void);
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
-@@ -232,6 +235,7 @@
+@@ -232,6 +235,7 @@ extern int _cond_resched(void);
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif
-@@ -531,6 +535,7 @@
+@@ -531,6 +535,7 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
} system_state;
#define TAINT_PROPRIETARY_MODULE 0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/list_bl.h linux-4.14/include/linux/list_bl.h
---- linux-4.14.orig/include/linux/list_bl.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/list_bl.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
+index 3fc2cc57ba1b..0b5de7d9ffcf 100644
+--- a/include/linux/list_bl.h
++++ b/include/linux/list_bl.h
@@ -3,6 +3,7 @@
#define _LINUX_LIST_BL_H
static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
-@@ -119,12 +131,26 @@
+@@ -119,12 +131,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
}
static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/locallock.h linux-4.14/include/linux/locallock.h
---- linux-4.14.orig/include/linux/locallock.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/include/linux/locallock.h 2018-09-05 11:05:07.000000000 +0200
-@@ -0,0 +1,271 @@
+diff --git a/include/linux/locallock.h b/include/linux/locallock.h
+new file mode 100644
+index 000000000000..921eab83cd34
+--- /dev/null
++++ b/include/linux/locallock.h
+@@ -0,0 +1,281 @@
+#ifndef _LINUX_LOCALLOCK_H
+#define _LINUX_LOCALLOCK_H
+
+
+#define put_locked_var(lvar, var) local_unlock(lvar);
+
++#define get_locked_ptr(lvar, var) \
++ ({ \
++ local_lock(lvar); \
++ this_cpu_ptr(var); \
++ })
++
++#define put_locked_ptr(lvar, var) local_unlock(lvar);
++
+#define local_lock_cpu(lvar) \
+ ({ \
+ local_lock(lvar); \
+
+#define get_locked_var(lvar, var) get_cpu_var(var)
+#define put_locked_var(lvar, var) put_cpu_var(var)
++#define get_locked_ptr(lvar, var) get_cpu_ptr(var)
++#define put_locked_ptr(lvar, var) put_cpu_ptr(var)
+
+#define local_lock_cpu(lvar) get_cpu()
+#define local_unlock_cpu(lvar) put_cpu()
+#endif
+
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/mm_types.h linux-4.14/include/linux/mm_types.h
---- linux-4.14.orig/include/linux/mm_types.h 2018-09-05 11:03:28.000000000 +0200
-+++ linux-4.14/include/linux/mm_types.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index e41ef532c4ce..63317710311e 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
@@ -12,6 +12,7 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
-@@ -498,6 +499,9 @@
+@@ -496,6 +497,9 @@ struct mm_struct {
bool tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/mutex.h linux-4.14/include/linux/mutex.h
---- linux-4.14.orig/include/linux/mutex.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/mutex.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index 153274f78402..dbb52857b25b 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
@@ -23,6 +23,17 @@
struct ww_acquire_ctx;
/*
* Simple, straightforward mutexes with strict semantics:
*
-@@ -114,13 +125,6 @@
+@@ -114,13 +125,6 @@ do { \
__mutex_init((mutex), #mutex, &__key); \
} while (0)
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
-@@ -228,4 +232,6 @@
+@@ -228,4 +232,6 @@ mutex_trylock_recursive(struct mutex *lock)
return mutex_trylock(lock);
}
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* __LINUX_MUTEX_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/mutex_rt.h linux-4.14/include/linux/mutex_rt.h
---- linux-4.14.orig/include/linux/mutex_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/include/linux/mutex_rt.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
+new file mode 100644
+index 000000000000..3fcb5edb1d2b
+--- /dev/null
++++ b/include/linux/mutex_rt.h
@@ -0,0 +1,130 @@
+#ifndef __LINUX_MUTEX_RT_H
+#define __LINUX_MUTEX_RT_H
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/netdevice.h linux-4.14/include/linux/netdevice.h
---- linux-4.14.orig/include/linux/netdevice.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/netdevice.h 2018-09-05 11:05:07.000000000 +0200
-@@ -409,7 +409,19 @@
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 2ea7ee1fb495..8b7282a13652 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -409,7 +409,19 @@ typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
void __napi_schedule(struct napi_struct *n);
static inline bool napi_disable_pending(struct napi_struct *n)
{
-@@ -571,7 +583,11 @@
+@@ -571,7 +583,11 @@ struct netdev_queue {
* write-mostly part
*/
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
/*
* Time (in jiffies) of last Tx
*/
-@@ -2433,14 +2449,53 @@
+@@ -2433,14 +2449,53 @@ void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-@@ -2792,6 +2847,7 @@
+@@ -2792,6 +2847,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
};
-@@ -3515,10 +3571,48 @@
+@@ -3515,10 +3571,48 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
return (1 << debug_value) - 1;
}
}
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
-@@ -3535,32 +3629,32 @@
+@@ -3535,32 +3629,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
txq->trans_start = jiffies;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/netfilter/x_tables.h linux-4.14/include/linux/netfilter/x_tables.h
---- linux-4.14.orig/include/linux/netfilter/x_tables.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/netfilter/x_tables.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index 54f346a45cd0..79723e76af66 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/static_key.h>
#include <uapi/linux/netfilter/x_tables.h>
/* Test a struct->invflags and a boolean for inequality */
-@@ -341,6 +342,8 @@
+@@ -341,6 +342,8 @@ void xt_free_table_info(struct xt_table_info *info);
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
/* xt_tee_enabled - true if x_tables needs to handle reentrancy
*
* Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
-@@ -361,6 +364,9 @@
+@@ -361,6 +364,9 @@ static inline unsigned int xt_write_recseq_begin(void)
{
unsigned int addend;
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
-@@ -391,6 +397,7 @@
+@@ -391,6 +397,7 @@ static inline void xt_write_recseq_end(unsigned int addend)
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
}
/*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/nfs_fs.h linux-4.14/include/linux/nfs_fs.h
---- linux-4.14.orig/include/linux/nfs_fs.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/nfs_fs.h 2018-09-05 11:05:07.000000000 +0200
-@@ -162,7 +162,11 @@
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index f0015f801a78..c38288622819 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -162,7 +162,11 @@ struct nfs_inode {
/* Readers: in-flight sillydelete RPC calls */
/* Writers: rmdir */
struct mutex commit_mutex;
#if IS_ENABLED(CONFIG_NFS_V4)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/nfs_xdr.h linux-4.14/include/linux/nfs_xdr.h
---- linux-4.14.orig/include/linux/nfs_xdr.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/nfs_xdr.h 2018-09-05 11:05:07.000000000 +0200
-@@ -1530,7 +1530,7 @@
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 6959968dc36a..802e849b57ac 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1530,7 +1530,7 @@ struct nfs_unlinkdata {
struct nfs_removeargs args;
struct nfs_removeres res;
struct dentry *dentry;
struct rpc_cred *cred;
struct nfs_fattr dir_attr;
long timeout;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/notifier.h linux-4.14/include/linux/notifier.h
---- linux-4.14.orig/include/linux/notifier.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/notifier.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/notifier.h b/include/linux/notifier.h
+index 6d731110e0db..e758627da14d 100644
+--- a/include/linux/notifier.h
++++ b/include/linux/notifier.h
@@ -7,7 +7,7 @@
*
* Alan Cox <Alan.Cox@linux.org>
*/
struct notifier_block;
-@@ -91,7 +89,7 @@
+@@ -91,7 +89,7 @@ struct srcu_notifier_head {
(name)->head = NULL; \
} while (0)
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name) \
cleanup_srcu_struct(&(name)->srcu);
-@@ -104,7 +102,13 @@
+@@ -104,7 +102,13 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
.head = NULL }
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
-@@ -116,6 +120,26 @@
+@@ -116,6 +120,26 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
struct raw_notifier_head name = \
RAW_NOTIFIER_INIT(name)
#ifdef __KERNEL__
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
-@@ -185,12 +209,12 @@
+@@ -185,12 +209,12 @@ static inline int notifier_to_errno(int ret)
/*
* Declared notifiers so far. I can imagine quite a few more chains
/* CPU notfiers are defined in include/linux/cpu.h. */
/* netdevice notifiers are defined in include/linux/netdevice.h */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/percpu.h linux-4.14/include/linux/percpu.h
---- linux-4.14.orig/include/linux/percpu.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/percpu.h 2018-09-05 11:05:07.000000000 +0200
-@@ -19,6 +19,35 @@
- #define PERCPU_MODULE_RESERVE 0
- #endif
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+
-+#define get_local_var(var) (*({ \
-+ migrate_disable(); \
-+ this_cpu_ptr(&var); }))
-+
-+#define put_local_var(var) do { \
-+ (void)&(var); \
-+ migrate_enable(); \
-+} while (0)
-+
-+# define get_local_ptr(var) ({ \
-+ migrate_disable(); \
-+ this_cpu_ptr(var); })
-+
-+# define put_local_ptr(var) do { \
-+ (void)(var); \
-+ migrate_enable(); \
-+} while (0)
-+
-+#else
-+
-+#define get_local_var(var) get_cpu_var(var)
-+#define put_local_var(var) put_cpu_var(var)
-+#define get_local_ptr(var) get_cpu_ptr(var)
-+#define put_local_ptr(var) put_cpu_ptr(var)
-+
-+#endif
-+
- /* minimum unit size, also is the maximum supported allocation size */
- #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/percpu-rwsem.h linux-4.14/include/linux/percpu-rwsem.h
---- linux-4.14.orig/include/linux/percpu-rwsem.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/percpu-rwsem.h 2018-09-05 11:05:07.000000000 +0200
-@@ -29,7 +29,7 @@
+diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
+index 79b99d653e03..fb44e237316d 100644
+--- a/include/linux/percpu-rwsem.h
++++ b/include/linux/percpu-rwsem.h
+@@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name = { \
extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);
{
might_sleep();
-@@ -47,16 +47,10 @@
+@@ -47,16 +47,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
__this_cpu_inc(*sem->read_count);
if (unlikely(!rcu_sync_is_idle(&sem->rss)))
__percpu_down_read(sem, false); /* Unconditional memory barrier */
preempt_enable();
}
-@@ -83,13 +77,9 @@
+@@ -83,13 +77,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
return ret;
}
/*
* Same as in percpu_down_read().
*/
-@@ -102,12 +92,6 @@
+@@ -102,12 +92,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem
rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/pid.h linux-4.14/include/linux/pid.h
---- linux-4.14.orig/include/linux/pid.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/pid.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/percpu.h b/include/linux/percpu.h
+index 296bbe49d5d1..4414796e3941 100644
+--- a/include/linux/percpu.h
++++ b/include/linux/percpu.h
+@@ -19,6 +19,35 @@
+ #define PERCPU_MODULE_RESERVE 0
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++#define get_local_var(var) (*({ \
++ migrate_disable(); \
++ this_cpu_ptr(&var); }))
++
++#define put_local_var(var) do { \
++ (void)&(var); \
++ migrate_enable(); \
++} while (0)
++
++# define get_local_ptr(var) ({ \
++ migrate_disable(); \
++ this_cpu_ptr(var); })
++
++# define put_local_ptr(var) do { \
++ (void)(var); \
++ migrate_enable(); \
++} while (0)
++
++#else
++
++#define get_local_var(var) get_cpu_var(var)
++#define put_local_var(var) put_cpu_var(var)
++#define get_local_ptr(var) get_cpu_ptr(var)
++#define put_local_ptr(var) put_cpu_ptr(var)
++
++#endif
++
+ /* minimum unit size, also is the maximum supported allocation size */
+ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
+
+diff --git a/include/linux/pid.h b/include/linux/pid.h
+index dfd684ce0787..bc954a99aa70 100644
+--- a/include/linux/pid.h
++++ b/include/linux/pid.h
@@ -3,6 +3,7 @@
#define _LINUX_PID_H
enum pid_type
{
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/posix-timers.h linux-4.14/include/linux/posix-timers.h
---- linux-4.14.orig/include/linux/posix-timers.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/posix-timers.h 2018-09-05 11:05:07.000000000 +0200
-@@ -101,8 +101,8 @@
+diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
+index 672c4f32311e..4754eb4298b1 100644
+--- a/include/linux/posix-timers.h
++++ b/include/linux/posix-timers.h
+@@ -101,8 +101,8 @@ struct k_itimer {
struct {
struct alarm alarmtimer;
} alarm;
};
void run_posix_cpu_timers(struct task_struct *task);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/preempt.h linux-4.14/include/linux/preempt.h
---- linux-4.14.orig/include/linux/preempt.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/preempt.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 5bd3f151da78..6728662a81e8 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
@@ -51,7 +51,11 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
/*
* The preempt_count offset needed for things like:
-@@ -167,6 +180,20 @@
+@@ -167,6 +180,20 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
-@@ -175,16 +202,53 @@
+@@ -175,16 +202,53 @@ do { \
barrier(); \
} while (0)
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-+#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+}
+
+#else
-+#define migrate_disable() barrier()
-+#define migrate_enable() barrier()
++#define migrate_disable() preempt_disable()
++#define migrate_enable() preempt_enable()
+static inline int __migrate_disabled(struct task_struct *p)
+{
+ return 0;
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
-@@ -206,6 +270,13 @@
+@@ -206,6 +270,13 @@ do { \
__preempt_schedule(); \
} while (0)
#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
-@@ -213,6 +284,12 @@
+@@ -213,6 +284,12 @@ do { \
preempt_count_dec(); \
} while (0)
#define preempt_enable_notrace() \
do { \
barrier(); \
-@@ -251,8 +328,16 @@
+@@ -251,8 +328,16 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
#endif /* CONFIG_PREEMPT_COUNT */
#ifdef MODULE
-@@ -271,10 +356,22 @@
+@@ -271,10 +356,22 @@ do { \
} while (0)
#define preempt_fold_need_resched() \
do { \
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/printk.h linux-4.14/include/linux/printk.h
---- linux-4.14.orig/include/linux/printk.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/printk.h 2018-09-05 11:05:07.000000000 +0200
-@@ -142,9 +142,11 @@
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 6106befed756..1dba9cb7b91b 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -142,9 +142,11 @@ struct va_format {
#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
#endif
#ifdef CONFIG_PRINTK_NMI
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/radix-tree.h linux-4.14/include/linux/radix-tree.h
---- linux-4.14.orig/include/linux/radix-tree.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/radix-tree.h 2018-09-05 11:05:07.000000000 +0200
-@@ -328,6 +328,8 @@
+diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
+index 567ebb5eaab0..9da7ea957399 100644
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -328,6 +328,8 @@ unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
unsigned long index, unsigned int tag);
-@@ -347,11 +349,6 @@
+@@ -347,11 +349,6 @@ unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
int radix_tree_split(struct radix_tree_root *, unsigned long index,
unsigned new_order);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/random.h linux-4.14/include/linux/random.h
---- linux-4.14.orig/include/linux/random.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/random.h 2018-09-05 11:05:07.000000000 +0200
-@@ -32,7 +32,7 @@
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 4024f7d9c77d..462d752a739b 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -32,7 +32,7 @@ static inline void add_latent_entropy(void) {}
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
extern int wait_for_random_bytes(void);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/rbtree_augmented.h linux-4.14/include/linux/rbtree_augmented.h
---- linux-4.14.orig/include/linux/rbtree_augmented.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/rbtree_augmented.h 2018-09-05 11:05:07.000000000 +0200
-@@ -26,6 +26,7 @@
-
- #include <linux/compiler.h>
- #include <linux/rbtree.h>
-+#include <linux/rcupdate.h>
-
- /*
- * Please note - only struct rb_augment_callbacks and the prototypes for
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/rbtree.h linux-4.14/include/linux/rbtree.h
---- linux-4.14.orig/include/linux/rbtree.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/rbtree.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
+index d574361943ea..0a9f442409b9 100644
+--- a/include/linux/rbtree.h
++++ b/include/linux/rbtree.h
@@ -31,7 +31,7 @@
#include <linux/kernel.h>
struct rb_node {
unsigned long __rb_parent_color;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/rbtree_latch.h linux-4.14/include/linux/rbtree_latch.h
---- linux-4.14.orig/include/linux/rbtree_latch.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/rbtree_latch.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
+index 6bfd2b581f75..af8a61be2d8d 100644
+--- a/include/linux/rbtree_augmented.h
++++ b/include/linux/rbtree_augmented.h
+@@ -26,6 +26,7 @@
+
+ #include <linux/compiler.h>
+ #include <linux/rbtree.h>
++#include <linux/rcupdate.h>
+
+ /*
+ * Please note - only struct rb_augment_callbacks and the prototypes for
+diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
+index ece43e882b56..7d012faa509a 100644
+--- a/include/linux/rbtree_latch.h
++++ b/include/linux/rbtree_latch.h
@@ -35,6 +35,7 @@
#include <linux/rbtree.h>
struct latch_tree_node {
struct rb_node node[2];
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/rcu_assign_pointer.h linux-4.14/include/linux/rcu_assign_pointer.h
---- linux-4.14.orig/include/linux/rcu_assign_pointer.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/include/linux/rcu_assign_pointer.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/rcu_assign_pointer.h b/include/linux/rcu_assign_pointer.h
+new file mode 100644
+index 000000000000..7066962a4379
+--- /dev/null
++++ b/include/linux/rcu_assign_pointer.h
@@ -0,0 +1,54 @@
+#ifndef __LINUX_RCU_ASSIGN_POINTER_H__
+#define __LINUX_RCU_ASSIGN_POINTER_H__
+})
+
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/rcupdate.h linux-4.14/include/linux/rcupdate.h
---- linux-4.14.orig/include/linux/rcupdate.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/rcupdate.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index a6ddc42f87a5..70996e134818 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
@@ -42,6 +42,7 @@
#include <linux/lockdep.h>
#include <asm/processor.h>
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
-@@ -55,7 +56,11 @@
+@@ -55,7 +56,11 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
#define call_rcu call_rcu_sched
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
void synchronize_sched(void);
void rcu_barrier_tasks(void);
-@@ -74,6 +79,11 @@
+@@ -74,6 +79,11 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
#else /* #ifdef CONFIG_PREEMPT_RCU */
-@@ -99,6 +109,8 @@
+@@ -99,6 +109,8 @@ static inline int rcu_preempt_depth(void)
return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* Internal to kernel */
-@@ -255,7 +267,14 @@
+@@ -255,7 +267,14 @@ extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
int rcu_read_lock_sched_held(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-@@ -365,54 +384,6 @@
+@@ -364,54 +383,6 @@ static inline void rcu_preempt_sleep_check(void) { }
+ ((typeof(*p) __force __kernel *)(________p1)); \
})
- /**
+-/**
- * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
- * @v: The value to statically initialize with.
- */
- _r_a_p__v; \
-})
-
--/**
+ /**
* rcu_swap_protected() - swap an RCU and a regular pointer
* @rcu_ptr: RCU pointer
- * @ptr: regular pointer
-@@ -707,10 +678,14 @@
+@@ -707,10 +678,14 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
}
/*
-@@ -720,10 +695,14 @@
+@@ -720,10 +695,14 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
local_bh_enable();
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/rcutree.h linux-4.14/include/linux/rcutree.h
---- linux-4.14.orig/include/linux/rcutree.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/rcutree.h 2018-09-05 11:05:07.000000000 +0200
-@@ -44,7 +44,11 @@
+diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
+index 37d6fd3b7ff8..a082fde7d6bc 100644
+--- a/include/linux/rcutree.h
++++ b/include/linux/rcutree.h
+@@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch(false);
}
void synchronize_sched_expedited(void);
void synchronize_rcu_expedited(void);
-@@ -72,7 +76,11 @@
+@@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_expedited(void)
}
void rcu_barrier(void);
void rcu_barrier_sched(void);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/ring_buffer.h linux-4.14/include/linux/ring_buffer.h
---- linux-4.14.orig/include/linux/ring_buffer.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/ring_buffer.h 2018-09-05 11:05:07.000000000 +0200
-@@ -34,10 +34,12 @@
+diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
+index 5caa062a02b2..abce5f5325e1 100644
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -34,10 +34,12 @@ struct ring_buffer_event {
* array[0] = time delta (28 .. 59)
* size = 8 bytes
*
*
* <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
* Data record
-@@ -54,12 +56,12 @@
+@@ -54,12 +56,12 @@ enum ring_buffer_type {
RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
RINGBUF_TYPE_PADDING,
RINGBUF_TYPE_TIME_EXTEND,
/*
* ring_buffer_discard_commit will remove an event that has not
-@@ -115,6 +117,9 @@
+@@ -115,6 +117,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
int ring_buffer_write(struct ring_buffer *buffer,
unsigned long length, void *data);
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
-@@ -179,6 +184,8 @@
+@@ -179,6 +184,8 @@ void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
int cpu, u64 *ts);
void ring_buffer_set_clock(struct ring_buffer *buffer,
u64 (*clock)(void));
size_t ring_buffer_page_len(void *page);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/rtmutex.h linux-4.14/include/linux/rtmutex.h
---- linux-4.14.orig/include/linux/rtmutex.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/rtmutex.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index 6fd615a0eea9..138bd1e183e0 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
@@ -14,11 +14,15 @@
#define __LINUX_RT_MUTEX_H
/**
* The rt_mutex structure
*
-@@ -31,8 +35,8 @@
+@@ -31,8 +35,8 @@ struct rt_mutex {
raw_spinlock_t wait_lock;
struct rb_root_cached waiters;
struct task_struct *owner;
const char *name, *file;
int line;
void *magic;
-@@ -82,16 +86,23 @@
+@@ -82,16 +86,23 @@ do { \
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
#endif
/**
* rt_mutex_is_locked - is the mutex locked
* @lock: the mutex to be queried
-@@ -108,6 +119,7 @@
+@@ -115,6 +126,7 @@ extern void rt_mutex_lock(struct rt_mutex *lock);
+ #endif
- extern void rt_mutex_lock(struct rt_mutex *lock);
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/rwlock_rt.h linux-4.14/include/linux/rwlock_rt.h
---- linux-4.14.orig/include/linux/rwlock_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/include/linux/rwlock_rt.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
+new file mode 100644
+index 000000000000..a9c4c2ac4d1f
+--- /dev/null
++++ b/include/linux/rwlock_rt.h
@@ -0,0 +1,119 @@
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+void __write_rt_unlock(struct rt_rw_lock *lock);
+
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/rwlock_types.h linux-4.14/include/linux/rwlock_types.h
---- linux-4.14.orig/include/linux/rwlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/rwlock_types.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
+index cc0072e93e36..5317cd957292 100644
+--- a/include/linux/rwlock_types.h
++++ b/include/linux/rwlock_types.h
@@ -1,6 +1,10 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
/*
* include/linux/rwlock_types.h - generic rwlock type definitions
* and initializers
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/rwlock_types_rt.h linux-4.14/include/linux/rwlock_types_rt.h
---- linux-4.14.orig/include/linux/rwlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/include/linux/rwlock_types_rt.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
+new file mode 100644
+index 000000000000..546a1f8f1274
+--- /dev/null
++++ b/include/linux/rwlock_types_rt.h
@@ -0,0 +1,55 @@
+#ifndef __LINUX_RWLOCK_TYPES_RT_H
+#define __LINUX_RWLOCK_TYPES_RT_H
+ } while (0)
+
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/rwsem.h linux-4.14/include/linux/rwsem.h
---- linux-4.14.orig/include/linux/rwsem.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/rwsem.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
+index c427ffaa4904..513df11a364e 100644
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
@@ -20,6 +20,10 @@
#include <linux/osq_lock.h>
#endif
struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-@@ -114,6 +118,13 @@
+@@ -114,6 +118,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
return !list_empty(&sem->wait_list);
}
/*
* lock for reading
*/
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/rwsem_rt.h linux-4.14/include/linux/rwsem_rt.h
---- linux-4.14.orig/include/linux/rwsem_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/include/linux/rwsem_rt.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
+new file mode 100644
+index 000000000000..2ffbf093ae92
+--- /dev/null
++++ b/include/linux/rwsem_rt.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_RWSEM_RT_H
+#define _LINUX_RWSEM_RT_H
+extern void __downgrade_write(struct rw_semaphore *sem);
+
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/sched/mm.h linux-4.14/include/linux/sched/mm.h
---- linux-4.14.orig/include/linux/sched/mm.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/sched/mm.h 2018-09-05 11:05:07.000000000 +0200
-@@ -43,6 +43,17 @@
- __mmdrop(mm);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+extern void __mmdrop_delayed(struct rcu_head *rhp);
-+static inline void mmdrop_delayed(struct mm_struct *mm)
-+{
-+ if (atomic_dec_and_test(&mm->mm_count))
-+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
-+}
-+#else
-+# define mmdrop_delayed(mm) mmdrop(mm)
-+#endif
-+
- static inline void mmdrop_async_fn(struct work_struct *work)
- {
- struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/sched/task.h linux-4.14/include/linux/sched/task.h
---- linux-4.14.orig/include/linux/sched/task.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/sched/task.h 2018-09-05 11:05:07.000000000 +0200
-@@ -88,6 +88,15 @@
-
- #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+extern void __put_task_struct_cb(struct rcu_head *rhp);
-+
-+static inline void put_task_struct(struct task_struct *t)
-+{
-+ if (atomic_dec_and_test(&t->usage))
-+ call_rcu(&t->put_rcu, __put_task_struct_cb);
-+}
-+#else
- extern void __put_task_struct(struct task_struct *t);
-
- static inline void put_task_struct(struct task_struct *t)
-@@ -95,7 +104,7 @@
- if (atomic_dec_and_test(&t->usage))
- __put_task_struct(t);
- }
--
-+#endif
- struct task_struct *task_rcu_dereference(struct task_struct **ptask);
-
- #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/sched/wake_q.h linux-4.14/include/linux/sched/wake_q.h
---- linux-4.14.orig/include/linux/sched/wake_q.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/sched/wake_q.h 2018-09-05 11:05:07.000000000 +0200
-@@ -47,8 +47,29 @@
- head->lastp = &head->first;
- }
-
--extern void wake_q_add(struct wake_q_head *head,
-- struct task_struct *task);
--extern void wake_up_q(struct wake_q_head *head);
-+extern void __wake_q_add(struct wake_q_head *head,
-+ struct task_struct *task, bool sleeper);
-+static inline void wake_q_add(struct wake_q_head *head,
-+ struct task_struct *task)
-+{
-+ __wake_q_add(head, task, false);
-+}
-+
-+static inline void wake_q_add_sleeper(struct wake_q_head *head,
-+ struct task_struct *task)
-+{
-+ __wake_q_add(head, task, true);
-+}
-+
-+extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
-+static inline void wake_up_q(struct wake_q_head *head)
-+{
-+ __wake_up_q(head, false);
-+}
-+
-+static inline void wake_up_q_sleeper(struct wake_q_head *head)
-+{
-+ __wake_up_q(head, true);
-+}
-
- #endif /* _LINUX_SCHED_WAKE_Q_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/sched.h linux-4.14/include/linux/sched.h
---- linux-4.14.orig/include/linux/sched.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/sched.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index e04919aa8201..a6ffb552be01 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
@@ -27,6 +27,7 @@
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
-@@ -93,7 +94,6 @@
+@@ -93,7 +94,6 @@ struct task_group;
/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
/* get_task_state(): */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
-@@ -101,12 +101,8 @@
+@@ -101,12 +101,8 @@ struct task_group;
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)
#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
(task->state & TASK_NOLOAD) == 0)
-@@ -134,6 +130,11 @@
+@@ -134,6 +130,11 @@ struct task_group;
smp_store_mb(current->state, (state_value)); \
} while (0)
#define set_special_state(state_value) \
do { \
unsigned long flags; /* may shadow */ \
-@@ -187,6 +188,9 @@
+@@ -187,6 +188,9 @@ struct task_group;
#define set_current_state(state_value) \
smp_store_mb(current->state, (state_value))
/*
* set_special_state() should be used for those states when the blocking task
* can not use the regular condition based wait-loop. In that case we must
-@@ -566,6 +570,8 @@
+@@ -566,6 +570,8 @@ struct task_struct {
#endif
/* -1 unrunnable, 0 runnable, >0 stopped: */
volatile long state;
/*
* This begins the randomizable portion of task_struct. Only
-@@ -618,7 +624,25 @@
+@@ -618,7 +624,25 @@ struct task_struct {
unsigned int policy;
int nr_cpus_allowed;
- cpumask_t cpus_allowed;
+ const cpumask_t *cpus_ptr;
+ cpumask_t cpus_mask;
-+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ int migrate_disable;
+ int migrate_disable_update;
+ int pinned_on_cpu;
+# endif
+
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
-+ int migrate_disable;
+# ifdef CONFIG_SCHED_DEBUG
++ int migrate_disable;
+ int migrate_disable_atomic;
+# endif
+#endif
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
-@@ -777,6 +801,9 @@
+@@ -777,6 +801,9 @@ struct task_struct {
#ifdef CONFIG_POSIX_TIMERS
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
#endif
/* Process credentials: */
-@@ -820,11 +847,17 @@
+@@ -820,11 +847,17 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct *sighand;
unsigned long sas_ss_sp;
size_t sas_ss_size;
unsigned int sas_ss_flags;
-@@ -849,6 +882,7 @@
+@@ -849,6 +882,7 @@ struct task_struct {
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task: */
-@@ -1116,9 +1150,23 @@
+@@ -1116,8 +1150,22 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
+#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
- #endif
++#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int xmit_recursion;
-+#endif
+ #endif
int pagefault_disabled;
#ifdef CONFIG_MMU
- struct task_struct *oom_reaper_list;
-@@ -1332,6 +1380,7 @@
+@@ -1332,6 +1380,7 @@ extern struct pid *cad_pid;
/*
* Per process flags
*/
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
-@@ -1355,7 +1404,7 @@
+@@ -1355,7 +1404,7 @@ extern struct pid *cad_pid;
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
-@@ -1535,6 +1584,7 @@
+@@ -1535,6 +1584,7 @@ extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *n
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
-@@ -1611,6 +1661,89 @@
+@@ -1611,6 +1661,89 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
-@@ -1636,12 +1769,16 @@
+@@ -1636,12 +1769,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
__cond_resched_lock(lock); \
})
static inline void cond_resched_rcu(void)
{
-@@ -1671,6 +1808,23 @@
+@@ -1671,6 +1808,23 @@ static __always_inline bool need_resched(void)
return unlikely(tif_need_resched());
}
/*
* Wrappers for p->thread_info->cpu access. No-op on UP.
*/
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/seqlock.h linux-4.14/include/linux/seqlock.h
---- linux-4.14.orig/include/linux/seqlock.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/seqlock.h 2018-09-05 11:05:07.000000000 +0200
-@@ -221,20 +221,30 @@
+diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
+index 3d49b91b674d..d8f2fa8f500c 100644
+--- a/include/linux/sched/mm.h
++++ b/include/linux/sched/mm.h
+@@ -43,6 +43,17 @@ static inline void mmdrop(struct mm_struct *mm)
+ __mmdrop(mm);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __mmdrop_delayed(struct rcu_head *rhp);
++static inline void mmdrop_delayed(struct mm_struct *mm)
++{
++ if (atomic_dec_and_test(&mm->mm_count))
++ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
++}
++#else
++# define mmdrop_delayed(mm) mmdrop(mm)
++#endif
++
+ static inline void mmdrop_async_fn(struct work_struct *work)
+ {
+ struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index a74ec619ac51..8e7f741370c5 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -88,6 +88,15 @@ extern void sched_exec(void);
+
+ #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __put_task_struct_cb(struct rcu_head *rhp);
++
++static inline void put_task_struct(struct task_struct *t)
++{
++ if (atomic_dec_and_test(&t->usage))
++ call_rcu(&t->put_rcu, __put_task_struct_cb);
++}
++#else
+ extern void __put_task_struct(struct task_struct *t);
+
+ static inline void put_task_struct(struct task_struct *t)
+@@ -95,7 +104,7 @@ static inline void put_task_struct(struct task_struct *t)
+ if (atomic_dec_and_test(&t->usage))
+ __put_task_struct(t);
+ }
+-
++#endif
+ struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+
+ #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
+index 10b19a192b2d..ce3ccff3d9d8 100644
+--- a/include/linux/sched/wake_q.h
++++ b/include/linux/sched/wake_q.h
+@@ -47,8 +47,29 @@ static inline void wake_q_init(struct wake_q_head *head)
+ head->lastp = &head->first;
+ }
+
+-extern void wake_q_add(struct wake_q_head *head,
+- struct task_struct *task);
+-extern void wake_up_q(struct wake_q_head *head);
++extern void __wake_q_add(struct wake_q_head *head,
++ struct task_struct *task, bool sleeper);
++static inline void wake_q_add(struct wake_q_head *head,
++ struct task_struct *task)
++{
++ __wake_q_add(head, task, false);
++}
++
++static inline void wake_q_add_sleeper(struct wake_q_head *head,
++ struct task_struct *task)
++{
++ __wake_q_add(head, task, true);
++}
++
++extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
++static inline void wake_up_q(struct wake_q_head *head)
++{
++ __wake_up_q(head, false);
++}
++
++static inline void wake_up_q_sleeper(struct wake_q_head *head)
++{
++ __wake_up_q(head, true);
++}
+
+ #endif /* _LINUX_SCHED_WAKE_Q_H */
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index f189a8a3bbb8..107079a2d7ed 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -221,20 +221,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
return __read_seqcount_retry(s, start);
}
/**
* raw_write_seqcount_barrier - do a seq write barrier
* @s: pointer to seqcount_t
-@@ -429,10 +439,32 @@
+@@ -429,10 +439,33 @@ typedef struct {
/*
* Read side functions for starting and finalizing a read side section.
*/
+ spin_unlock_wait(&sl->lock);
+ goto repeat;
+ }
++ smp_rmb();
+ return ret;
+}
+#endif
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
-@@ -447,36 +479,45 @@
+@@ -447,36 +480,45 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
spin_unlock_irq(&sl->lock);
}
-@@ -485,7 +526,7 @@
+@@ -485,7 +527,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
return flags;
}
-@@ -495,7 +536,7 @@
+@@ -495,7 +537,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
spin_unlock_irqrestore(&sl->lock, flags);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/signal.h linux-4.14/include/linux/signal.h
---- linux-4.14.orig/include/linux/signal.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/signal.h 2018-09-05 11:05:07.000000000 +0200
-@@ -243,6 +243,7 @@
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index 042968dd98f0..a7d20f85cc0e 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -243,6 +243,7 @@ static inline void init_sigpending(struct sigpending *sig)
}
extern void flush_sigqueue(struct sigpending *queue);
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/skbuff.h linux-4.14/include/linux/skbuff.h
---- linux-4.14.orig/include/linux/skbuff.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/skbuff.h 2018-09-05 11:05:07.000000000 +0200
-@@ -287,6 +287,7 @@
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index f64e88444082..07576a062ac0 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -287,6 +287,7 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
};
struct sk_buff;
-@@ -1667,6 +1668,12 @@
+@@ -1672,6 +1673,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
__skb_queue_head_init(list);
}
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/smp.h linux-4.14/include/linux/smp.h
---- linux-4.14.orig/include/linux/smp.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/smp.h 2018-09-05 11:05:07.000000000 +0200
-@@ -202,6 +202,9 @@
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index 9fb239e12b82..5801e516ba63 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -202,6 +202,9 @@ static inline int get_boot_cpu_id(void)
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/spinlock_api_smp.h linux-4.14/include/linux/spinlock_api_smp.h
---- linux-4.14.orig/include/linux/spinlock_api_smp.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/spinlock_api_smp.h 2018-09-05 11:05:07.000000000 +0200
-@@ -187,6 +187,8 @@
- return 0;
- }
-
--#include <linux/rwlock_api_smp.h>
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# include <linux/rwlock_api_smp.h>
-+#endif
-
- #endif /* __LINUX_SPINLOCK_API_SMP_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/spinlock.h linux-4.14/include/linux/spinlock.h
---- linux-4.14.orig/include/linux/spinlock.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/spinlock.h 2018-09-05 11:05:07.000000000 +0200
-@@ -286,7 +286,11 @@
+diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
+index 341e1a12bfc7..7c8f0a985b9e 100644
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -286,7 +286,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
/* Include rwlock functions */
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
-@@ -297,6 +301,10 @@
+@@ -297,6 +301,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# include <linux/spinlock_api_up.h>
#endif
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
-@@ -421,4 +429,6 @@
+@@ -421,4 +429,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* __LINUX_SPINLOCK_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/spinlock_rt.h linux-4.14/include/linux/spinlock_rt.h
---- linux-4.14.orig/include/linux/spinlock_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/include/linux/spinlock_rt.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
+index 42dfab89e740..29d99ae5a8ab 100644
+--- a/include/linux/spinlock_api_smp.h
++++ b/include/linux/spinlock_api_smp.h
+@@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+ return 0;
+ }
+
+-#include <linux/rwlock_api_smp.h>
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_api_smp.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_API_SMP_H */
+diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
+new file mode 100644
+index 000000000000..c95e1f5145ac
+--- /dev/null
++++ b/include/linux/spinlock_rt.h
@@ -0,0 +1,159 @@
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+ atomic_dec_and_spin_lock(atomic, lock)
+
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/spinlock_types.h linux-4.14/include/linux/spinlock_types.h
---- linux-4.14.orig/include/linux/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
+index 73548eb13a5d..10bac715ea96 100644
+--- a/include/linux/spinlock_types.h
++++ b/include/linux/spinlock_types.h
@@ -9,80 +9,15 @@
* Released under the General Public License (GPL).
*/
+# include <linux/rwlock_types.h>
#else
-# define SPIN_DEBUG_INIT(lockname)
--#endif
--
++# include <linux/rtmutex.h>
++# include <linux/spinlock_types_rt.h>
++# include <linux/rwlock_types_rt.h>
+ #endif
+
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- { \
- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- u8 __padding[LOCK_PADSIZE];
- struct lockdep_map dep_map;
- };
-+# include <linux/rtmutex.h>
-+# include <linux/spinlock_types_rt.h>
-+# include <linux/rwlock_types_rt.h>
- #endif
+-#endif
- };
-} spinlock_t;
-
-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-
-#include <linux/rwlock_types.h>
-
+-
#endif /* __LINUX_SPINLOCK_TYPES_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/spinlock_types_nort.h linux-4.14/include/linux/spinlock_types_nort.h
---- linux-4.14.orig/include/linux/spinlock_types_nort.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/include/linux/spinlock_types_nort.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
+new file mode 100644
+index 000000000000..f1dac1fb1d6a
+--- /dev/null
++++ b/include/linux/spinlock_types_nort.h
@@ -0,0 +1,33 @@
+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
+#define __LINUX_SPINLOCK_TYPES_NORT_H
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/spinlock_types_raw.h linux-4.14/include/linux/spinlock_types_raw.h
---- linux-4.14.orig/include/linux/spinlock_types_raw.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/include/linux/spinlock_types_raw.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
+new file mode 100644
+index 000000000000..03235b475b77
+--- /dev/null
++++ b/include/linux/spinlock_types_raw.h
@@ -0,0 +1,58 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/spinlock_types_rt.h linux-4.14/include/linux/spinlock_types_rt.h
---- linux-4.14.orig/include/linux/spinlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/include/linux/spinlock_types_rt.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
+new file mode 100644
+index 000000000000..3e3d8c5f7a9a
+--- /dev/null
++++ b/include/linux/spinlock_types_rt.h
@@ -0,0 +1,48 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RT_H
+#define __LINUX_SPINLOCK_TYPES_RT_H
+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/spinlock_types_up.h linux-4.14/include/linux/spinlock_types_up.h
---- linux-4.14.orig/include/linux/spinlock_types_up.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/spinlock_types_up.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
+index c09b6407ae1b..b0243ba07fb7 100644
+--- a/include/linux/spinlock_types_up.h
++++ b/include/linux/spinlock_types_up.h
@@ -1,10 +1,6 @@
#ifndef __LINUX_SPINLOCK_TYPES_UP_H
#define __LINUX_SPINLOCK_TYPES_UP_H
/*
* include/linux/spinlock_types_up.h - spinlock type definitions for UP
*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/srcutiny.h linux-4.14/include/linux/srcutiny.h
---- linux-4.14.orig/include/linux/srcutiny.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/srcutiny.h 2018-09-05 11:05:07.000000000 +0200
-@@ -43,7 +43,7 @@
+diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
+index 261471f407a5..f41d2fb09f87 100644
+--- a/include/linux/srcutiny.h
++++ b/include/linux/srcutiny.h
+@@ -43,7 +43,7 @@ struct srcu_struct {
void srcu_drive_gp(struct work_struct *wp);
{ \
.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \
.srcu_cb_tail = &name.srcu_cb_head, \
-@@ -56,9 +56,9 @@
+@@ -56,9 +56,9 @@ void srcu_drive_gp(struct work_struct *wp);
* Tree SRCU, which needs some per-CPU data.
*/
#define DEFINE_SRCU(name) \
void synchronize_srcu(struct srcu_struct *sp);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/srcutree.h linux-4.14/include/linux/srcutree.h
---- linux-4.14.orig/include/linux/srcutree.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/srcutree.h 2018-09-05 11:05:07.000000000 +0200
-@@ -40,7 +40,7 @@
+diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
+index a949f4f9e4d7..745d4ca4dd50 100644
+--- a/include/linux/srcutree.h
++++ b/include/linux/srcutree.h
+@@ -40,7 +40,7 @@ struct srcu_data {
unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
/* Update-side state. */
struct rcu_segcblist srcu_cblist; /* List of callbacks.*/
unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
-@@ -58,7 +58,7 @@
+@@ -58,7 +58,7 @@ struct srcu_data {
* Node in SRCU combining tree, similar in function to rcu_data.
*/
struct srcu_node {
unsigned long srcu_have_cbs[4]; /* GP seq for children */
/* having CBs, but only */
/* is > ->srcu_gq_seq. */
-@@ -78,7 +78,7 @@
+@@ -78,7 +78,7 @@ struct srcu_struct {
struct srcu_node *level[RCU_NUM_LVLS + 1];
/* First node at each level. */
struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
struct mutex srcu_gp_mutex; /* Serialize GP work. */
unsigned int srcu_idx; /* Current rdr array element. */
unsigned long srcu_gp_seq; /* Grace-period seq #. */
-@@ -104,10 +104,10 @@
+@@ -104,10 +104,10 @@ struct srcu_struct {
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
.srcu_gp_seq_needed = 0 - 1, \
__SRCU_DEP_MAP_INIT(name) \
}
-@@ -133,7 +133,7 @@
+@@ -133,7 +133,7 @@ struct srcu_struct {
*/
#define __DEFINE_SRCU(name, is_static) \
static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data);\
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/suspend.h linux-4.14/include/linux/suspend.h
---- linux-4.14.orig/include/linux/suspend.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/suspend.h 2018-09-05 11:05:07.000000000 +0200
-@@ -196,6 +196,12 @@
+diff --git a/include/linux/suspend.h b/include/linux/suspend.h
+index 8544357d92d0..616ea66cd283 100644
+--- a/include/linux/suspend.h
++++ b/include/linux/suspend.h
+@@ -196,6 +196,12 @@ struct platform_s2idle_ops {
void (*end)(void);
};
#ifdef CONFIG_SUSPEND
extern suspend_state_t mem_sleep_current;
extern suspend_state_t mem_sleep_default;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/swait.h linux-4.14/include/linux/swait.h
---- linux-4.14.orig/include/linux/swait.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/swait.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/swait.h b/include/linux/swait.h
+index c98aaf677466..853f3e61a9f4 100644
+--- a/include/linux/swait.h
++++ b/include/linux/swait.h
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/stddef.h>
#include <asm/current.h>
/*
-@@ -147,6 +148,7 @@
+@@ -147,6 +148,7 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
extern void swake_up(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/swap.h linux-4.14/include/linux/swap.h
---- linux-4.14.orig/include/linux/swap.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/swap.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index f02fb5db8914..6c775168df67 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
@@ -12,6 +12,7 @@
#include <linux/fs.h>
#include <linux/atomic.h>
#include <asm/page.h>
struct notifier_block;
-@@ -297,7 +298,8 @@
+@@ -297,7 +298,8 @@ struct vma_swap_readahead {
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
-@@ -310,6 +312,7 @@
+@@ -310,6 +312,7 @@ extern unsigned long nr_free_pagecache_pages(void);
/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/swork.h linux-4.14/include/linux/swork.h
---- linux-4.14.orig/include/linux/swork.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/include/linux/swork.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/swork.h b/include/linux/swork.h
+new file mode 100644
+index 000000000000..f175fa9a6016
+--- /dev/null
++++ b/include/linux/swork.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/thread_info.h linux-4.14/include/linux/thread_info.h
---- linux-4.14.orig/include/linux/thread_info.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/thread_info.h 2018-09-05 11:05:07.000000000 +0200
-@@ -86,7 +86,17 @@
+diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
+index cf2862bd134a..fd05d83740df 100644
+--- a/include/linux/thread_info.h
++++ b/include/linux/thread_info.h
+@@ -86,7 +86,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/timer.h linux-4.14/include/linux/timer.h
---- linux-4.14.orig/include/linux/timer.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/linux/timer.h 2018-09-05 11:05:07.000000000 +0200
-@@ -213,7 +213,7 @@
+diff --git a/include/linux/timer.h b/include/linux/timer.h
+index e0ea1fe87572..df3085ddf662 100644
+--- a/include/linux/timer.h
++++ b/include/linux/timer.h
+@@ -213,7 +213,7 @@ extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
extern int del_timer_sync(struct timer_list *timer);
#else
# define del_timer_sync(t) del_timer(t)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/trace_events.h linux-4.14/include/linux/trace_events.h
---- linux-4.14.orig/include/linux/trace_events.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/trace_events.h 2018-09-05 11:05:07.000000000 +0200
-@@ -62,6 +62,9 @@
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 2bcb4dc6df1a..edd1e42e8a2f 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -62,6 +62,9 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
};
#define TRACE_EVENT_TYPE_MAX \
-@@ -402,11 +405,13 @@
+@@ -402,11 +405,13 @@ enum event_trigger_type {
extern int filter_match_preds(struct event_filter *filter, void *rec);
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
-@@ -426,7 +431,7 @@
+@@ -426,7 +431,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
return true;
if (eflags & EVENT_FILE_FL_PID_FILTER)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/uaccess.h linux-4.14/include/linux/uaccess.h
---- linux-4.14.orig/include/linux/uaccess.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/uaccess.h 2018-09-05 11:05:07.000000000 +0200
-@@ -185,6 +185,7 @@
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index 251e655d407f..57e8e32ef2b0 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -185,6 +185,7 @@ static __always_inline void pagefault_disabled_dec(void)
*/
static inline void pagefault_disable(void)
{
pagefault_disabled_inc();
/*
* make sure to have issued the store before a pagefault
-@@ -201,6 +202,7 @@
+@@ -201,6 +202,7 @@ static inline void pagefault_enable(void)
*/
barrier();
pagefault_disabled_dec();
}
/*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/vmstat.h linux-4.14/include/linux/vmstat.h
---- linux-4.14.orig/include/linux/vmstat.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/vmstat.h 2018-09-05 11:05:07.000000000 +0200
-@@ -33,7 +33,9 @@
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index 1e0cb72e0598..87ab0996a9b0 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -33,7 +33,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
*/
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_event(enum vm_event_item item)
-@@ -43,7 +45,9 @@
+@@ -43,7 +45,9 @@ static inline void count_vm_event(enum vm_event_item item)
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/linux/wait.h linux-4.14/include/linux/wait.h
---- linux-4.14.orig/include/linux/wait.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/linux/wait.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index 158715445ffb..3451706a3074 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
@@ -10,6 +10,7 @@
#include <asm/current.h>
typedef struct wait_queue_entry wait_queue_entry_t;
-@@ -486,8 +487,8 @@
+@@ -486,8 +487,8 @@ do { \
int __ret = 0; \
struct hrtimer_sleeper __t; \
\
if ((timeout) != KTIME_MAX) \
hrtimer_start_range_ns(&__t.timer, timeout, \
current->timer_slack_ns, \
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/net/gen_stats.h linux-4.14/include/net/gen_stats.h
---- linux-4.14.orig/include/net/gen_stats.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/net/gen_stats.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
+index 304f7aa9cc01..00d3813cef26 100644
+--- a/include/net/gen_stats.h
++++ b/include/net/gen_stats.h
@@ -6,6 +6,7 @@
#include <linux/socket.h>
#include <linux/rtnetlink.h>
struct gnet_stats_basic_cpu {
struct gnet_stats_basic_packed bstats;
-@@ -36,11 +37,11 @@
+@@ -36,11 +37,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d,
int padattr);
struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
-@@ -57,13 +58,13 @@
+@@ -57,13 +58,13 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *stats_lock,
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
struct gnet_stats_rate_est64 *sample);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/net/neighbour.h linux-4.14/include/net/neighbour.h
---- linux-4.14.orig/include/net/neighbour.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/include/net/neighbour.h 2018-09-05 11:05:07.000000000 +0200
-@@ -450,7 +450,7 @@
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index a964366a7ef5..51c854583987 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -450,7 +450,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
}
#endif
{
unsigned int seq;
unsigned int hh_len;
-@@ -474,7 +474,7 @@
+@@ -474,7 +474,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
{
if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
return neigh_hh_output(hh, skb);
-@@ -515,7 +515,7 @@
+@@ -515,7 +515,7 @@ struct neighbour_cb {
#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
const struct net_device *dev)
{
unsigned int seq;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/net/net_seq_lock.h linux-4.14/include/net/net_seq_lock.h
---- linux-4.14.orig/include/net/net_seq_lock.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/include/net/net_seq_lock.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h
+new file mode 100644
+index 000000000000..a7034298a82a
+--- /dev/null
++++ b/include/net/net_seq_lock.h
@@ -0,0 +1,15 @@
+#ifndef __NET_NET_SEQ_LOCK_H__
+#define __NET_NET_SEQ_LOCK_H__
+#endif
+
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/net/sch_generic.h linux-4.14/include/net/sch_generic.h
---- linux-4.14.orig/include/net/sch_generic.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/net/sch_generic.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index f59acacaa265..6ac7c3659973 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
@@ -10,6 +10,7 @@
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
-@@ -90,7 +91,7 @@
+@@ -90,7 +91,7 @@ struct Qdisc {
struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
unsigned long state;
struct Qdisc *next_sched;
-@@ -109,13 +110,22 @@
+@@ -109,13 +110,22 @@ static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
refcount_inc(&qdisc->refcnt);
}
if (qdisc_is_running(qdisc))
return false;
/* Variant of write_seqcount_begin() telling lockdep a trylock
-@@ -124,11 +134,16 @@
+@@ -124,11 +134,16 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
raw_write_seqcount_begin(&qdisc->running);
seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
return true;
}
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
-@@ -337,7 +352,7 @@
+@@ -337,7 +352,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
return qdisc_lock(root);
}
{
struct Qdisc *root = qdisc_root_sleeping(qdisc);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/net/xfrm.h linux-4.14/include/net/xfrm.h
---- linux-4.14.orig/include/net/xfrm.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/net/xfrm.h 2018-09-05 11:05:07.000000000 +0200
-@@ -217,7 +217,7 @@
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index db99efb2d1d0..a7b95ffbbf8b 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -217,7 +217,7 @@ struct xfrm_state {
struct xfrm_stats stats;
struct xfrm_lifetime_cur curlft;
struct xfrm_state_offload xso;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/include/trace/events/timer.h linux-4.14/include/trace/events/timer.h
---- linux-4.14.orig/include/trace/events/timer.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/include/trace/events/timer.h 2018-09-05 11:05:07.000000000 +0200
-@@ -148,7 +148,11 @@
+diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
+index c6f728037c53..a57e4ee989d6 100644
+--- a/include/trace/events/timer.h
++++ b/include/trace/events/timer.h
+@@ -148,7 +148,11 @@ DEFINE_EVENT(timer_class, timer_cancel,
{ HRTIMER_MODE_ABS, "ABS" }, \
{ HRTIMER_MODE_REL, "REL" }, \
{ HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
/**
* hrtimer_init - called when the hrtimer is initialized
-@@ -186,15 +190,16 @@
+@@ -186,15 +190,16 @@ TRACE_EVENT(hrtimer_init,
*/
TRACE_EVENT(hrtimer_start,
),
TP_fast_assign(
-@@ -202,12 +207,14 @@
+@@ -202,12 +207,14 @@ TRACE_EVENT(hrtimer_start,
__entry->function = hrtimer->function;
__entry->expires = hrtimer_get_expires(hrtimer);
__entry->softexpires = hrtimer_get_softexpires(hrtimer);
);
/**
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/init/Kconfig linux-4.14/init/Kconfig
---- linux-4.14.orig/init/Kconfig 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/init/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -744,6 +744,7 @@
+diff --git a/init/Kconfig b/init/Kconfig
+index 46075327c165..a7aff2c1a203 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -744,6 +744,7 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
default n
help
This feature lets you explicitly allocate real CPU bandwidth
-@@ -1533,6 +1534,7 @@
+@@ -1533,6 +1534,7 @@ choice
config SLAB
bool "SLAB"
select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
-@@ -1553,6 +1555,7 @@
+@@ -1553,6 +1555,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
help
SLOB replaces the stock allocator with a drastically simpler
allocator. SLOB is generally more space efficient but
-@@ -1594,7 +1597,7 @@
+@@ -1594,7 +1597,7 @@ config SLAB_FREELIST_HARDENED
config SLUB_CPU_PARTIAL
default y
bool "SLUB per cpu partial cache"
help
Per cpu partial caches accellerate objects allocation and freeing
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/init/main.c linux-4.14/init/main.c
---- linux-4.14.orig/init/main.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/init/main.c 2018-09-05 11:05:07.000000000 +0200
-@@ -543,6 +543,7 @@
+diff --git a/init/Makefile b/init/Makefile
+index 1dbb23787290..eabf3f1b14be 100644
+--- a/init/Makefile
++++ b/init/Makefile
+@@ -36,4 +36,4 @@ silent_chk_compile.h = :
+ include/generated/compile.h: FORCE
+ @$($(quiet)chk_compile.h)
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
+- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
+diff --git a/init/main.c b/init/main.c
+index c4a45145e102..c86f3d3b9a72 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -543,6 +543,7 @@ asmlinkage __visible void __init start_kernel(void)
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
boot_cpu_hotplug_init();
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/init/Makefile linux-4.14/init/Makefile
---- linux-4.14.orig/init/Makefile 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/init/Makefile 2018-09-05 11:05:07.000000000 +0200
-@@ -36,4 +36,4 @@
- include/generated/compile.h: FORCE
- @$($(quiet)chk_compile.h)
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
-- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
-+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/cgroup/cgroup.c linux-4.14/kernel/cgroup/cgroup.c
---- linux-4.14.orig/kernel/cgroup/cgroup.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/cgroup/cgroup.c 2018-09-05 11:05:07.000000000 +0200
-@@ -4508,10 +4508,10 @@
+diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
+index 84d882f3e299..af27c4000812 100644
+--- a/kernel/Kconfig.locks
++++ b/kernel/Kconfig.locks
+@@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW
+
+ config MUTEX_SPIN_ON_OWNER
+ def_bool y
+- depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW
++ depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
+
+ config RWSEM_SPIN_ON_OWNER
+ def_bool y
+- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
++ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
+
+ config LOCK_SPIN_ON_OWNER
+ def_bool y
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index 3f9c97419f02..11dbe26a8279 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -1,3 +1,16 @@
++config PREEMPT
++ bool
++ select PREEMPT_COUNT
++
++config PREEMPT_RT_BASE
++ bool
++ select PREEMPT
++
++config HAVE_PREEMPT_LAZY
++ bool
++
++config PREEMPT_LAZY
++ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
+
+ choice
+ prompt "Preemption Model"
+@@ -33,9 +46,9 @@ config PREEMPT_VOLUNTARY
+
+ Select this if you are building a kernel for a desktop system.
+
+-config PREEMPT
++config PREEMPT__LL
+ bool "Preemptible Kernel (Low-Latency Desktop)"
+- select PREEMPT_COUNT
++ select PREEMPT
+ select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
+ help
+ This option reduces the latency of the kernel by making
+@@ -52,6 +65,22 @@ config PREEMPT
+ embedded system with latency requirements in the milliseconds
+ range.
+
++config PREEMPT_RTB
++ bool "Preemptible Kernel (Basic RT)"
++ select PREEMPT_RT_BASE
++ help
++ This option is basically the same as (Low-Latency Desktop) but
++ enables changes which are preliminary for the full preemptible
++ RT kernel.
++
++config PREEMPT_RT_FULL
++ bool "Fully Preemptible Kernel (RT)"
++ depends on IRQ_FORCED_THREADING
++ select PREEMPT_RT_BASE
++ select PREEMPT_RCU
++ help
++ All and everything
++
+ endchoice
+
+ config PREEMPT_COUNT
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 76c0ef2cb509..cfa3505f2b3b 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -4508,10 +4508,10 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -4562,8 +4562,8 @@
+@@ -4562,8 +4562,8 @@ static void css_release(struct percpu_ref *ref)
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5269,6 +5269,7 @@
+@@ -5269,6 +5269,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
return 0;
}
core_initcall(cgroup_wq_init);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/cgroup/cpuset.c linux-4.14/kernel/cgroup/cpuset.c
---- linux-4.14.orig/kernel/cgroup/cpuset.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/cgroup/cpuset.c 2018-09-05 11:05:07.000000000 +0200
-@@ -288,7 +288,7 @@
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 4657e2924ecb..bda2af78277a 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -288,7 +288,7 @@ static struct cpuset top_cpuset = {
*/
static DEFINE_MUTEX(cpuset_mutex);
static struct workqueue_struct *cpuset_migrate_mm_wq;
-@@ -926,9 +926,9 @@
+@@ -926,9 +926,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
continue;
rcu_read_unlock();
WARN_ON(!is_in_v2_mode() &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
-@@ -993,9 +993,9 @@
+@@ -993,9 +993,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
return retval;
/* use trialcs->cpus_allowed as a temp variable */
update_cpumasks_hier(cs, trialcs->cpus_allowed);
-@@ -1179,9 +1179,9 @@
+@@ -1179,9 +1179,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
continue;
rcu_read_unlock();
WARN_ON(!is_in_v2_mode() &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
-@@ -1249,9 +1249,9 @@
+@@ -1249,9 +1249,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
goto done;
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
-@@ -1342,9 +1342,9 @@
+@@ -1342,9 +1342,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
-@@ -1759,7 +1759,7 @@
+@@ -1759,7 +1759,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;
switch (type) {
case FILE_CPULIST:
-@@ -1778,7 +1778,7 @@
+@@ -1778,7 +1778,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
ret = -EINVAL;
}
return ret;
}
-@@ -1993,12 +1993,12 @@
+@@ -1993,12 +1993,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpuset_inc();
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
-@@ -2025,12 +2025,12 @@
+@@ -2025,12 +2025,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
}
rcu_read_unlock();
out_unlock:
mutex_unlock(&cpuset_mutex);
return 0;
-@@ -2069,7 +2069,7 @@
+@@ -2069,7 +2069,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
mutex_lock(&cpuset_mutex);
if (is_in_v2_mode()) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
-@@ -2080,7 +2080,7 @@
+@@ -2080,7 +2080,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
mutex_unlock(&cpuset_mutex);
}
-@@ -2094,7 +2094,7 @@
+@@ -2094,7 +2094,7 @@ static void cpuset_fork(struct task_struct *task)
if (task_css_is_root(task, cpuset_cgrp_id))
return;
task->mems_allowed = current->mems_allowed;
}
-@@ -2178,12 +2178,12 @@
+@@ -2178,12 +2178,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
{
bool is_empty;
/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
-@@ -2220,10 +2220,10 @@
+@@ -2220,10 +2220,10 @@ hotplug_update_tasks(struct cpuset *cs,
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
if (cpus_updated)
update_tasks_cpumask(cs);
-@@ -2316,21 +2316,21 @@
+@@ -2316,21 +2316,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
update_tasks_nodemask(&top_cpuset);
}
-@@ -2429,11 +2429,11 @@
+@@ -2429,11 +2429,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
unsigned long flags;
}
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
-@@ -2481,11 +2481,11 @@
+@@ -2481,11 +2481,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
nodemask_t mask;
unsigned long flags;
return mask;
}
-@@ -2577,14 +2577,14 @@
+@@ -2577,14 +2577,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
return true;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
return allowed;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/cpu.c linux-4.14/kernel/cpu.c
---- linux-4.14.orig/kernel/cpu.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/cpu.c 2018-09-05 11:05:07.000000000 +0200
-@@ -74,6 +74,11 @@
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index f3f389e33343..7d777b62e4eb 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -74,6 +74,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
.fail = CPUHP_INVALID,
};
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
-@@ -287,6 +292,55 @@
+@@ -287,6 +292,55 @@ static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
void cpus_read_lock(void)
-@@ -843,6 +897,9 @@
+@@ -843,6 +897,9 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
-@@ -855,11 +912,18 @@
+@@ -855,11 +912,18 @@ static int takedown_cpu(unsigned int cpu)
*/
irq_lock_sparse();
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
-@@ -878,6 +942,9 @@
+@@ -878,6 +942,9 @@ static int takedown_cpu(unsigned int cpu)
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
/* Interrupts are moved away from the dying cpu, reenable alloc/free */
irq_unlock_sparse();
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/debug/kdb/kdb_io.c linux-4.14/kernel/debug/kdb/kdb_io.c
---- linux-4.14.orig/kernel/debug/kdb/kdb_io.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/debug/kdb/kdb_io.c 2018-09-05 11:05:07.000000000 +0200
-@@ -854,9 +854,11 @@
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index ed5d34925ad0..c0d4c24fc241 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -854,9 +854,11 @@ int kdb_printf(const char *fmt, ...)
va_list ap;
int r;
return r;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/events/core.c linux-4.14/kernel/events/core.c
---- linux-4.14.orig/kernel/events/core.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/events/core.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1065,7 +1065,7 @@
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7c394ddf1ce6..178d9c5feb62 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1065,7 +1065,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
raw_spin_lock_init(&cpuctx->hrtimer_lock);
timer->function = perf_mux_hrtimer_handler;
}
-@@ -8750,7 +8750,7 @@
+@@ -8750,7 +8750,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
if (!is_sampling_event(event))
return;
hwc->hrtimer.function = perf_swevent_hrtimer;
/*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/exit.c linux-4.14/kernel/exit.c
---- linux-4.14.orig/kernel/exit.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/exit.c 2018-09-05 11:05:07.000000000 +0200
-@@ -159,7 +159,7 @@
+diff --git a/kernel/exit.c b/kernel/exit.c
+index e3a08761eb40..26f3b352b37a 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -159,7 +159,7 @@ static void __exit_signal(struct task_struct *tsk)
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/fork.c linux-4.14/kernel/fork.c
---- linux-4.14.orig/kernel/fork.c 2018-09-05 11:03:28.000000000 +0200
-+++ linux-4.14/kernel/fork.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 6a219fea4926..bc849ac60aa6 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
@@ -40,6 +40,7 @@
#include <linux/hmm.h>
#include <linux/fs.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
-@@ -407,13 +408,24 @@
+@@ -407,13 +408,24 @@ static inline void put_signal_struct(struct signal_struct *sig)
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
cgroup_free(tsk);
task_numa_free(tsk);
security_task_free(tsk);
-@@ -424,7 +436,18 @@
+@@ -424,7 +436,18 @@ void __put_task_struct(struct task_struct *tsk)
if (!profile_handoff_task(tsk))
free_task(tsk);
}
void __init __weak arch_task_cache_init(void) { }
-@@ -563,7 +586,8 @@
+@@ -563,7 +586,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
#ifdef CONFIG_CC_STACKPROTECTOR
tsk->stack_canary = get_random_canary();
#endif
/*
* One for us, one for whoever does the "release_task()" (usually
* parent)
-@@ -575,6 +599,7 @@
+@@ -575,6 +599,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->splice_pipe = NULL;
tsk->task_frag.page = NULL;
tsk->wake_q.next = NULL;
account_kernel_stack(tsk, 1);
-@@ -915,6 +940,19 @@
+@@ -915,6 +940,19 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
static inline void __mmput(struct mm_struct *mm)
{
VM_BUG_ON(atomic_read(&mm->mm_users));
-@@ -1494,6 +1532,9 @@
+@@ -1496,6 +1534,9 @@ static void rt_mutex_init_task(struct task_struct *p)
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
tsk->cputime_expires.prof_exp = 0;
tsk->cputime_expires.virt_exp = 0;
tsk->cputime_expires.sched_exp = 0;
-@@ -1646,6 +1687,7 @@
+@@ -1648,6 +1689,7 @@ static __latent_entropy struct task_struct *copy_process(
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/futex.c linux-4.14/kernel/futex.c
---- linux-4.14.orig/kernel/futex.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/futex.c 2018-09-05 11:05:07.000000000 +0200
-@@ -936,7 +936,9 @@
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 046cd780d057..2ba7fb04a107 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -936,7 +936,9 @@ void exit_pi_state_list(struct task_struct *curr)
if (head->next != next) {
/* retain curr->pi_lock for the loop invariant */
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
put_pi_state(pi_state);
continue;
}
-@@ -1430,6 +1432,7 @@
+@@ -1430,6 +1432,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
struct task_struct *new_owner;
bool postunlock = false;
DEFINE_WAKE_Q(wake_q);
int ret = 0;
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-@@ -1491,13 +1494,13 @@
+@@ -1491,13 +1494,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
pi_state->owner = new_owner;
raw_spin_unlock(&new_owner->pi_lock);
return ret;
}
-@@ -2104,6 +2107,16 @@
+@@ -2104,6 +2107,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
-@@ -2642,10 +2655,9 @@
+@@ -2642,10 +2655,9 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
if (abs_time) {
to = &timeout;
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
-@@ -2744,9 +2756,8 @@
+@@ -2744,9 +2756,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
if (time) {
to = &timeout;
hrtimer_set_expires(&to->timer, *time);
}
-@@ -2801,7 +2812,7 @@
+@@ -2801,7 +2812,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
goto no_block;
}
/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -2816,9 +2827,18 @@
+@@ -2816,9 +2827,18 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
* lock handoff sequence.
*/
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
if (ret) {
if (ret == 1)
-@@ -2965,11 +2985,21 @@
+@@ -2965,11 +2985,21 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* observed.
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
put_pi_state(pi_state);
/*
-@@ -3127,7 +3157,7 @@
+@@ -3127,7 +3157,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -3143,10 +3173,9 @@
+@@ -3143,10 +3173,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (abs_time) {
to = &timeout;
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
-@@ -3155,7 +3184,7 @@
+@@ -3155,7 +3184,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
-@@ -3186,20 +3215,55 @@
+@@ -3186,20 +3215,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -3208,7 +3272,8 @@
+@@ -3208,7 +3272,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
ret = fixup_pi_state_owner(uaddr2, &q, current);
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
pi_state = q.pi_state;
-@@ -3219,7 +3284,7 @@
+@@ -3219,7 +3284,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
}
} else {
struct rt_mutex *pi_mutex;
-@@ -3233,7 +3298,8 @@
+@@ -3233,7 +3298,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
ret = 0;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/irq/handle.c linux-4.14/kernel/irq/handle.c
---- linux-4.14.orig/kernel/irq/handle.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/irq/handle.c 2018-09-05 11:05:07.000000000 +0200
-@@ -183,10 +183,16 @@
+diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
+index 79f987b942b8..d1dbacc29941 100644
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -183,10 +183,16 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
irqreturn_t retval;
unsigned int flags = 0;
if (!noirqdebug)
note_interrupt(desc, retval);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/irq/manage.c linux-4.14/kernel/irq/manage.c
---- linux-4.14.orig/kernel/irq/manage.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/irq/manage.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 069311541577..f82dcca81712 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
@@ -24,6 +24,7 @@
#include "internals.h"
__read_mostly bool force_irqthreads;
static int __init setup_forced_irqthreads(char *arg)
-@@ -32,6 +33,7 @@
+@@ -32,6 +33,7 @@ static int __init setup_forced_irqthreads(char *arg)
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
static void __synchronize_hardirq(struct irq_desc *desc)
-@@ -224,7 +226,12 @@
+@@ -224,7 +226,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
}
irqd_set(data, IRQD_AFFINITY_SET);
-@@ -262,10 +269,8 @@
+@@ -262,10 +269,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
-@@ -287,6 +292,35 @@
+@@ -287,6 +292,35 @@ static void irq_affinity_notify(struct work_struct *work)
 	kref_put(&notify->kref, notify->release);
}
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
-@@ -315,7 +349,12 @@
+@@ -315,7 +349,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
if (notify) {
notify->irq = irq;
 		kref_init(&notify->kref);
}
raw_spin_lock_irqsave(&desc->lock, flags);
-@@ -883,7 +922,15 @@
+@@ -883,7 +922,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
return ret;
}
-@@ -980,6 +1027,12 @@
+@@ -980,6 +1027,12 @@ static int irq_thread(void *data)
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
wake_threads_waitq(desc);
}
-@@ -1378,6 +1431,9 @@
+@@ -1378,6 +1431,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
if (irq_settings_can_autoenable(desc)) {
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
} else {
-@@ -2159,7 +2215,7 @@
+@@ -2159,7 +2215,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*
* interrupt controller has per-cpu registers.
*/
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/irq/settings.h linux-4.14/kernel/irq/settings.h
---- linux-4.14.orig/kernel/irq/settings.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/irq/settings.h 2018-09-05 11:05:07.000000000 +0200
-@@ -17,6 +17,7 @@
+diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
+index e43795cd2ccf..47e2f9e23586 100644
+--- a/kernel/irq/settings.h
++++ b/kernel/irq/settings.h
+@@ -17,6 +17,7 @@ enum {
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
_IRQ_IS_POLLED = IRQ_IS_POLLED,
_IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
-@@ -31,6 +32,7 @@
+@@ -31,6 +32,7 @@ enum {
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
#define IRQ_IS_POLLED GOT_YOU_MORON
#define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
-@@ -41,6 +43,16 @@
+@@ -41,6 +43,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
}
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_PER_CPU;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/irq/spurious.c linux-4.14/kernel/irq/spurious.c
---- linux-4.14.orig/kernel/irq/spurious.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/irq/spurious.c 2018-09-05 11:05:07.000000000 +0200
-@@ -445,6 +445,10 @@
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
+index 987d7bca4864..75347fb1dfea 100644
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -445,6 +445,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
static int __init irqfixup_setup(char *str)
{
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
-@@ -457,6 +461,10 @@
+@@ -457,6 +461,10 @@ module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/irq_work.c linux-4.14/kernel/irq_work.c
---- linux-4.14.orig/kernel/irq_work.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/irq_work.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/irq_work.c b/kernel/irq_work.c
+index bcf107ce0854..2899ba0d23d1 100644
+--- a/kernel/irq_work.c
++++ b/kernel/irq_work.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/processor.h>
-@@ -65,6 +66,8 @@
+@@ -65,6 +66,8 @@ void __weak arch_irq_work_raise(void)
*/
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(cpu));
-@@ -75,7 +78,12 @@
+@@ -75,7 +78,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
if (!irq_work_claim(work))
return false;
arch_send_call_function_single_ipi(cpu);
return true;
-@@ -86,6 +94,9 @@
+@@ -86,6 +94,9 @@ EXPORT_SYMBOL_GPL(irq_work_queue_on);
/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
/* Only queue if not already pending */
if (!irq_work_claim(work))
return false;
-@@ -93,13 +104,15 @@
+@@ -93,13 +104,15 @@ bool irq_work_queue(struct irq_work *work)
/* Queue the entry and raise the IPI if needed. */
preempt_disable();
arch_irq_work_raise();
}
-@@ -116,9 +129,8 @@
+@@ -116,9 +129,8 @@ bool irq_work_needs_cpu(void)
raised = this_cpu_ptr(&raised_list);
lazy = this_cpu_ptr(&lazy_list);
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
-@@ -132,7 +144,7 @@
+@@ -132,7 +144,7 @@ static void irq_work_run_list(struct llist_head *list)
struct irq_work *work;
struct llist_node *llnode;
if (llist_empty(list))
return;
-@@ -169,7 +181,16 @@
+@@ -169,7 +181,16 @@ static void irq_work_run_list(struct llist_head *list)
void irq_work_run(void)
{
irq_work_run_list(this_cpu_ptr(&raised_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);
-@@ -179,8 +200,17 @@
+@@ -179,8 +200,17 @@ void irq_work_tick(void)
if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
irq_work_run_list(raised);
/*
* Synchronize against the irq_work @entry, ensures the entry is not
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/Kconfig.locks linux-4.14/kernel/Kconfig.locks
---- linux-4.14.orig/kernel/Kconfig.locks 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/Kconfig.locks 2018-09-05 11:05:07.000000000 +0200
-@@ -225,11 +225,11 @@
-
- config MUTEX_SPIN_ON_OWNER
- def_bool y
-- depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW
-+ depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
-
- config RWSEM_SPIN_ON_OWNER
- def_bool y
-- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
-+ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
-
- config LOCK_SPIN_ON_OWNER
- def_bool y
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/Kconfig.preempt linux-4.14/kernel/Kconfig.preempt
---- linux-4.14.orig/kernel/Kconfig.preempt 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/Kconfig.preempt 2018-09-05 11:05:07.000000000 +0200
-@@ -1,3 +1,16 @@
-+config PREEMPT
-+ bool
-+ select PREEMPT_COUNT
-+
-+config PREEMPT_RT_BASE
-+ bool
-+ select PREEMPT
-+
-+config HAVE_PREEMPT_LAZY
-+ bool
-+
-+config PREEMPT_LAZY
-+ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
-
- choice
- prompt "Preemption Model"
-@@ -33,9 +46,9 @@
-
- Select this if you are building a kernel for a desktop system.
-
--config PREEMPT
-+config PREEMPT__LL
- bool "Preemptible Kernel (Low-Latency Desktop)"
-- select PREEMPT_COUNT
-+ select PREEMPT
- select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
- help
- This option reduces the latency of the kernel by making
-@@ -52,6 +65,22 @@
- embedded system with latency requirements in the milliseconds
- range.
-
-+config PREEMPT_RTB
-+ bool "Preemptible Kernel (Basic RT)"
-+ select PREEMPT_RT_BASE
-+ help
-+ This option is basically the same as (Low-Latency Desktop) but
-+ enables changes which are preliminary for the full preemptible
-+ RT kernel.
-+
-+config PREEMPT_RT_FULL
-+ bool "Fully Preemptible Kernel (RT)"
-+ depends on IRQ_FORCED_THREADING
-+ select PREEMPT_RT_BASE
-+ select PREEMPT_RCU
-+ help
-+ All and everything
-+
- endchoice
-
- config PREEMPT_COUNT
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/ksysfs.c linux-4.14/kernel/ksysfs.c
---- linux-4.14.orig/kernel/ksysfs.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/ksysfs.c 2018-09-05 11:05:07.000000000 +0200
-@@ -140,6 +140,15 @@
+diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
+index 46ba853656f6..9a23632b6294 100644
+--- a/kernel/ksysfs.c
++++ b/kernel/ksysfs.c
+@@ -140,6 +140,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
#endif /* CONFIG_CRASH_CORE */
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
-@@ -231,6 +240,9 @@
+@@ -230,6 +239,9 @@ static struct attribute * kernel_attrs[] = {
+ #ifndef CONFIG_TINY_RCU
&rcu_expedited_attr.attr,
&rcu_normal_attr.attr,
- #endif
++#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ &realtime_attr.attr,
-+#endif
+ #endif
NULL
};
-
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/locking/lockdep.c linux-4.14/kernel/locking/lockdep.c
---- linux-4.14.orig/kernel/locking/lockdep.c 2018-09-05 11:03:29.000000000 +0200
-+++ linux-4.14/kernel/locking/lockdep.c 2018-09-05 11:05:07.000000000 +0200
-@@ -3916,6 +3916,7 @@
- }
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * We dont accurately track softirq state in e.g.
- * hardirq contexts (such as on 4KSTACKS), so only
-@@ -3930,6 +3931,7 @@
- DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
- }
- }
-+#endif
-
- if (!debug_locks)
- print_irqtrace_events(current);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/locking/locktorture.c linux-4.14/kernel/locking/locktorture.c
---- linux-4.14.orig/kernel/locking/locktorture.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/locking/locktorture.c 2018-09-05 11:05:07.000000000 +0200
-@@ -26,7 +26,6 @@
- #include <linux/kthread.h>
- #include <linux/sched/rt.h>
- #include <linux/spinlock.h>
--#include <linux/rwlock.h>
- #include <linux/mutex.h>
- #include <linux/rwsem.h>
- #include <linux/smp.h>
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/locking/Makefile linux-4.14/kernel/locking/Makefile
---- linux-4.14.orig/kernel/locking/Makefile 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/locking/Makefile 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
+index 392c7f23af76..c0bf04b6b965 100644
+--- a/kernel/locking/Makefile
++++ b/kernel/locking/Makefile
@@ -3,7 +3,7 @@
# and is generally not a function of system call inputs.
KCOV_INSTRUMENT := n
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
-@@ -12,7 +12,11 @@
+@@ -12,7 +12,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
endif
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-@@ -25,8 +29,11 @@
+@@ -25,8 +29,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/locking/mutex-rt.c linux-4.14/kernel/locking/mutex-rt.c
---- linux-4.14.orig/kernel/locking/mutex-rt.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/kernel/locking/mutex-rt.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index d7c155048ea9..def51a27f20f 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3914,6 +3914,7 @@ static void check_flags(unsigned long flags)
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * We dont accurately track softirq state in e.g.
+ * hardirq contexts (such as on 4KSTACKS), so only
+@@ -3928,6 +3929,7 @@ static void check_flags(unsigned long flags)
+ DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+ }
+ }
++#endif
+
+ if (!debug_locks)
+ print_irqtrace_events(current);
+diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
+index 6dca260eeccf..5d01ac590d4c 100644
+--- a/kernel/locking/locktorture.c
++++ b/kernel/locking/locktorture.c
+@@ -26,7 +26,6 @@
+ #include <linux/kthread.h>
+ #include <linux/sched/rt.h>
+ #include <linux/spinlock.h>
+-#include <linux/rwlock.h>
+ #include <linux/mutex.h>
+ #include <linux/rwsem.h>
+ #include <linux/smp.h>
+diff --git a/kernel/locking/mutex-rt.c b/kernel/locking/mutex-rt.c
+new file mode 100644
+index 000000000000..4f81595c0f52
+--- /dev/null
++++ b/kernel/locking/mutex-rt.c
@@ -0,0 +1,223 @@
+/*
+ * kernel/rt.c
+ return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/locking/rtmutex.c linux-4.14/kernel/locking/rtmutex.c
---- linux-4.14.orig/kernel/locking/rtmutex.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/locking/rtmutex.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 4ad35718f123..08e233b7dc21 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
@@ -7,6 +7,11 @@
* Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
#include "rtmutex_common.h"
-@@ -135,6 +142,12 @@
+@@ -135,6 +142,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
/*
* We can speed up the acquire/release, if there's no debugging state to be
* set up.
-@@ -228,7 +241,7 @@
+@@ -228,7 +241,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
* Only use with rt_mutex_waiter_{less,equal}()
*/
#define task_to_waiter(p) \
static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
-@@ -268,6 +281,27 @@
+@@ -268,6 +281,27 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
return 1;
}
static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
-@@ -372,6 +406,14 @@
+@@ -372,6 +406,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}
/*
* Max number of times we'll walk the boosting chain:
*/
-@@ -379,7 +421,8 @@
+@@ -379,7 +421,8 @@ int max_lock_depth = 1024;
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
}
/*
-@@ -515,7 +558,7 @@
+@@ -515,7 +558,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* reached or the state of the chain has changed while we
* dropped the locks.
*/
goto out_unlock_pi;
/*
-@@ -696,13 +739,16 @@
+@@ -696,13 +739,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* follow here. This is the end of the chain we are walking.
*/
if (!rt_mutex_owner(lock)) {
raw_spin_unlock_irq(&lock->wait_lock);
return 0;
}
-@@ -804,9 +850,11 @@
+@@ -804,9 +850,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* @task: The task which wants to acquire the lock
* @waiter: The waiter that is queued to the lock's wait tree if the
* callsite called task_blocked_on_lock(), otherwise NULL
{
lockdep_assert_held(&lock->wait_lock);
-@@ -842,12 +890,11 @@
+@@ -842,12 +890,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
*/
if (waiter) {
/*
/*
* We can acquire the lock. Remove the waiter from the
* lock waiters tree.
-@@ -865,14 +912,12 @@
+@@ -865,14 +912,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
*/
if (rt_mutex_has_waiters(lock)) {
/*
/*
* The current top waiter stays enqueued. We
* don't have to change anything in the lock
-@@ -919,6 +964,351 @@
+@@ -919,6 +964,351 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
return 1;
}
/*
* Task blocks on lock.
*
-@@ -951,6 +1341,22 @@
+@@ -951,6 +1341,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
return -EDEADLK;
raw_spin_lock(&task->pi_lock);
waiter->task = task;
waiter->lock = lock;
waiter->prio = task->prio;
-@@ -974,7 +1380,7 @@
+@@ -974,7 +1380,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
rt_mutex_enqueue_pi(owner, waiter);
rt_mutex_adjust_prio(owner);
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
-@@ -1016,6 +1422,7 @@
+@@ -1016,6 +1422,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
* Called with lock->wait_lock held and interrupts disabled.
*/
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
struct rt_mutex *lock)
{
struct rt_mutex_waiter *waiter;
-@@ -1055,7 +1462,10 @@
+@@ -1055,7 +1462,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
* Pairs with preempt_enable() in rt_mutex_postunlock();
*/
preempt_disable();
 	raw_spin_unlock(&current->pi_lock);
}
-@@ -1070,7 +1480,7 @@
+@@ -1070,7 +1480,7 @@ static void remove_waiter(struct rt_mutex *lock,
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
lockdep_assert_held(&lock->wait_lock);
-@@ -1096,7 +1506,8 @@
+@@ -1096,7 +1506,8 @@ static void remove_waiter(struct rt_mutex *lock,
rt_mutex_adjust_prio(owner);
/* Store the lock on which owner is blocked or NULL */
raw_spin_unlock(&owner->pi_lock);
-@@ -1132,26 +1543,28 @@
+@@ -1132,26 +1543,28 @@ void rt_mutex_adjust_pi(struct task_struct *task)
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
}
/**
-@@ -1167,7 +1580,8 @@
+@@ -1167,7 +1580,8 @@ void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
{
int ret = 0;
-@@ -1176,16 +1590,17 @@
+@@ -1176,16 +1590,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
if (try_to_take_rt_mutex(lock, current, waiter))
break;
if (ret)
break;
}
-@@ -1224,33 +1639,104 @@
+@@ -1224,33 +1639,104 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
}
}
return 0;
}
-@@ -1260,17 +1746,27 @@
+@@ -1260,17 +1746,27 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
if (unlikely(timeout))
hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
}
/*
-@@ -1278,6 +1774,36 @@
+@@ -1278,6 +1774,36 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
* unconditionally. We might have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-@@ -1338,7 +1864,8 @@
+@@ -1338,7 +1864,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
* Return whether the current task needs to call rt_mutex_postunlock().
*/
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
{
unsigned long flags;
-@@ -1392,7 +1919,7 @@
+@@ -1392,7 +1919,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
*
* Queue the next waiter for wakeup once we release the wait_lock.
*/
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return true; /* call rt_mutex_postunlock() */
-@@ -1406,29 +1933,45 @@
+@@ -1406,29 +1933,45 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
}
static inline int
-@@ -1444,9 +1987,11 @@
+@@ -1444,9 +1987,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
/*
* Performs the wakeup of the the top-waiter and re-enables preemption.
*/
/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
preempt_enable();
-@@ -1455,15 +2000,40 @@
+@@ -1455,23 +2000,40 @@ void rt_mutex_postunlock(struct wake_q_head *wake_q)
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
- rt_mutex_postunlock(&wake_q);
+ if (slowfn(lock, &wake_q, &wake_sleeper_q))
+ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
-+}
-+
+ }
+
+-static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
-+{
-+ might_sleep();
+ {
+ might_sleep();
+ return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);
+}
+
+ * @lock: The rt_mutex to be locked
+ * @state: The state to set when blocking on the rt_mutex
+ */
-+static int __sched rt_mutex_lock_state(struct rt_mutex *lock, int state)
++static int __sched rt_mutex_lock_state(struct rt_mutex *lock, int state, unsigned int subclass)
+{
+ int ret;
-+
-+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+
+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+ ret = __rt_mutex_lock_state(lock, state);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
}
- /**
-@@ -1473,10 +2043,7 @@
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+@@ -1483,7 +2045,7 @@ static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+ */
+ void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+ {
+- __rt_mutex_lock(lock, subclass);
++ rt_mutex_lock_state(lock, TASK_UNINTERRUPTIBLE, subclass);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
+ #endif
+@@ -1496,7 +2058,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
*/
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
-- might_sleep();
--
-- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
-+ rt_mutex_lock_state(lock, TASK_UNINTERRUPTIBLE);
+- __rt_mutex_lock(lock, 0);
++ rt_mutex_lock_state(lock, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
-
-@@ -1491,16 +2058,7 @@
+ #endif
+@@ -1512,16 +2074,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
- mutex_release(&lock->dep_map, 1, _RET_IP_);
-
- return ret;
-+ return rt_mutex_lock_state(lock, TASK_INTERRUPTIBLE);
++ return rt_mutex_lock_state(lock, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1518,6 +2076,22 @@
+@@ -1538,6 +2091,22 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+ return __rt_mutex_slowtrylock(lock);
}
- /**
++/**
+ * rt_mutex_lock_killable - lock a rt_mutex killable
+ *
+ * @lock: the rt_mutex to be locked
+ */
+int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
+{
-+ return rt_mutex_lock_state(lock, TASK_KILLABLE);
++ return rt_mutex_lock_state(lock, TASK_KILLABLE, 0);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
+
-+/**
+ /**
* rt_mutex_timed_lock - lock a rt_mutex interruptible
* the timeout structure is provided
- * by the caller
-@@ -1540,6 +2114,7 @@
+@@ -1561,6 +2130,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
rt_mutex_slowlock);
if (ret)
mutex_release(&lock->dep_map, 1, _RET_IP_);
-@@ -1548,6 +2123,18 @@
+@@ -1569,6 +2139,18 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
/**
* rt_mutex_trylock - try to lock a rt_mutex
*
-@@ -1563,10 +2150,7 @@
+@@ -1584,10 +2166,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
int ret;
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-@@ -1574,6 +2158,11 @@
+@@ -1595,6 +2174,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
/**
* rt_mutex_unlock - unlock a rt_mutex
*
-@@ -1582,16 +2171,13 @@
+@@ -1603,16 +2187,13 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, 1, _RET_IP_);
{
lockdep_assert_held(&lock->wait_lock);
-@@ -1608,22 +2194,35 @@
+@@ -1629,22 +2210,35 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
* avoid inversion prior to the wakeup. preempt_disable()
* therein pairs with rt_mutex_postunlock().
*/
}
/**
-@@ -1662,7 +2261,7 @@
+@@ -1683,7 +2277,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name,
if (name && key)
debug_rt_mutex_init(lock, name, key);
}
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1682,6 +2281,14 @@
+@@ -1703,6 +2297,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
__rt_mutex_init(lock, NULL, NULL);
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
}
-@@ -1714,6 +2321,34 @@
+@@ -1735,6 +2337,34 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
if (try_to_take_rt_mutex(lock, task, NULL))
return 1;
/* We enforce deadlock detection for futexes */
ret = task_blocks_on_rt_mutex(lock, waiter, task,
RT_MUTEX_FULL_CHAINWALK);
-@@ -1728,7 +2363,7 @@
+@@ -1749,7 +2379,7 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
ret = 0;
}
remove_waiter(lock, waiter);
debug_rt_mutex_print_deadlock(waiter);
-@@ -1803,17 +2438,36 @@
+@@ -1824,17 +2454,36 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *to,
struct rt_mutex_waiter *waiter)
{
raw_spin_unlock_irq(&lock->wait_lock);
return ret;
-@@ -1874,3 +2528,99 @@
+@@ -1895,3 +2544,99 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
return cleanup;
}
+}
+EXPORT_SYMBOL(__rt_mutex_owner_current);
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/locking/rtmutex_common.h linux-4.14/kernel/locking/rtmutex_common.h
---- linux-4.14.orig/kernel/locking/rtmutex_common.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/locking/rtmutex_common.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index 68686b3ec3c1..2a157c78e18c 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
@@ -15,6 +15,7 @@
#include <linux/rtmutex.h>
/*
* This is the control structure for tasks blocked on a rt_mutex,
-@@ -29,6 +30,7 @@
+@@ -29,6 +30,7 @@ struct rt_mutex_waiter {
struct rb_node pi_tree_entry;
struct task_struct *task;
struct rt_mutex *lock;
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
-@@ -129,12 +131,15 @@
+@@ -129,12 +131,15 @@ enum rtmutex_chainwalk {
/*
* PI-futex support (proxy locking functions, etc.):
*/
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task);
-@@ -152,9 +157,27 @@
+@@ -152,9 +157,27 @@ extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
- struct wake_q_head *wqh);
+-
+-extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
+ struct wake_q_head *wqh,
+ struct wake_q_head *wq_sleeper);
-
--extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
++
+extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
+ struct wake_q_head *wake_sleeper_q);
+
#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/locking/rwlock-rt.c linux-4.14/kernel/locking/rwlock-rt.c
---- linux-4.14.orig/kernel/locking/rwlock-rt.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/kernel/locking/rwlock-rt.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c
+new file mode 100644
+index 000000000000..f2e155b2c4a8
+--- /dev/null
++++ b/kernel/locking/rwlock-rt.c
@@ -0,0 +1,378 @@
+/*
+ */
+ do_rwlock_rt_init(rwlock, name, key);
+}
+EXPORT_SYMBOL(__rt_rwlock_init);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/locking/rwsem-rt.c linux-4.14/kernel/locking/rwsem-rt.c
---- linux-4.14.orig/kernel/locking/rwsem-rt.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/kernel/locking/rwsem-rt.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c
+new file mode 100644
+index 000000000000..26991ddb6c5a
+--- /dev/null
++++ b/kernel/locking/rwsem-rt.c
@@ -0,0 +1,269 @@
+/*
+ */
+ /* Release it and account current as reader */
+ __up_write_unlock(sem, WRITER_BIAS - 1, flags);
+}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/locking/spinlock.c linux-4.14/kernel/locking/spinlock.c
---- linux-4.14.orig/kernel/locking/spinlock.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/locking/spinlock.c 2018-09-05 11:05:07.000000000 +0200
-@@ -125,8 +125,11 @@
+diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
+index 6e40fdfba326..401bda23f786 100644
+--- a/kernel/locking/spinlock.c
++++ b/kernel/locking/spinlock.c
+@@ -125,8 +125,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
* __[spin|read|write]_lock_bh()
*/
BUILD_LOCK_OPS(spin, raw_spinlock);
#endif
-@@ -210,6 +213,8 @@
+@@ -210,6 +213,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
-@@ -354,6 +359,8 @@
+@@ -354,6 +359,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/locking/spinlock_debug.c linux-4.14/kernel/locking/spinlock_debug.c
---- linux-4.14.orig/kernel/locking/spinlock_debug.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/locking/spinlock_debug.c 2018-09-05 11:05:07.000000000 +0200
-@@ -31,6 +31,7 @@
+diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
+index 9aa0fccd5d43..76d0b40d9193 100644
+--- a/kernel/locking/spinlock_debug.c
++++ b/kernel/locking/spinlock_debug.c
+@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
EXPORT_SYMBOL(__raw_spin_lock_init);
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
{
-@@ -48,6 +49,7 @@
+@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
}
EXPORT_SYMBOL(__rwlock_init);
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
-@@ -135,6 +137,7 @@
+@@ -135,6 +137,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
arch_spin_unlock(&lock->raw_lock);
}
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
-@@ -224,3 +227,5 @@
+@@ -224,3 +227,5 @@ void do_raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock);
arch_write_unlock(&lock->raw_lock);
}
+
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/panic.c linux-4.14/kernel/panic.c
---- linux-4.14.orig/kernel/panic.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/panic.c 2018-09-05 11:05:07.000000000 +0200
-@@ -482,9 +482,11 @@
+diff --git a/kernel/panic.c b/kernel/panic.c
+index bdd18afa19a4..5da649633795 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -482,9 +482,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
oops_id++;
return 0;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/power/hibernate.c linux-4.14/kernel/power/hibernate.c
---- linux-4.14.orig/kernel/power/hibernate.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/power/hibernate.c 2018-09-05 11:05:07.000000000 +0200
-@@ -287,6 +287,8 @@
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index a5c36e9c56a6..a4b83cb0c6e5 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -287,6 +287,8 @@ static int create_image(int platform_mode)
local_irq_disable();
error = syscore_suspend();
if (error) {
pr_err("Some system devices failed to power down, aborting hibernation\n");
-@@ -317,6 +319,7 @@
+@@ -317,6 +319,7 @@ static int create_image(int platform_mode)
syscore_resume();
Enable_irqs:
local_irq_enable();
Enable_cpus:
-@@ -445,6 +448,7 @@
+@@ -445,6 +448,7 @@ static int resume_target_kernel(bool platform_mode)
goto Enable_cpus;
local_irq_disable();
error = syscore_suspend();
if (error)
-@@ -478,6 +482,7 @@
+@@ -478,6 +482,7 @@ static int resume_target_kernel(bool platform_mode)
syscore_resume();
Enable_irqs:
local_irq_enable();
Enable_cpus:
-@@ -563,6 +568,7 @@
+@@ -563,6 +568,7 @@ int hibernation_platform_enter(void)
goto Enable_cpus;
local_irq_disable();
syscore_suspend();
if (pm_wakeup_pending()) {
error = -EAGAIN;
-@@ -575,6 +581,7 @@
+@@ -575,6 +581,7 @@ int hibernation_platform_enter(void)
Power_up:
syscore_resume();
local_irq_enable();
Enable_cpus:
-@@ -672,6 +679,10 @@
+@@ -672,6 +679,10 @@ static int load_image_and_restore(void)
return error;
}
/**
* hibernate - Carry out system hibernation, including saving the image.
*/
-@@ -685,6 +696,8 @@
+@@ -685,6 +696,8 @@ int hibernate(void)
return -EPERM;
}
lock_system_sleep();
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
-@@ -763,6 +776,7 @@
+@@ -763,6 +776,7 @@ int hibernate(void)
atomic_inc(&snapshot_device_available);
Unlock:
unlock_system_sleep();
pr_info("hibernation exit\n");
return error;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/power/suspend.c linux-4.14/kernel/power/suspend.c
---- linux-4.14.orig/kernel/power/suspend.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/power/suspend.c 2018-09-05 11:05:07.000000000 +0200
-@@ -428,6 +428,8 @@
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index c0bc2c89697a..b89605fe0e88 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -27,6 +27,7 @@
+ #include <linux/export.h>
+ #include <linux/suspend.h>
+ #include <linux/syscore_ops.h>
++#include <linux/swait.h>
+ #include <linux/ftrace.h>
+ #include <trace/events/power.h>
+ #include <linux/compiler.h>
+@@ -57,7 +58,7 @@ EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
+
+ static const struct platform_suspend_ops *suspend_ops;
+ static const struct platform_s2idle_ops *s2idle_ops;
+-static DECLARE_WAIT_QUEUE_HEAD(s2idle_wait_head);
++static DECLARE_SWAIT_QUEUE_HEAD(s2idle_wait_head);
+
+ enum s2idle_states __read_mostly s2idle_state;
+ static DEFINE_RAW_SPINLOCK(s2idle_lock);
+@@ -91,8 +92,8 @@ static void s2idle_enter(void)
+ /* Push all the CPUs into the idle loop. */
+ wake_up_all_idle_cpus();
+ /* Make the current CPU wait so it can enter the idle loop too. */
+- wait_event(s2idle_wait_head,
+- s2idle_state == S2IDLE_STATE_WAKE);
++ swait_event(s2idle_wait_head,
++ s2idle_state == S2IDLE_STATE_WAKE);
+
+ cpuidle_pause();
+ put_online_cpus();
+@@ -159,7 +160,7 @@ void s2idle_wake(void)
+ raw_spin_lock_irqsave(&s2idle_lock, flags);
+ if (s2idle_state > S2IDLE_STATE_NONE) {
+ s2idle_state = S2IDLE_STATE_WAKE;
+- wake_up(&s2idle_wait_head);
++ swake_up(&s2idle_wait_head);
+ }
+ raw_spin_unlock_irqrestore(&s2idle_lock, flags);
+ }
+@@ -428,6 +429,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
error = syscore_suspend();
if (!error) {
*wakeup = pm_wakeup_pending();
-@@ -443,6 +445,8 @@
+@@ -443,6 +446,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
syscore_resume();
}
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
-@@ -589,6 +593,8 @@
+@@ -589,6 +594,8 @@ static int enter_state(suspend_state_t state)
return error;
}
/**
* pm_suspend - Externally visible function for suspending the system.
* @state: System sleep state to enter.
-@@ -603,6 +609,7 @@
+@@ -603,6 +610,7 @@ int pm_suspend(suspend_state_t state)
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
error = enter_state(state);
if (error) {
-@@ -612,6 +619,7 @@
+@@ -612,6 +620,7 @@ int pm_suspend(suspend_state_t state)
suspend_stats.success++;
}
pr_info("suspend exit\n");
return error;
}
EXPORT_SYMBOL(pm_suspend);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/printk/printk.c linux-4.14/kernel/printk/printk.c
---- linux-4.14.orig/kernel/printk/printk.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/printk/printk.c 2018-09-05 11:05:07.000000000 +0200
-@@ -400,6 +400,65 @@
- printk_safe_exit_irqrestore(flags); \
- } while (0)
-
-+#ifdef CONFIG_EARLY_PRINTK
-+struct console *early_console;
-+
-+static void early_vprintk(const char *fmt, va_list ap)
-+{
-+ if (early_console) {
-+ char buf[512];
-+ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
-+
-+ early_console->write(early_console, buf, n);
-+ }
-+}
-+
-+asmlinkage void early_printk(const char *fmt, ...)
-+{
-+ va_list ap;
-+
-+ va_start(ap, fmt);
-+ early_vprintk(fmt, ap);
-+ va_end(ap);
-+}
-+
-+/*
-+ * This is independent of any log levels - a global
-+ * kill switch that turns off all of printk.
-+ *
-+ * Used by the NMI watchdog if early-printk is enabled.
-+ */
-+static bool __read_mostly printk_killswitch;
-+
-+static int __init force_early_printk_setup(char *str)
-+{
-+ printk_killswitch = true;
-+ return 0;
-+}
-+early_param("force_early_printk", force_early_printk_setup);
-+
-+void printk_kill(void)
-+{
-+ printk_killswitch = true;
-+}
-+
-+#ifdef CONFIG_PRINTK
-+static int forced_early_printk(const char *fmt, va_list ap)
-+{
-+ if (!printk_killswitch)
-+ return 0;
-+ early_vprintk(fmt, ap);
-+ return 1;
-+}
-+#endif
-+
-+#else
-+static inline int forced_early_printk(const char *fmt, va_list ap)
-+{
-+ return 0;
-+}
-+#endif
-+
- #ifdef CONFIG_PRINTK
- DECLARE_WAIT_QUEUE_HEAD(log_wait);
- /* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -1348,6 +1407,8 @@
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index f0223a7d9ed1..13fd0bcf2367 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1348,6 +1348,8 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
{
char *text;
int len = 0;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
-@@ -1359,6 +1420,14 @@
+@@ -1359,6 +1361,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
u64 seq;
u32 idx;
/*
* Find first record that fits, including all following records,
* into the user-provided buffer for this dump.
-@@ -1371,6 +1440,14 @@
+@@ -1371,6 +1381,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
len += msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
}
/* move first record forward until length fits into the buffer */
-@@ -1382,6 +1459,14 @@
+@@ -1382,6 +1400,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
len -= msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
}
/* last message fitting into this dump */
-@@ -1420,6 +1505,7 @@
+@@ -1420,6 +1446,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
logbuf_unlock_irq();
kfree(text);
-@@ -1558,6 +1644,12 @@
+@@ -1558,6 +1585,12 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
if (!console_drivers)
return;
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
continue;
-@@ -1573,6 +1665,7 @@
+@@ -1573,6 +1606,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
else
con->write(con, text, len);
}
}
int printk_delay_msec __read_mostly;
-@@ -1692,6 +1785,13 @@
- int printed_len;
- bool in_sched = false;
-
-+ /*
-+ * Fall back to early_printk if a debugging subsystem has
-+ * killed printk output
-+ */
-+ if (unlikely(forced_early_printk(fmt, args)))
-+ return 1;
-+
- if (level == LOGLEVEL_SCHED) {
- level = LOGLEVEL_DEFAULT;
- in_sched = true;
-@@ -1748,12 +1848,22 @@
+@@ -1757,12 +1791,22 @@ asmlinkage int vprintk_emit(int facility, int level,
/* If called from the scheduler, we can not call up(). */
if (!in_sched) {
console_unlock();
}
-@@ -1863,26 +1973,6 @@
+@@ -1872,26 +1916,6 @@ static bool suppress_message_printing(int level) { return false; }
#endif /* CONFIG_PRINTK */
static int __add_preferred_console(char *name, int idx, char *options,
char *brl_options)
{
-@@ -2229,10 +2319,15 @@
+@@ -2238,10 +2262,15 @@ void console_unlock(void)
console_seq++;
raw_spin_unlock(&logbuf_lock);
if (do_cond_resched)
cond_resched();
-@@ -2286,6 +2381,11 @@
+@@ -2295,6 +2324,11 @@ void console_unblank(void)
{
struct console *c;
/*
* console_unblank can no longer be called in interrupt context unless
* oops_in_progress is set to 1..
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/ptrace.c linux-4.14/kernel/ptrace.c
---- linux-4.14.orig/kernel/ptrace.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/ptrace.c 2018-09-05 11:05:07.000000000 +0200
-@@ -175,7 +175,14 @@
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index 64f8046586b6..a24e16bef51c 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -22,6 +22,7 @@
+ #include <linux/cpumask.h>
+ #include <linux/irq_work.h>
+ #include <linux/printk.h>
++#include <linux/console.h>
+
+ #include "internal.h"
+
+@@ -373,8 +374,74 @@ void __printk_safe_exit(void)
+ this_cpu_dec(printk_context);
+ }
+
++#ifdef CONFIG_EARLY_PRINTK
++struct console *early_console;
++
++static void early_vprintk(const char *fmt, va_list ap)
++{
++ if (early_console) {
++ char buf[512];
++ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
++
++ early_console->write(early_console, buf, n);
++ }
++}
++
++asmlinkage void early_printk(const char *fmt, ...)
++{
++ va_list ap;
++
++ va_start(ap, fmt);
++ early_vprintk(fmt, ap);
++ va_end(ap);
++}
++
++/*
++ * This is independent of any log levels - a global
++ * kill switch that turns off all of printk.
++ *
++ * Used by the NMI watchdog if early-printk is enabled.
++ */
++static bool __read_mostly printk_killswitch;
++
++static int __init force_early_printk_setup(char *str)
++{
++ printk_killswitch = true;
++ return 0;
++}
++early_param("force_early_printk", force_early_printk_setup);
++
++void printk_kill(void)
++{
++ printk_killswitch = true;
++}
++
++#ifdef CONFIG_PRINTK
++static int forced_early_printk(const char *fmt, va_list ap)
++{
++ if (!printk_killswitch)
++ return 0;
++ early_vprintk(fmt, ap);
++ return 1;
++}
++#endif
++
++#else
++static inline int forced_early_printk(const char *fmt, va_list ap)
++{
++ return 0;
++}
++#endif
++
+ __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
+ {
++ /*
++ * Fall back to early_printk if a debugging subsystem has
++ * killed printk output
++ */
++ if (unlikely(forced_early_printk(fmt, args)))
++ return 1;
++
+ /*
+ * Try to use the main logbuf even in NMI. But avoid calling console
+ * drivers that might have their own locks.
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 84b1367935e4..b32a86f63522 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -175,7 +175,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
spin_lock_irq(&task->sighand->siglock);
if (task_is_traced(task) && !__fatal_signal_pending(task)) {
ret = true;
}
spin_unlock_irq(&task->sighand->siglock);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/rcu/Kconfig linux-4.14/kernel/rcu/Kconfig
---- linux-4.14.orig/kernel/rcu/Kconfig 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/rcu/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -36,7 +36,7 @@
+diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
+index 9210379c0353..0be2c96fb640 100644
+--- a/kernel/rcu/Kconfig
++++ b/kernel/rcu/Kconfig
+@@ -36,7 +36,7 @@ config TINY_RCU
config RCU_EXPERT
bool "Make expert-level adjustments to RCU configuration"
help
This option needs to be enabled if you wish to make
expert-level adjustments to RCU configuration. By default,
-@@ -172,7 +172,7 @@
+@@ -172,7 +172,7 @@ config RCU_FANOUT_LEAF
config RCU_FAST_NO_HZ
bool "Accelerate last non-dyntick-idle CPU's grace periods"
default n
help
This option permits CPUs to enter dynticks-idle state even if
-@@ -191,7 +191,7 @@
+@@ -191,7 +191,7 @@ config RCU_FAST_NO_HZ
config RCU_BOOST
bool "Enable RCU priority boosting"
depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
help
This option boosts the priority of preempted RCU readers that
block the current preemptible RCU grace period for too long.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/rcu/rcu.h linux-4.14/kernel/rcu/rcu.h
---- linux-4.14.orig/kernel/rcu/rcu.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/rcu/rcu.h 2018-09-05 11:05:07.000000000 +0200
-@@ -462,18 +462,26 @@
+diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
+index e4b43fef89f5..0b056c30e9b1 100644
+--- a/kernel/rcu/rcu.h
++++ b/kernel/rcu/rcu.h
+@@ -462,18 +462,26 @@ static inline void show_rcu_gp_kthreads(void) { }
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
unsigned long rcu_batches_started(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */
#ifdef CONFIG_RCU_NOCB_CPU
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/rcu/rcu_segcblist.c linux-4.14/kernel/rcu/rcu_segcblist.c
---- linux-4.14.orig/kernel/rcu/rcu_segcblist.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/rcu/rcu_segcblist.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c
+index 7649fcd2c4c7..88cba7c2956c 100644
+--- a/kernel/rcu/rcu_segcblist.c
++++ b/kernel/rcu/rcu_segcblist.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include "rcu_segcblist.h"
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/rcu/rcutorture.c linux-4.14/kernel/rcu/rcutorture.c
---- linux-4.14.orig/kernel/rcu/rcutorture.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/rcu/rcutorture.c 2018-09-05 11:05:07.000000000 +0200
-@@ -417,6 +417,7 @@
+diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
+index 45f2ffbc1e78..2e9dbb734d5a 100644
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -417,6 +417,7 @@ static struct rcu_torture_ops rcu_ops = {
.name = "rcu"
};
/*
* Definitions for rcu_bh torture testing.
*/
-@@ -456,6 +457,12 @@
+@@ -456,6 +457,12 @@ static struct rcu_torture_ops rcu_bh_ops = {
.name = "rcu_bh"
};
/*
* Don't even think about trying any of these in real life!!!
* The names includes "busted", and they really means it!
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/rcu/srcutree.c linux-4.14/kernel/rcu/srcutree.c
---- linux-4.14.orig/kernel/rcu/srcutree.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/rcu/srcutree.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index 6d5880089ff6..0e3b2bd3f2ac 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
@@ -36,6 +36,8 @@
#include <linux/delay.h>
#include <linux/module.h>
#include "rcu.h"
#include "rcu_segcblist.h"
-@@ -53,6 +55,33 @@
+@@ -53,6 +55,33 @@ static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
static void process_srcu(struct work_struct *work);
/*
* Initialize SRCU combining tree. Note that statically allocated
* srcu_struct structures might already have srcu_read_lock() and
-@@ -77,7 +106,7 @@
+@@ -77,7 +106,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
/* Each pass through this loop initializes one srcu_node structure. */
rcu_for_each_node_breadth_first(sp, snp) {
WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
ARRAY_SIZE(snp->srcu_data_have_cbs));
for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
-@@ -111,7 +140,7 @@
+@@ -111,7 +140,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
snp_first = sp->level[level];
for_each_possible_cpu(cpu) {
sdp = per_cpu_ptr(sp->sda, cpu);
rcu_segcblist_init(&sdp->srcu_cblist);
sdp->srcu_cblist_invoking = false;
sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
-@@ -170,7 +199,7 @@
+@@ -170,7 +199,7 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
/* Don't re-initialize a lock while it is held. */
debug_check_no_locks_freed((void *)sp, sizeof(*sp));
lockdep_init_map(&sp->dep_map, name, key, 0);
return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
-@@ -187,7 +216,7 @@
+@@ -187,7 +216,7 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
*/
int init_srcu_struct(struct srcu_struct *sp)
{
return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
-@@ -210,13 +239,13 @@
+@@ -210,13 +239,13 @@ static void check_init_srcu_struct(struct srcu_struct *sp)
/* The smp_load_acquire() pairs with the smp_store_release(). */
if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
return; /* Already initialized. */
}
/*
-@@ -425,21 +454,6 @@
+@@ -424,21 +453,6 @@ static void srcu_gp_start(struct srcu_struct *sp)
+ WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}
- /*
+-/*
- * Track online CPUs to guide callback workqueue placement.
- */
-DEFINE_PER_CPU(bool, srcu_online);
- WRITE_ONCE(per_cpu(srcu_online, cpu), false);
-}
-
--/*
+ /*
* Place the workqueue handler on the specified CPU if online, otherwise
* just run it whereever. This is useful for placing workqueue handlers
- * that are to invoke the specified CPU's callbacks.
-@@ -450,12 +464,12 @@
+@@ -450,12 +464,12 @@ static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
{
bool ret;
return ret;
}
-@@ -513,7 +527,7 @@
+@@ -513,7 +527,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
mutex_lock(&sp->srcu_cb_mutex);
/* End the current grace period. */
idx = rcu_seq_state(sp->srcu_gp_seq);
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
cbdelay = srcu_get_delay(sp);
-@@ -522,7 +536,7 @@
+@@ -522,7 +536,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
gpseq = rcu_seq_current(&sp->srcu_gp_seq);
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
sp->srcu_gp_seq_needed_exp = gpseq;
mutex_unlock(&sp->srcu_gp_mutex);
/* A new grace period can start at this point. But only one. */
-@@ -530,7 +544,7 @@
+@@ -530,7 +544,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
rcu_for_each_node_breadth_first(sp, snp) {
cbs = false;
if (snp >= sp->level[rcu_num_lvls - 1])
cbs = snp->srcu_have_cbs[idx] == gpseq;
-@@ -540,7 +554,7 @@
+@@ -540,7 +554,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
snp->srcu_gp_seq_needed_exp = gpseq;
mask = snp->srcu_data_have_cbs[idx];
snp->srcu_data_have_cbs[idx] = 0;
if (cbs)
srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
-@@ -548,11 +562,11 @@
+@@ -548,11 +562,11 @@ static void srcu_gp_end(struct srcu_struct *sp)
if (!(gpseq & counter_wrap_check))
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
sdp = per_cpu_ptr(sp->sda, cpu);
}
}
-@@ -560,17 +574,17 @@
+@@ -560,17 +574,17 @@ static void srcu_gp_end(struct srcu_struct *sp)
mutex_unlock(&sp->srcu_cb_mutex);
/* Start a new grace period if needed. */
}
}
-@@ -590,18 +604,18 @@
+@@ -590,18 +604,18 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
return;
}
/*
-@@ -623,12 +637,12 @@
+@@ -623,12 +637,12 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
for (; snp != NULL; snp = snp->srcu_parent) {
if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
return; /* GP already done and CBs recorded. */
if (snp == sdp->mynode && snp_seq != s) {
srcu_schedule_cbs_sdp(sdp, do_norm
? SRCU_INTERVAL
-@@ -644,11 +658,11 @@
+@@ -644,11 +658,11 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
snp->srcu_gp_seq_needed_exp = s;
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
/*
* Record need for grace period s. Pair with load
-@@ -667,7 +681,7 @@
+@@ -667,7 +681,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
queue_delayed_work(system_power_efficient_wq, &sp->work,
srcu_get_delay(sp));
}
}
/*
-@@ -736,6 +750,8 @@
+@@ -736,6 +750,8 @@ static void srcu_flip(struct srcu_struct *sp)
* negligible when amoritized over that time period, and the extra latency
* of a needlessly non-expedited grace period is similarly negligible.
*/
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
unsigned long curseq;
-@@ -744,13 +760,13 @@
+@@ -744,13 +760,13 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
unsigned long t;
/* If the local srcu_data structure has callbacks, not idle. */
/*
* No local callbacks, so probabalistically probe global state.
-@@ -828,9 +844,9 @@
+@@ -828,9 +844,9 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
return;
}
rhp->func = func;
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
rcu_segcblist_advance(&sdp->srcu_cblist,
rcu_seq_current(&sp->srcu_gp_seq));
-@@ -844,7 +860,8 @@
+@@ -844,7 +860,8 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
sdp->srcu_gp_seq_needed_exp = s;
needexp = true;
}
if (needgp)
srcu_funnel_gp_start(sp, sdp, s, do_norm);
else if (needexp)
-@@ -900,7 +917,7 @@
+@@ -900,7 +917,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
/*
* Make sure that later code is ordered after the SRCU grace
* in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed
* because the current CPU might have been totally uninvolved with
* (and thus unordered against) that grace period.
-@@ -1024,7 +1041,7 @@
+@@ -1024,7 +1041,7 @@ void srcu_barrier(struct srcu_struct *sp)
*/
for_each_possible_cpu(cpu) {
sdp = per_cpu_ptr(sp->sda, cpu);
atomic_inc(&sp->srcu_barrier_cpu_cnt);
sdp->srcu_barrier_head.func = srcu_barrier_cb;
debug_rcu_head_queue(&sdp->srcu_barrier_head);
-@@ -1033,7 +1050,7 @@
+@@ -1033,7 +1050,7 @@ void srcu_barrier(struct srcu_struct *sp)
debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
atomic_dec(&sp->srcu_barrier_cpu_cnt);
}
}
/* Remove the initial count, at which point reaching zero can happen. */
-@@ -1082,17 +1099,17 @@
+@@ -1082,17 +1099,17 @@ static void srcu_advance_state(struct srcu_struct *sp)
*/
idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
if (idx == SRCU_STATE_IDLE) {
if (idx != SRCU_STATE_IDLE) {
mutex_unlock(&sp->srcu_gp_mutex);
return; /* Someone else started the grace period. */
-@@ -1141,19 +1158,19 @@
+@@ -1141,19 +1158,19 @@ static void srcu_invoke_callbacks(struct work_struct *work)
sdp = container_of(work, struct srcu_data, work.work);
sp = sdp->sp;
rcu_cblist_init(&ready_cbs);
rhp = rcu_cblist_dequeue(&ready_cbs);
for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
debug_rcu_head_unqueue(rhp);
-@@ -1166,13 +1183,13 @@
+@@ -1166,13 +1183,13 @@ static void srcu_invoke_callbacks(struct work_struct *work)
* Update counts, accelerate new callbacks, and if needed,
* schedule another round of callback invocation.
*/
if (more)
srcu_schedule_cbs_sdp(sdp, 0);
}
-@@ -1185,7 +1202,7 @@
+@@ -1185,7 +1202,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
bool pushgp = true;
if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
/* All requests fulfilled, time to go idle. */
-@@ -1195,7 +1212,7 @@
+@@ -1195,7 +1212,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
/* Outstanding request and no GP. Start one. */
srcu_gp_start(sp);
}
if (pushgp)
queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/rcu/tree.c linux-4.14/kernel/rcu/tree.c
---- linux-4.14.orig/kernel/rcu/tree.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/rcu/tree.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 3e3650e94ae6..0a722b56d90b 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
@@ -58,6 +58,11 @@
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include "tree.h"
#include "rcu.h"
-@@ -243,6 +248,19 @@
+@@ -243,6 +248,19 @@ void rcu_sched_qs(void)
this_cpu_ptr(&rcu_sched_data), true);
}
void rcu_bh_qs(void)
{
RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
-@@ -253,6 +271,7 @@
+@@ -253,6 +271,7 @@ void rcu_bh_qs(void)
__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
}
}
/*
* Steal a bit from the bottom of ->dynticks for idle entry/exit
-@@ -564,11 +583,13 @@
+@@ -564,11 +583,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
/*
* Return the number of RCU BH batches started thus far for debug & stats.
*/
/*
* Return the number of RCU batches completed thus far for debug & stats.
-@@ -588,6 +609,7 @@
+@@ -588,6 +609,7 @@ unsigned long rcu_batches_completed_sched(void)
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
/*
* Return the number of RCU BH batches completed thus far for debug & stats.
*/
-@@ -596,6 +618,7 @@
+@@ -596,6 +618,7 @@ unsigned long rcu_batches_completed_bh(void)
return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
/*
* Return the number of RCU expedited batches completed thus far for
-@@ -619,6 +642,7 @@
+@@ -619,6 +642,7 @@ unsigned long rcu_exp_batches_completed_sched(void)
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
/*
* Force a quiescent state.
*/
-@@ -637,6 +661,13 @@
+@@ -637,6 +661,13 @@ void rcu_bh_force_quiescent_state(void)
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
/*
* Force a quiescent state for RCU-sched.
*/
-@@ -687,9 +718,11 @@
+@@ -687,9 +718,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
case RCU_FLAVOR:
rsp = rcu_state_p;
break;
case RCU_SCHED_FLAVOR:
rsp = &rcu_sched_state;
break;
-@@ -2918,18 +2951,17 @@
+@@ -2918,18 +2951,17 @@ __rcu_process_callbacks(struct rcu_state *rsp)
/*
* Do RCU core processing for the current CPU.
*/
/*
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
-@@ -2941,18 +2973,105 @@
+@@ -2941,19 +2973,106 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
- if (likely(!rsp->boost)) {
- rcu_do_batch(rsp, rdp);
+- return;
+- }
+- invoke_rcu_callbacks_kthread();
+ rcu_do_batch(rsp, rdp);
-+}
-+
+ }
+
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+ /*
+/*
+ * Wake up this CPU's rcuc kthread to do RCU core processing.
+ */
-+static void invoke_rcu_core(void)
-+{
+ static void invoke_rcu_core(void)
+ {
+- if (cpu_online(smp_processor_id()))
+- raise_softirq(RCU_SOFTIRQ);
+ unsigned long flags;
+ struct task_struct *t;
+
+ if (!cpu_online(smp_processor_id()))
- return;
++ return;
+ local_irq_save(flags);
+ __this_cpu_write(rcu_cpu_has_work, 1);
+ t = __this_cpu_read(rcu_cpu_kthread_task);
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
+ return __this_cpu_read(rcu_cpu_has_work);
-+}
-+
+ }
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
+ * RCU softirq used in flavors and configurations of RCU that do not
+ *statusp = RCU_KTHREAD_WAITING;
+ return;
+ }
- }
-- invoke_rcu_callbacks_kthread();
++ }
+ *statusp = RCU_KTHREAD_YIELDING;
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
+ schedule_timeout_interruptible(2);
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
+ *statusp = RCU_KTHREAD_WAITING;
- }
-
--static void invoke_rcu_core(void)
++}
++
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+ .store = &rcu_cpu_kthread_task,
+ .thread_should_run = rcu_cpu_kthread_should_run,
+ * Spawn per-CPU RCU core processing kthreads.
+ */
+static int __init rcu_spawn_core_kthreads(void)
- {
-- if (cpu_online(smp_processor_id()))
-- raise_softirq(RCU_SOFTIRQ);
++{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(rcu_cpu_has_work, cpu) = 0;
+ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
+ return 0;
- }
++}
+early_initcall(rcu_spawn_core_kthreads);
-
++
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -3113,6 +3232,7 @@
+ */
+@@ -3113,6 +3232,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
-@@ -3140,6 +3260,7 @@
+@@ -3140,6 +3260,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
/*
* Queue an RCU callback for lazy invocation after a grace period.
-@@ -3225,6 +3346,7 @@
+@@ -3225,6 +3346,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
-@@ -3251,6 +3373,7 @@
+@@ -3251,6 +3373,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
/**
* get_state_synchronize_rcu - Snapshot current RCU state
-@@ -3601,6 +3724,7 @@
+@@ -3601,6 +3724,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
mutex_unlock(&rsp->barrier_mutex);
}
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
-@@ -3609,6 +3733,7 @@
+@@ -3609,6 +3733,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
-@@ -3741,8 +3866,6 @@
+@@ -3741,8 +3866,6 @@ int rcutree_online_cpu(unsigned int cpu)
{
sync_sched_exp_online_cleanup(cpu);
rcutree_affinity_setting(cpu, -1);
return 0;
}
-@@ -3753,8 +3876,6 @@
+@@ -3753,8 +3876,6 @@ int rcutree_online_cpu(unsigned int cpu)
int rcutree_offline_cpu(unsigned int cpu)
{
rcutree_affinity_setting(cpu, cpu);
return 0;
}
-@@ -4184,12 +4305,13 @@
+@@ -4184,12 +4305,13 @@ void __init rcu_init(void)
rcu_bootup_announce();
rcu_init_geometry();
/*
* We don't need protection against CPU-hotplug here because
-@@ -4200,8 +4322,6 @@
+@@ -4200,8 +4322,6 @@ void __init rcu_init(void)
for_each_online_cpu(cpu) {
rcutree_prepare_cpu(cpu);
rcu_cpu_starting(cpu);
}
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/rcu/tree.h linux-4.14/kernel/rcu/tree.h
---- linux-4.14.orig/kernel/rcu/tree.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/rcu/tree.h 2018-09-05 11:05:07.000000000 +0200
-@@ -427,7 +427,9 @@
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index 8e1f285f0a70..7acc23da94e2 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -427,7 +427,9 @@ extern struct list_head rcu_struct_flavors;
*/
extern struct rcu_state rcu_sched_state;
#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
-@@ -436,12 +438,10 @@
+@@ -436,12 +438,10 @@ extern struct rcu_state rcu_preempt_state;
int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
bool rcu_eqs_special_set(int cpu);
#ifndef RCU_TREE_NONCORE
-@@ -461,10 +461,9 @@
+@@ -461,10 +461,9 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/rcu/tree_plugin.h linux-4.14/kernel/rcu/tree_plugin.h
---- linux-4.14.orig/kernel/rcu/tree_plugin.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/rcu/tree_plugin.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 8b3102d22823..17ee8d1f38c4 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
@@ -24,39 +24,16 @@
* Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
-@@ -324,9 +301,13 @@
+@@ -324,9 +301,13 @@ static void rcu_preempt_note_context_switch(bool preempt)
struct task_struct *t = current;
struct rcu_data *rdp;
struct rcu_node *rnp;
if (t->rcu_read_lock_nesting > 0 &&
!t->rcu_read_unlock_special.b.blocked) {
-@@ -463,7 +444,7 @@
+@@ -463,7 +444,7 @@ void rcu_read_unlock_special(struct task_struct *t)
}
/* Hardware IRQ handlers cannot block, complain if they get here. */
lockdep_rcu_suspicious(__FILE__, __LINE__,
"rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
-@@ -530,7 +511,7 @@
+@@ -530,7 +511,7 @@ void rcu_read_unlock_special(struct task_struct *t)
/* Unboost if we were boosted. */
if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
/*
* If this was the last task on the expedited lists,
-@@ -684,15 +665,6 @@
+@@ -684,15 +665,6 @@ static void rcu_preempt_check_callbacks(void)
t->rcu_read_unlock_special.b.need_qs = true;
}
/**
* call_rcu() - Queue an RCU callback for invocation after a grace period.
* @head: structure to be used for queueing the RCU updates.
-@@ -915,20 +887,23 @@
+@@ -915,20 +887,23 @@ void exit_rcu(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+ */
+static void rcu_cpu_kthread_setup(unsigned int cpu)
+{
- #ifdef CONFIG_RCU_BOOST
++#ifdef CONFIG_RCU_BOOST
+ struct sched_param sp;
++
++ sp.sched_priority = kthread_prio;
++ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
++#endif /* #ifdef CONFIG_RCU_BOOST */
++}
++
+ #ifdef CONFIG_RCU_BOOST
+
+ #include "../locking/rtmutex_common.h"
--#include "../locking/rtmutex_common.h"
--
-static void rcu_wake_cond(struct task_struct *t, int status)
-{
- /*
- */
- if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
- wake_up_process(t);
-+ sp.sched_priority = kthread_prio;
-+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-+#endif /* #ifdef CONFIG_RCU_BOOST */
- }
-
-+#ifdef CONFIG_RCU_BOOST
-+
-+#include "../locking/rtmutex_common.h"
-+
+-}
+-
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
-@@ -1071,23 +1046,6 @@
+@@ -1070,23 +1045,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
+ }
}
- /*
+-/*
- * Wake up the per-CPU kthread to invoke RCU callbacks.
- */
-static void invoke_rcu_callbacks_kthread(void)
- local_irq_restore(flags);
-}
-
--/*
+ /*
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
- */
-@@ -1141,67 +1099,6 @@
+@@ -1141,67 +1099,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
return 0;
}
/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question. The CPU hotplug lock is still
-@@ -1232,26 +1129,12 @@
+@@ -1232,26 +1129,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
free_cpumask_var(cm);
}
rcu_for_each_leaf_node(rcu_state_p, rnp)
(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}
-@@ -1274,11 +1157,6 @@
+@@ -1274,11 +1157,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
static bool rcu_is_callbacks_kthread(void)
{
return false;
-@@ -1302,7 +1180,7 @@
+@@ -1302,7 +1180,7 @@ static void rcu_prepare_kthreads(int cpu)
#endif /* #else #ifdef CONFIG_RCU_BOOST */
/*
* Check to see if any future RCU-related work will need to be done
-@@ -1318,7 +1196,9 @@
+@@ -1318,7 +1196,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
*nextevt = KTIME_MAX;
return rcu_cpu_has_callbacks(NULL);
}
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
* after it.
-@@ -1414,6 +1294,8 @@
+@@ -1414,6 +1294,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
return cbs_ready;
}
/*
* Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
* to invoke. If the CPU has callbacks, try to advance them. Tell the
-@@ -1456,6 +1338,7 @@
+@@ -1456,6 +1338,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
*nextevt = basemono + dj * TICK_NSEC;
return 0;
}
/*
* Prepare a CPU for idle from an RCU perspective. The first major task
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/rcu/update.c linux-4.14/kernel/rcu/update.c
---- linux-4.14.orig/kernel/rcu/update.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/rcu/update.c 2018-09-05 11:05:07.000000000 +0200
-@@ -66,7 +66,7 @@
+diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
+index 7a577bd989a4..2006a09680aa 100644
+--- a/kernel/rcu/update.c
++++ b/kernel/rcu/update.c
+@@ -66,7 +66,7 @@ extern int rcu_expedited; /* from sysctl */
module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */
-@@ -333,6 +333,7 @@
+@@ -333,6 +333,7 @@ int rcu_read_lock_held(void)
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
/**
* rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
-@@ -359,6 +360,7 @@
+@@ -359,6 +360,7 @@ int rcu_read_lock_bh_held(void)
return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/completion.c linux-4.14/kernel/sched/completion.c
---- linux-4.14.orig/kernel/sched/completion.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/sched/completion.c 2018-09-05 11:05:07.000000000 +0200
-@@ -32,7 +32,7 @@
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index a9ee16bbc693..9943019095e9 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -18,7 +18,7 @@ endif
+
+ obj-y += core.o loadavg.o clock.o cputime.o
+ obj-y += idle_task.o fair.o rt.o deadline.o
+-obj-y += wait.o wait_bit.o swait.o completion.o idle.o
++obj-y += wait.o wait_bit.o swait.o swork.o completion.o idle.o
+ obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
+ obj-$(CONFIG_SCHEDSTATS) += stats.o
+diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
+index 2ddaec40956f..0fe2982e46a0 100644
+--- a/kernel/sched/completion.c
++++ b/kernel/sched/completion.c
+@@ -32,7 +32,7 @@ void complete(struct completion *x)
{
unsigned long flags;
/*
* Perform commit of crossrelease here.
-@@ -41,8 +41,8 @@
+@@ -41,8 +41,8 @@ void complete(struct completion *x)
if (x->done != UINT_MAX)
x->done++;
}
EXPORT_SYMBOL(complete);
-@@ -66,10 +66,10 @@
+@@ -66,10 +66,10 @@ void complete_all(struct completion *x)
{
unsigned long flags;
}
EXPORT_SYMBOL(complete_all);
-@@ -78,20 +78,20 @@
+@@ -78,20 +78,20 @@ do_wait_for_common(struct completion *x,
long (*action)(long), long timeout, int state)
{
if (!x->done) {
if (!x->done)
return timeout;
}
-@@ -108,9 +108,9 @@
+@@ -108,9 +108,9 @@ __wait_for_common(struct completion *x,
complete_acquire(x);
complete_release(x);
-@@ -299,12 +299,12 @@
+@@ -299,12 +299,12 @@ bool try_wait_for_completion(struct completion *x)
if (!READ_ONCE(x->done))
return 0;
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
-@@ -330,8 +330,8 @@
+@@ -330,8 +330,8 @@ bool completion_done(struct completion *x)
* otherwise we can end up freeing the completion before complete()
* is done referencing it.
*/
return true;
}
EXPORT_SYMBOL(completion_done);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/core.c linux-4.14/kernel/sched/core.c
---- linux-4.14.orig/kernel/sched/core.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/sched/core.c 2018-09-05 11:05:07.000000000 +0200
-@@ -59,7 +59,11 @@
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 4e89ed8a0fb2..6e6bd5262f23 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -59,7 +59,11 @@ const_debug unsigned int sysctl_sched_features =
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
/*
* period over which we average the RT time consumption, measured
-@@ -341,7 +345,7 @@
+@@ -341,7 +345,7 @@ static void init_rq_hrtick(struct rq *rq)
rq->hrtick_csd.info = rq;
#endif
rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
-@@ -423,9 +427,15 @@
+@@ -423,9 +427,15 @@ static bool set_nr_if_polling(struct task_struct *p)
#endif
#endif
/*
* Atomically grab the task, if ->wake_q is !nil already it means
-@@ -447,24 +457,32 @@
+@@ -447,24 +457,32 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
head->lastp = &node->next;
}
put_task_struct(task);
}
}
-@@ -500,6 +518,48 @@
+@@ -500,6 +518,48 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -523,11 +583,14 @@
+@@ -523,11 +583,14 @@ void resched_cpu(int cpu)
*/
int get_nohz_timer_target(void)
{
rcu_read_lock();
for_each_domain(cpu, sd) {
-@@ -546,6 +609,8 @@
+@@ -546,6 +609,8 @@ int get_nohz_timer_target(void)
cpu = housekeeping_any_cpu();
unlock:
rcu_read_unlock();
return cpu;
}
-@@ -912,7 +977,7 @@
+@@ -912,10 +977,10 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
*/
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr))
return false;
- if (is_per_cpu_kthread(p))
-@@ -1007,7 +1072,7 @@
+- if (is_per_cpu_kthread(p))
++ if (is_per_cpu_kthread(p) || __migrate_disabled(p))
+ return cpu_online(cpu);
+
+ return cpu_active(cpu);
+@@ -1007,7 +1072,7 @@ static int migration_cpu_stop(void *data)
local_irq_disable();
/*
* We need to explicitly wake pending tasks before running
* during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
*/
sched_ttwu_pending();
-@@ -1038,11 +1103,19 @@
+@@ -1038,11 +1103,19 @@ static int migration_cpu_stop(void *data)
*/
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
}
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+int __migrate_disabled(struct task_struct *p)
+{
+ return p->migrate_disable;
{
struct rq *rq = task_rq(p);
bool queued, running;
-@@ -1071,6 +1144,20 @@
+@@ -1071,6 +1144,20 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
set_curr_task(rq, p);
}
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
-+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ if (__migrate_disabled(p)) {
+ lockdep_assert_held(&p->pi_lock);
+
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
-@@ -1108,7 +1195,7 @@
+@@ -1108,7 +1195,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
goto out;
}
goto out;
if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
-@@ -1129,9 +1216,16 @@
+@@ -1129,9 +1216,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
}
/* Can the task run on the task's current CPU? If so, we're done */
+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
-+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ if (__migrate_disabled(p)) {
+ p->migrate_disable_update = 1;
+ goto out;
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
-@@ -1269,10 +1363,10 @@
+@@ -1269,10 +1363,10 @@ static int migrate_swap_stop(void *data)
if (task_cpu(arg->src_task) != arg->src_cpu)
goto unlock;
goto unlock;
__migrate_swap_task(arg->src_task, arg->dst_cpu);
-@@ -1313,10 +1407,10 @@
+@@ -1313,10 +1407,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
goto out;
goto out;
trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
-@@ -1326,6 +1420,18 @@
+@@ -1326,6 +1420,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
return ret;
}
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1370,7 +1476,7 @@
+@@ -1370,7 +1476,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
return 0;
cpu_relax();
}
-@@ -1385,7 +1491,8 @@
+@@ -1385,7 +1491,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &rf);
-@@ -1460,7 +1567,7 @@
+@@ -1460,7 +1567,7 @@ void kick_process(struct task_struct *p)
EXPORT_SYMBOL_GPL(kick_process);
/*
*
* A few notes on cpu_active vs cpu_online:
*
-@@ -1500,14 +1607,14 @@
+@@ -1500,14 +1607,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
for_each_cpu(dest_cpu, nodemask) {
if (!cpu_active(dest_cpu))
continue;
if (!is_cpu_allowed(p, dest_cpu))
continue;
-@@ -1551,7 +1658,7 @@
+@@ -1551,7 +1658,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
}
/*
*/
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
-@@ -1561,11 +1668,11 @@
+@@ -1561,11 +1668,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
if (p->nr_cpus_allowed > 1)
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
else
* CPU.
*
* Since this is common to all placement strategies, this lives here.
-@@ -1668,10 +1775,6 @@
+@@ -1668,10 +1775,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
}
/*
-@@ -1995,8 +2098,27 @@
+@@ -1995,8 +2098,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
trace_sched_waking(p);
-@@ -2093,56 +2215,6 @@
+@@ -2092,56 +2214,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ return success;
}
- /**
+-/**
- * try_to_wake_up_local - try to wake up a local task with rq lock held
- * @p: the thread to be awakened
- * @rf: request-queue flags for pinning
- raw_spin_unlock(&p->pi_lock);
-}
-
--/**
+ /**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
- *
-@@ -2160,6 +2232,18 @@
+@@ -2160,6 +2232,18 @@ int wake_up_process(struct task_struct *p)
}
EXPORT_SYMBOL(wake_up_process);
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
-@@ -2420,6 +2504,9 @@
+@@ -2420,6 +2504,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -2462,7 +2549,7 @@
+@@ -2462,7 +2549,7 @@ void wake_up_new_task(struct task_struct *p)
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
* - any previously selected CPU might disappear through hotplug
*
* Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-@@ -2675,21 +2762,16 @@
+@@ -2675,21 +2762,16 @@ static struct rq *finish_task_switch(struct task_struct *prev)
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
put_task_struct(prev);
}
-@@ -3336,25 +3418,13 @@
+@@ -3336,25 +3418,13 @@ static void __sched notrace __schedule(bool preempt)
atomic_inc(&rq->nr_iowait);
delayacct_blkio_start();
}
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -3407,8 +3477,19 @@
+@@ -3407,8 +3477,24 @@ void __noreturn do_task_dead(void)
static inline void sched_submit_work(struct task_struct *tsk)
{
- if (!tsk->state || tsk_is_pi_blocked(tsk))
+ if (!tsk->state)
-+ return;
+ return;
+ /*
+ * If a worker went to sleep, notify and ask workqueue whether
+ * it wants to wake up a task to maintain concurrency.
++ * As this function is called inside the schedule() context,
++ * we disable preemption to avoid it calling schedule() again
++ * in the possible wakeup of a kworker.
+ */
-+ if (tsk->flags & PF_WQ_WORKER)
++ if (tsk->flags & PF_WQ_WORKER) {
++ preempt_disable();
+ wq_worker_sleeping(tsk);
-+
++ preempt_enable_no_resched();
++ }
+
+ if (tsk_is_pi_blocked(tsk))
- return;
++ return;
+
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3417,6 +3498,12 @@
+@@ -3417,6 +3503,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
blk_schedule_flush_plug(tsk);
}
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3427,6 +3514,7 @@
+@@ -3427,6 +3519,7 @@ asmlinkage __visible void __sched schedule(void)
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
}
EXPORT_SYMBOL(schedule);
-@@ -3515,6 +3603,30 @@
+@@ -3515,6 +3608,30 @@ static void __sched notrace preempt_schedule_common(void)
} while (need_resched());
}
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3529,7 +3641,8 @@
+@@ -3529,7 +3646,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
*/
if (likely(!preemptible()))
return;
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3556,6 +3669,9 @@
+@@ -3556,6 +3674,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
if (likely(!preemptible()))
return;
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -3578,7 +3694,16 @@
+@@ -3578,7 +3699,16 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
* an infinite recursion.
*/
prev_ctx = exception_enter();
exception_exit(prev_ctx);
preempt_latency_stop(1);
-@@ -4164,7 +4289,7 @@
+@@ -4164,7 +4294,7 @@ static int __sched_setscheduler(struct task_struct *p,
* the entire root_domain to become SCHED_DEADLINE. We
* will also fail if there's no bandwidth available.
*/
rq->rd->dl_bw.bw == 0) {
task_rq_unlock(rq, p, &rf);
return -EPERM;
-@@ -4758,7 +4883,7 @@
+@@ -4758,7 +4888,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
goto out_unlock;
raw_spin_lock_irqsave(&p->pi_lock, flags);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
-@@ -4877,6 +5002,7 @@
+@@ -4877,6 +5007,7 @@ int __cond_resched_lock(spinlock_t *lock)
}
EXPORT_SYMBOL(__cond_resched_lock);
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -4890,6 +5016,7 @@
+@@ -4890,6 +5021,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
/**
* yield - yield the current processor to other threads.
-@@ -5284,7 +5411,9 @@
+@@ -5284,7 +5416,9 @@ void init_idle(struct task_struct *idle, int cpu)
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -5323,7 +5452,7 @@
+@@ -5323,7 +5457,7 @@ int task_can_attach(struct task_struct *p,
* allowed nodes is unnecessary. Thus, cpusets are not
* applicable for such threads. This prevents checking for
* success of set_cpus_allowed_ptr() on all attached tasks
*/
if (p->flags & PF_NO_SETAFFINITY) {
ret = -EINVAL;
-@@ -5350,7 +5479,7 @@
+@@ -5350,7 +5484,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
if (curr_cpu == target_cpu)
return 0;
return -EINVAL;
/* TODO: This is not properly updating schedstats */
-@@ -5389,6 +5518,8 @@
+@@ -5389,6 +5523,8 @@ void sched_setnuma(struct task_struct *p, int nid)
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
/*
* Ensure that the idle task is using init_mm right before its CPU goes
* offline.
-@@ -5403,7 +5534,12 @@
+@@ -5403,7 +5539,12 @@ void idle_task_exit(void)
switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
}
/*
-@@ -5487,7 +5623,7 @@
+@@ -5487,7 +5628,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
put_prev_task(rq, next);
/*
* both pi_lock and rq->lock, such that holding either
* stabilizes the mask.
*
-@@ -5718,6 +5854,10 @@
+@@ -5718,6 +5859,10 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(cpu);
hrtick_clear(rq);
return 0;
}
#endif
-@@ -5964,7 +6104,7 @@
+@@ -5964,7 +6109,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
return (nested == preempt_offset);
}
-@@ -6756,3 +6896,197 @@
+@@ -6756,3 +6901,196 @@ const u32 sched_prio_to_wmult[40] = {
/* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
+
-+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+static inline void
+update_nr_migratory(struct task_struct *p, long delta)
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+void migrate_disable(void)
+{
++#ifdef CONFIG_SCHED_DEBUG
+ struct task_struct *p = current;
+
+ if (in_atomic() || irqs_disabled()) {
-+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
-+#endif
+ return;
+ }
-+#ifdef CONFIG_SCHED_DEBUG
++
+ if (unlikely(p->migrate_disable_atomic)) {
+ tracing_off();
+ WARN_ON_ONCE(1);
+ }
-+#endif
+
+ p->migrate_disable++;
++#endif
++ barrier();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
++#ifdef CONFIG_SCHED_DEBUG
+ struct task_struct *p = current;
+
+ if (in_atomic() || irqs_disabled()) {
-+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
-+#endif
+ return;
+ }
+
-+#ifdef CONFIG_SCHED_DEBUG
+ if (unlikely(p->migrate_disable_atomic)) {
+ tracing_off();
+ WARN_ON_ONCE(1);
+ }
-+#endif
+
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+ p->migrate_disable--;
++#endif
++ barrier();
+}
+EXPORT_SYMBOL(migrate_enable);
+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/cpudeadline.c linux-4.14/kernel/sched/cpudeadline.c
---- linux-4.14.orig/kernel/sched/cpudeadline.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/sched/cpudeadline.c 2018-09-05 11:05:07.000000000 +0200
-@@ -127,13 +127,13 @@
+diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
+index 8d9562d890d3..91a0702fe3df 100644
+--- a/kernel/sched/cpudeadline.c
++++ b/kernel/sched/cpudeadline.c
+@@ -127,13 +127,13 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
const struct sched_dl_entity *dl_se = &p->dl;
if (later_mask &&
dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
if (later_mask)
cpumask_set_cpu(best_cpu, later_mask);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/cpupri.c linux-4.14/kernel/sched/cpupri.c
---- linux-4.14.orig/kernel/sched/cpupri.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/sched/cpupri.c 2018-09-05 11:05:07.000000000 +0200
-@@ -103,11 +103,11 @@
+diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
+index 2511aba36b89..7b9bc1de0e6c 100644
+--- a/kernel/sched/cpupri.c
++++ b/kernel/sched/cpupri.c
+@@ -103,11 +103,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
if (skip)
continue;
/*
* We have to ensure that we have at least one bit
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/deadline.c linux-4.14/kernel/sched/deadline.c
---- linux-4.14.orig/kernel/sched/deadline.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/sched/deadline.c 2018-09-05 11:05:07.000000000 +0200
-@@ -504,7 +504,7 @@
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index b2589c7e9439..28a75a9526ac 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -504,7 +504,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
* If we cannot preempt any rq, fall back to pick any
* online cpu.
*/
if (cpu >= nr_cpu_ids) {
/*
* Fail to find any suitable cpu.
-@@ -1020,7 +1020,7 @@
+@@ -1020,7 +1020,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
struct hrtimer *timer = &dl_se->dl_timer;
timer->function = dl_task_timer;
}
-@@ -1749,7 +1749,7 @@
+@@ -1753,7 +1753,7 @@ static void set_curr_task_dl(struct rq *rq)
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
return 1;
return 0;
}
-@@ -1899,7 +1899,7 @@
+@@ -1903,7 +1903,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
task_running(rq, task) ||
!dl_task(task) ||
!task_on_rq_queued(task))) {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/debug.c linux-4.14/kernel/sched/debug.c
---- linux-4.14.orig/kernel/sched/debug.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/sched/debug.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1017,6 +1017,10 @@
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 2f93e4a2d9f6..b5b43861c2b6 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -1017,6 +1017,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(dl.runtime);
P(dl.deadline);
}
-+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ P(migrate_disable);
+#endif
+ P(nr_cpus_allowed);
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/fair.c linux-4.14/kernel/sched/fair.c
---- linux-4.14.orig/kernel/sched/fair.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/sched/fair.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1596,7 +1596,7 @@
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 0cc7098c6dfd..51ecea4f5d16 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1596,7 +1596,7 @@ static void task_numa_compare(struct task_numa_env *env,
*/
if (cur) {
/* Skip this swap candidate if cannot move to the source cpu */
goto unlock;
/*
-@@ -1706,7 +1706,7 @@
+@@ -1706,7 +1706,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */
continue;
env->dst_cpu = cpu;
-@@ -3840,7 +3840,7 @@
+@@ -3840,7 +3840,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -3864,7 +3864,7 @@
+@@ -3864,7 +3864,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
return;
if (delta > ideal_runtime)
}
static void
-@@ -4006,7 +4006,7 @@
+@@ -4006,7 +4006,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
* validating it and just reschedule.
*/
if (queued) {
return;
}
/*
-@@ -4188,7 +4188,7 @@
+@@ -4188,7 +4188,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
}
static __always_inline
-@@ -4837,7 +4837,7 @@
+@@ -4684,9 +4684,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+ cfs_b->period = ns_to_ktime(default_cfs_period());
+
+ INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
+- hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
++ hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
+ cfs_b->period_timer.function = sched_cfs_period_timer;
+- hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+ cfs_b->slack_timer.function = sched_cfs_slack_timer;
+ }
+
+@@ -4837,7 +4837,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
if (delta < 0) {
if (rq->curr == p)
return;
}
hrtick_start(rq, delta);
-@@ -5475,7 +5475,7 @@
+@@ -5475,7 +5475,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_span(group),
continue;
local_group = cpumask_test_cpu(this_cpu,
-@@ -5595,7 +5595,7 @@
+@@ -5595,7 +5595,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
return cpumask_first(sched_group_span(group));
/* Traverse only the allowed CPUs */
if (idle_cpu(i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
-@@ -5698,7 +5698,7 @@
+@@ -5698,7 +5698,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
if (!test_idle_cores(target, false))
return -1;
for_each_cpu_wrap(core, cpus, target) {
bool idle = true;
-@@ -5732,7 +5732,7 @@
+@@ -5732,7 +5732,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
continue;
if (idle_cpu(cpu))
return cpu;
-@@ -5795,7 +5795,7 @@
+@@ -5795,7 +5795,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
if (!--nr)
return -1;
continue;
if (idle_cpu(cpu))
break;
-@@ -5950,7 +5950,7 @@
+@@ -5950,7 +5950,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
}
rcu_read_lock();
-@@ -6231,7 +6231,7 @@
+@@ -6231,7 +6231,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;
preempt:
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -6699,14 +6699,14 @@
+@@ -6699,14 +6699,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -6726,7 +6726,7 @@
+@@ -6726,7 +6726,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/* Prevent to re-select dst_cpu via env's cpus */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
-@@ -7295,7 +7295,7 @@
+@@ -7295,7 +7295,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
*
* Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
* cpumask covering 1 cpu of the first group and 3 cpus of the second group.
-@@ -7871,7 +7871,7 @@
+@@ -7871,7 +7871,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
-@@ -8263,7 +8263,7 @@
+@@ -8263,7 +8263,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
* if the curr task on busiest cpu can't be
* moved to this_cpu
*/
raw_spin_unlock_irqrestore(&busiest->lock,
flags);
env.flags |= LBF_ALL_PINNED;
-@@ -9085,7 +9085,7 @@
+@@ -9085,7 +9085,7 @@ static void task_fork_fair(struct task_struct *p)
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -9109,7 +9109,7 @@
+@@ -9109,7 +9109,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
*/
if (rq->curr == p) {
if (p->prio > oldprio)
} else
check_preempt_curr(rq, p, 0);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/features.h linux-4.14/kernel/sched/features.h
---- linux-4.14.orig/kernel/sched/features.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/sched/features.h 2018-09-05 11:05:07.000000000 +0200
-@@ -46,11 +46,19 @@
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index 9552fd5854bf..fb069998b518 100644
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -46,11 +46,19 @@ SCHED_FEAT(LB_BIAS, true)
*/
SCHED_FEAT(NONTASK_CAPACITY, true)
/*
* When doing wakeups, attempt to limit superfluous scans of the LLC domain.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/Makefile linux-4.14/kernel/sched/Makefile
---- linux-4.14.orig/kernel/sched/Makefile 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/sched/Makefile 2018-09-05 11:05:07.000000000 +0200
-@@ -18,7 +18,7 @@
-
- obj-y += core.o loadavg.o clock.o cputime.o
- obj-y += idle_task.o fair.o rt.o deadline.o
--obj-y += wait.o wait_bit.o swait.o completion.o idle.o
-+obj-y += wait.o wait_bit.o swait.o swork.o completion.o idle.o
- obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
- obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
- obj-$(CONFIG_SCHEDSTATS) += stats.o
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/rt.c linux-4.14/kernel/sched/rt.c
---- linux-4.14.orig/kernel/sched/rt.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/sched/rt.c 2018-09-05 11:05:07.000000000 +0200
-@@ -47,8 +47,8 @@
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index cb9a5b8532fa..6c72332dab3f 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -47,8 +47,8 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
raw_spin_lock_init(&rt_b->rt_runtime_lock);
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
-@@ -1594,7 +1594,7 @@
+@@ -1596,7 +1596,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
return 1;
return 0;
}
-@@ -1729,7 +1729,7 @@
+@@ -1731,7 +1731,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
* Also make sure that it wasn't scheduled on its rq.
*/
if (unlikely(task_rq(task) != rq ||
task_running(rq, task) ||
!rt_task(task) ||
!task_on_rq_queued(task))) {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/sched.h linux-4.14/kernel/sched/sched.h
---- linux-4.14.orig/kernel/sched/sched.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/sched/sched.h 2018-09-05 11:05:07.000000000 +0200
-@@ -1354,6 +1354,7 @@
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index b29376169f3f..96481980c8c7 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1354,6 +1354,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution
-@@ -1545,6 +1546,15 @@
+@@ -1545,6 +1546,15 @@ extern void init_sched_fair_class(void);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/swait.c linux-4.14/kernel/sched/swait.c
---- linux-4.14.orig/kernel/sched/swait.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/sched/swait.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
+index 9ff1555341ed..b14638a05ec9 100644
+--- a/kernel/sched/swait.c
++++ b/kernel/sched/swait.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/signal.h>
void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
struct lock_class_key *key)
-@@ -30,6 +31,25 @@
+@@ -30,6 +31,25 @@ void swake_up_locked(struct swait_queue_head *q)
}
EXPORT_SYMBOL(swake_up_locked);
void swake_up(struct swait_queue_head *q)
{
unsigned long flags;
-@@ -49,6 +69,7 @@
+@@ -49,6 +69,7 @@ void swake_up_all(struct swait_queue_head *q)
struct swait_queue *curr;
LIST_HEAD(tmp);
raw_spin_lock_irq(&q->lock);
list_splice_init(&q->task_list, &tmp);
while (!list_empty(&tmp)) {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/swork.c linux-4.14/kernel/sched/swork.c
---- linux-4.14.orig/kernel/sched/swork.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/kernel/sched/swork.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c
+new file mode 100644
+index 000000000000..1950f40ca725
+--- /dev/null
++++ b/kernel/sched/swork.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
+ mutex_unlock(&worker_mutex);
+}
+EXPORT_SYMBOL_GPL(swork_put);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/sched/topology.c linux-4.14/kernel/sched/topology.c
---- linux-4.14.orig/kernel/sched/topology.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/sched/topology.c 2018-09-05 11:05:07.000000000 +0200
-@@ -286,6 +286,7 @@
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 659e075ef70b..bb22e3620a90 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -286,6 +286,7 @@ static int init_rootdomain(struct root_domain *rd)
rd->rto_cpu = -1;
raw_spin_lock_init(&rd->rto_lock);
init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
#endif
init_dl_bw(&rd->dl_bw);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/signal.c linux-4.14/kernel/signal.c
---- linux-4.14.orig/kernel/signal.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/signal.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 4439ba9dc5d9..d8f75a030292 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
@@ -19,6 +19,7 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
-@@ -360,13 +361,30 @@
+@@ -360,13 +361,30 @@ static bool task_participate_group_stop(struct task_struct *task)
return false;
}
{
struct sigqueue *q = NULL;
struct user_struct *user;
-@@ -383,7 +401,10 @@
+@@ -383,7 +401,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
if (override_rlimit ||
atomic_read(&user->sigpending) <=
task_rlimit(t, RLIMIT_SIGPENDING)) {
} else {
print_dropped_signal(sig);
}
-@@ -400,6 +421,13 @@
+@@ -400,6 +421,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
-@@ -409,6 +437,21 @@
+@@ -409,6 +437,21 @@ static void __sigqueue_free(struct sigqueue *q)
kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
-@@ -422,6 +465,21 @@
+@@ -421,6 +464,21 @@ void flush_sigqueue(struct sigpending *queue)
+ }
}
- /*
++/*
+ * Called from __exit_signal. Flush tsk->pending and
+ * tsk->sigqueue_cache
+ */
+ kmem_cache_free(sigqueue_cachep, q);
+}
+
-+/*
+ /*
* Flush all pending signals for this kthread.
*/
- void flush_signals(struct task_struct *t)
-@@ -542,7 +600,7 @@
+@@ -542,7 +600,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
(info->si_code == SI_TIMER) &&
(info->si_sys_private);
} else {
/*
* Ok, it wasn't in the queue. This must be
-@@ -578,6 +636,8 @@
+@@ -578,6 +636,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
bool resched_timer = false;
int signr;
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
-@@ -1177,8 +1237,8 @@
+@@ -1177,8 +1237,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
{
unsigned long int flags;
int ret, blocked, ignored;
-@@ -1207,6 +1267,39 @@
+@@ -1207,6 +1267,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
return ret;
}
/*
* Nuke all other threads in the group.
*/
-@@ -1241,12 +1334,12 @@
+@@ -1241,12 +1334,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
* Disable interrupts early to avoid deadlocks.
* See rcu_read_unlock() comment header for details.
*/
break;
}
/*
-@@ -1267,7 +1360,7 @@
+@@ -1267,7 +1360,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
}
spin_unlock(&sighand->siglock);
rcu_read_unlock();
}
return sighand;
-@@ -1514,7 +1607,8 @@
+@@ -1514,7 +1607,8 @@ EXPORT_SYMBOL(kill_pid);
*/
struct sigqueue *sigqueue_alloc(void)
{
if (q)
q->flags |= SIGQUEUE_PREALLOC;
-@@ -1888,15 +1982,7 @@
+@@ -1888,15 +1982,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
freezable_schedule();
} else {
/*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/softirq.c linux-4.14/kernel/softirq.c
---- linux-4.14.orig/kernel/softirq.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/softirq.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index a4c87cf27f9d..ec801952785a 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
@@ -21,11 +21,14 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
-@@ -56,12 +59,108 @@
+@@ -56,12 +59,108 @@ EXPORT_SYMBOL(irq_stat);
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
/*
* we cannot loop indefinitely here to avoid userspace starvation,
* but we also don't want to introduce a worst case 1/HZ latency
-@@ -77,6 +176,38 @@
+@@ -77,6 +176,38 @@ static void wakeup_softirqd(void)
wake_up_process(tsk);
}
/*
* If ksoftirqd is scheduled, we do not want to process pending softirqs
* right now. Let ksoftirqd handle this at its own rate, to get fairness,
-@@ -92,6 +223,47 @@
+@@ -92,6 +223,47 @@ static bool ksoftirqd_running(unsigned long pending)
return tsk && (tsk->state == TASK_RUNNING);
}
/*
* preempt_count and SOFTIRQ_OFFSET usage:
* - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
-@@ -247,10 +419,8 @@
+@@ -247,10 +419,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
int max_restart = MAX_SOFTIRQ_RESTART;
/*
* Mask out PF_MEMALLOC s current task context is borrowed for the
-@@ -269,36 +439,7 @@
+@@ -269,36 +439,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
pending = local_softirq_pending();
if (pending) {
-@@ -335,6 +476,309 @@
+@@ -334,6 +475,309 @@ asmlinkage __visible void do_softirq(void)
+ local_irq_restore(flags);
}
- /*
++/*
+ * This function must run with irqs disabled!
+ */
+void raise_softirq_irqoff(unsigned int nr)
+}
+
+#endif /* PREEMPT_RT_FULL */
-+/*
+ /*
* Enter an interrupt context.
*/
- void irq_enter(void)
-@@ -345,9 +789,9 @@
+@@ -345,9 +789,9 @@ void irq_enter(void)
* Prevent raise_softirq from needlessly waking up ksoftirqd
* here, as softirq will be serviced on return from interrupt.
*/
}
__irq_enter();
-@@ -355,6 +799,7 @@
+@@ -355,6 +799,7 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
if (ksoftirqd_running(local_softirq_pending()))
return;
-@@ -377,6 +822,18 @@
+@@ -377,6 +822,18 @@ static inline void invoke_softirq(void)
} else {
wakeup_softirqd();
}
}
static inline void tick_irq_exit(void)
-@@ -385,7 +842,13 @@
+@@ -385,7 +842,8 @@ static inline void tick_irq_exit(void)
int cpu = smp_processor_id();
/* Make sure that timer wheel updates are propagated */
- if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
-+#ifdef CONFIG_PREEMPT_RT_BASE
+ if ((idle_cpu(cpu) || tick_nohz_full_cpu(cpu)) &&
-+ !need_resched() && !local_softirq_pending())
-+#else
-+ if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu))
-+#endif
-+ {
++ !need_resched() && !local_softirq_pending()) {
if (!in_irq())
tick_nohz_irq_exit();
}
-@@ -413,26 +876,6 @@
+@@ -413,26 +871,6 @@ void irq_exit(void)
trace_hardirq_exit(); /* must be last! */
}
void raise_softirq(unsigned int nr)
{
unsigned long flags;
-@@ -442,12 +885,6 @@
+@@ -442,12 +880,6 @@ void raise_softirq(unsigned int nr)
local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
softirq_vec[nr].action = action;
-@@ -464,15 +901,45 @@
+@@ -464,15 +896,45 @@ struct tasklet_head {
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);
-@@ -482,50 +949,108 @@
+@@ -482,50 +944,108 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
unsigned long flags;
local_irq_save(flags);
static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
struct tasklet_struct *list;
-@@ -536,30 +1061,7 @@
+@@ -536,30 +1056,7 @@ static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
local_irq_enable();
}
void tasklet_init(struct tasklet_struct *t,
-@@ -580,7 +1082,7 @@
+@@ -580,7 +1077,7 @@ void tasklet_kill(struct tasklet_struct *t)
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
do {
} while (test_bit(TASKLET_STATE_SCHED, &t->state));
}
tasklet_unlock_wait(t);
-@@ -588,57 +1090,6 @@
+@@ -588,57 +1085,6 @@ void tasklet_kill(struct tasklet_struct *t)
}
EXPORT_SYMBOL(tasklet_kill);
void __init softirq_init(void)
{
int cpu;
-@@ -654,25 +1105,26 @@
+@@ -654,25 +1100,26 @@ void __init softirq_init(void)
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
}
#ifdef CONFIG_HOTPLUG_CPU
-@@ -739,17 +1191,31 @@
+@@ -739,17 +1186,31 @@ static int takeover_tasklets(unsigned int cpu)
static struct smp_hotplug_thread softirq_threads = {
.store = &ksoftirqd,
return 0;
}
early_initcall(spawn_ksoftirqd);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/stop_machine.c linux-4.14/kernel/stop_machine.c
---- linux-4.14.orig/kernel/stop_machine.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/stop_machine.c 2018-09-05 11:05:07.000000000 +0200
-@@ -496,6 +496,8 @@
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 067cb83f37ea..56f2f2e01229 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -503,6 +503,8 @@ static void cpu_stopper_thread(unsigned int cpu)
struct cpu_stop_done *done = work->done;
int ret;
/* cpu stop callbacks must not sleep, make in_atomic() == T */
preempt_count_inc();
ret = fn(arg);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/alarmtimer.c linux-4.14/kernel/time/alarmtimer.c
---- linux-4.14.orig/kernel/time/alarmtimer.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/time/alarmtimer.c 2018-09-05 11:05:07.000000000 +0200
-@@ -436,7 +436,7 @@
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index 639321bf2e39..0c7227f89349 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -436,7 +436,7 @@ int alarm_cancel(struct alarm *alarm)
int ret = alarm_try_to_cancel(alarm);
if (ret >= 0)
return ret;
}
}
EXPORT_SYMBOL_GPL(alarm_cancel);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/hrtimer.c linux-4.14/kernel/time/hrtimer.c
---- linux-4.14.orig/kernel/time/hrtimer.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/time/hrtimer.c 2018-09-05 11:05:07.000000000 +0200
-@@ -60,6 +60,15 @@
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index d00e85ac10d6..b59e009087a9 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -59,6 +59,15 @@
+
#include "tick-internal.h"
- /*
++/*
+ * Masks for selecting the soft and hard context timers from
+ * cpu_base->active
+ */
+#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
+#define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+
-+/*
+ /*
* The timer bases:
*
- * There are more clockids than hrtimer bases. Thus, we index
@@ -70,7 +79,6 @@
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
.clock_base =
{
{
-@@ -93,6 +101,26 @@
+@@ -93,6 +101,26 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
.clockid = CLOCK_TAI,
.get_time = &ktime_get_clocktai,
},
}
};
-@@ -118,7 +146,6 @@
+@@ -118,7 +146,6 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
* timer->base->cpu_base
*/
static struct hrtimer_cpu_base migration_cpu_base = {
.clock_base = { { .cpu_base = &migration_cpu_base, }, },
};
-@@ -156,45 +183,33 @@
+@@ -156,45 +183,33 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
}
/*
/*
* We switch the timer base to a power-optimized selected CPU target,
-@@ -396,7 +411,8 @@
+@@ -396,7 +411,8 @@ static inline void debug_hrtimer_init(struct hrtimer *timer)
debug_object_init(timer, &hrtimer_debug_descr);
}
{
debug_object_activate(timer, &hrtimer_debug_descr);
}
-@@ -429,8 +445,10 @@
+@@ -429,8 +445,10 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer)
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
#else
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
-@@ -442,10 +460,11 @@
+@@ -442,10 +460,11 @@ debug_init(struct hrtimer *timer, clockid_t clockid,
trace_hrtimer_init(timer, clockid, mode);
}
}
static inline void debug_deactivate(struct hrtimer *timer)
-@@ -454,35 +473,43 @@
+@@ -454,35 +473,43 @@ static inline void debug_deactivate(struct hrtimer *timer)
trace_hrtimer_cancel(timer);
}
}
}
/*
-@@ -494,7 +521,47 @@
+@@ -494,7 +521,47 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
expires_next = 0;
return expires_next;
}
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
-@@ -502,36 +569,14 @@
+@@ -502,36 +569,14 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
}
/*
-@@ -539,7 +584,8 @@
+@@ -539,7 +584,8 @@ static inline int hrtimer_is_hres_enabled(void)
*/
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
}
static inline int hrtimer_hres_active(void)
-@@ -557,10 +603,23 @@
+@@ -557,10 +603,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
ktime_t expires_next;
if (skip_equal && expires_next == cpu_base->expires_next)
return;
-@@ -568,6 +627,9 @@
+@@ -568,6 +627,9 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
cpu_base->expires_next = expires_next;
/*
* If a hang was detected in the last timer interrupt then we
* leave the hang delay active in the hardware. We want the
* system to make progress. That also prevents the following
-@@ -581,83 +643,38 @@
+@@ -581,83 +643,38 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
* set. So we'd effectivly block all timers until the T2 event
* fires.
*/
}
/*
-@@ -669,7 +686,7 @@
+@@ -669,7 +686,7 @@ static void retrigger_next_event(void *arg)
{
struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
return;
raw_spin_lock(&base->lock);
-@@ -698,6 +715,29 @@
+@@ -698,6 +715,29 @@ static void hrtimer_switch_to_hres(void)
retrigger_next_event(NULL);
}
static void clock_was_set_work(struct work_struct *work)
{
clock_was_set();
-@@ -713,26 +753,106 @@
+@@ -713,25 +753,105 @@ void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
#endif /* CONFIG_HIGH_RES_TIMERS */
- /*
++/*
+ * When a timer is enqueued and expires earlier than the already enqueued
+ * timers, we have to check, whether it expires earlier than the timer for
+ * which the clock event device was armed.
+ tick_program_event(expires, 1);
+}
+
-+/*
+ /*
* Clock realtime was set
*
- * Change the offset of the realtime clock vs. the monotonic
-@@ -830,6 +950,33 @@
+@@ -830,6 +950,33 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
-@@ -839,9 +986,10 @@
+@@ -839,9 +986,10 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
* Returns 1 when the new timer is the leftmost timer in the tree.
*/
static int enqueue_hrtimer(struct hrtimer *timer,
base->cpu_base->active_bases |= 1 << base->index;
-@@ -874,7 +1022,6 @@
+@@ -874,7 +1022,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
/*
* Note: If reprogram is false we do not update
* cpu_base->next_timer. This happens when we remove the first
-@@ -885,7 +1032,6 @@
+@@ -885,7 +1032,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
*/
if (reprogram && timer == cpu_base->next_timer)
hrtimer_force_reprogram(cpu_base, 1);
}
/*
-@@ -934,22 +1080,36 @@
+@@ -934,22 +1080,36 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
return tim;
}
/* Remove an active timer from the queue: */
remove_hrtimer(timer, base, true);
-@@ -964,21 +1124,37 @@
+@@ -964,21 +1124,37 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
/* Switch the timer base, if necessary: */
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
-@@ -1035,7 +1211,7 @@
+@@ -1035,7 +1211,7 @@ int hrtimer_cancel(struct hrtimer *timer)
if (ret >= 0)
return ret;
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1076,7 +1252,7 @@
+@@ -1076,7 +1252,7 @@ u64 hrtimer_get_next_event(void)
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!__hrtimer_hres_active(cpu_base))
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
-@@ -1099,8 +1275,16 @@
+@@ -1099,8 +1275,16 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
memset(timer, 0, sizeof(struct hrtimer));
-@@ -1114,7 +1298,8 @@
+@@ -1114,7 +1298,8 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
clock_id = CLOCK_MONOTONIC;
timer->base = &cpu_base->clock_base[base];
timerqueue_init(&timer->node);
}
-@@ -1123,7 +1308,13 @@
+@@ -1123,7 +1308,13 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
* hrtimer_init - initialize a timer to the given clock
* @timer: the timer to be initialized
* @clock_id: the clock to be used
*/
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
-@@ -1142,19 +1333,19 @@
+@@ -1142,19 +1333,19 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
*/
bool hrtimer_active(const struct hrtimer *timer)
{
return false;
}
-@@ -1180,7 +1371,8 @@
+@@ -1180,7 +1371,8 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
struct hrtimer_clock_base *base,
{
enum hrtimer_restart (*fn)(struct hrtimer *);
int restart;
-@@ -1188,16 +1380,16 @@
+@@ -1188,16 +1380,16 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
lockdep_assert_held(&cpu_base->lock);
debug_deactivate(timer);
__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
fn = timer->function;
-@@ -1211,15 +1403,15 @@
+@@ -1211,15 +1403,15 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
timer->is_rel = false;
/*
/*
* Note: We clear the running state after enqueue_hrtimer and
-@@ -1232,33 +1424,31 @@
+@@ -1232,33 +1424,31 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
*/
if (restart != HRTIMER_NORESTART &&
!(timer->state & HRTIMER_STATE_ENQUEUED))
basenow = ktime_add(now, base->offset);
while ((node = timerqueue_getnext(&base->active))) {
-@@ -1281,11 +1471,29 @@
+@@ -1281,11 +1471,29 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
if (basenow < hrtimer_get_softexpires_tv64(timer))
break;
#ifdef CONFIG_HIGH_RES_TIMERS
/*
-@@ -1296,13 +1504,14 @@
+@@ -1296,13 +1504,14 @@ void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
entry_time = now = hrtimer_update_base(cpu_base);
retry:
cpu_base->in_hrtirq = 1;
-@@ -1315,17 +1524,23 @@
+@@ -1315,17 +1524,23 @@ void hrtimer_interrupt(struct clock_event_device *dev)
*/
cpu_base->expires_next = KTIME_MAX;
/* Reprogramming necessary ? */
if (!tick_program_event(expires_next, 0)) {
-@@ -1346,7 +1561,7 @@
+@@ -1346,7 +1561,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
* Acquire base lock for updating the offsets and retrieving
* the current time.
*/
now = hrtimer_update_base(cpu_base);
cpu_base->nr_retries++;
if (++retries < 3)
-@@ -1359,7 +1574,8 @@
+@@ -1359,7 +1574,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
*/
cpu_base->nr_hangs++;
cpu_base->hang_detected = 1;
delta = ktime_sub(now, entry_time);
if ((unsigned int)delta > cpu_base->max_hang_time)
cpu_base->max_hang_time = (unsigned int) delta;
-@@ -1401,6 +1617,7 @@
+@@ -1401,6 +1617,7 @@ static inline void __hrtimer_peek_ahead_timers(void) { }
void hrtimer_run_queues(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t now;
if (__hrtimer_hres_active(cpu_base))
-@@ -1418,10 +1635,17 @@
+@@ -1418,10 +1635,17 @@ void hrtimer_run_queues(void)
return;
}
}
/*
-@@ -1440,13 +1664,65 @@
+@@ -1440,13 +1664,65 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
switch(restart->nanosleep.type) {
-@@ -1470,8 +1746,6 @@
+@@ -1470,8 +1746,6 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
{
struct restart_block *restart;
do {
set_current_state(TASK_INTERRUPTIBLE);
hrtimer_start_expires(&t->timer, mode);
-@@ -1508,10 +1782,9 @@
+@@ -1508,10 +1782,9 @@ static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
struct hrtimer_sleeper t;
int ret;
ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
destroy_hrtimer_on_stack(&t.timer);
return ret;
-@@ -1529,7 +1802,7 @@
+@@ -1529,7 +1802,7 @@ long hrtimer_nanosleep(const struct timespec64 *rqtp,
if (dl_task(current) || rt_task(current))
slack = 0;
hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
ret = do_nanosleep(&t, mode);
if (ret != -ERESTART_RESTARTBLOCK)
-@@ -1585,6 +1858,27 @@
+@@ -1585,6 +1858,27 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
}
#endif
/*
* Functions related to boot-time initialization:
*/
-@@ -1598,9 +1892,17 @@
+@@ -1598,9 +1892,17 @@ int hrtimers_prepare_cpu(unsigned int cpu)
timerqueue_init_head(&cpu_base->clock_base[i].active);
}
return 0;
}
-@@ -1632,7 +1934,7 @@
+@@ -1632,7 +1934,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
* sort out already expired timers and reprogram the
* event device.
*/
}
}
-@@ -1644,6 +1946,12 @@
+@@ -1644,6 +1946,12 @@ int hrtimers_dead_cpu(unsigned int scpu)
BUG_ON(cpu_online(scpu));
tick_cancel_sched_timer(scpu);
local_irq_disable();
old_base = &per_cpu(hrtimer_bases, scpu);
new_base = this_cpu_ptr(&hrtimer_bases);
-@@ -1659,12 +1967,19 @@
+@@ -1659,12 +1967,19 @@ int hrtimers_dead_cpu(unsigned int scpu)
&new_base->clock_base[i]);
}
return 0;
}
-@@ -1673,18 +1988,19 @@
+@@ -1673,18 +1988,19 @@ int hrtimers_dead_cpu(unsigned int scpu)
void __init hrtimers_init(void)
{
hrtimers_prepare_cpu(smp_processor_id());
{
struct hrtimer_sleeper t;
-@@ -1705,11 +2021,9 @@
+@@ -1705,11 +2021,9 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
return -EINTR;
}
hrtimer_start_expires(&t.timer, mode);
if (likely(t.task))
-@@ -1727,7 +2041,7 @@
+@@ -1727,7 +2041,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
* schedule_hrtimeout_range - sleep until timeout
* @expires: timeout value (ktime_t)
* @delta: slack in expires timeout (ktime_t)
*
* Make the current task sleep until the given expiry time has
* elapsed. The routine will return immediately unless
-@@ -1766,7 +2080,7 @@
+@@ -1766,7 +2080,7 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
/**
* schedule_hrtimeout - sleep until timeout
* @expires: timeout value (ktime_t)
*
* Make the current task sleep until the given expiry time has
* elapsed. The routine will return immediately unless
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/itimer.c linux-4.14/kernel/time/itimer.c
---- linux-4.14.orig/kernel/time/itimer.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/time/itimer.c 2018-09-05 11:05:07.000000000 +0200
-@@ -214,6 +214,7 @@
+diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
+index f26acef5d7b4..760f38528365 100644
+--- a/kernel/time/itimer.c
++++ b/kernel/time/itimer.c
+@@ -214,6 +214,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
spin_unlock_irq(&tsk->sighand->siglock);
goto again;
}
expires = timeval_to_ktime(value->it_value);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/jiffies.c linux-4.14/kernel/time/jiffies.c
---- linux-4.14.orig/kernel/time/jiffies.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/time/jiffies.c 2018-09-05 11:05:07.000000000 +0200
-@@ -74,7 +74,8 @@
+diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
+index 497719127bf9..62acb8914c9e 100644
+--- a/kernel/time/jiffies.c
++++ b/kernel/time/jiffies.c
+@@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = {
.max_cycles = 10,
};
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
-@@ -83,9 +84,9 @@
+@@ -83,9 +84,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/posix-cpu-timers.c linux-4.14/kernel/time/posix-cpu-timers.c
---- linux-4.14.orig/kernel/time/posix-cpu-timers.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/time/posix-cpu-timers.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 5b117110b55b..47d063c4ed03 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
@@ -3,8 +3,10 @@
* Implement CPU time clocks for the POSIX clock interface.
*/
#include "posix-timers.h"
-@@ -603,7 +606,7 @@
+@@ -603,7 +606,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
/*
* Disarm any old timer after extracting its expiry time.
*/
ret = 0;
old_incr = timer->it.cpu.incr;
-@@ -1034,7 +1037,7 @@
+@@ -1034,7 +1037,7 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
/*
* Now re-arm for the new expiry time.
*/
arm_timer(timer);
unlock:
unlock_task_sighand(p, &flags);
-@@ -1119,13 +1122,13 @@
+@@ -1119,13 +1122,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
/*
* The fast path checks that there are no expired thread or thread
-@@ -1179,6 +1182,152 @@
+@@ -1179,6 +1182,152 @@ void run_posix_cpu_timers(struct task_struct *tsk)
}
}
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/posix-timers.c linux-4.14/kernel/time/posix-timers.c
---- linux-4.14.orig/kernel/time/posix-timers.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/time/posix-timers.c 2018-09-05 11:05:07.000000000 +0200
-@@ -434,6 +434,7 @@
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 708992708332..c5866984f12d 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -434,6 +434,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
static struct pid *good_sigevent(sigevent_t * event)
{
struct task_struct *rtn = current->group_leader;
switch (event->sigev_notify) {
case SIGEV_SIGNAL | SIGEV_THREAD_ID:
-@@ -443,7 +444,8 @@
+@@ -443,7 +444,8 @@ static struct pid *good_sigevent(sigevent_t * event)
/* FALLTHRU */
case SIGEV_SIGNAL:
case SIGEV_THREAD:
return NULL;
/* FALLTHRU */
case SIGEV_NONE:
-@@ -469,7 +471,7 @@
+@@ -469,7 +471,7 @@ static struct k_itimer * alloc_posix_timer(void)
static void k_itimer_rcu_free(struct rcu_head *head)
{
kmem_cache_free(posix_timers_cache, tmr);
}
-@@ -486,7 +488,7 @@
+@@ -486,7 +488,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
}
put_pid(tmr->it_pid);
sigqueue_free(tmr->sigq);
}
static int common_timer_create(struct k_itimer *new_timer)
-@@ -825,6 +827,22 @@
+@@ -825,6 +827,22 @@ static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{
return hrtimer_try_to_cancel(&timr->it.real.timer);
-@@ -889,6 +907,7 @@
+@@ -889,6 +907,7 @@ static int do_timer_settime(timer_t timer_id, int flags,
if (!timr)
return -EINVAL;
kc = timr->kclock;
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
-@@ -897,9 +916,12 @@
+@@ -897,9 +916,12 @@ static int do_timer_settime(timer_t timer_id, int flags,
unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
return error;
}
-@@ -981,10 +1003,15 @@
+@@ -981,10 +1003,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
if (!timer)
return -EINVAL;
spin_lock(¤t->sighand->siglock);
list_del(&timer->list);
-@@ -1010,8 +1037,18 @@
+@@ -1010,8 +1037,18 @@ static void itimer_delete(struct k_itimer *timer)
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
goto retry_delete;
}
list_del(&timer->list);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/tick-broadcast-hrtimer.c linux-4.14/kernel/time/tick-broadcast-hrtimer.c
---- linux-4.14.orig/kernel/time/tick-broadcast-hrtimer.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/time/tick-broadcast-hrtimer.c 2018-09-05 11:05:07.000000000 +0200
-@@ -106,7 +106,7 @@
+diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
+index 58045eb976c3..f0a34afbc252 100644
+--- a/kernel/time/tick-broadcast-hrtimer.c
++++ b/kernel/time/tick-broadcast-hrtimer.c
+@@ -106,7 +106,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
void tick_setup_hrtimer_broadcast(void)
{
bctimer.function = bc_handler;
clockevents_register_device(&ce_broadcast_hrtimer);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/tick-common.c linux-4.14/kernel/time/tick-common.c
---- linux-4.14.orig/kernel/time/tick-common.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/time/tick-common.c 2018-09-05 11:05:07.000000000 +0200
-@@ -79,13 +79,15 @@
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index 49edc1c4f3e6..7a87a4488a5e 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -79,13 +79,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
update_wall_time();
}
-@@ -157,9 +159,9 @@
+@@ -157,9 +159,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/tick-internal.h linux-4.14/kernel/time/tick-internal.h
---- linux-4.14.orig/kernel/time/tick-internal.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/time/tick-internal.h 2018-09-05 11:05:07.000000000 +0200
-@@ -150,16 +150,15 @@
+@@ -490,6 +492,7 @@ void tick_freeze(void)
+ if (tick_freeze_depth == num_online_cpus()) {
+ trace_suspend_resume(TPS("timekeeping_freeze"),
+ smp_processor_id(), true);
++ system_state = SYSTEM_SUSPEND;
+ timekeeping_suspend();
+ } else {
+ tick_suspend_local();
+@@ -513,6 +516,7 @@ void tick_unfreeze(void)
+
+ if (tick_freeze_depth == num_online_cpus()) {
+ timekeeping_resume();
++ system_state = SYSTEM_RUNNING;
+ trace_suspend_resume(TPS("timekeeping_freeze"),
+ smp_processor_id(), false);
+ } else {
+diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
+index f8e1845aa464..e277284c2831 100644
+--- a/kernel/time/tick-internal.h
++++ b/kernel/time/tick-internal.h
+@@ -150,16 +150,15 @@ static inline void tick_nohz_init(void) { }
#ifdef CONFIG_NO_HZ_COMMON
extern unsigned long tick_nohz_active;
DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/tick-sched.c linux-4.14/kernel/time/tick-sched.c
---- linux-4.14.orig/kernel/time/tick-sched.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/time/tick-sched.c 2018-09-05 11:05:07.000000000 +0200
-@@ -66,7 +66,8 @@
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index ea3c062e7e1c..643b36a0b8e1 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -66,7 +66,8 @@ static void tick_do_update_jiffies64(ktime_t now)
return;
/* Reevaluate with jiffies_lock held */
delta = ktime_sub(now, last_jiffies_update);
if (delta >= tick_period) {
-@@ -89,10 +90,12 @@
+@@ -89,10 +90,12 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
} else {
update_wall_time();
}
-@@ -103,12 +106,14 @@
+@@ -103,12 +106,14 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
return period;
}
-@@ -225,6 +230,7 @@
+@@ -225,6 +230,7 @@ static void nohz_full_kick_func(struct irq_work *work)
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
.func = nohz_full_kick_func,
};
/*
-@@ -689,10 +695,10 @@
+@@ -689,10 +695,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
/* Read jiffies and the time when jiffies were updated last */
do {
ts->last_jiffies = basejiff;
/*
-@@ -906,14 +912,7 @@
+@@ -906,14 +912,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
return false;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
return false;
}
-@@ -1132,7 +1131,7 @@
+@@ -1132,7 +1131,7 @@ static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
ts->nohz_mode = mode;
/* One update is enough */
if (!test_and_set_bit(0, &tick_nohz_active))
}
/**
-@@ -1250,7 +1249,7 @@
+@@ -1250,7 +1249,7 @@ void tick_setup_sched_timer(void)
/*
* Emulate tick processing via per-CPU hrtimers:
*/
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per-CPU) */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/timekeeping.c linux-4.14/kernel/time/timekeeping.c
---- linux-4.14.orig/kernel/time/timekeeping.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/time/timekeeping.c 2018-09-05 11:05:07.000000000 +0200
-@@ -2326,8 +2326,10 @@
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 2cafb49aa65e..2720f2c29a6d 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -2326,8 +2326,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/timekeeping.h linux-4.14/kernel/time/timekeeping.h
---- linux-4.14.orig/kernel/time/timekeeping.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/time/timekeeping.h 2018-09-05 11:05:07.000000000 +0200
-@@ -18,7 +18,8 @@
+diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
+index c9f9af339914..0c0f52bf1927 100644
+--- a/kernel/time/timekeeping.h
++++ b/kernel/time/timekeeping.h
+@@ -18,7 +18,8 @@ extern void timekeeping_resume(void);
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
#define CS_NAME_LEN 32
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/time/timer.c linux-4.14/kernel/time/timer.c
---- linux-4.14.orig/kernel/time/timer.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/time/timer.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index f17c76a1a05f..5fadd754ce20 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
@@ -44,6 +44,7 @@
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
-@@ -197,11 +198,12 @@
+@@ -197,11 +198,12 @@ EXPORT_SYMBOL(jiffies_64);
struct timer_base {
raw_spinlock_t lock;
struct timer_list *running_timer;
bool is_idle;
bool must_forward_clk;
DECLARE_BITMAP(pending_map, WHEEL_SIZE);
-@@ -210,45 +212,73 @@
+@@ -210,45 +212,73 @@ struct timer_base {
static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
+ timers_update_migration();
+ static_branch_enable(&timers_nohz_active);
+ mutex_unlock(&timer_keys_mutex);
- }
-
++}
++
+void timers_update_nohz(void)
+{
+ swork_queue(&timer_update_swork);
+ WARN_ON(swork_get());
+ INIT_SWORK(&timer_update_swork, timer_update_keys);
+ return 0;
-+}
+ }
+early_initcall(hrtimer_init_thread);
-+
+
int timer_migration_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
static unsigned long round_jiffies_common(unsigned long j, int cpu,
bool force_up)
-@@ -534,7 +564,7 @@
+@@ -534,7 +564,7 @@ __internal_add_timer(struct timer_base *base, struct timer_list *timer)
static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
return;
/*
-@@ -840,21 +870,20 @@
+@@ -840,21 +870,20 @@ static inline struct timer_base *get_timer_base(u32 tflags)
return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}
unsigned long jnow;
/*
-@@ -878,16 +907,8 @@
+@@ -878,16 +907,8 @@ static inline void forward_timer_base(struct timer_base *base)
base->clk = jnow;
else
base->clk = base->next_expiry;
/*
-@@ -1130,6 +1151,33 @@
+@@ -1130,6 +1151,33 @@ void add_timer_on(struct timer_list *timer, int cpu)
}
EXPORT_SYMBOL_GPL(add_timer_on);
/**
* del_timer - deactivate a timer.
* @timer: the timer to be deactivated
-@@ -1185,7 +1233,7 @@
+@@ -1185,7 +1233,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
}
EXPORT_SYMBOL(try_to_del_timer_sync);
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
-@@ -1245,7 +1293,7 @@
+@@ -1245,7 +1293,7 @@ int del_timer_sync(struct timer_list *timer)
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
}
}
EXPORT_SYMBOL(del_timer_sync);
-@@ -1309,13 +1357,16 @@
+@@ -1309,13 +1357,16 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
fn = timer->function;
data = timer->data;
raw_spin_lock_irq(&base->lock);
}
}
-@@ -1584,13 +1635,13 @@
-
- /* Note: this timer irq context must be accounted for as well. */
+@@ -1586,7 +1637,7 @@ void update_process_times(int user_tick)
account_process_tick(p, user_tick);
-+ scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
-#ifdef CONFIG_IRQ_WORK
if (in_irq())
irq_work_tick();
#endif
-- scheduler_tick();
- if (IS_ENABLED(CONFIG_POSIX_TIMERS))
- run_posix_cpu_timers(p);
- }
-@@ -1617,8 +1668,8 @@
+@@ -1633,8 +1684,8 @@ static inline void __run_timers(struct timer_base *base)
while (levels--)
expire_timers(base, heads + levels);
}
}
/*
-@@ -1628,6 +1679,7 @@
+@@ -1644,6 +1695,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+ irq_work_tick_soft();
- /*
- * must_forward_clk must be cleared before running timers so that any
- * timer functions that call mod_timer will not try to forward the
-@@ -1864,6 +1916,9 @@
+ __run_timers(base);
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
+ __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
+@@ -1867,6 +1919,9 @@ static void __init init_timer_cpu(int cpu)
base->cpu = cpu;
raw_spin_lock_init(&base->lock);
base->clk = jiffies;
}
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/Kconfig linux-4.14/kernel/trace/Kconfig
---- linux-4.14.orig/kernel/trace/Kconfig 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/trace/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -585,7 +585,10 @@
+diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
+index 4ad6f6ca18c1..55d39a3fbdf7 100644
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -585,7 +585,10 @@ config HIST_TRIGGERS
event activity as an initial guide for further investigation
using more advanced tools.
If in doubt, say N.
config MMIOTRACE_TEST
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/ring_buffer.c linux-4.14/kernel/trace/ring_buffer.c
---- linux-4.14.orig/kernel/trace/ring_buffer.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/trace/ring_buffer.c 2018-09-05 11:05:07.000000000 +0200
-@@ -41,6 +41,8 @@
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index fd7809004297..75fea9321ffb 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -41,6 +41,8 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
RINGBUF_TYPE_PADDING);
trace_seq_printf(s, "\ttime_extend : type == %d\n",
RINGBUF_TYPE_TIME_EXTEND);
trace_seq_printf(s, "\tdata max type_len == %d\n",
RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
-@@ -140,12 +142,15 @@
+@@ -140,12 +142,15 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
enum {
RB_LEN_TIME_EXTEND = 8,
static inline int rb_null_event(struct ring_buffer_event *event)
{
return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
-@@ -209,7 +214,7 @@
+@@ -209,7 +214,7 @@ rb_event_ts_length(struct ring_buffer_event *event)
{
unsigned len = 0;
/* time extends include the data event after it */
len = RB_LEN_TIME_EXTEND;
event = skip_time_extend(event);
-@@ -231,7 +236,7 @@
+@@ -231,7 +236,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
unsigned length;
event = skip_time_extend(event);
length = rb_event_length(event);
-@@ -248,7 +253,7 @@
+@@ -248,7 +253,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_length);
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
event = skip_time_extend(event);
BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
/* If length is in len field, then array[0] has the data */
-@@ -275,6 +280,27 @@
+@@ -275,6 +280,27 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
#define TS_MASK ((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST (~TS_MASK)
/* Flag when events were overwritten */
#define RB_MISSED_EVENTS (1 << 31)
/* Missed count stored at end */
-@@ -451,6 +477,7 @@
+@@ -451,6 +477,7 @@ struct ring_buffer_per_cpu {
struct buffer_page *reader_page;
unsigned long lost_events;
unsigned long last_overrun;
local_t entries_bytes;
local_t entries;
local_t overrun;
-@@ -488,6 +515,7 @@
+@@ -488,6 +515,7 @@ struct ring_buffer {
u64 (*clock)(void);
struct rb_irq_work irq_work;
};
struct ring_buffer_iter {
-@@ -1387,6 +1415,16 @@
+@@ -1387,6 +1415,16 @@ void ring_buffer_set_clock(struct ring_buffer *buffer,
buffer->clock = clock;
}
static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static inline unsigned long rb_page_entries(struct buffer_page *bpage)
-@@ -2217,12 +2255,15 @@
+@@ -2217,12 +2255,15 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
/* Slow path, do not inline */
static noinline struct ring_buffer_event *
event->time_delta = delta & TS_MASK;
event->array[0] = delta >> TS_SHIFT;
} else {
-@@ -2265,7 +2306,9 @@
+@@ -2265,7 +2306,9 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
* add it to the start of the resevered space.
*/
if (unlikely(info->add_timestamp)) {
length -= RB_LEN_TIME_EXTEND;
delta = 0;
}
-@@ -2453,7 +2496,7 @@
+@@ -2453,7 +2496,7 @@ static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer
static inline void rb_event_discard(struct ring_buffer_event *event)
{
event = skip_time_extend(event);
/* array[0] holds the actual length for the discarded event */
-@@ -2497,10 +2540,11 @@
+@@ -2497,10 +2540,11 @@ rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer->write_stamp =
cpu_buffer->commit_page->page->time_stamp;
else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
} else
cpu_buffer->write_stamp += event->time_delta;
}
-@@ -2583,22 +2627,19 @@
+@@ -2583,22 +2627,19 @@ static __always_inline int
trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
unsigned int val = cpu_buffer->current_context;
cpu_buffer->current_context = val;
return 0;
-@@ -2607,7 +2648,57 @@
+@@ -2607,7 +2648,57 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
static __always_inline void
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{
}
/**
-@@ -2683,7 +2774,7 @@
+@@ -2683,7 +2774,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
* If this is the first commit on the page, then it has the same
* timestamp as the page itself.
*/
info->delta = 0;
/* See if we shot pass the end of this buffer page */
-@@ -2760,8 +2851,11 @@
+@@ -2760,8 +2851,11 @@ rb_reserve_next_event(struct ring_buffer *buffer,
/* make sure this diff is calculated here */
barrier();
info.delta = diff;
if (unlikely(test_time_stamp(info.delta)))
rb_handle_timestamp(cpu_buffer, &info);
-@@ -3459,14 +3553,13 @@
+@@ -3459,14 +3553,13 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
return;
case RINGBUF_TYPE_TIME_EXTEND:
return;
case RINGBUF_TYPE_DATA:
-@@ -3490,14 +3583,13 @@
+@@ -3490,14 +3583,13 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
return;
case RINGBUF_TYPE_TIME_EXTEND:
return;
case RINGBUF_TYPE_DATA:
-@@ -3721,6 +3813,8 @@
+@@ -3721,6 +3813,8 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
struct buffer_page *reader;
int nr_loops = 0;
again:
/*
* We repeat when a time extend is encountered.
-@@ -3757,12 +3851,17 @@
+@@ -3757,12 +3851,17 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
goto again;
case RINGBUF_TYPE_TIME_STAMP:
*ts = cpu_buffer->read_stamp + event->time_delta;
ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer->cpu, ts);
-@@ -3787,6 +3886,9 @@
+@@ -3787,6 +3886,9 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_event *event;
int nr_loops = 0;
cpu_buffer = iter->cpu_buffer;
buffer = cpu_buffer->buffer;
-@@ -3839,12 +3941,17 @@
+@@ -3839,12 +3941,17 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
goto again;
case RINGBUF_TYPE_TIME_STAMP:
*ts = iter->read_stamp + event->time_delta;
ring_buffer_normalize_time_stamp(buffer,
cpu_buffer->cpu, ts);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/trace.c linux-4.14/kernel/trace/trace.c
---- linux-4.14.orig/kernel/trace/trace.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/trace/trace.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1170,6 +1170,14 @@
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index e9cbb96cd99e..4fc60e5ec4b9 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1170,6 +1170,14 @@ static struct {
ARCH_TRACE_CLOCKS
};
/*
* trace_parser_get_init - gets the buffer for trace parser
*/
-@@ -2127,6 +2135,7 @@
+@@ -2127,6 +2135,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -2137,8 +2146,11 @@
+@@ -2137,8 +2146,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -2275,7 +2287,7 @@
+@@ -2275,7 +2287,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
*current_rb = trace_file->tr->trace_buffer.buffer;
(EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
(entry = this_cpu_read(trace_buffered_event))) {
/* Try to use the per cpu buffer first */
-@@ -3342,14 +3354,17 @@
+@@ -3342,14 +3354,17 @@ get_total_entries(struct trace_buffer *buf,
static void print_lat_help_header(struct seq_file *m)
{
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -3385,15 +3400,17 @@
+@@ -3385,15 +3400,17 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
tgid ? tgid_space : space);
seq_printf(m, "# %s / _----=> need-resched\n",
tgid ? tgid_space : space);
- seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
+ seq_printf(m, "# %s| / _----=> need-resched_lazy\n",
++ tgid ? tgid_space : space);
++ seq_printf(m, "# %s|| / _---=> hardirq/softirq\n",
tgid ? tgid_space : space);
- seq_printf(m, "# %s|| / _--=> preempt-depth\n",
-+ seq_printf(m, "# %s|| / _---=> hardirq/softirq\n",
++ seq_printf(m, "# %s||| / _--=> preempt-depth\n",
tgid ? tgid_space : space);
- seq_printf(m, "# %s||| / delay\n",
-+ seq_printf(m, "# %s||| / _--=> preempt-depth\n",
++ seq_printf(m, "# %s|||| / delay\n",
tgid ? tgid_space : space);
- seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
-+ seq_printf(m, "# %s|||| / delay\n",
-+ tgid ? tgid_space : space);
+ seq_printf(m, "# TASK-PID %sCPU# ||||| TIMESTAMP FUNCTION\n",
tgid ? " TGID " : space);
- seq_printf(m, "# | | %s | |||| | |\n",
tgid ? " | " : space);
}
-@@ -4531,6 +4548,9 @@
+@@ -4531,6 +4548,9 @@ static const char readme_msg[] =
#ifdef CONFIG_X86_64
" x86-tsc: TSC cycle counter\n"
#endif
"\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
"\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
" tracing_cpumask\t- Limit which CPUs to trace\n"
-@@ -4707,8 +4727,9 @@
+@@ -4707,8 +4727,9 @@ static const char readme_msg[] =
"\t .sym display an address as a symbol\n"
"\t .sym-offset display an address as a symbol and offset\n"
"\t .execname display a common_pid as a program name\n"
"\t The 'pause' parameter can be used to pause an existing hist\n"
"\t trigger or to start a hist trigger but not log any events\n"
"\t until told to do so. 'continue' can be used to start or\n"
-@@ -6218,7 +6239,7 @@
+@@ -6218,7 +6239,7 @@ static int tracing_clock_show(struct seq_file *m, void *v)
return 0;
}
{
int i;
-@@ -6298,6 +6319,71 @@
+@@ -6298,6 +6319,71 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
return ret;
}
struct ftrace_buffer_info {
struct trace_iterator iter;
void *spare;
-@@ -6545,6 +6631,13 @@
+@@ -6545,6 +6631,13 @@ static const struct file_operations trace_clock_fops = {
.write = tracing_clock_write,
};
#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
.open = tracing_snapshot_open,
-@@ -7682,6 +7775,7 @@
+@@ -7684,6 +7777,7 @@ static int instance_mkdir(const char *name)
struct trace_array *tr;
int ret;
mutex_lock(&trace_types_lock);
ret = -EEXIST;
-@@ -7714,6 +7808,7 @@
+@@ -7716,6 +7810,7 @@ static int instance_mkdir(const char *name)
INIT_LIST_HEAD(&tr->systems);
INIT_LIST_HEAD(&tr->events);
if (allocate_trace_buffers(tr, trace_buf_size) < 0)
goto out_free_tr;
-@@ -7737,6 +7832,7 @@
+@@ -7739,6 +7834,7 @@ static int instance_mkdir(const char *name)
list_add(&tr->list, &ftrace_trace_arrays);
mutex_unlock(&trace_types_lock);
return 0;
-@@ -7748,6 +7844,7 @@
+@@ -7750,6 +7846,7 @@ static int instance_mkdir(const char *name)
out_unlock:
mutex_unlock(&trace_types_lock);
return ret;
-@@ -7760,6 +7857,7 @@
+@@ -7762,6 +7859,7 @@ static int instance_rmdir(const char *name)
int ret;
int i;
mutex_lock(&trace_types_lock);
ret = -ENODEV;
-@@ -7805,6 +7903,7 @@
+@@ -7807,6 +7905,7 @@ static int instance_rmdir(const char *name)
out_unlock:
mutex_unlock(&trace_types_lock);
return ret;
}
-@@ -7862,6 +7961,9 @@
+@@ -7864,6 +7963,9 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
trace_create_file("tracing_on", 0644, d_tracer,
tr, &rb_simple_fops);
create_trace_options_dir(tr);
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
-@@ -8271,6 +8373,92 @@
+@@ -8275,6 +8377,92 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
}
EXPORT_SYMBOL_GPL(ftrace_dump);
__init static int tracer_alloc_buffers(void)
{
int ring_buf_size;
-@@ -8371,6 +8559,7 @@
+@@ -8375,6 +8563,7 @@ __init static int tracer_alloc_buffers(void)
INIT_LIST_HEAD(&global_trace.systems);
INIT_LIST_HEAD(&global_trace.events);
list_add(&global_trace.list, &ftrace_trace_arrays);
apply_trace_boot_options();
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/trace_events.c linux-4.14/kernel/trace/trace_events.c
---- linux-4.14.orig/kernel/trace/trace_events.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/trace/trace_events.c 2018-09-05 11:05:07.000000000 +0200
-@@ -187,6 +187,8 @@
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 851cd1605085..18bf383f46e8 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -127,6 +127,7 @@ struct kretprobe_trace_entry_head {
+ * NEED_RESCHED - reschedule is requested
+ * HARDIRQ - inside an interrupt handler
+ * SOFTIRQ - inside a softirq handler
++ * NEED_RESCHED_LAZY - lazy reschedule is requested
+ */
+ enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 0x01,
+@@ -136,6 +137,7 @@ enum trace_flag_type {
+ TRACE_FLAG_SOFTIRQ = 0x10,
+ TRACE_FLAG_PREEMPT_RESCHED = 0x20,
+ TRACE_FLAG_NMI = 0x40,
++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x80,
+ };
+
+ #define TRACE_BUF_SIZE 1024
+@@ -273,6 +275,8 @@ struct trace_array {
+ /* function tracing enabled */
+ int function_enabled;
+ #endif
++ int time_stamp_abs_ref;
++ struct list_head hist_vars;
+ };
+
+ enum {
+@@ -286,6 +290,11 @@ extern struct mutex trace_types_lock;
+ extern int trace_array_get(struct trace_array *tr);
+ extern void trace_array_put(struct trace_array *tr);
+
++extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
++extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
++
++extern bool trace_clock_in_ns(struct trace_array *tr);
++
+ /*
+ * The global tracer (top) should be the first trace array added,
+ * but we check the flag anyway.
+@@ -1293,7 +1302,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
+ unsigned long eflags = file->flags;
+
+ if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+- *tt = event_triggers_call(file, entry);
++ *tt = event_triggers_call(file, entry, event);
+
+ if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
+ (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
+@@ -1330,7 +1339,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
+ trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
+
+ if (tt)
+- event_triggers_post_call(file, tt, entry);
++ event_triggers_post_call(file, tt, entry, event);
+ }
+
+ /**
+@@ -1363,7 +1372,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
+ irq_flags, pc, regs);
+
+ if (tt)
+- event_triggers_post_call(file, tt, entry);
++ event_triggers_post_call(file, tt, entry, event);
+ }
+
+ #define FILTER_PRED_INVALID ((unsigned short)-1)
+@@ -1545,6 +1554,8 @@ extern void pause_named_trigger(struct event_trigger_data *data);
+ extern void unpause_named_trigger(struct event_trigger_data *data);
+ extern void set_named_trigger_data(struct event_trigger_data *data,
+ struct event_trigger_data *named_data);
++extern struct event_trigger_data *
++get_named_trigger_data(struct event_trigger_data *data);
+ extern int register_event_command(struct event_command *cmd);
+ extern int unregister_event_command(struct event_command *cmd);
+ extern int register_trigger_hist_enable_disable_cmds(void);
+@@ -1588,7 +1599,8 @@ extern int register_trigger_hist_enable_disable_cmds(void);
+ */
+ struct event_trigger_ops {
+ void (*func)(struct event_trigger_data *data,
+- void *rec);
++ void *rec,
++ struct ring_buffer_event *rbe);
+ int (*init)(struct event_trigger_ops *ops,
+ struct event_trigger_data *data);
+ void (*free)(struct event_trigger_ops *ops,
+@@ -1755,6 +1767,13 @@ void trace_printk_start_comm(void);
+ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
+ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
+
++#define MAX_EVENT_NAME_LEN 64
++
++extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
++extern ssize_t trace_parse_run_command(struct file *file,
++ const char __user *buffer, size_t count, loff_t *ppos,
++ int (*createfn)(int, char**));
++
+ /*
+ * Normal trace_printk() and friends allocates special buffers
+ * to do the manipulation, as well as saves the print formats
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index d53268a4e167..9ba230a4052f 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -187,6 +187,8 @@ static int trace_define_common_fields(void)
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
return ret;
}
-@@ -1406,8 +1408,8 @@
+@@ -1406,8 +1408,8 @@ static int subsystem_open(struct inode *inode, struct file *filp)
return -ENODEV;
/* Make sure the system still exists */
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
list_for_each_entry(dir, &tr->systems, list) {
if (dir == inode->i_private) {
-@@ -1421,8 +1423,8 @@
+@@ -1421,8 +1423,8 @@ static int subsystem_open(struct inode *inode, struct file *filp)
}
}
exit_loop:
if (!system)
return -ENODEV;
-@@ -2308,15 +2310,15 @@
+@@ -2308,15 +2310,15 @@ static void __add_event_to_tracers(struct trace_event_call *call);
int trace_add_event_call(struct trace_event_call *call)
{
int ret;
return ret;
}
-@@ -2370,13 +2372,13 @@
+@@ -2370,13 +2372,13 @@ int trace_remove_event_call(struct trace_event_call *call)
{
int ret;
return ret;
}
-@@ -2438,8 +2440,8 @@
+@@ -2438,8 +2440,8 @@ static int trace_module_notify(struct notifier_block *self,
{
struct module *mod = data;
switch (val) {
case MODULE_STATE_COMING:
trace_module_add_events(mod);
-@@ -2448,8 +2450,8 @@
+@@ -2448,8 +2450,8 @@ static int trace_module_notify(struct notifier_block *self,
trace_module_remove_events(mod);
break;
}
return 0;
}
-@@ -2964,24 +2966,24 @@
+@@ -2964,24 +2966,24 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
* creates the event hierachry in the @parent/events directory.
*
* Returns 0 on success.
return ret;
}
-@@ -3010,9 +3012,10 @@
+@@ -3010,9 +3012,10 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
return ret;
}
/* Disable any event triggers and associated soft-disabled events */
clear_event_triggers(tr);
-@@ -3033,8 +3036,6 @@
+@@ -3033,8 +3036,6 @@ int event_trace_del_tracer(struct trace_array *tr)
tr->event_dir = NULL;
return 0;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/trace_events_hist.c linux-4.14/kernel/trace/trace_events_hist.c
---- linux-4.14.orig/kernel/trace/trace_events_hist.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/trace/trace_events_hist.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 7eb975a2d0e1..24bc0769fdd6 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
@@ -20,13 +20,39 @@
#include <linux/slab.h>
#include <linux/stacktrace.h>
struct hist_field {
struct ftrace_event_field *field;
-@@ -34,26 +60,50 @@
+@@ -34,26 +60,50 @@ struct hist_field {
hist_field_fn_t fn;
unsigned int size;
unsigned int offset;
{
u32 str_item = *(u32 *)(event + hist_field->field->offset);
int str_loc = str_item & 0xffff;
-@@ -62,22 +112,74 @@
+@@ -62,22 +112,74 @@ static u64 hist_field_dynstring(struct hist_field *hist_field, void *event)
return (u64)(unsigned long)addr;
}
{ \
type *addr = (type *)(event + hist_field->field->offset); \
\
-@@ -110,16 +212,29 @@
+@@ -110,16 +212,29 @@ DEFINE_HIST_FIELD_FN(u8);
#define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
enum hist_field_flags {
};
struct hist_trigger_attrs {
-@@ -127,25 +242,1474 @@
+@@ -127,298 +242,3585 @@ struct hist_trigger_attrs {
char *vals_str;
char *sort_key_str;
char *name;
+ struct field_var *max_vars[SYNTH_FIELDS_MAX];
+ unsigned int n_max_vars;
+ unsigned int n_max_var_str;
-+};
-+
+ };
+
+-static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
+-{
+- hist_field_fn_t fn = NULL;
+struct synth_field {
+ char *type;
+ char *name;
+ bool is_signed;
+ bool is_string;
+};
-+
+
+- switch (field_size) {
+- case 8:
+- if (field_is_signed)
+- fn = hist_field_s64;
+- else
+- fn = hist_field_u64;
+- break;
+- case 4:
+- if (field_is_signed)
+- fn = hist_field_s32;
+- else
+- fn = hist_field_u32;
+- break;
+- case 2:
+- if (field_is_signed)
+- fn = hist_field_s16;
+- else
+- fn = hist_field_u16;
+- break;
+- case 1:
+- if (field_is_signed)
+- fn = hist_field_s8;
+- else
+- fn = hist_field_u8;
+- break;
+- }
+struct synth_event {
+ struct list_head list;
+ int ref;
+ struct trace_event_call call;
+ struct tracepoint *tp;
+};
-+
+
+- return fn;
+struct action_data;
+
+typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
+ return;
+
+ strncpy(last_hist_cmd, str, MAX_FILTER_STR_VAL - 1);
-+}
-+
+ }
+
+-static int parse_map_size(char *str)
+static void hist_err(char *str, char *var)
-+{
+ {
+- unsigned long size, map_bits;
+- int ret;
+ int maxlen = MAX_FILTER_STR_VAL - 1;
-+
+
+- strsep(&str, "=");
+- if (!str) {
+- ret = -EINVAL;
+- goto out;
+- }
+ if (!str)
+ return;
-+
+
+- ret = kstrtoul(str, 0, &size);
+- if (ret)
+- goto out;
+ if (strlen(hist_err_str))
+ return;
-+
+
+- map_bits = ilog2(roundup_pow_of_two(size));
+- if (map_bits < TRACING_MAP_BITS_MIN ||
+- map_bits > TRACING_MAP_BITS_MAX)
+- ret = -EINVAL;
+- else
+- ret = map_bits;
+- out:
+- return ret;
+ if (!var)
+ var = "";
+
+
+ strcat(hist_err_str, str);
+ strcat(hist_err_str, var);
-+}
-+
+ }
+
+-static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
+static void hist_err_event(char *str, char *system, char *event, char *var)
-+{
+ {
+- if (!attrs)
+- return;
+ char err[MAX_FILTER_STR_VAL];
-+
+
+- kfree(attrs->name);
+- kfree(attrs->sort_key_str);
+- kfree(attrs->keys_str);
+- kfree(attrs->vals_str);
+- kfree(attrs);
+ if (system && var)
+ snprintf(err, MAX_FILTER_STR_VAL, "%s.%s.%s", system, event, var);
+ else if (system)
+ strncpy(err, var, MAX_FILTER_STR_VAL);
+
+ hist_err(str, err);
-+}
-+
+ }
+
+-static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
+static void hist_err_clear(void)
-+{
+ {
+- struct hist_trigger_attrs *attrs;
+- int ret = 0;
+ hist_err_str[0] = '\0';
+}
-+
+
+- attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
+- if (!attrs)
+- return ERR_PTR(-ENOMEM);
+static bool have_hist_err(void)
+{
+ if (strlen(hist_err_str))
+ return true;
-+
+
+- while (trigger_str) {
+- char *str = strsep(&trigger_str, ":");
+ return false;
+}
-+
+
+- if ((strncmp(str, "key=", strlen("key=")) == 0) ||
+- (strncmp(str, "keys=", strlen("keys=")) == 0))
+- attrs->keys_str = kstrdup(str, GFP_KERNEL);
+- else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
+- (strncmp(str, "vals=", strlen("vals=")) == 0) ||
+- (strncmp(str, "values=", strlen("values=")) == 0))
+- attrs->vals_str = kstrdup(str, GFP_KERNEL);
+- else if (strncmp(str, "sort=", strlen("sort=")) == 0)
+- attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
+- else if (strncmp(str, "name=", strlen("name=")) == 0)
+- attrs->name = kstrdup(str, GFP_KERNEL);
+- else if (strcmp(str, "pause") == 0)
+- attrs->pause = true;
+- else if ((strcmp(str, "cont") == 0) ||
+- (strcmp(str, "continue") == 0))
+- attrs->cont = true;
+- else if (strcmp(str, "clear") == 0)
+- attrs->clear = true;
+- else if (strncmp(str, "size=", strlen("size=")) == 0) {
+- int map_bits = parse_map_size(str);
+static LIST_HEAD(synth_event_list);
+static DEFINE_MUTEX(synth_event_mutex);
-+
+
+- if (map_bits < 0) {
+- ret = map_bits;
+- goto free;
+- }
+- attrs->map_bits = map_bits;
+struct synth_trace_event {
+ struct trace_entry ent;
+ u64 fields[];
+ if (event->fields[i]->is_string) {
+ offset += STR_VAR_LEN_MAX;
+ n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
-+ } else {
+ } else {
+- ret = -EINVAL;
+- goto free;
+ offset += sizeof(u64);
+ n_u64++;
-+ }
-+ }
-+
+ }
+ }
+
+- if (!attrs->keys_str) {
+- ret = -EINVAL;
+- goto free;
+- }
+ event->n_u64 = n_u64;
-+
+
+- return attrs;
+- free:
+- destroy_hist_trigger_attrs(attrs);
+ return ret;
+}
-+
+
+- return ERR_PTR(ret);
+static bool synth_field_signed(char *type)
+{
+ if (strncmp(type, "u", 1) == 0)
+ return false;
+
+ return true;
-+}
-+
+ }
+
+-static inline void save_comm(char *comm, struct task_struct *task)
+static int synth_field_is_string(char *type)
-+{
+ {
+- if (!task->pid) {
+- strcpy(comm, "<idle>");
+- return;
+- }
+ if (strstr(type, "char[") != NULL)
+ return true;
-+
+
+- if (WARN_ON_ONCE(task->pid < 0)) {
+- strcpy(comm, "<XXX>");
+- return;
+- }
+ return false;
+}
-+
+
+- memcpy(comm, task->comm, TASK_COMM_LEN);
+static int synth_field_string_size(char *type)
+{
+ char buf[4], *end, *start;
+ return -EINVAL;
+
+ return size;
-+}
-+
+ }
+
+-static void hist_trigger_elt_comm_free(struct tracing_map_elt *elt)
+static int synth_field_size(char *type)
-+{
+ {
+- kfree((char *)elt->private_data);
+ int size = 0;
+
+ if (strcmp(type, "s64") == 0)
+ size = synth_field_string_size(type);
+
+ return size;
-+}
-+
+ }
+
+-static int hist_trigger_elt_comm_alloc(struct tracing_map_elt *elt)
+static const char *synth_field_fmt(char *type)
-+{
+ {
+- struct hist_trigger_data *hist_data = elt->map->private_data;
+- struct hist_field *key_field;
+- unsigned int i;
+ const char *fmt = "%llu";
+
+ if (strcmp(type, "s64") == 0)
+
+ return fmt;
+}
-+
+
+- for_each_hist_key_field(i, hist_data) {
+- key_field = hist_data->fields[i];
+static enum print_line_t print_synth_event(struct trace_iterator *iter,
+ int flags,
+ struct trace_event *event)
+ unsigned int i, n_u64;
+ char print_fmt[32];
+ const char *fmt;
-+
+
+- if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
+- unsigned int size = TASK_COMM_LEN + 1;
+ entry = (struct synth_trace_event *)iter->ent;
+ se = container_of(event, struct synth_event, call.event);
-+
+
+- elt->private_data = kzalloc(size, GFP_KERNEL);
+- if (!elt->private_data)
+- return -ENOMEM;
+- break;
+ trace_seq_printf(s, "%s: ", se->name);
+
+ for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
+ entry->fields[n_u64],
+ i == se->n_fields - 1 ? "" : " ");
+ n_u64++;
-+ }
-+ }
+ }
+ }
+end:
+ trace_seq_putc(s, '\n');
-+
+
+- return 0;
+ return trace_handle_return(s);
-+}
-+
+ }
+
+-static void hist_trigger_elt_comm_copy(struct tracing_map_elt *to,
+- struct tracing_map_elt *from)
+static struct trace_event_functions synth_event_funcs = {
+ .trace = print_synth_event
+};
+static notrace void trace_event_raw_event_synth(void *__data,
+ u64 *var_ref_vals,
+ unsigned int var_ref_idx)
-+{
+ {
+- char *comm_from = from->private_data;
+- char *comm_to = to->private_data;
+ struct trace_event_file *trace_file = __data;
+ struct synth_trace_event *entry;
+ struct trace_event_buffer fbuffer;
+out:
+ ring_buffer_nest_end(buffer);
+}
-+
+
+- if (comm_from)
+- memcpy(comm_to, comm_from, TASK_COMM_LEN + 1);
+static void free_synth_event_print_fmt(struct trace_event_call *call)
+{
+ if (call) {
+ kfree(call->print_fmt);
+ call->print_fmt = NULL;
+ }
-+}
-+
+ }
+
+-static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
+static int __set_synth_event_print_fmt(struct synth_event *event,
+ char *buf, int len)
-+{
+ {
+- char *comm = elt->private_data;
+ const char *fmt;
+ int pos = 0;
+ int i;
+ }
+
+#undef LEN_OR_ZERO
-+
+
+- if (comm)
+- save_comm(comm, current);
+ /* return the length of print_fmt */
+ return pos;
-+}
-+
+ }
+
+-static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
+- .elt_alloc = hist_trigger_elt_comm_alloc,
+- .elt_copy = hist_trigger_elt_comm_copy,
+- .elt_free = hist_trigger_elt_comm_free,
+- .elt_init = hist_trigger_elt_comm_init,
+-};
+static int set_synth_event_print_fmt(struct trace_event_call *call)
+{
+ struct synth_event *event = call->data;
+ /* Second: actually write the @print_fmt */
+ __set_synth_event_print_fmt(event, print_fmt, len + 1);
+ call->print_fmt = print_fmt;
-+
+
+-static void destroy_hist_field(struct hist_field *hist_field)
+ return 0;
+}
+
+static void free_synth_field(struct synth_field *field)
-+{
+ {
+- kfree(hist_field);
+ kfree(field->type);
+ kfree(field->name);
+ kfree(field);
-+}
-+
+ }
+
+-static struct hist_field *create_hist_field(struct ftrace_event_field *field,
+- unsigned long flags)
+static struct synth_field *parse_synth_field(char *field_type,
+ char *field_name)
-+{
+ {
+- struct hist_field *hist_field;
+ struct synth_field *field;
+ int len, ret = 0;
+ char *array;
-+
+
+- if (field && is_function_field(field))
+- return NULL;
+ if (field_type[0] == ';')
+ field_type++;
-+
+
+- hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
+- if (!hist_field)
+- return NULL;
+ len = strlen(field_name);
+ if (field_name[len - 1] == ';')
+ field_name[len - 1] = '\0';
-+
+
+- if (flags & HIST_FIELD_FL_HITCOUNT) {
+- hist_field->fn = hist_field_counter;
+- goto out;
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
+ if (!field)
+ return ERR_PTR(-ENOMEM);
+ if (array) {
+ strcat(field->type, array);
+ *array = '\0';
-+ }
-+
+ }
+
+- if (flags & HIST_FIELD_FL_STACKTRACE) {
+- hist_field->fn = hist_field_none;
+- goto out;
+ field->size = synth_field_size(field->type);
+ if (!field->size) {
+ ret = -EINVAL;
+ goto free;
-+ }
-+
+ }
+
+- if (flags & HIST_FIELD_FL_LOG2) {
+- hist_field->fn = hist_field_log2;
+- goto out;
+ if (synth_field_is_string(field->type))
+ field->is_string = true;
+
+ char *comm;
+ u64 *var_ref_vals;
+ char *field_var_str[SYNTH_FIELDS_MAX];
- };
-
++};
++
+static u64 hist_field_var_ref(struct hist_field *hist_field,
+ struct tracing_map_elt *elt,
+ struct ring_buffer_event *rbe,
+ return field_name;
+}
+
- static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
- {
- hist_field_fn_t fn = NULL;
-@@ -207,16 +1771,119 @@
-
- static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
- {
++static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
++{
++ hist_field_fn_t fn = NULL;
++
++ switch (field_size) {
++ case 8:
++ if (field_is_signed)
++ fn = hist_field_s64;
++ else
++ fn = hist_field_u64;
++ break;
++ case 4:
++ if (field_is_signed)
++ fn = hist_field_s32;
++ else
++ fn = hist_field_u32;
++ break;
++ case 2:
++ if (field_is_signed)
++ fn = hist_field_s16;
++ else
++ fn = hist_field_u16;
++ break;
++ case 1:
++ if (field_is_signed)
++ fn = hist_field_s8;
++ else
++ fn = hist_field_u8;
++ break;
++ }
++
++ return fn;
++}
++
++static int parse_map_size(char *str)
++{
++ unsigned long size, map_bits;
++ int ret;
++
++ strsep(&str, "=");
++ if (!str) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ ret = kstrtoul(str, 0, &size);
++ if (ret)
++ goto out;
++
++ map_bits = ilog2(roundup_pow_of_two(size));
++ if (map_bits < TRACING_MAP_BITS_MIN ||
++ map_bits > TRACING_MAP_BITS_MAX)
++ ret = -EINVAL;
++ else
++ ret = map_bits;
++ out:
++ return ret;
++}
++
++static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
++{
+ unsigned int i;
+
- if (!attrs)
- return;
-
++ if (!attrs)
++ return;
++
+ for (i = 0; i < attrs->n_assignments; i++)
+ kfree(attrs->assignment_str[i]);
+
+ for (i = 0; i < attrs->n_actions; i++)
+ kfree(attrs->action_str[i]);
+
- kfree(attrs->name);
- kfree(attrs->sort_key_str);
- kfree(attrs->keys_str);
- kfree(attrs->vals_str);
++ kfree(attrs->name);
++ kfree(attrs->sort_key_str);
++ kfree(attrs->keys_str);
++ kfree(attrs->vals_str);
+ kfree(attrs->clock);
- kfree(attrs);
- }
-
++ kfree(attrs);
++}
++
+static int parse_action(char *str, struct hist_trigger_attrs *attrs)
+{
+ int ret = -EINVAL;
+ return ret;
+}
+
- static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
- {
- struct hist_trigger_attrs *attrs;
-@@ -229,35 +1896,21 @@
- while (trigger_str) {
- char *str = strsep(&trigger_str, ":");
-
-- if ((strncmp(str, "key=", strlen("key=")) == 0) ||
-- (strncmp(str, "keys=", strlen("keys=")) == 0))
-- attrs->keys_str = kstrdup(str, GFP_KERNEL);
-- else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
-- (strncmp(str, "vals=", strlen("vals=")) == 0) ||
-- (strncmp(str, "values=", strlen("values=")) == 0))
-- attrs->vals_str = kstrdup(str, GFP_KERNEL);
-- else if (strncmp(str, "sort=", strlen("sort=")) == 0)
-- attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
-- else if (strncmp(str, "name=", strlen("name=")) == 0)
-- attrs->name = kstrdup(str, GFP_KERNEL);
-- else if (strcmp(str, "pause") == 0)
++static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
++{
++ struct hist_trigger_attrs *attrs;
++ int ret = 0;
++
++ attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
++ if (!attrs)
++ return ERR_PTR(-ENOMEM);
++
++ while (trigger_str) {
++ char *str = strsep(&trigger_str, ":");
++
+ if (strchr(str, '=')) {
+ ret = parse_assignment(str, attrs);
+ if (ret)
+ goto free;
+ } else if (strcmp(str, "pause") == 0)
- attrs->pause = true;
- else if ((strcmp(str, "cont") == 0) ||
- (strcmp(str, "continue") == 0))
- attrs->cont = true;
- else if (strcmp(str, "clear") == 0)
- attrs->clear = true;
-- else if (strncmp(str, "size=", strlen("size=")) == 0) {
-- int map_bits = parse_map_size(str);
--
-- if (map_bits < 0) {
-- ret = map_bits;
++ attrs->pause = true;
++ else if ((strcmp(str, "cont") == 0) ||
++ (strcmp(str, "continue") == 0))
++ attrs->cont = true;
++ else if (strcmp(str, "clear") == 0)
++ attrs->clear = true;
+ else {
+ ret = parse_action(str, attrs);
+ if (ret)
- goto free;
-- }
-- attrs->map_bits = map_bits;
-- } else {
-- ret = -EINVAL;
-- goto free;
- }
- }
-
-@@ -266,6 +1919,14 @@
- goto free;
- }
-
++ goto free;
++ }
++ }
++
++ if (!attrs->keys_str) {
++ ret = -EINVAL;
++ goto free;
++ }
++
+ if (!attrs->clock) {
+ attrs->clock = kstrdup("global", GFP_KERNEL);
+ if (!attrs->clock) {
+ }
+ }
+
- return attrs;
- free:
- destroy_hist_trigger_attrs(attrs);
-@@ -288,65 +1949,222 @@
- memcpy(comm, task->comm, TASK_COMM_LEN);
- }
-
--static void hist_trigger_elt_comm_free(struct tracing_map_elt *elt)
++ return attrs;
++ free:
++ destroy_hist_trigger_attrs(attrs);
++
++ return ERR_PTR(ret);
++}
++
++static inline void save_comm(char *comm, struct task_struct *task)
++{
++ if (!task->pid) {
++ strcpy(comm, "<idle>");
++ return;
++ }
++
++ if (WARN_ON_ONCE(task->pid < 0)) {
++ strcpy(comm, "<XXX>");
++ return;
++ }
++
++ memcpy(comm, task->comm, TASK_COMM_LEN);
++}
++
+static void hist_elt_data_free(struct hist_elt_data *elt_data)
- {
-- kfree((char *)elt->private_data);
++{
+ unsigned int i;
+
+ for (i = 0; i < SYNTH_FIELDS_MAX; i++)
+
+ kfree(elt_data->comm);
+ kfree(elt_data);
- }
-
--static int hist_trigger_elt_comm_alloc(struct tracing_map_elt *elt)
++}
++
+static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
+{
+ struct hist_elt_data *elt_data = elt->private_data;
+}
+
+static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
- {
- struct hist_trigger_data *hist_data = elt->map->private_data;
++{
++ struct hist_trigger_data *hist_data = elt->map->private_data;
+ unsigned int size = TASK_COMM_LEN;
+ struct hist_elt_data *elt_data;
- struct hist_field *key_field;
-- unsigned int i;
++ struct hist_field *key_field;
+ unsigned int i, n_str;
+
+ elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
+ if (!elt_data)
+ return -ENOMEM;
-
- for_each_hist_key_field(i, hist_data) {
- key_field = hist_data->fields[i];
-
- if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
-- unsigned int size = TASK_COMM_LEN + 1;
--
-- elt->private_data = kzalloc(size, GFP_KERNEL);
-- if (!elt->private_data)
++
++ for_each_hist_key_field(i, hist_data) {
++ key_field = hist_data->fields[i];
++
++ if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
+ elt_data->comm = kzalloc(size, GFP_KERNEL);
+ if (!elt_data->comm) {
+ kfree(elt_data);
- return -ENOMEM;
++ return -ENOMEM;
+ }
- break;
- }
- }
-
++ break;
++ }
++ }
++
+ n_str = hist_data->n_field_var_str + hist_data->n_max_var_str;
+
+ size = STR_VAR_LEN_MAX;
+
+ elt->private_data = elt_data;
+
- return 0;
- }
-
--static void hist_trigger_elt_comm_copy(struct tracing_map_elt *to,
-- struct tracing_map_elt *from)
++ return 0;
++}
++
+static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
- {
-- char *comm_from = from->private_data;
-- char *comm_to = to->private_data;
++{
+ struct hist_elt_data *elt_data = elt->private_data;
-
-- if (comm_from)
-- memcpy(comm_to, comm_from, TASK_COMM_LEN + 1);
++
+ if (elt_data->comm)
+ save_comm(elt_data->comm, current);
- }
-
--static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
++}
++
+static const struct tracing_map_ops hist_trigger_elt_data_ops = {
+ .elt_alloc = hist_trigger_elt_data_alloc,
+ .elt_free = hist_trigger_elt_data_free,
+};
+
+static const char *get_hist_field_flags(struct hist_field *hist_field)
- {
-- char *comm = elt->private_data;
++{
+ const char *flags_str = NULL;
-
-- if (comm)
-- save_comm(comm, current);
++
+ if (hist_field->flags & HIST_FIELD_FL_HEX)
+ flags_str = "hex";
+ else if (hist_field->flags & HIST_FIELD_FL_SYM)
+ flags_str = "usecs";
+
+ return flags_str;
- }
-
--static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
-- .elt_alloc = hist_trigger_elt_comm_alloc,
-- .elt_copy = hist_trigger_elt_comm_copy,
-- .elt_free = hist_trigger_elt_comm_free,
-- .elt_init = hist_trigger_elt_comm_init,
--};
++}
++
+static void expr_field_str(struct hist_field *field, char *expr)
+{
+ if (field->flags & HIST_FIELD_FL_VAR_REF)
+ strcat(expr, "$");
-
--static void destroy_hist_field(struct hist_field *hist_field)
++
+ strcat(expr, hist_field_name(field, 0));
+
+ if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
+
+static void destroy_hist_field(struct hist_field *hist_field,
+ unsigned int level)
- {
++{
+ unsigned int i;
+
+ if (level > 3)
+ kfree(hist_field->name);
+ kfree(hist_field->type);
+
- kfree(hist_field);
- }
-
--static struct hist_field *create_hist_field(struct ftrace_event_field *field,
-- unsigned long flags)
++ kfree(hist_field);
++}
++
+static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
+ struct ftrace_event_field *field,
+ unsigned long flags,
+ char *var_name)
- {
- struct hist_field *hist_field;
-
-@@ -357,8 +2175,22 @@
- if (!hist_field)
- return NULL;
-
++{
++ struct hist_field *hist_field;
++
++ if (field && is_function_field(field))
++ return NULL;
++
++ hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
++ if (!hist_field)
++ return NULL;
++
+ hist_field->hist_data = hist_data;
+
+ if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
+ goto out;
+ }
+
- if (flags & HIST_FIELD_FL_HITCOUNT) {
- hist_field->fn = hist_field_counter;
++ if (flags & HIST_FIELD_FL_HITCOUNT) {
++ hist_field->fn = hist_field_counter;
+ hist_field->size = sizeof(u64);
+ hist_field->type = kstrdup("u64", GFP_KERNEL);
+ if (!hist_field->type)
+ goto free;
- goto out;
- }
-
-@@ -368,7 +2200,31 @@
- }
-
- if (flags & HIST_FIELD_FL_LOG2) {
++ goto out;
++ }
++
++ if (flags & HIST_FIELD_FL_STACKTRACE) {
++ hist_field->fn = hist_field_none;
++ goto out;
++ }
++
++ if (flags & HIST_FIELD_FL_LOG2) {
+ unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
- hist_field->fn = hist_field_log2;
++ hist_field->fn = hist_field_log2;
+ hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
+ hist_field->size = hist_field->operands[0]->size;
+ hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
+ hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
+ if (!hist_field->type)
+ goto free;
- goto out;
- }
-
-@@ -378,6 +2234,11 @@
- if (is_string_field(field)) {
- flags |= HIST_FIELD_FL_STRING;
-
++ goto out;
++ }
++
++ if (WARN_ON_ONCE(!field))
++ goto out;
++
++ if (is_string_field(field)) {
++ flags |= HIST_FIELD_FL_STRING;
++
+ hist_field->size = MAX_FILTER_STR_VAL;
+ hist_field->type = kstrdup(field->type, GFP_KERNEL);
+ if (!hist_field->type)
+ goto free;
+
- if (field->filter_type == FILTER_STATIC_STRING)
- hist_field->fn = hist_field_string;
- else if (field->filter_type == FILTER_DYN_STRING)
-@@ -385,10 +2246,16 @@
- else
- hist_field->fn = hist_field_pstring;
- } else {
++ if (field->filter_type == FILTER_STATIC_STRING)
++ hist_field->fn = hist_field_string;
++ else if (field->filter_type == FILTER_DYN_STRING)
++ hist_field->fn = hist_field_dynstring;
++ else
++ hist_field->fn = hist_field_pstring;
++ } else {
+ hist_field->size = field->size;
+ hist_field->is_signed = field->is_signed;
+ hist_field->type = kstrdup(field->type, GFP_KERNEL);
+ if (!hist_field->type)
+ goto free;
+
- hist_field->fn = select_value_fn(field->size,
- field->is_signed);
- if (!hist_field->fn) {
-- destroy_hist_field(hist_field);
++ hist_field->fn = select_value_fn(field->size,
++ field->is_signed);
++ if (!hist_field->fn) {
+ destroy_hist_field(hist_field, 0);
- return NULL;
- }
- }
-@@ -396,84 +2263,1636 @@
- hist_field->field = field;
- hist_field->flags = flags;
-
++ return NULL;
++ }
++ }
++ out:
++ hist_field->field = field;
++ hist_field->flags = flags;
++
+ if (var_name) {
+ hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
+ if (!hist_field->var.name)
+ goto free;
+ }
+
- return hist_field;
++ return hist_field;
+ free:
+ destroy_hist_field(hist_field, 0);
+ return NULL;
- }
-
- static void destroy_hist_fields(struct hist_trigger_data *hist_data)
- {
- unsigned int i;
-
-- for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
++}
++
++static void destroy_hist_fields(struct hist_trigger_data *hist_data)
++{
++ unsigned int i;
++
+ for (i = 0; i < HIST_FIELDS_MAX; i++) {
- if (hist_data->fields[i]) {
-- destroy_hist_field(hist_data->fields[i]);
++ if (hist_data->fields[i]) {
+ destroy_hist_field(hist_data->fields[i], 0);
- hist_data->fields[i] = NULL;
- }
- }
- }
-
--static int create_hitcount_val(struct hist_trigger_data *hist_data)
++ hist_data->fields[i] = NULL;
++ }
++ }
++}
++
+static int init_var_ref(struct hist_field *ref_field,
+ struct hist_field *var_field,
+ char *system, char *event_name)
- {
-- hist_data->fields[HITCOUNT_IDX] =
-- create_hist_field(NULL, HIST_FIELD_FL_HITCOUNT);
-- if (!hist_data->fields[HITCOUNT_IDX])
-- return -ENOMEM;
++{
+ int err = 0;
-
-- hist_data->n_vals++;
++
+ ref_field->var.idx = var_field->var.idx;
+ ref_field->var.hist_data = var_field->hist_data;
+ ref_field->size = var_field->size;
+ ref_field->is_signed = var_field->is_signed;
+ ref_field->flags |= var_field->flags &
+ (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
-
-- if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
++
+ if (system) {
+ ref_field->system = kstrdup(system, GFP_KERNEL);
+ if (!ref_field->system)
+ if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
+ (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
+ hist_err("Timestamp units in expression don't match", NULL);
- return -EINVAL;
++ return -EINVAL;
+ }
-
- return 0;
- }
-
--static int create_val_field(struct hist_trigger_data *hist_data,
-- unsigned int val_idx,
-- struct trace_event_file *file,
-- char *field_str)
++
++ return 0;
++}
++
+static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
+ struct trace_event_file *file,
+ char *str, unsigned long flags,
+ char *var_name, unsigned int level)
- {
-- struct ftrace_event_field *field = NULL;
-- unsigned long flags = 0;
-- char *field_name;
++{
+ struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
+ unsigned long operand_flags;
+ int field_op, ret = -EINVAL;
+ struct hist_field *val = NULL, *var = NULL;
+ unsigned long flags = HIST_FIELD_FL_VAR;
+ struct field_var *field_var;
- int ret = 0;
-
-- if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
++ int ret = 0;
++
+ if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
+ hist_err("Too many field variables defined: ", field_name);
+ ret = -EINVAL;
+ onmax_var_str = data->onmax.var_str;
+ if (onmax_var_str[0] != '$') {
+ hist_err("onmax: For onmax(x), x must be a variable: ", onmax_var_str);
- return -EINVAL;
++ return -EINVAL;
+ }
+ onmax_var_str++;
-
-- field_name = strsep(&field_str, ".");
-- if (field_str) {
-- if (strcmp(field_str, "hex") == 0)
-- flags |= HIST_FIELD_FL_HEX;
-- else {
++
+ var_field = find_target_event_var(hist_data, NULL, NULL, onmax_var_str);
+ if (!var_field) {
+ hist_err("onmax: Couldn't find onmax variable: ", onmax_var_str);
+ param = strstrip(param);
+ if (strlen(param) < 2) {
+ hist_err("Invalid action param: ", param);
- ret = -EINVAL;
- goto out;
- }
++ ret = -EINVAL;
++ goto out;
++ }
+
+ saved_param = kstrdup(param, GFP_KERNEL);
+ if (!saved_param) {
+ }
+
+ data->params[data->n_params++] = saved_param;
- }
++ }
+ out:
+ return ret;
+}
-
-- field = trace_find_event_field(file->event_call, field_name);
-- if (!field || !field->size) {
++
+static struct action_data *onmax_parse(char *str)
+{
+ char *onmax_fn_name, *onmax_var_str;
+
+ onmax_var_str = strsep(&str, ")");
+ if (!onmax_var_str || !str) {
- ret = -EINVAL;
-- goto out;
++ ret = -EINVAL;
+ goto free;
- }
-
-- hist_data->fields[val_idx] = create_hist_field(field, flags);
-- if (!hist_data->fields[val_idx]) {
++ }
++
+ data->onmax.var_str = kstrdup(onmax_var_str, GFP_KERNEL);
+ if (!data->onmax.var_str) {
+ ret = -ENOMEM;
+ hist_err_event("onmatch: Invalid subsystem or event name: ",
+ match_event_system, match_event, NULL);
+ goto free;
-+ }
-+
+ }
+
+- if (WARN_ON_ONCE(!field))
+- goto out;
+ data->onmatch.match_event = kstrdup(match_event, GFP_KERNEL);
+ if (!data->onmatch.match_event) {
+ ret = -ENOMEM;
+ goto free;
+ }
-+
+
+- if (is_string_field(field)) {
+- flags |= HIST_FIELD_FL_STRING;
+ data->onmatch.match_event_system = kstrdup(match_event_system, GFP_KERNEL);
+ if (!data->onmatch.match_event_system) {
+ ret = -ENOMEM;
+ goto free;
+ }
-+
+
+- if (field->filter_type == FILTER_STATIC_STRING)
+- hist_field->fn = hist_field_string;
+- else if (field->filter_type == FILTER_DYN_STRING)
+- hist_field->fn = hist_field_dynstring;
+- else
+- hist_field->fn = hist_field_pstring;
+- } else {
+- hist_field->fn = select_value_fn(field->size,
+- field->is_signed);
+- if (!hist_field->fn) {
+- destroy_hist_field(hist_field);
+- return NULL;
+- }
+ strsep(&str, ".");
+ if (!str) {
+ hist_err("onmatch: Missing . after onmatch(): ", str);
+ goto free;
-+ }
-+
+ }
+- out:
+- hist_field->field = field;
+- hist_field->flags = flags;
+
+- return hist_field;
+-}
+ synth_event_name = strsep(&str, "(");
+ if (!synth_event_name || !str) {
+ hist_err("onmatch: Missing opening paramlist paren: ", synth_event_name);
+ goto free;
+ }
-+
+
+-static void destroy_hist_fields(struct hist_trigger_data *hist_data)
+-{
+- unsigned int i;
+ data->onmatch.synth_event_name = kstrdup(synth_event_name, GFP_KERNEL);
+ if (!data->onmatch.synth_event_name) {
- ret = -ENOMEM;
++ ret = -ENOMEM;
+ goto free;
+ }
-+
+
+- for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
+- if (hist_data->fields[i]) {
+- destroy_hist_field(hist_data->fields[i]);
+- hist_data->fields[i] = NULL;
+- }
+ params = strsep(&str, ")");
+ if (!params || !str || (str && strlen(str))) {
+ hist_err("onmatch: Missing closing paramlist paren: ", params);
+ goto free;
-+ }
+ }
+
+ ret = parse_action_params(params, data);
+ if (ret)
+ onmatch_destroy(data);
+ data = ERR_PTR(ret);
+ goto out;
-+}
-+
-+static int create_hitcount_val(struct hist_trigger_data *hist_data)
-+{
-+ hist_data->fields[HITCOUNT_IDX] =
+ }
+
+ static int create_hitcount_val(struct hist_trigger_data *hist_data)
+ {
+ hist_data->fields[HITCOUNT_IDX] =
+- create_hist_field(NULL, HIST_FIELD_FL_HITCOUNT);
+ create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
-+ if (!hist_data->fields[HITCOUNT_IDX])
-+ return -ENOMEM;
-+
-+ hist_data->n_vals++;
+ if (!hist_data->fields[HITCOUNT_IDX])
+ return -ENOMEM;
+
+ hist_data->n_vals++;
+ hist_data->n_fields++;
-+
-+ if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
-+ return -EINVAL;
-+
-+ return 0;
-+}
-+
+
+ if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
+ return -EINVAL;
+@@ -426,54 +3828,71 @@ static int create_hitcount_val(struct hist_trigger_data *hist_data)
+ return 0;
+ }
+
+static int __create_val_field(struct hist_trigger_data *hist_data,
+ unsigned int val_idx,
+ struct trace_event_file *file,
+ hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
+ if (IS_ERR(hist_field)) {
+ ret = PTR_ERR(hist_field);
- goto out;
- }
-
++ goto out;
++ }
++
+ hist_data->fields[val_idx] = hist_field;
+
- ++hist_data->n_vals;
++ ++hist_data->n_vals;
+ ++hist_data->n_fields;
-
-- if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
++
+ if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
- ret = -EINVAL;
- out:
- return ret;
- }
-
-+static int create_val_field(struct hist_trigger_data *hist_data,
-+ unsigned int val_idx,
-+ struct trace_event_file *file,
-+ char *field_str)
-+{
-+ if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
-+ return -EINVAL;
++ ret = -EINVAL;
++ out:
++ return ret;
++}
+
+ static int create_val_field(struct hist_trigger_data *hist_data,
+ unsigned int val_idx,
+ struct trace_event_file *file,
+ char *field_str)
+ {
+- struct ftrace_event_field *field = NULL;
+- unsigned long flags = 0;
+- char *field_name;
+- int ret = 0;
+-
+ if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
+ return -EINVAL;
+
+- field_name = strsep(&field_str, ".");
+- if (field_str) {
+- if (strcmp(field_str, "hex") == 0)
+- flags |= HIST_FIELD_FL_HEX;
+- else {
+- ret = -EINVAL;
+- goto out;
+- }
+- }
+ return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
+}
-+
+
+- field = trace_find_event_field(file->event_call, field_name);
+- if (!field || !field->size) {
+- ret = -EINVAL;
+- goto out;
+- }
+static int create_var_field(struct hist_trigger_data *hist_data,
+ unsigned int val_idx,
+ struct trace_event_file *file,
+ char *var_name, char *expr_str)
+{
+ unsigned long flags = 0;
-+
+
+- hist_data->fields[val_idx] = create_hist_field(field, flags);
+- if (!hist_data->fields[val_idx]) {
+- ret = -ENOMEM;
+- goto out;
+ if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
+ return -EINVAL;
+
+ if (find_var(hist_data, file, var_name) && !hist_data->remove) {
+ hist_err("Variable already defined: ", var_name);
+ return -EINVAL;
-+ }
-+
+ }
+
+- ++hist_data->n_vals;
+ flags |= HIST_FIELD_FL_VAR;
+ hist_data->n_vars++;
+ if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
+ return -EINVAL;
-+
+
+- if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
+- ret = -EINVAL;
+- out:
+- return ret;
+ return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
-+}
-+
+ }
+
static int create_val_fields(struct hist_trigger_data *hist_data,
struct trace_event_file *file)
{
int ret;
ret = create_hitcount_val(hist_data);
-@@ -493,12 +3912,15 @@
+@@ -493,12 +3912,15 @@ static int create_val_fields(struct hist_trigger_data *hist_data,
field_str = strsep(&fields_str, ",");
if (!field_str)
break;
if (fields_str && (strcmp(fields_str, "hitcount") != 0))
ret = -EINVAL;
out:
-@@ -511,12 +3933,13 @@
+@@ -511,12 +3933,13 @@ static int create_key_field(struct hist_trigger_data *hist_data,
struct trace_event_file *file,
char *field_str)
{
return -EINVAL;
flags |= HIST_FIELD_FL_KEY;
-@@ -524,57 +3947,40 @@
+@@ -524,57 +3947,40 @@ static int create_key_field(struct hist_trigger_data *hist_data,
if (strcmp(field_str, "stacktrace") == 0) {
flags |= HIST_FIELD_FL_STACKTRACE;
key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
return -EINVAL;
-@@ -618,21 +4024,113 @@
+@@ -618,21 +4024,113 @@ static int create_key_fields(struct hist_trigger_data *hist_data,
return ret;
}
return ret;
}
-@@ -653,10 +4151,9 @@
+@@ -653,10 +4151,9 @@ static int is_descending(const char *str)
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
char *fields_str = hist_data->attrs->sort_key_str;
hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
-@@ -670,7 +4167,9 @@
+@@ -670,7 +4167,9 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
}
for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
sort_key = &hist_data->sort_keys[i];
-@@ -702,10 +4201,19 @@
+@@ -702,10 +4201,19 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
continue;
}
descending = is_descending(field_str);
if (descending < 0) {
ret = descending;
-@@ -720,16 +4228,230 @@
+@@ -720,16 +4228,230 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
break;
}
}
+- hist_data->n_sort_keys = i;
+- out:
+- return ret;
++
++ hist_data->n_sort_keys = i;
++ out:
++ return ret;
++}
+
- hist_data->n_sort_keys = i;
- out:
- return ret;
- }
-
+static void destroy_actions(struct hist_trigger_data *hist_data)
+{
+ unsigned int i;
+ kfree(hist_data->field_var_hists[i]->cmd);
+ kfree(hist_data->field_var_hists[i]);
+ }
-+}
-+
+ }
+
static void destroy_hist_data(struct hist_trigger_data *hist_data)
{
+ if (!hist_data)
kfree(hist_data);
}
-@@ -738,7 +4460,7 @@
+@@ -738,7 +4460,7 @@ static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
struct tracing_map *map = hist_data->map;
struct ftrace_event_field *field;
struct hist_field *hist_field;
for_each_hist_field(i, hist_data) {
hist_field = hist_data->fields[i];
-@@ -749,6 +4471,9 @@
+@@ -749,6 +4471,9 @@ static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
cmp_fn = tracing_map_cmp_none;
else if (is_string_field(field))
cmp_fn = tracing_map_cmp_string;
else
-@@ -757,36 +4482,29 @@
+@@ -757,36 +4482,29 @@ static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
idx = tracing_map_add_key_field(map,
hist_field->offset,
cmp_fn);
{
const struct tracing_map_ops *map_ops = NULL;
struct hist_trigger_data *hist_data;
-@@ -797,6 +4515,12 @@
+@@ -797,6 +4515,12 @@ create_hist_data(unsigned int map_bits,
return ERR_PTR(-ENOMEM);
hist_data->attrs = attrs;
ret = create_hist_fields(hist_data, file);
if (ret)
-@@ -806,8 +4530,7 @@
+@@ -806,8 +4530,7 @@ create_hist_data(unsigned int map_bits,
if (ret)
goto free;
hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
map_ops, hist_data);
-@@ -820,12 +4543,6 @@
+@@ -820,12 +4543,6 @@ create_hist_data(unsigned int map_bits,
ret = create_tracing_map_fields(hist_data);
if (ret)
goto free;
out:
return hist_data;
free:
-@@ -839,18 +4556,39 @@
+@@ -839,18 +4556,39 @@ create_hist_data(unsigned int map_bits,
}
static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
}
static inline void add_to_key(char *compound_key, void *key,
-@@ -877,15 +4615,31 @@
+@@ -877,15 +4615,31 @@ static inline void add_to_key(char *compound_key, void *key,
memcpy(compound_key + key_field->offset, key, size);
}
u64 field_contents;
void *key = NULL;
unsigned int i;
-@@ -906,7 +4660,7 @@
+@@ -906,7 +4660,7 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec)
key = entries;
} else {
if (key_field->flags & HIST_FIELD_FL_STRING) {
key = (void *)(unsigned long)field_contents;
use_compound_key = true;
-@@ -921,9 +4675,18 @@
+@@ -921,9 +4675,18 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec)
if (use_compound_key)
key = compound_key;
}
static void hist_trigger_stacktrace_print(struct seq_file *m,
-@@ -952,6 +4715,7 @@
+@@ -952,6 +4715,7 @@ hist_trigger_entry_print(struct seq_file *m,
struct hist_field *key_field;
char str[KSYM_SYMBOL_LEN];
bool multiline = false;
unsigned int i;
u64 uval;
-@@ -963,26 +4727,33 @@
+@@ -963,26 +4727,33 @@ hist_trigger_entry_print(struct seq_file *m,
if (i > hist_data->n_vals)
seq_puts(m, ", ");
} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
const char *syscall_name;
-@@ -991,8 +4762,8 @@
+@@ -991,8 +4762,8 @@ hist_trigger_entry_print(struct seq_file *m,
if (!syscall_name)
syscall_name = "unknown_syscall";
} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
seq_puts(m, "stacktrace:\n");
hist_trigger_stacktrace_print(m,
-@@ -1000,15 +4771,14 @@
+@@ -1000,15 +4771,14 @@ hist_trigger_entry_print(struct seq_file *m,
HIST_STACKTRACE_DEPTH);
multiline = true;
} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
}
}
-@@ -1021,17 +4791,23 @@
+@@ -1021,17 +4791,23 @@ hist_trigger_entry_print(struct seq_file *m,
tracing_map_read_sum(elt, HITCOUNT_IDX));
for (i = 1; i < hist_data->n_vals; i++) {
seq_puts(m, "\n");
}
-@@ -1102,6 +4878,11 @@
+@@ -1102,6 +4878,11 @@ static int hist_show(struct seq_file *m, void *v)
hist_trigger_show(m, data, n++);
}
out_unlock:
mutex_unlock(&event_mutex);
-@@ -1120,34 +4901,31 @@
+@@ -1120,34 +4901,31 @@ const struct file_operations event_hist_fops = {
.release = single_release,
};
}
}
-@@ -1156,7 +4934,8 @@
+@@ -1156,7 +4934,8 @@ static int event_hist_trigger_print(struct seq_file *m,
struct event_trigger_data *data)
{
struct hist_trigger_data *hist_data = data->private_data;
unsigned int i;
seq_puts(m, "hist:");
-@@ -1167,25 +4946,47 @@
+@@ -1167,25 +4946,47 @@ static int event_hist_trigger_print(struct seq_file *m,
seq_puts(m, "keys=");
for_each_hist_key_field(i, hist_data) {
}
}
-@@ -1193,28 +4994,36 @@
+@@ -1193,28 +4994,36 @@ static int event_hist_trigger_print(struct seq_file *m,
for (i = 0; i < hist_data->n_sort_keys; i++) {
struct tracing_map_sort_key *sort_key;
if (data->filter_str)
seq_printf(m, " if %s", data->filter_str);
-@@ -1242,6 +5051,21 @@
+@@ -1242,6 +5051,21 @@ static int event_hist_trigger_init(struct event_trigger_ops *ops,
return 0;
}
static void event_hist_trigger_free(struct event_trigger_ops *ops,
struct event_trigger_data *data)
{
-@@ -1254,7 +5078,13 @@
+@@ -1254,7 +5078,13 @@ static void event_hist_trigger_free(struct event_trigger_ops *ops,
if (!data->ref) {
if (data->name)
del_named_trigger(data);
destroy_hist_data(hist_data);
}
}
-@@ -1381,6 +5211,15 @@
+@@ -1381,6 +5211,15 @@ static bool hist_trigger_match(struct event_trigger_data *data,
return false;
if (key_field->offset != key_field_test->offset)
return false;
}
for (i = 0; i < hist_data->n_sort_keys; i++) {
-@@ -1396,6 +5235,9 @@
+@@ -1396,6 +5235,9 @@ static bool hist_trigger_match(struct event_trigger_data *data,
(strcmp(data->filter_str, data_test->filter_str) != 0))
return false;
return true;
}
-@@ -1412,6 +5254,7 @@
+@@ -1412,6 +5254,7 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
if (named_data) {
if (!hist_trigger_match(data, named_data, named_data,
true)) {
ret = -EINVAL;
goto out;
}
-@@ -1431,13 +5274,16 @@
+@@ -1431,13 +5274,16 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
test->paused = false;
else if (hist_data->attrs->clear)
hist_clear(test);
ret = -ENOENT;
goto out;
}
-@@ -1446,7 +5292,6 @@
+@@ -1446,7 +5292,6 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
data->paused = true;
if (named_data) {
data->private_data = named_data->private_data;
set_named_trigger_data(data, named_data);
data->ops = &event_hist_trigger_named_ops;
-@@ -1458,8 +5303,32 @@
+@@ -1458,8 +5303,32 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
goto out;
}
update_cond_flag(file);
-@@ -1468,10 +5337,55 @@
+@@ -1468,10 +5337,55 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
update_cond_flag(file);
ret--;
}
static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
struct event_trigger_data *data,
struct trace_event_file *file)
-@@ -1497,17 +5411,55 @@
+@@ -1497,17 +5411,55 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
if (unregistered && test->ops->free)
test->ops->free(test->ops, test);
if (test->ops->free)
test->ops->free(test->ops, test);
}
-@@ -1523,16 +5475,54 @@
+@@ -1523,16 +5475,54 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
struct hist_trigger_attrs *attrs;
struct event_trigger_ops *trigger_ops;
struct hist_trigger_data *hist_data;
attrs = parse_hist_trigger_attrs(trigger);
if (IS_ERR(attrs))
-@@ -1541,7 +5531,7 @@
+@@ -1541,7 +5531,7 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
if (attrs->map_bits)
hist_trigger_bits = attrs->map_bits;
if (IS_ERR(hist_data)) {
destroy_hist_trigger_attrs(attrs);
return PTR_ERR(hist_data);
-@@ -1549,10 +5539,11 @@
+@@ -1549,10 +5539,11 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
trigger_data->count = -1;
trigger_data->ops = trigger_ops;
-@@ -1570,8 +5561,24 @@
+@@ -1570,8 +5561,24 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
goto out_free;
}
ret = 0;
goto out_free;
}
-@@ -1588,14 +5595,47 @@
+@@ -1588,14 +5595,47 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
goto out_free;
} else if (ret < 0)
goto out_free;
kfree(trigger_data);
destroy_hist_data(hist_data);
-@@ -1625,7 +5665,8 @@
+@@ -1625,7 +5665,8 @@ __init int register_trigger_hist_cmd(void)
}
static void
{
struct enable_trigger_data *enable_data = data->private_data;
struct event_trigger_data *test;
-@@ -1641,7 +5682,8 @@
+@@ -1641,7 +5682,8 @@ hist_enable_trigger(struct event_trigger_data *data, void *rec)
}
static void
{
if (!data->count)
return;
-@@ -1649,7 +5691,7 @@
+@@ -1649,7 +5691,7 @@ hist_enable_count_trigger(struct event_trigger_data *data, void *rec)
if (data->count != -1)
(data->count)--;
}
static struct event_trigger_ops hist_enable_trigger_ops = {
-@@ -1754,3 +5796,31 @@
+@@ -1754,3 +5796,31 @@ __init int register_trigger_hist_enable_disable_cmds(void)
return ret;
}
+}
+
+fs_initcall(trace_events_hist_init);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/trace_events_trigger.c linux-4.14/kernel/trace/trace_events_trigger.c
---- linux-4.14.orig/kernel/trace/trace_events_trigger.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/trace/trace_events_trigger.c 2018-09-05 11:05:07.000000000 +0200
-@@ -63,7 +63,8 @@
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 43254c5e7e16..24d42350d738 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -63,7 +63,8 @@ void trigger_data_free(struct event_trigger_data *data)
* any trigger that should be deferred, ETT_NONE if nothing to defer.
*/
enum event_trigger_type
{
struct event_trigger_data *data;
enum event_trigger_type tt = ETT_NONE;
-@@ -76,7 +77,7 @@
+@@ -76,7 +77,7 @@ event_triggers_call(struct trace_event_file *file, void *rec)
if (data->paused)
continue;
if (!rec) {
continue;
}
filter = rcu_dereference_sched(data->filter);
-@@ -86,7 +87,7 @@
+@@ -86,7 +87,7 @@ event_triggers_call(struct trace_event_file *file, void *rec)
tt |= data->cmd_ops->trigger_type;
continue;
}
}
return tt;
}
-@@ -108,7 +109,7 @@
+@@ -108,7 +109,7 @@ EXPORT_SYMBOL_GPL(event_triggers_call);
void
event_triggers_post_call(struct trace_event_file *file,
enum event_trigger_type tt,
{
struct event_trigger_data *data;
-@@ -116,7 +117,7 @@
+@@ -116,7 +117,7 @@ event_triggers_post_call(struct trace_event_file *file,
if (data->paused)
continue;
if (data->cmd_ops->trigger_type & tt)
}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
-@@ -914,8 +915,15 @@
+@@ -914,8 +915,15 @@ void set_named_trigger_data(struct event_trigger_data *data,
data->named_data = named_data;
}
{
if (tracing_is_on())
return;
-@@ -924,7 +932,8 @@
+@@ -924,7 +932,8 @@ traceon_trigger(struct event_trigger_data *data, void *rec)
}
static void
{
if (tracing_is_on())
return;
-@@ -939,7 +948,8 @@
+@@ -939,7 +948,8 @@ traceon_count_trigger(struct event_trigger_data *data, void *rec)
}
static void
{
if (!tracing_is_on())
return;
-@@ -948,7 +958,8 @@
+@@ -948,7 +958,8 @@ traceoff_trigger(struct event_trigger_data *data, void *rec)
}
static void
{
if (!tracing_is_on())
return;
-@@ -1045,7 +1056,8 @@
+@@ -1045,7 +1056,8 @@ static struct event_command trigger_traceoff_cmd = {
#ifdef CONFIG_TRACER_SNAPSHOT
static void
{
struct trace_event_file *file = data->private_data;
-@@ -1056,7 +1068,8 @@
+@@ -1056,7 +1068,8 @@ snapshot_trigger(struct event_trigger_data *data, void *rec)
}
static void
{
if (!data->count)
return;
-@@ -1064,7 +1077,7 @@
+@@ -1064,7 +1077,7 @@ snapshot_count_trigger(struct event_trigger_data *data, void *rec)
if (data->count != -1)
(data->count)--;
}
static int
-@@ -1143,13 +1156,15 @@
+@@ -1143,13 +1156,15 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
#define STACK_SKIP 3
static void
{
if (!data->count)
return;
-@@ -1157,7 +1172,7 @@
+@@ -1157,7 +1172,7 @@ stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
if (data->count != -1)
(data->count)--;
}
static int
-@@ -1219,7 +1234,8 @@
+@@ -1219,7 +1234,8 @@ static __init void unregister_trigger_traceon_traceoff_cmds(void)
}
static void
{
struct enable_trigger_data *enable_data = data->private_data;
-@@ -1230,7 +1246,8 @@
+@@ -1230,7 +1246,8 @@ event_enable_trigger(struct event_trigger_data *data, void *rec)
}
static void
{
struct enable_trigger_data *enable_data = data->private_data;
-@@ -1244,7 +1261,7 @@
+@@ -1244,7 +1261,7 @@ event_enable_count_trigger(struct event_trigger_data *data, void *rec)
if (data->count != -1)
(data->count)--;
}
int event_enable_trigger_print(struct seq_file *m,
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/trace.h linux-4.14/kernel/trace/trace.h
---- linux-4.14.orig/kernel/trace/trace.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/trace/trace.h 2018-09-05 11:05:07.000000000 +0200
-@@ -127,6 +127,7 @@
- * NEED_RESCHED - reschedule is requested
- * HARDIRQ - inside an interrupt handler
- * SOFTIRQ - inside a softirq handler
-+ * NEED_RESCHED_LAZY - lazy reschedule is requested
- */
- enum trace_flag_type {
- TRACE_FLAG_IRQS_OFF = 0x01,
-@@ -136,6 +137,7 @@
- TRACE_FLAG_SOFTIRQ = 0x10,
- TRACE_FLAG_PREEMPT_RESCHED = 0x20,
- TRACE_FLAG_NMI = 0x40,
-+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x80,
- };
-
- #define TRACE_BUF_SIZE 1024
-@@ -273,6 +275,8 @@
- /* function tracing enabled */
- int function_enabled;
- #endif
-+ int time_stamp_abs_ref;
-+ struct list_head hist_vars;
- };
-
- enum {
-@@ -286,6 +290,11 @@
- extern int trace_array_get(struct trace_array *tr);
- extern void trace_array_put(struct trace_array *tr);
-
-+extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
-+extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
-+
-+extern bool trace_clock_in_ns(struct trace_array *tr);
-+
- /*
- * The global tracer (top) should be the first trace array added,
- * but we check the flag anyway.
-@@ -1293,7 +1302,7 @@
- unsigned long eflags = file->flags;
-
- if (eflags & EVENT_FILE_FL_TRIGGER_COND)
-- *tt = event_triggers_call(file, entry);
-+ *tt = event_triggers_call(file, entry, event);
-
- if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
- (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
-@@ -1330,7 +1339,7 @@
- trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
-
- if (tt)
-- event_triggers_post_call(file, tt, entry);
-+ event_triggers_post_call(file, tt, entry, event);
- }
-
- /**
-@@ -1363,7 +1372,7 @@
- irq_flags, pc, regs);
-
- if (tt)
-- event_triggers_post_call(file, tt, entry);
-+ event_triggers_post_call(file, tt, entry, event);
- }
-
- #define FILTER_PRED_INVALID ((unsigned short)-1)
-@@ -1545,6 +1554,8 @@
- extern void unpause_named_trigger(struct event_trigger_data *data);
- extern void set_named_trigger_data(struct event_trigger_data *data,
- struct event_trigger_data *named_data);
-+extern struct event_trigger_data *
-+get_named_trigger_data(struct event_trigger_data *data);
- extern int register_event_command(struct event_command *cmd);
- extern int unregister_event_command(struct event_command *cmd);
- extern int register_trigger_hist_enable_disable_cmds(void);
-@@ -1588,7 +1599,8 @@
- */
- struct event_trigger_ops {
- void (*func)(struct event_trigger_data *data,
-- void *rec);
-+ void *rec,
-+ struct ring_buffer_event *rbe);
- int (*init)(struct event_trigger_ops *ops,
- struct event_trigger_data *data);
- void (*free)(struct event_trigger_ops *ops,
-@@ -1755,6 +1767,13 @@
- int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
- int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
-
-+#define MAX_EVENT_NAME_LEN 64
-+
-+extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
-+extern ssize_t trace_parse_run_command(struct file *file,
-+ const char __user *buffer, size_t count, loff_t *ppos,
-+ int (*createfn)(int, char**));
-+
- /*
- * Normal trace_printk() and friends allocates special buffers
- * to do the manipulation, as well as saves the print formats
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/trace_hwlat.c linux-4.14/kernel/trace/trace_hwlat.c
---- linux-4.14.orig/kernel/trace/trace_hwlat.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/trace/trace_hwlat.c 2018-09-05 11:05:07.000000000 +0200
-@@ -279,7 +279,7 @@
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index d7c8e4ec3d9d..518c61a1bceb 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -279,7 +279,7 @@ static void move_to_next_cpu(void)
* of this thread, than stop migrating for the duration
* of the current test.
*/
goto disable;
get_online_cpus();
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/trace_kprobe.c linux-4.14/kernel/trace/trace_kprobe.c
---- linux-4.14.orig/kernel/trace/trace_kprobe.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/trace/trace_kprobe.c 2018-09-05 11:05:07.000000000 +0200
-@@ -918,8 +918,8 @@
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index ea20274a105a..3c40d4174052 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -918,8 +918,8 @@ static int probes_open(struct inode *inode, struct file *file)
static ssize_t probes_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
}
static const struct file_operations kprobe_events_ops = {
-@@ -1444,9 +1444,9 @@
+@@ -1444,9 +1444,9 @@ static __init int kprobe_trace_self_tests_init(void)
pr_info("Testing kprobe tracing: ");
if (WARN_ON_ONCE(ret)) {
pr_warn("error on probing function entry.\n");
warn++;
-@@ -1466,8 +1466,8 @@
+@@ -1466,8 +1466,8 @@ static __init int kprobe_trace_self_tests_init(void)
}
}
if (WARN_ON_ONCE(ret)) {
pr_warn("error on probing function return.\n");
warn++;
-@@ -1537,13 +1537,13 @@
+@@ -1537,13 +1537,13 @@ static __init int kprobe_trace_self_tests_init(void)
disable_trace_kprobe(tk, file);
}
if (WARN_ON_ONCE(ret)) {
pr_warn("error on deleting a probe.\n");
warn++;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/trace_output.c linux-4.14/kernel/trace/trace_output.c
---- linux-4.14.orig/kernel/trace/trace_output.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/trace/trace_output.c 2018-09-05 11:05:07.000000000 +0200
-@@ -447,6 +447,7 @@
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 4500b00e4e36..74a4bfc2c6b7 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -447,6 +447,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
char hardsoft_irq;
char need_resched;
char irqs_off;
int hardirq;
int softirq;
-@@ -477,6 +478,9 @@
+@@ -477,6 +478,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
break;
}
hardsoft_irq =
(nmi && hardirq) ? 'Z' :
nmi ? 'z' :
-@@ -485,14 +489,25 @@
+@@ -485,14 +489,25 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
softirq ? 's' :
'.' ;
return !trace_seq_has_overflowed(s);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/trace_probe.c linux-4.14/kernel/trace/trace_probe.c
---- linux-4.14.orig/kernel/trace/trace_probe.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/trace/trace_probe.c 2018-09-05 11:05:07.000000000 +0200
-@@ -621,92 +621,6 @@
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index fe4513330412..daf54bda4dc8 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -621,92 +621,6 @@ void traceprobe_free_probe_arg(struct probe_arg *arg)
kfree(arg->comm);
}
static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
bool is_return)
{
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/trace_probe.h linux-4.14/kernel/trace/trace_probe.h
---- linux-4.14.orig/kernel/trace/trace_probe.h 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/trace/trace_probe.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index dc39472ca9e4..a0d750e3d17c 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
@@ -42,7 +42,6 @@
#define MAX_TRACE_ARGS 128
#define MAX_STRING_SIZE PATH_MAX
/* Reserved field names */
-@@ -356,12 +355,6 @@
+@@ -356,12 +355,6 @@ extern void traceprobe_free_probe_arg(struct probe_arg *arg);
extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
/* Sum up total data length for dynamic arraies (strings) */
static nokprobe_inline int
__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/trace_uprobe.c linux-4.14/kernel/trace/trace_uprobe.c
---- linux-4.14.orig/kernel/trace/trace_uprobe.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/trace/trace_uprobe.c 2018-09-05 11:05:07.000000000 +0200
-@@ -647,7 +647,7 @@
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index ea0d90a31fc9..2ccfbb8efeb2 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -647,7 +647,7 @@ static int probes_open(struct inode *inode, struct file *file)
static ssize_t probes_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
}
static const struct file_operations uprobe_events_ops = {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/tracing_map.c linux-4.14/kernel/trace/tracing_map.c
---- linux-4.14.orig/kernel/trace/tracing_map.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/trace/tracing_map.c 2018-09-05 11:05:07.000000000 +0200
-@@ -66,6 +66,73 @@
+diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
+index 305039b122fa..5cadb1b8b5fe 100644
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -66,6 +66,73 @@ u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i)
return (u64)atomic64_read(&elt->fields[i].sum);
}
int tracing_map_cmp_string(void *val_a, void *val_b)
{
char *a = val_a;
-@@ -171,6 +238,28 @@
+@@ -170,6 +237,28 @@ int tracing_map_add_sum_field(struct tracing_map *map)
+ return tracing_map_add_field(map, tracing_map_cmp_atomic64);
}
- /**
++/**
+ * tracing_map_add_var - Add a field describing a tracing_map var
+ * @map: The tracing_map
+ *
+ return ret;
+}
+
-+/**
+ /**
* tracing_map_add_key_field - Add a field describing a tracing_map key
* @map: The tracing_map
- * @offset: The offset within the key
-@@ -280,6 +369,11 @@
+@@ -280,6 +369,11 @@ static void tracing_map_elt_clear(struct tracing_map_elt *elt)
if (elt->fields[i].cmp_fn == tracing_map_cmp_atomic64)
atomic64_set(&elt->fields[i].sum, 0);
if (elt->map->ops && elt->map->ops->elt_clear)
elt->map->ops->elt_clear(elt);
}
-@@ -306,6 +400,8 @@
+@@ -306,6 +400,8 @@ static void tracing_map_elt_free(struct tracing_map_elt *elt)
if (elt->map->ops && elt->map->ops->elt_free)
elt->map->ops->elt_free(elt);
kfree(elt->fields);
kfree(elt->key);
kfree(elt);
}
-@@ -333,6 +429,18 @@
+@@ -333,6 +429,18 @@ static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map)
goto free;
}
tracing_map_elt_init_fields(elt);
if (map->ops && map->ops->elt_alloc) {
-@@ -414,7 +522,9 @@
+@@ -414,7 +522,9 @@ static inline struct tracing_map_elt *
__tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
{
u32 idx, key_hash, test_key;
key_hash = jhash(key, map->key_size, 0);
if (key_hash == 0)
-@@ -426,10 +536,33 @@
+@@ -426,10 +536,33 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
entry = TRACING_MAP_ENTRY(map->map, idx);
test_key = entry->key;
}
if (!test_key) {
-@@ -451,6 +584,13 @@
+@@ -451,6 +584,13 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
atomic64_inc(&map->hits);
return entry->val;
}
}
-@@ -815,67 +955,15 @@
+@@ -815,67 +955,15 @@ create_sort_entry(void *key, struct tracing_map_elt *elt)
return sort_entry;
}
sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *),
(int (*)(const void *, const void *))cmp_entries_dup, NULL);
-@@ -884,30 +972,14 @@
+@@ -884,30 +972,14 @@ static int merge_dups(struct tracing_map_sort_entry **sort_entries,
for (i = 1; i < n_entries; i++) {
if (!memcmp(sort_entries[i]->key, key, key_size)) {
dups++; total_dups++;
}
static bool is_key(struct tracing_map *map, unsigned int field_idx)
-@@ -1033,10 +1105,7 @@
+@@ -1033,10 +1105,7 @@ int tracing_map_sort_entries(struct tracing_map *map,
return 1;
}
if (is_key(map, sort_keys[0].field_idx))
cmp_entries_fn = cmp_entries_key;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/trace/tracing_map.h linux-4.14/kernel/trace/tracing_map.h
---- linux-4.14.orig/kernel/trace/tracing_map.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/trace/tracing_map.h 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/trace/tracing_map.h b/kernel/trace/tracing_map.h
+index ab0ca77331d0..053eb92b2d31 100644
+--- a/kernel/trace/tracing_map.h
++++ b/kernel/trace/tracing_map.h
@@ -6,10 +6,11 @@
#define TRACING_MAP_BITS_MAX 17
#define TRACING_MAP_BITS_MIN 7
#define TRACING_MAP_SORT_KEYS_MAX 2
typedef int (*tracing_map_cmp_fn_t) (void *val_a, void *val_b);
-@@ -137,6 +138,8 @@
+@@ -137,6 +138,8 @@ struct tracing_map_field {
struct tracing_map_elt {
struct tracing_map *map;
struct tracing_map_field *fields;
void *key;
void *private_data;
};
-@@ -192,6 +195,7 @@
+@@ -192,6 +195,7 @@ struct tracing_map {
int key_idx[TRACING_MAP_KEYS_MAX];
unsigned int n_keys;
struct tracing_map_sort_key sort_key;
atomic64_t hits;
atomic64_t drops;
};
-@@ -215,11 +219,6 @@
+@@ -215,11 +219,6 @@ struct tracing_map {
* Element allocation occurs before tracing begins, when the
* tracing_map_init() call is made by client code.
*
* @elt_free: When a tracing_map_elt is freed, this function is called
* and allows client-allocated per-element data to be freed.
*
-@@ -233,8 +232,6 @@
+@@ -233,8 +232,6 @@ struct tracing_map {
*/
struct tracing_map_ops {
int (*elt_alloc)(struct tracing_map_elt *elt);
void (*elt_free)(struct tracing_map_elt *elt);
void (*elt_clear)(struct tracing_map_elt *elt);
void (*elt_init)(struct tracing_map_elt *elt);
-@@ -248,6 +245,7 @@
+@@ -248,6 +245,7 @@ tracing_map_create(unsigned int map_bits,
extern int tracing_map_init(struct tracing_map *map);
extern int tracing_map_add_sum_field(struct tracing_map *map);
extern int tracing_map_add_key_field(struct tracing_map *map,
unsigned int offset,
tracing_map_cmp_fn_t cmp_fn);
-@@ -267,7 +265,13 @@
+@@ -267,7 +265,13 @@ extern int tracing_map_cmp_none(void *val_a, void *val_b);
extern void tracing_map_update_sum(struct tracing_map_elt *elt,
unsigned int i, u64 n);
extern void tracing_map_set_field_descr(struct tracing_map *map,
unsigned int i,
unsigned int key_offset,
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/user.c linux-4.14/kernel/user.c
---- linux-4.14.orig/kernel/user.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/user.c 2018-09-05 11:05:07.000000000 +0200
-@@ -162,11 +162,11 @@
+diff --git a/kernel/user.c b/kernel/user.c
+index 00281add65b2..f4cf1841f2fd 100644
+--- a/kernel/user.c
++++ b/kernel/user.c
+@@ -162,11 +162,11 @@ void free_uid(struct user_struct *up)
if (!up)
return;
}
struct user_struct *alloc_uid(kuid_t uid)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/watchdog.c linux-4.14/kernel/watchdog.c
---- linux-4.14.orig/kernel/watchdog.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/watchdog.c 2018-09-05 11:05:07.000000000 +0200
-@@ -462,7 +462,7 @@
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 087994b23f8b..ea4c09109ce4 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -462,7 +462,7 @@ static void watchdog_enable(unsigned int cpu)
* Start the timer first to prevent the NMI watchdog triggering
* before the timer has a chance to fire.
*/
hrtimer->function = watchdog_timer_fn;
hrtimer_start(hrtimer, ns_to_ktime(sample_period),
HRTIMER_MODE_REL_PINNED);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/watchdog_hld.c linux-4.14/kernel/watchdog_hld.c
---- linux-4.14.orig/kernel/watchdog_hld.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/watchdog_hld.c 2018-09-05 11:05:07.000000000 +0200
-@@ -24,6 +24,8 @@
+diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
+index 4ece6028007a..210dccc57c04 100644
+--- a/kernel/watchdog_hld.c
++++ b/kernel/watchdog_hld.c
+@@ -24,6 +24,8 @@ static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static DEFINE_PER_CPU(struct perf_event *, dead_event);
static struct cpumask dead_events_mask;
static unsigned long hardlockup_allcpu_dumped;
-@@ -134,6 +136,13 @@
+@@ -134,6 +136,13 @@ static void watchdog_overflow_callback(struct perf_event *event,
/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
print_modules();
-@@ -151,6 +160,7 @@
+@@ -151,6 +160,7 @@ static void watchdog_overflow_callback(struct perf_event *event,
!test_and_set_bit(0, &hardlockup_allcpu_dumped))
trigger_allbutself_cpu_backtrace();
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/workqueue.c linux-4.14/kernel/workqueue.c
---- linux-4.14.orig/kernel/workqueue.c 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/kernel/workqueue.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 08bc551976b2..76297cce5602 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
@@ -49,6 +49,8 @@
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include "workqueue_internal.h"
-@@ -123,11 +125,16 @@
+@@ -123,11 +125,16 @@ enum {
* cpu or grabbing pool->lock is enough for read access. If
* POOL_DISASSOCIATED is set, it's identical to L.
*
*
* PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
*
-@@ -136,7 +143,7 @@
+@@ -136,7 +143,7 @@ enum {
*
* WQ: wq->mutex protected.
*
*
* MD: wq_mayday_lock protected.
*/
-@@ -186,7 +193,7 @@
+@@ -186,7 +193,7 @@ struct worker_pool {
atomic_t nr_running ____cacheline_aligned_in_smp;
/*
* from get_work_pool().
*/
struct rcu_head rcu;
-@@ -215,7 +222,7 @@
+@@ -215,7 +222,7 @@ struct pool_workqueue {
/*
* Release of unbound pwq is punted to system_wq. See put_pwq()
* and pwq_unbound_release_workfn() for details. pool_workqueue
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
-@@ -352,6 +359,8 @@
+@@ -352,6 +359,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
-@@ -359,20 +368,20 @@
+@@ -359,20 +368,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
#define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
-@@ -384,7 +393,7 @@
+@@ -384,7 +393,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* @pool: iteration cursor
* @pi: integer used for iteration
*
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
-@@ -416,7 +425,7 @@
+@@ -416,7 +425,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* @pwq: iteration cursor
* @wq: the target workqueue
*
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
-@@ -428,6 +437,31 @@
+@@ -428,6 +437,31 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
else
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
-@@ -552,7 +586,7 @@
+@@ -552,7 +586,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
* @wq: the target workqueue
* @node: the node ID
*
* read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
-@@ -696,8 +730,8 @@
+@@ -696,8 +730,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
-@@ -834,50 +868,45 @@
+@@ -834,50 +868,45 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
*/
static void wake_up_worker(struct worker_pool *pool)
{
+ * wq_worker_running - a worker is running again
* @task: task waking up
- * @cpu: CPU @task is waking up to
-- *
+ *
- * This function is called during try_to_wake_up() when a worker is
- * being awoken.
- *
+- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
+ * This function is called when a worker returns from schedule()
struct worker_pool *pool;
/*
-@@ -886,29 +915,26 @@
+@@ -886,29 +915,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
}
/**
-@@ -1102,12 +1128,14 @@
+@@ -1102,12 +1128,14 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
if (pwq) {
/*
}
}
-@@ -1211,7 +1239,7 @@
+@@ -1211,7 +1239,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
struct worker_pool *pool;
struct pool_workqueue *pwq;
/* try to steal the timer if it exists */
if (is_dwork) {
-@@ -1230,6 +1258,7 @@
+@@ -1230,6 +1258,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1268,14 +1297,16 @@
+@@ -1268,14 +1297,16 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
return -EAGAIN;
}
-@@ -1377,7 +1408,7 @@
+@@ -1377,7 +1408,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
debug_work_activate(work);
-@@ -1385,6 +1416,7 @@
+@@ -1385,6 +1416,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
retry:
if (req_cpu == WORK_CPU_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-@@ -1441,10 +1473,8 @@
+@@ -1441,10 +1473,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
-@@ -1462,7 +1492,9 @@
+@@ -1462,7 +1492,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
insert_work(pwq, work, worklist, work_flags);
}
/**
-@@ -1482,14 +1514,14 @@
+@@ -1482,14 +1514,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
bool ret = false;
unsigned long flags;
return ret;
}
EXPORT_SYMBOL(queue_work_on);
-@@ -1498,8 +1530,11 @@
+@@ -1498,8 +1530,11 @@ void delayed_work_timer_fn(unsigned long __data)
{
struct delayed_work *dwork = (struct delayed_work *)__data;
}
EXPORT_SYMBOL(delayed_work_timer_fn);
-@@ -1555,14 +1590,14 @@
+@@ -1555,14 +1590,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
unsigned long flags;
/* read the comment in __queue_work() */
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1597,7 +1632,7 @@
+@@ -1597,7 +1632,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
}
/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -1630,7 +1665,9 @@
+@@ -1630,7 +1665,9 @@ static void worker_enter_idle(struct worker *worker)
worker->last_active = jiffies;
/* idle_list is LIFO */
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1663,7 +1700,9 @@
+@@ -1663,7 +1700,9 @@ static void worker_leave_idle(struct worker *worker)
return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
}
static struct worker *alloc_worker(int node)
-@@ -1829,7 +1868,9 @@
+@@ -1829,7 +1868,9 @@ static void destroy_worker(struct worker *worker)
pool->nr_workers--;
pool->nr_idle--;
worker->flags |= WORKER_DIE;
wake_up_process(worker->task);
}
-@@ -2815,14 +2856,14 @@
+@@ -2815,14 +2856,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
might_sleep();
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
-@@ -2853,10 +2894,11 @@
+@@ -2853,10 +2894,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
}
return false;
}
-@@ -2946,7 +2988,7 @@
+@@ -2946,7 +2988,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
/*
* This allows canceling during early boot. We know that @work
-@@ -3007,10 +3049,10 @@
+@@ -3007,10 +3049,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
-@@ -3028,7 +3070,7 @@
+@@ -3028,7 +3070,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
return false;
set_work_pool_and_clear_pending(work, get_work_pool_id(work));
return ret;
}
-@@ -3284,7 +3326,7 @@
+@@ -3284,7 +3326,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
-@@ -3338,8 +3380,8 @@
+@@ -3338,8 +3380,8 @@ static void put_unbound_pool(struct worker_pool *pool)
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
}
/**
-@@ -3446,14 +3488,14 @@
+@@ -3446,14 +3488,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
}
/**
-@@ -4128,7 +4170,7 @@
+@@ -4128,7 +4170,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
} else {
/*
* We're the sole accessor of @wq at this point. Directly
-@@ -4238,7 +4280,8 @@
+@@ -4238,7 +4280,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
struct pool_workqueue *pwq;
bool ret;
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
-@@ -4249,7 +4292,8 @@
+@@ -4249,7 +4292,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
return ret;
}
-@@ -4275,15 +4319,15 @@
+@@ -4275,15 +4319,15 @@ unsigned int work_busy(struct work_struct *work)
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
return ret;
}
-@@ -4472,7 +4516,7 @@
+@@ -4472,7 +4516,7 @@ void show_workqueue_state(void)
unsigned long flags;
int pi;
pr_info("Showing busy workqueues and worker pools:\n");
-@@ -4537,7 +4581,7 @@
+@@ -4537,7 +4581,7 @@ void show_workqueue_state(void)
touch_nmi_watchdog();
}
}
/*
-@@ -4898,16 +4942,16 @@
+@@ -4898,16 +4942,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
-@@ -5097,7 +5141,8 @@
+@@ -5097,7 +5141,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
const char *delim = "";
int node, written = 0;
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
-@@ -5105,7 +5150,8 @@
+@@ -5105,7 +5150,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
return written;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/kernel/workqueue_internal.h linux-4.14/kernel/workqueue_internal.h
---- linux-4.14.orig/kernel/workqueue_internal.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/kernel/workqueue_internal.h 2018-09-05 11:05:07.000000000 +0200
-@@ -45,6 +45,7 @@
+diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
+index d390d1be3748..2dbcfe9bc364 100644
+--- a/kernel/workqueue_internal.h
++++ b/kernel/workqueue_internal.h
+@@ -45,6 +45,7 @@ struct worker {
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
int id; /* I: worker id */
/*
* Opaque string set with work_set_desc(). Printed out with task
-@@ -70,7 +71,7 @@
+@@ -70,7 +71,7 @@ static inline struct worker *current_wq_worker(void)
* Scheduler hooks for concurrency managed workqueue. Only to be used from
* sched/core.c and workqueue.c.
*/
+void wq_worker_sleeping(struct task_struct *task);
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/lib/debugobjects.c linux-4.14/lib/debugobjects.c
---- linux-4.14.orig/lib/debugobjects.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/lib/debugobjects.c 2018-09-05 11:05:07.000000000 +0200
-@@ -336,7 +336,10 @@
+diff --git a/lib/Kconfig b/lib/Kconfig
+index b1445b22a6de..9ab51b78991a 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -428,6 +428,7 @@ config CHECK_SIGNATURE
+
+ config CPUMASK_OFFSTACK
+ bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
++ depends on !PREEMPT_RT_FULL
+ help
+ Use dynamic allocation for cpumask_var_t, instead of putting
+ them on the stack. This is a bit more expensive, but avoids
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 62d0e25c054c..401b7ed164b5 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1197,7 +1197,7 @@ config DEBUG_ATOMIC_SLEEP
+
+ config DEBUG_LOCKING_API_SELFTESTS
+ bool "Locking API boot-time self-tests"
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && !PREEMPT_RT_FULL
+ help
+ Say Y here if you want the kernel to run a short self-test during
+ bootup. The self-test checks whether common types of locking bugs
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index 99308479b1c8..161da6c6e173 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -339,7 +339,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;
db = get_bucket((unsigned long) addr);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/lib/irq_poll.c linux-4.14/lib/irq_poll.c
---- linux-4.14.orig/lib/irq_poll.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/lib/irq_poll.c 2018-09-05 11:05:07.000000000 +0200
-@@ -37,6 +37,7 @@
+diff --git a/lib/irq_poll.c b/lib/irq_poll.c
+index 86a709954f5a..9c069ef83d6d 100644
+--- a/lib/irq_poll.c
++++ b/lib/irq_poll.c
+@@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop)
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_sched);
-@@ -72,6 +73,7 @@
+@@ -72,6 +73,7 @@ void irq_poll_complete(struct irq_poll *iop)
local_irq_save(flags);
__irq_poll_complete(iop);
local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_complete);
-@@ -96,6 +98,7 @@
+@@ -96,6 +98,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
}
local_irq_enable();
/* Even though interrupts have been re-enabled, this
* access is safe because interrupts can only add new
-@@ -133,6 +136,7 @@
+@@ -133,6 +136,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
}
/**
-@@ -196,6 +200,7 @@
+@@ -196,6 +200,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
return 0;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/lib/Kconfig linux-4.14/lib/Kconfig
---- linux-4.14.orig/lib/Kconfig 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/lib/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -428,6 +428,7 @@
-
- config CPUMASK_OFFSTACK
- bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
-+ depends on !PREEMPT_RT_FULL
- help
- Use dynamic allocation for cpumask_var_t, instead of putting
- them on the stack. This is a bit more expensive, but avoids
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/lib/Kconfig.debug linux-4.14/lib/Kconfig.debug
---- linux-4.14.orig/lib/Kconfig.debug 2018-09-05 11:03:22.000000000 +0200
-+++ linux-4.14/lib/Kconfig.debug 2018-09-05 11:05:07.000000000 +0200
-@@ -1197,7 +1197,7 @@
-
- config DEBUG_LOCKING_API_SELFTESTS
- bool "Locking API boot-time self-tests"
-- depends on DEBUG_KERNEL
-+ depends on DEBUG_KERNEL && !PREEMPT_RT_FULL
- help
- Say Y here if you want the kernel to run a short self-test during
- bootup. The self-test checks whether common types of locking bugs
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/lib/locking-selftest.c linux-4.14/lib/locking-selftest.c
---- linux-4.14.orig/lib/locking-selftest.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/lib/locking-selftest.c 2018-09-05 11:05:07.000000000 +0200
-@@ -742,6 +742,8 @@
+diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
+index b5c1293ce147..075e225f4111 100644
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -742,6 +742,8 @@ GENERATE_TESTCASE(init_held_rtmutex);
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
-@@ -757,9 +759,12 @@
+@@ -757,9 +759,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
/*
* Enabling hardirqs with a softirq-safe lock held:
*/
-@@ -792,6 +797,8 @@
+@@ -792,6 +797,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
#undef E1
#undef E2
/*
* Enabling irqs with an irq-safe lock held:
*/
-@@ -815,6 +822,8 @@
+@@ -815,6 +822,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
-@@ -830,6 +839,8 @@
+@@ -830,6 +839,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
#undef E1
#undef E2
-@@ -861,6 +872,8 @@
+@@ -861,6 +872,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
-@@ -876,6 +889,8 @@
+@@ -876,6 +889,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
#undef E1
#undef E2
#undef E3
-@@ -909,6 +924,8 @@
+@@ -909,6 +924,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
-@@ -924,10 +941,14 @@
+@@ -924,10 +941,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
/*
* read-lock / write-lock irq inversion.
*
-@@ -990,6 +1011,10 @@
+@@ -990,6 +1011,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
#undef E2
#undef E3
/*
* read-lock / write-lock recursion that is actually safe.
*/
-@@ -1028,6 +1053,8 @@
+@@ -1028,6 +1053,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
#undef E2
#undef E3
/*
* read-lock / write-lock recursion that is unsafe.
*/
-@@ -2057,6 +2084,7 @@
+@@ -2057,6 +2084,7 @@ void locking_selftest(void)
printk(" --------------------------------------------------------------------------\n");
/*
* irq-context testcases:
*/
-@@ -2069,6 +2097,28 @@
+@@ -2069,6 +2097,28 @@ void locking_selftest(void)
DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
ww_tests();
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/lib/percpu_ida.c linux-4.14/lib/percpu_ida.c
---- linux-4.14.orig/lib/percpu_ida.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/lib/percpu_ida.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
+index 6016f1deb1f5..cdd43086b55b 100644
+--- a/lib/percpu_ida.c
++++ b/lib/percpu_ida.c
@@ -27,6 +27,9 @@
#include <linux/string.h>
#include <linux/spinlock.h>
struct percpu_ida_cpu {
/*
-@@ -149,13 +152,13 @@
+@@ -149,13 +152,13 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
unsigned long flags;
int tag;
return tag;
}
-@@ -174,6 +177,7 @@
+@@ -174,6 +177,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
if (!tags->nr_free)
alloc_global_tags(pool, tags);
if (!tags->nr_free)
steal_tags(pool, tags);
-@@ -185,7 +189,7 @@
+@@ -185,7 +189,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
}
spin_unlock(&pool->lock);
if (tag >= 0 || state == TASK_RUNNING)
break;
-@@ -197,7 +201,7 @@
+@@ -197,7 +201,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
schedule();
tags = this_cpu_ptr(pool->tag_cpu);
}
if (state != TASK_RUNNING)
-@@ -222,7 +226,7 @@
+@@ -222,7 +226,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
BUG_ON(tag >= pool->nr_tags);
tags = this_cpu_ptr(pool->tag_cpu);
spin_lock(&tags->lock);
-@@ -254,7 +258,7 @@
+@@ -254,7 +258,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
spin_unlock(&pool->lock);
}
}
EXPORT_SYMBOL_GPL(percpu_ida_free);
-@@ -346,7 +350,7 @@
+@@ -346,7 +350,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
struct percpu_ida_cpu *remote;
unsigned cpu, i, err = 0;
for_each_possible_cpu(cpu) {
remote = per_cpu_ptr(pool->tag_cpu, cpu);
spin_lock(&remote->lock);
-@@ -368,7 +372,7 @@
+@@ -368,7 +372,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
}
spin_unlock(&pool->lock);
out:
return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/lib/radix-tree.c linux-4.14/lib/radix-tree.c
---- linux-4.14.orig/lib/radix-tree.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/lib/radix-tree.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index d172f0341b80..c1da1109a107 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
@@ -37,7 +37,7 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
-@@ -86,6 +86,7 @@
+@@ -86,6 +86,7 @@ struct radix_tree_preload {
struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
static inline struct radix_tree_node *entry_to_node(void *ptr)
{
-@@ -404,12 +405,13 @@
+@@ -404,12 +405,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
/*
* Update the allocation stack trace as this is more useful
* for debugging.
-@@ -475,14 +477,14 @@
+@@ -475,14 +477,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
*/
gfp_mask &= ~__GFP_ACCOUNT;
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < nr) {
node->parent = rtp->nodes;
-@@ -524,7 +526,7 @@
+@@ -524,7 +526,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
if (gfpflags_allow_blocking(gfp_mask))
return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
/* Preloading doesn't help anything with this gfp mask, skip it */
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
-@@ -562,7 +564,7 @@
+@@ -562,7 +564,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
/* Preloading doesn't help anything with this gfp mask, skip it */
if (!gfpflags_allow_blocking(gfp_mask)) {
return 0;
}
-@@ -596,6 +598,12 @@
+@@ -596,6 +598,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
return __radix_tree_preload(gfp_mask, nr_nodes);
}
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex)
{
-@@ -2105,10 +2113,16 @@
+@@ -2105,10 +2113,16 @@ EXPORT_SYMBOL(radix_tree_tagged);
void idr_preload(gfp_t gfp_mask)
{
if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
/**
* ida_pre_get - reserve resources for ida allocation
* @ida: ida handle
-@@ -2125,7 +2139,7 @@
+@@ -2125,7 +2139,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
* to return to the ida_pre_get() step.
*/
if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
if (!this_cpu_read(ida_bitmap)) {
struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/lib/scatterlist.c linux-4.14/lib/scatterlist.c
---- linux-4.14.orig/lib/scatterlist.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/lib/scatterlist.c 2018-09-05 11:05:07.000000000 +0200
-@@ -620,7 +620,7 @@
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index be7b4dd6b68d..d06c15d3d186 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -620,7 +620,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/lib/smp_processor_id.c linux-4.14/lib/smp_processor_id.c
---- linux-4.14.orig/lib/smp_processor_id.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/lib/smp_processor_id.c 2018-09-05 11:05:07.000000000 +0200
-@@ -23,7 +23,7 @@
+diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
+index 835cc6df2776..6f4a4ae881c8 100644
+--- a/lib/smp_processor_id.c
++++ b/lib/smp_processor_id.c
+@@ -23,7 +23,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
goto out;
/*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/lib/timerqueue.c linux-4.14/lib/timerqueue.c
---- linux-4.14.orig/lib/timerqueue.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/lib/timerqueue.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/lib/timerqueue.c b/lib/timerqueue.c
+index 4a720ed4fdaf..0d54bcbc8170 100644
+--- a/lib/timerqueue.c
++++ b/lib/timerqueue.c
@@ -33,8 +33,9 @@
* @head: head of timerqueue
* @node: timer node to be added
*/
bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
{
-@@ -70,7 +71,8 @@
+@@ -70,7 +71,8 @@ EXPORT_SYMBOL_GPL(timerqueue_add);
* @head: head of timerqueue
* @node: timer node to be removed
*
*/
bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
{
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/localversion-rt linux-4.14/localversion-rt
---- linux-4.14.orig/localversion-rt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/localversion-rt 2018-09-05 11:05:07.000000000 +0200
+diff --git a/localversion-rt b/localversion-rt
+new file mode 100644
+index 000000000000..ac4d836a809d
+--- /dev/null
++++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt40
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/backing-dev.c linux-4.14/mm/backing-dev.c
---- linux-4.14.orig/mm/backing-dev.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/mm/backing-dev.c 2018-09-05 11:05:07.000000000 +0200
-@@ -470,9 +470,9 @@
++-rt44
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 59efbd3337e0..3df123c0bc3f 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -385,7 +385,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+
+ config TRANSPARENT_HUGEPAGE
+ bool "Transparent Hugepage Support"
+- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
++ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
+ select COMPACTION
+ select RADIX_TREE_MULTIORDER
+ help
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index 9386c98dac12..5e9d804c37cb 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -470,9 +470,9 @@ void wb_congested_put(struct bdi_writeback_congested *congested)
{
unsigned long flags;
return;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/compaction.c linux-4.14/mm/compaction.c
---- linux-4.14.orig/mm/compaction.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/mm/compaction.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1634,10 +1634,12 @@
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 85395dc6eb13..d6c8ed009e93 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1634,10 +1634,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
block_start_pfn(cc->migrate_pfn, cc->order);
if (cc->last_migrated_pfn < current_block_start) {
/* No more flushing until we migrate again */
cc->last_migrated_pfn = 0;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/filemap.c linux-4.14/mm/filemap.c
---- linux-4.14.orig/mm/filemap.c 2018-09-05 11:03:28.000000000 +0200
-+++ linux-4.14/mm/filemap.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/mm/filemap.c b/mm/filemap.c
+index e2e738cc08b1..c47070dae8b9 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
@@ -110,6 +110,7 @@
* ->i_mmap_rwsem
* ->tasklist_lock (memory_failure, collect_procs_ao)
static int page_cache_tree_insert(struct address_space *mapping,
struct page *page, void **shadowp)
-@@ -133,8 +134,10 @@
+@@ -133,8 +134,10 @@ static int page_cache_tree_insert(struct address_space *mapping,
if (shadowp)
*shadowp = p;
}
mapping->nrpages++;
return 0;
}
-@@ -151,6 +154,7 @@
+@@ -151,6 +154,7 @@ static void page_cache_tree_delete(struct address_space *mapping,
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(nr != 1 && shadow, page);
for (i = 0; i < nr; i++) {
struct radix_tree_node *node;
void **slot;
-@@ -162,8 +166,9 @@
+@@ -162,8 +166,9 @@ static void page_cache_tree_delete(struct address_space *mapping,
radix_tree_clear_tags(&mapping->page_tree, node, slot);
__radix_tree_replace(&mapping->page_tree, node, slot, shadow,
if (shadow) {
mapping->nrexceptional += nr;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/highmem.c linux-4.14/mm/highmem.c
---- linux-4.14.orig/mm/highmem.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/mm/highmem.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/mm/highmem.c b/mm/highmem.c
+index 59db3223a5d6..22aa3ddbd87b 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
@@ -30,10 +30,11 @@
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
/*
* Virtual_count is not a pure "count".
-@@ -108,8 +109,9 @@
+@@ -108,8 +109,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);
unsigned int nr_free_highpages (void)
{
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/Kconfig linux-4.14/mm/Kconfig
---- linux-4.14.orig/mm/Kconfig 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/mm/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -385,7 +385,7 @@
-
- config TRANSPARENT_HUGEPAGE
- bool "Transparent Hugepage Support"
-- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
-+ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
- select COMPACTION
- select RADIX_TREE_MULTIORDER
- help
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/memcontrol.c linux-4.14/mm/memcontrol.c
---- linux-4.14.orig/mm/memcontrol.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/mm/memcontrol.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 6a9a7e1066ef..3cc297730103 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
@@ -69,6 +69,7 @@
#include <net/sock.h>
#include <net/ip.h>
#include <linux/uaccess.h>
-@@ -94,6 +95,8 @@
+@@ -94,6 +95,8 @@ int do_swap_account __read_mostly;
#define do_swap_account 0
#endif
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -1831,7 +1834,7 @@
+@@ -1831,7 +1834,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
* as well as workers from this path always operate on the local
* per-cpu data. CPU up doesn't touch memcg_stock at all.
*/
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -1851,7 +1854,7 @@
+@@ -1851,7 +1854,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
}
css_put(&memcg->css);
}
mutex_unlock(&percpu_charge_mutex);
}
-@@ -4624,12 +4627,12 @@
+@@ -4631,12 +4634,12 @@ static int mem_cgroup_move_account(struct page *page,
ret = 0;
out_unlock:
unlock_page(page);
out:
-@@ -5572,10 +5575,10 @@
+@@ -5579,10 +5582,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
commit_charge(page, memcg, lrucare);
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5644,7 +5647,7 @@
+@@ -5651,7 +5654,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
memcg_oom_recover(ug->memcg);
}
__this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS], ug->nr_anon);
__this_cpu_sub(ug->memcg->stat->count[MEMCG_CACHE], ug->nr_file);
__this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS_HUGE], ug->nr_huge);
-@@ -5652,7 +5655,7 @@
+@@ -5659,7 +5662,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
__this_cpu_add(ug->memcg->stat->events[PGPGOUT], ug->pgpgout);
__this_cpu_add(ug->memcg->stat->nr_page_events, nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
if (!mem_cgroup_is_root(ug->memcg))
css_put_many(&ug->memcg->css, nr_pages);
-@@ -5815,10 +5818,10 @@
+@@ -5822,10 +5825,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
commit_charge(newpage, memcg, false);
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -6010,6 +6013,7 @@
+@@ -6017,6 +6020,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -6055,13 +6059,17 @@
+@@ -6062,13 +6066,17 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
* important here to have the interrupts disabled because it is the
* only synchronisation we have for udpating the per-CPU variables.
*/
}
/**
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/mmu_context.c linux-4.14/mm/mmu_context.c
---- linux-4.14.orig/mm/mmu_context.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/mm/mmu_context.c 2018-09-05 11:05:07.000000000 +0200
-@@ -25,6 +25,7 @@
+diff --git a/mm/mmu_context.c b/mm/mmu_context.c
+index 3e612ae748e9..d0ccc070979f 100644
+--- a/mm/mmu_context.c
++++ b/mm/mmu_context.c
+@@ -25,6 +25,7 @@ void use_mm(struct mm_struct *mm)
struct task_struct *tsk = current;
task_lock(tsk);
active_mm = tsk->active_mm;
if (active_mm != mm) {
mmgrab(mm);
-@@ -32,6 +33,7 @@
+@@ -32,6 +33,7 @@ void use_mm(struct mm_struct *mm)
}
tsk->mm = mm;
switch_mm(active_mm, mm, tsk);
task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
finish_arch_post_lock_switch();
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/page_alloc.c linux-4.14/mm/page_alloc.c
---- linux-4.14.orig/mm/page_alloc.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/mm/page_alloc.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 59ccf455fcbd..fa17845aa179 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
-@@ -286,6 +287,18 @@
+@@ -286,6 +287,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1094,7 +1107,7 @@
+@@ -1094,7 +1107,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
#endif /* CONFIG_DEBUG_VM */
/*
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -1105,15 +1118,53 @@
+@@ -1105,15 +1118,53 @@ static bool bulkfree_pcp_prepare(struct page *page)
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
while (count) {
struct page *page;
struct list_head *list;
-@@ -1129,7 +1180,7 @@
+@@ -1129,7 +1180,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
-@@ -1137,27 +1188,12 @@
+@@ -1137,27 +1188,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
batch_free = count;
do {
}
static void free_one_page(struct zone *zone,
-@@ -1165,13 +1201,15 @@
+@@ -1165,13 +1201,15 @@ static void free_one_page(struct zone *zone,
unsigned int order,
int migratetype)
{
}
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-@@ -1257,10 +1295,10 @@
+@@ -1257,10 +1295,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
return;
migratetype = get_pfnblock_migratetype(page, pfn);
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
-@@ -2378,16 +2416,18 @@
+@@ -2378,16 +2416,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
}
#endif
-@@ -2403,16 +2443,21 @@
+@@ -2403,16 +2443,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
}
/*
-@@ -2447,6 +2492,7 @@
+@@ -2447,6 +2492,7 @@ void drain_local_pages(struct zone *zone)
drain_pages(cpu);
}
static void drain_local_pages_wq(struct work_struct *work)
{
/*
-@@ -2460,6 +2506,7 @@
+@@ -2460,6 +2506,7 @@ static void drain_local_pages_wq(struct work_struct *work)
drain_local_pages(NULL);
preempt_enable();
}
/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
-@@ -2526,7 +2573,14 @@
+@@ -2526,7 +2573,14 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
for_each_cpu(cpu, &cpus_with_pcps) {
struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
INIT_WORK(work, drain_local_pages_wq);
-@@ -2534,6 +2588,7 @@
+@@ -2534,6 +2588,7 @@ void drain_all_pages(struct zone *zone)
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(per_cpu_ptr(&pcpu_drain, cpu));
mutex_unlock(&pcpu_drain_mutex);
}
-@@ -2610,7 +2665,7 @@
+@@ -2610,7 +2665,7 @@ void free_hot_cold_page(struct page *page, bool cold)
migratetype = get_pfnblock_migratetype(page, pfn);
set_pcppage_migratetype(page, migratetype);
__count_vm_event(PGFREE);
/*
-@@ -2636,12 +2691,17 @@
+@@ -2636,12 +2691,17 @@ void free_hot_cold_page(struct page *page, bool cold)
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
}
/*
-@@ -2789,7 +2849,7 @@
+@@ -2789,7 +2849,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
struct page *page;
unsigned long flags;
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
-@@ -2797,7 +2857,7 @@
+@@ -2797,7 +2857,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
}
return page;
}
-@@ -2824,7 +2884,7 @@
+@@ -2824,7 +2884,7 @@ struct page *rmqueue(struct zone *preferred_zone,
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
do {
page = NULL;
-@@ -2844,14 +2904,14 @@
+@@ -2844,14 +2904,14 @@ struct page *rmqueue(struct zone *preferred_zone,
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
return NULL;
}
-@@ -6778,8 +6838,9 @@
+@@ -6778,8 +6838,9 @@ void __init free_area_init(unsigned long *zones_size)
static int page_alloc_cpu_dead(unsigned int cpu)
{
drain_pages(cpu);
/*
-@@ -7683,7 +7744,7 @@
+@@ -7683,7 +7744,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7692,7 +7753,7 @@
+@@ -7692,7 +7753,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/slab.h linux-4.14/mm/slab.h
---- linux-4.14.orig/mm/slab.h 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/mm/slab.h 2018-09-05 11:05:07.000000000 +0200
-@@ -451,7 +451,11 @@
+diff --git a/mm/slab.h b/mm/slab.h
+index 485d9fbb8802..f3b06c48bf39 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -451,7 +451,11 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
* The slab lists for all objects.
*/
struct kmem_cache_node {
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/slub.c linux-4.14/mm/slub.c
---- linux-4.14.orig/mm/slub.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/mm/slub.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1179,7 +1179,7 @@
+diff --git a/mm/slub.c b/mm/slub.c
+index 10e54c4acd19..13bb67ee32e8 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1179,7 +1179,7 @@ static noinline int free_debug_processing(
unsigned long uninitialized_var(flags);
int ret = 0;
slab_lock(page);
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1214,7 +1214,7 @@
+@@ -1214,7 +1214,7 @@ static noinline int free_debug_processing(
bulk_cnt, cnt);
slab_unlock(page);
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
return ret;
-@@ -1342,6 +1342,12 @@
+@@ -1342,6 +1342,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
#endif /* CONFIG_SLUB_DEBUG */
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
-@@ -1561,10 +1567,17 @@
+@@ -1561,10 +1567,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
void *start, *p;
int idx, order;
bool shuffle;
local_irq_enable();
flags |= s->allocflags;
-@@ -1623,7 +1636,7 @@
+@@ -1623,7 +1636,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
page->frozen = 1;
out:
local_irq_disable();
if (!page)
return NULL;
-@@ -1681,6 +1694,16 @@
+@@ -1681,6 +1694,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__free_pages(page, order);
}
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-@@ -1712,6 +1735,12 @@
+@@ -1712,6 +1735,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
}
call_rcu(head, rcu_free_slab);
} else
__free_slab(s, page);
}
-@@ -1819,7 +1848,7 @@
+@@ -1819,7 +1848,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
if (!n || !n->nr_partial)
return NULL;
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
-@@ -1844,7 +1873,7 @@
+@@ -1844,7 +1873,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
break;
}
return object;
}
-@@ -2090,7 +2119,7 @@
+@@ -2090,7 +2119,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
* that acquire_slab() will see a slab page that
* is frozen
*/
}
} else {
m = M_FULL;
-@@ -2101,7 +2130,7 @@
+@@ -2101,7 +2130,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
}
}
-@@ -2136,7 +2165,7 @@
+@@ -2136,7 +2165,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
goto redo;
if (lock)
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
-@@ -2171,10 +2200,10 @@
+@@ -2171,10 +2200,10 @@ static void unfreeze_partials(struct kmem_cache *s,
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
}
do {
-@@ -2203,7 +2232,7 @@
+@@ -2203,7 +2232,7 @@ static void unfreeze_partials(struct kmem_cache *s,
}
if (n)
while (discard_page) {
page = discard_page;
-@@ -2242,14 +2271,21 @@
+@@ -2242,14 +2271,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
oldpage = NULL;
pobjects = 0;
pages = 0;
-@@ -2319,7 +2355,22 @@
+@@ -2319,7 +2355,22 @@ static bool has_cpu_slab(int cpu, void *info)
static void flush_all(struct kmem_cache *s)
{
}
/*
-@@ -2374,10 +2425,10 @@
+@@ -2374,10 +2425,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
unsigned long x = 0;
struct page *page;
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2515,8 +2566,10 @@
+@@ -2515,8 +2566,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
* already disabled (which is the case for bulk allocation).
*/
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
void *freelist;
struct page *page;
-@@ -2572,6 +2625,13 @@
+@@ -2572,6 +2625,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
return freelist;
new_slab:
-@@ -2587,7 +2647,7 @@
+@@ -2587,7 +2647,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
if (unlikely(!freelist)) {
slab_out_of_memory(s, gfpflags, node);
}
page = c->page;
-@@ -2600,7 +2660,7 @@
+@@ -2600,7 +2660,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
goto new_slab; /* Slab failed checks. Next slab needed */
deactivate_slab(s, page, get_freepointer(s, freelist), c);
}
/*
-@@ -2612,6 +2672,7 @@
+@@ -2612,6 +2672,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
{
void *p;
unsigned long flags;
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
-@@ -2623,8 +2684,9 @@
+@@ -2623,8 +2684,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
c = this_cpu_ptr(s->cpu_slab);
#endif
return p;
}
-@@ -2810,7 +2872,7 @@
+@@ -2810,7 +2872,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
do {
if (unlikely(n)) {
n = NULL;
}
prior = page->freelist;
-@@ -2842,7 +2904,7 @@
+@@ -2842,7 +2904,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
}
}
-@@ -2884,7 +2946,7 @@
+@@ -2884,7 +2946,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
return;
slab_empty:
-@@ -2899,7 +2961,7 @@
+@@ -2899,7 +2961,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
remove_full(s, n, page);
}
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -3104,6 +3166,7 @@
+@@ -3104,6 +3166,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
void **p)
{
struct kmem_cache_cpu *c;
int i;
/* memcg and kmem_cache debug support */
-@@ -3127,7 +3190,7 @@
+@@ -3127,7 +3190,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
* of re-populating per CPU c->freelist
*/
p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
if (unlikely(!p[i]))
goto error;
-@@ -3139,6 +3202,7 @@
+@@ -3139,6 +3202,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
}
c->tid = next_tid(c->tid);
local_irq_enable();
/* Clear memory outside IRQ disabled fastpath loop */
if (unlikely(flags & __GFP_ZERO)) {
-@@ -3153,6 +3217,7 @@
+@@ -3153,6 +3217,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
return i;
error:
local_irq_enable();
slab_post_alloc_hook(s, flags, i, p);
__kmem_cache_free_bulk(s, i, p);
return 0;
-@@ -3286,7 +3351,7 @@
+@@ -3286,7 +3351,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3640,6 +3705,10 @@
+@@ -3640,6 +3705,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
void *addr = page_address(page);
void *p;
unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
-@@ -3660,6 +3729,7 @@
+@@ -3660,6 +3729,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
slab_unlock(page);
kfree(map);
#endif
}
/*
-@@ -3673,7 +3743,7 @@
+@@ -3673,7 +3743,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
struct page *page, *h;
BUG_ON(irqs_disabled());
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
remove_partial(n, page);
-@@ -3683,7 +3753,7 @@
+@@ -3683,7 +3753,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
list_for_each_entry_safe(page, h, &discard, lru)
discard_slab(s, page);
-@@ -3927,7 +3997,7 @@
+@@ -3927,7 +3997,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
/*
* Build lists of slabs to discard or promote.
-@@ -3958,7 +4028,7 @@
+@@ -3958,7 +4028,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4171,6 +4241,12 @@
+@@ -4171,6 +4241,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
if (debug_guardpage_minorder())
slub_max_order = 0;
-@@ -4379,7 +4455,7 @@
+@@ -4379,7 +4455,7 @@ static int validate_slab_node(struct kmem_cache *s,
struct page *page;
unsigned long flags;
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
-@@ -4401,7 +4477,7 @@
+@@ -4401,7 +4477,7 @@ static int validate_slab_node(struct kmem_cache *s,
s->name, count, atomic_long_read(&n->nr_slabs));
out:
return count;
}
-@@ -4589,12 +4665,12 @@
+@@ -4589,12 +4665,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
if (!atomic_long_read(&n->nr_slabs))
continue;
}
for (i = 0; i < t.count; i++) {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/swap.c linux-4.14/mm/swap.c
---- linux-4.14.orig/mm/swap.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/mm/swap.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/mm/swap.c b/mm/swap.c
+index a77d68f2c1b6..30d62efe001b 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
@@ -32,6 +32,7 @@
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
-@@ -50,6 +51,8 @@
+@@ -50,6 +51,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
#endif
/*
* This path almost never happens for VM activity - pages are normally
-@@ -252,11 +255,11 @@
+@@ -252,11 +255,11 @@ void rotate_reclaimable_page(struct page *page)
unsigned long flags;
get_page(page);
}
}
-@@ -306,12 +309,13 @@
+@@ -306,12 +309,13 @@ void activate_page(struct page *page)
{
page = compound_head(page);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
}
}
-@@ -338,7 +342,7 @@
+@@ -338,7 +342,7 @@ void activate_page(struct page *page)
static void __lru_cache_activate_page(struct page *page)
{
int i;
/*
-@@ -360,7 +364,7 @@
+@@ -360,7 +364,7 @@ static void __lru_cache_activate_page(struct page *page)
}
}
}
/*
-@@ -402,12 +406,12 @@
+@@ -402,12 +406,12 @@ EXPORT_SYMBOL(mark_page_accessed);
static void __lru_cache_add(struct page *page)
{
}
/**
-@@ -613,9 +617,15 @@
+@@ -613,9 +617,15 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -647,11 +657,12 @@
+@@ -647,11 +657,12 @@ void deactivate_file_page(struct page *page)
return;
if (likely(get_page_unless_zero(page))) {
}
}
-@@ -666,21 +677,32 @@
+@@ -666,21 +677,32 @@ void mark_page_lazyfree(struct page *page)
{
if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
!PageSwapCache(page) && !PageUnevictable(page)) {
- put_cpu();
+ lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
+ local_unlock_cpu(swapvec_lock);
- }
-
++}
++
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+ local_lock_on(swapvec_lock, cpu);
+ lru_add_drain_cpu(cpu);
+ local_unlock_on(swapvec_lock, cpu);
-+}
-+
+ }
+
+#else
+
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
lru_add_drain();
-@@ -688,6 +710,16 @@
+@@ -688,6 +710,16 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
void lru_add_drain_all_cpuslocked(void)
{
static DEFINE_MUTEX(lock);
-@@ -705,21 +737,19 @@
+@@ -705,21 +737,19 @@ void lru_add_drain_all_cpuslocked(void)
cpumask_clear(&has_work);
for_each_online_cpu(cpu) {
mutex_unlock(&lock);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/truncate.c linux-4.14/mm/truncate.c
---- linux-4.14.orig/mm/truncate.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/mm/truncate.c 2018-09-05 11:05:07.000000000 +0200
-@@ -41,8 +41,10 @@
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 2330223841fb..d0c8e6c8fef5 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -41,8 +41,10 @@ static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
goto unlock;
if (*slot != entry)
goto unlock;
mapping->nrexceptional--;
unlock:
spin_unlock_irq(&mapping->tree_lock);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/vmalloc.c linux-4.14/mm/vmalloc.c
---- linux-4.14.orig/mm/vmalloc.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/mm/vmalloc.c 2018-09-05 11:05:07.000000000 +0200
-@@ -865,7 +865,7 @@
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 9ff21a12ea00..95c83b291548 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -865,7 +865,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
void *vaddr;
node = numa_node_id();
-@@ -908,11 +908,12 @@
+@@ -908,11 +908,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
BUG_ON(err);
radix_tree_preload_end();
return vaddr;
}
-@@ -981,6 +982,7 @@
+@@ -981,6 +982,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -995,7 +997,8 @@
+@@ -995,7 +997,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
order = get_order(size);
rcu_read_lock();
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -1018,7 +1021,7 @@
+@@ -1018,7 +1021,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
break;
}
rcu_read_unlock();
/* Allocate new block if nothing was found */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/vmstat.c linux-4.14/mm/vmstat.c
---- linux-4.14.orig/mm/vmstat.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/mm/vmstat.c 2018-09-05 11:05:07.000000000 +0200
-@@ -249,6 +249,7 @@
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 4bb13e72ac97..0d17b8faeac7 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -249,6 +249,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
long x;
long t;
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
-@@ -258,6 +259,7 @@
+@@ -258,6 +259,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
x = 0;
}
__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
-@@ -269,6 +271,7 @@
+@@ -269,6 +271,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
long x;
long t;
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
-@@ -278,6 +281,7 @@
+@@ -278,6 +281,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
x = 0;
}
__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_node_page_state);
-@@ -310,6 +314,7 @@
+@@ -310,6 +314,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
-@@ -318,6 +323,7 @@
+@@ -318,6 +323,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
}
}
void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
-@@ -326,6 +332,7 @@
+@@ -326,6 +332,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
s8 __percpu *p = pcp->vm_node_stat_diff + item;
s8 v, t;
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
-@@ -334,6 +341,7 @@
+@@ -334,6 +341,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
node_page_state_add(v + overstep, pgdat, item);
__this_cpu_write(*p, -overstep);
}
}
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-@@ -354,6 +362,7 @@
+@@ -354,6 +362,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
-@@ -362,6 +371,7 @@
+@@ -362,6 +371,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
}
}
void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
-@@ -370,6 +380,7 @@
+@@ -370,6 +380,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
s8 __percpu *p = pcp->vm_node_stat_diff + item;
s8 v, t;
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
-@@ -378,6 +389,7 @@
+@@ -378,6 +389,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
node_page_state_add(v - overstep, pgdat, item);
__this_cpu_write(*p, overstep);
}
}
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/workingset.c linux-4.14/mm/workingset.c
---- linux-4.14.orig/mm/workingset.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/mm/workingset.c 2018-09-05 11:05:07.000000000 +0200
-@@ -338,9 +338,10 @@
+diff --git a/mm/workingset.c b/mm/workingset.c
+index b997c9de28f6..e252cc69a3d4 100644
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -338,9 +338,10 @@ void workingset_activation(struct page *page)
* point where they would still be useful.
*/
{
struct address_space *mapping = private;
-@@ -358,10 +359,10 @@
+@@ -358,10 +359,10 @@ void workingset_update_node(struct radix_tree_node *node, void *private)
*/
if (node->count && node->count == node->exceptional) {
if (list_empty(&node->private_list))
}
}
-@@ -373,9 +374,9 @@
+@@ -373,9 +374,9 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
unsigned long cache;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
/*
* Approximate a reasonable limit for the radix tree nodes
-@@ -475,15 +476,15 @@
+@@ -475,15 +476,15 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
goto out_invalid;
inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
__radix_tree_delete_node(&mapping->page_tree, node,
spin_lock(lru_lock);
return ret;
}
-@@ -494,9 +495,9 @@
+@@ -494,9 +495,9 @@ static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
unsigned long ret;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
return ret;
}
-@@ -534,7 +535,7 @@
+@@ -534,7 +535,7 @@ static int __init workingset_init(void)
pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
timestamp_bits, max_order, bucket_order);
if (ret)
goto err;
ret = register_shrinker(&workingset_shadow_shrinker);
-@@ -542,7 +543,7 @@
+@@ -542,7 +543,7 @@ static int __init workingset_init(void)
goto err_list_lru;
return 0;
err_list_lru:
err:
return ret;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/mm/zsmalloc.c linux-4.14/mm/zsmalloc.c
---- linux-4.14.orig/mm/zsmalloc.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/mm/zsmalloc.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 685049a9048d..8d1489fd1dbc 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
@@ -53,6 +53,7 @@
#include <linux/mount.h>
#include <linux/migrate.h>
/*
* Object location (<PFN>, <obj_idx>) is encoded as
* as single (unsigned long) handle value.
-@@ -320,7 +334,7 @@
+@@ -320,7 +334,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
static int create_cache(struct zs_pool *pool)
{
0, 0, NULL);
if (!pool->handle_cachep)
return 1;
-@@ -344,9 +358,26 @@
+@@ -344,10 +358,27 @@ static void destroy_cache(struct zs_pool *pool)
static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
+ }
+#endif
+ return (unsigned long)p;
-+}
-+
+ }
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
+{
+ return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1));
- }
++}
+#endif
-
++
static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
{
-@@ -366,12 +397,18 @@
+ kmem_cache_free(pool->handle_cachep, (void *)handle);
+@@ -366,12 +397,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
static void record_obj(unsigned long handle, unsigned long obj)
{
}
/* zpool driver */
-@@ -460,6 +497,7 @@
+@@ -460,6 +497,7 @@ MODULE_ALIAS("zpool-zsmalloc");
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
static bool is_zspage_isolated(struct zspage *zspage)
{
-@@ -898,7 +936,13 @@
+@@ -898,7 +936,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
static unsigned long handle_to_obj(unsigned long handle)
{
}
static unsigned long obj_to_head(struct page *page, void *obj)
-@@ -912,22 +956,46 @@
+@@ -912,22 +956,46 @@ static unsigned long obj_to_head(struct page *page, void *obj)
static inline int testpin_tag(unsigned long handle)
{
}
static void reset_page(struct page *page)
-@@ -1365,7 +1433,7 @@
+@@ -1365,7 +1433,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
class = pool->size_class[class_idx];
off = (class->size * obj_idx) & ~PAGE_MASK;
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
-@@ -1419,7 +1487,7 @@
+@@ -1419,7 +1487,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
__zs_unmap_object(area, pages, off, class->size);
}
migrate_read_unlock(zspage);
unpin_tag(handle);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/9p/trans_xen.c linux-4.14/net/9p/trans_xen.c
---- linux-4.14.orig/net/9p/trans_xen.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/9p/trans_xen.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index c10bdf63eae7..84a49f2bcfbc 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
@@ -38,7 +38,6 @@
#include <linux/module.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/bluetooth/hci_sock.c linux-4.14/net/bluetooth/hci_sock.c
---- linux-4.14.orig/net/bluetooth/hci_sock.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/net/bluetooth/hci_sock.c 2018-09-05 11:05:07.000000000 +0200
-@@ -251,15 +251,13 @@
+diff --git a/net/Kconfig b/net/Kconfig
+index 9dba2715919d..9c7b38379c09 100644
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -272,7 +272,7 @@ config CGROUP_NET_CLASSID
+
+ config NET_RX_BUSY_POLL
+ bool
+- default y
++ default y if !PREEMPT_RT_FULL
+
+ config BQL
+ bool
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 65d734c165bd..923e9a271872 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -251,15 +251,13 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
}
/* Send frame to sockets with specific channel */
sk_for_each(sk, &hci_sk_list.head) {
struct sk_buff *nskb;
-@@ -285,6 +283,13 @@
+@@ -285,6 +283,13 @@ void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
kfree_skb(nskb);
}
read_unlock(&hci_sk_list.lock);
}
-@@ -388,8 +393,8 @@
+@@ -388,8 +393,8 @@ void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
hdr->index = index;
hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
kfree_skb(skb);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/can/bcm.c linux-4.14/net/can/bcm.c
---- linux-4.14.orig/net/can/bcm.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/net/can/bcm.c 2018-09-05 11:05:07.000000000 +0200
-@@ -102,7 +102,6 @@
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 13690334efa3..9cc67ac257f1 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -102,7 +102,6 @@ struct bcm_op {
unsigned long frames_abs, frames_filtered;
struct bcm_timeval ival1, ival2;
struct hrtimer timer, thrtimer;
ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
int rx_ifindex;
int cfsiz;
-@@ -364,25 +363,34 @@
+@@ -364,25 +363,34 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
}
}
op->count--;
if (!op->count && (op->flags & TX_COUNTEVT)) {
-@@ -399,22 +407,12 @@
+@@ -399,22 +407,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
}
bcm_can_tx(op);
}
/*
-@@ -480,7 +478,7 @@
+@@ -480,7 +478,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
/* do not send the saved data - only start throttle timer */
hrtimer_start(&op->thrtimer,
ktime_add(op->kt_lastmsg, op->kt_ival2),
return;
}
-@@ -539,14 +537,21 @@
+@@ -539,14 +537,21 @@ static void bcm_rx_starttimer(struct bcm_op *op)
return;
if (op->kt_ival1)
/* create notification to user */
msg_head.opcode = RX_TIMEOUT;
msg_head.flags = op->flags;
-@@ -557,25 +562,6 @@
+@@ -557,25 +562,6 @@ static void bcm_rx_timeout_tsklet(unsigned long data)
msg_head.nframes = 0;
bcm_send_to_user(op, &msg_head, NULL, 0);
return HRTIMER_NORESTART;
}
-@@ -583,14 +569,12 @@
+@@ -583,14 +569,12 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
/*
* bcm_rx_do_flush - helper for bcm_rx_thr_flush
*/
return 1;
}
return 0;
-@@ -598,11 +582,8 @@
+@@ -598,11 +582,8 @@ static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
/*
* bcm_rx_thr_flush - Check for throttled data and send it to the userspace
{
int updated = 0;
-@@ -611,24 +592,16 @@
+@@ -611,24 +592,16 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update)
/* for MUX filter we start at index 1 */
for (i = 1; i < op->nframes; i++)
/*
* bcm_rx_thr_handler - the time for blocked content updates is over now:
* Check for throttled data and send it to the userspace
-@@ -637,9 +610,7 @@
+@@ -637,9 +610,7 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
return HRTIMER_RESTART;
} else {
-@@ -735,23 +706,8 @@
+@@ -735,23 +706,8 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
static void bcm_remove_op(struct bcm_op *op)
{
if ((op->frames) && (op->frames != &op->sframe))
kfree(op->frames);
-@@ -979,15 +935,13 @@
+@@ -979,15 +935,13 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
op->ifindex = ifindex;
/* initialize uninitialized (kzalloc) structure */
/* add this bcm_op to the list of the tx_ops */
list_add(&op->list, &bo->tx_ops);
-@@ -1150,20 +1104,14 @@
+@@ -1150,20 +1104,14 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
op->rx_ifindex = ifindex;
/* initialize uninitialized (kzalloc) structure */
/* add this bcm_op to the list of the rx_ops */
list_add(&op->list, &bo->rx_ops);
-@@ -1209,12 +1157,12 @@
+@@ -1209,12 +1157,12 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
*/
op->kt_lastmsg = 0;
hrtimer_cancel(&op->thrtimer);
}
/* now we can register for can_ids, if we added a new bcm_op */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/core/dev.c linux-4.14/net/core/dev.c
---- linux-4.14.orig/net/core/dev.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/core/dev.c 2018-09-05 11:05:07.000000000 +0200
-@@ -195,6 +195,7 @@
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 85f4a1047707..a8ab119258a9 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -195,6 +195,7 @@ static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
-@@ -217,14 +218,14 @@
+@@ -217,14 +218,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
#endif
}
-@@ -920,7 +921,8 @@
+@@ -920,7 +921,8 @@ int netdev_get_name(struct net *net, char *name, int ifindex)
strcpy(name, dev->name);
rcu_read_unlock();
if (read_seqcount_retry(&devnet_rename_seq, seq)) {
goto retry;
}
-@@ -1189,20 +1191,17 @@
+@@ -1189,20 +1191,17 @@ int dev_change_name(struct net_device *dev, const char *newname)
if (dev->flags & IFF_UP)
return -EBUSY;
if (oldname[0] && !strchr(oldname, '%'))
netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1215,11 +1214,12 @@
+@@ -1215,11 +1214,12 @@ int dev_change_name(struct net_device *dev, const char *newname)
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
netdev_adjacent_rename_links(dev, oldname);
-@@ -1240,7 +1240,8 @@
+@@ -1240,7 +1240,8 @@ int dev_change_name(struct net_device *dev, const char *newname)
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
-@@ -1253,6 +1254,11 @@
+@@ -1253,6 +1254,11 @@ int dev_change_name(struct net_device *dev, const char *newname)
}
return err;
}
/**
-@@ -2438,6 +2444,7 @@
+@@ -2438,6 +2444,7 @@ static void __netif_reschedule(struct Qdisc *q)
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
void __netif_schedule(struct Qdisc *q)
-@@ -2500,6 +2507,7 @@
+@@ -2500,6 +2507,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -3175,7 +3183,11 @@
+@@ -3175,7 +3183,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
if (unlikely(contended))
spin_lock(&q->busylock);
-@@ -3246,8 +3258,10 @@
+@@ -3246,8 +3258,10 @@ static void skb_update_prio(struct sk_buff *skb)
#define skb_update_prio(skb)
#endif
/**
* dev_loopback_xmit - loop back @skb
-@@ -3487,9 +3501,12 @@
+@@ -3487,9 +3501,12 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */
goto recursion_alert;
skb = validate_xmit_skb(skb, dev);
-@@ -3499,9 +3516,9 @@
+@@ -3499,9 +3516,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
-@@ -3882,6 +3899,7 @@
+@@ -3882,6 +3899,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
rps_unlock(sd);
local_irq_restore(flags);
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -4034,7 +4052,7 @@
+@@ -4034,7 +4052,7 @@ static int netif_rx_internal(struct sk_buff *skb)
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -4044,14 +4062,14 @@
+@@ -4044,14 +4062,14 @@ static int netif_rx_internal(struct sk_buff *skb)
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
}
return ret;
}
-@@ -4085,11 +4103,9 @@
+@@ -4085,11 +4103,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
return err;
}
-@@ -4607,7 +4623,7 @@
+@@ -4607,7 +4623,7 @@ static void flush_backlog(struct work_struct *work)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
input_queue_head_incr(sd);
}
}
-@@ -4617,11 +4633,14 @@
+@@ -4617,11 +4633,14 @@ static void flush_backlog(struct work_struct *work)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
}
static void flush_all_backlogs(void)
-@@ -5131,12 +5150,14 @@
+@@ -5131,12 +5150,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
sd->rps_ipi_list = NULL;
local_irq_enable();
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -5166,7 +5187,9 @@
+@@ -5166,7 +5187,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
while (again) {
struct sk_buff *skb;
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -5174,9 +5197,9 @@
+@@ -5174,9 +5197,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
if (++work >= quota)
return work;
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -5214,6 +5237,7 @@
+@@ -5214,6 +5237,7 @@ void __napi_schedule(struct napi_struct *n)
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -5250,6 +5274,7 @@
+@@ -5250,6 +5274,7 @@ bool napi_schedule_prep(struct napi_struct *n)
}
EXPORT_SYMBOL(napi_schedule_prep);
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
-@@ -5261,6 +5286,7 @@
+@@ -5261,6 +5286,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
bool napi_complete_done(struct napi_struct *n, int work_done)
{
-@@ -5615,13 +5641,21 @@
+@@ -5615,13 +5641,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
unsigned long time_limit = jiffies +
usecs_to_jiffies(netdev_budget_usecs);
int budget = netdev_budget;
for (;;) {
struct napi_struct *n;
-@@ -5651,7 +5685,7 @@
+@@ -5651,7 +5685,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
net_rps_action_and_irq_enable(sd);
out:
-@@ -7478,7 +7512,7 @@
+@@ -7478,7 +7512,7 @@ static void netdev_init_one_queue(struct net_device *dev,
/* Initialize queue lock */
spin_lock_init(&queue->_xmit_lock);
netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
queue->dev = dev;
#ifdef CONFIG_BQL
-@@ -8418,6 +8452,7 @@
+@@ -8418,6 +8452,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
#ifdef CONFIG_RPS
remsd = oldsd->rps_ipi_list;
-@@ -8431,10 +8466,13 @@
+@@ -8431,10 +8466,13 @@ static int dev_cpu_dead(unsigned int oldcpu)
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
return 0;
}
-@@ -8738,8 +8776,9 @@
+@@ -8738,8 +8776,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/core/filter.c linux-4.14/net/core/filter.c
---- linux-4.14.orig/net/core/filter.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/core/filter.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1696,7 +1696,7 @@
+diff --git a/net/core/filter.c b/net/core/filter.c
+index d5158a10ac8f..ad96ec78f7b8 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1696,7 +1696,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
int ret;
net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
kfree_skb(skb);
return -ENETDOWN;
-@@ -1704,9 +1704,9 @@
+@@ -1704,9 +1704,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
skb->dev = dev;
return ret;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/core/gen_estimator.c linux-4.14/net/core/gen_estimator.c
---- linux-4.14.orig/net/core/gen_estimator.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/core/gen_estimator.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
+index 7f980bd7426e..7250106015ef 100644
+--- a/net/core/gen_estimator.c
++++ b/net/core/gen_estimator.c
@@ -46,7 +46,7 @@
struct net_rate_estimator {
struct gnet_stats_basic_packed *bstats;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
u8 ewma_log;
u8 intvl_log; /* period : (250ms << intvl_log) */
-@@ -129,7 +129,7 @@
+@@ -129,7 +129,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *stats_lock,
struct nlattr *opt)
{
struct gnet_estimator *parm = nla_data(opt);
-@@ -222,7 +222,7 @@
+@@ -222,7 +222,7 @@ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *stats_lock,
{
return gen_new_estimator(bstats, cpu_bstats, rate_est,
stats_lock, running, opt);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/core/gen_stats.c linux-4.14/net/core/gen_stats.c
---- linux-4.14.orig/net/core/gen_stats.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/core/gen_stats.c 2018-09-05 11:05:07.000000000 +0200
-@@ -142,7 +142,7 @@
+diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
+index 441c04adedba..07f9a6a1f8e4 100644
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -142,7 +142,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
}
void
struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b)
-@@ -155,10 +155,10 @@
+@@ -155,10 +155,10 @@ __gnet_stats_copy_basic(const seqcount_t *running,
}
do {
if (running)
}
EXPORT_SYMBOL(__gnet_stats_copy_basic);
-@@ -176,7 +176,7 @@
+@@ -176,7 +176,7 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
* if the room in the socket buffer was not sufficient.
*/
int
struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/core/pktgen.c linux-4.14/net/core/pktgen.c
---- linux-4.14.orig/net/core/pktgen.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/net/core/pktgen.c 2018-09-05 11:05:07.000000000 +0200
-@@ -2252,7 +2252,8 @@
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 6e1e10ff433a..c1ae4075e0ed 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2252,7 +2252,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
s64 remaining;
struct hrtimer_sleeper t;
hrtimer_set_expires(&t.timer, spin_until);
remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
-@@ -2267,7 +2268,6 @@
+@@ -2267,7 +2268,6 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
} while (ktime_compare(end_time, spin_until) < 0);
} else {
/* see do_nanosleep */
do {
set_current_state(TASK_INTERRUPTIBLE);
hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/core/skbuff.c linux-4.14/net/core/skbuff.c
---- linux-4.14.orig/net/core/skbuff.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/core/skbuff.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 168a3e8883d4..0f512abfe4f2 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
@@ -63,6 +63,7 @@
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <net/protocol.h>
#include <net/dst.h>
-@@ -330,6 +331,8 @@
+@@ -330,6 +331,8 @@ struct napi_alloc_cache {
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
-@@ -337,10 +340,10 @@
+@@ -337,10 +340,10 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
unsigned long flags;
void *data;
return data;
}
-@@ -359,9 +362,13 @@
+@@ -359,9 +362,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
}
void *napi_alloc_frag(unsigned int fragsz)
-@@ -408,13 +415,13 @@
+@@ -408,13 +415,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
if (unlikely(!data))
return NULL;
-@@ -455,9 +462,10 @@
+@@ -455,9 +462,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
gfp_t gfp_mask)
{
len += NET_SKB_PAD + NET_IP_ALIGN;
-@@ -475,7 +483,10 @@
+@@ -475,7 +483,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
if (unlikely(!data))
return NULL;
-@@ -486,7 +497,7 @@
+@@ -486,7 +497,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
}
/* use OR instead of assignment to avoid clearing of bits in mask */
skb->pfmemalloc = 1;
skb->head_frag = 1;
-@@ -718,23 +729,26 @@
+@@ -718,23 +729,26 @@ void __consume_stateless_skb(struct sk_buff *skb)
void __kfree_skb_flush(void)
{
/* record skb to CPU local list */
nc->skb_cache[nc->skb_count++] = skb;
-@@ -749,6 +763,7 @@
+@@ -749,6 +763,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb)
nc->skb_cache);
nc->skb_count = 0;
}
}
void __kfree_skb_defer(struct sk_buff *skb)
{
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/core/sock.c linux-4.14/net/core/sock.c
---- linux-4.14.orig/net/core/sock.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/core/sock.c 2018-09-05 11:05:07.000000000 +0200
-@@ -2757,12 +2757,11 @@
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 68d08ed5521e..ee242ff5d4b1 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2757,12 +2757,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
}
EXPORT_SYMBOL(lock_sock_nested);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/ipv4/icmp.c linux-4.14/net/ipv4/icmp.c
---- linux-4.14.orig/net/ipv4/icmp.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/ipv4/icmp.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 3c1570d3e22f..0310ea93f877 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
@@ -77,6 +77,7 @@
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
-@@ -204,6 +205,8 @@
+@@ -204,6 +205,8 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
*
* On SMP we have one ICMP socket per-cpu.
*/
static struct sock *icmp_sk(struct net *net)
{
return *this_cpu_ptr(net->ipv4.icmp_sk);
-@@ -214,12 +217,16 @@
+@@ -214,12 +217,16 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
{
struct sock *sk;
return NULL;
}
return sk;
-@@ -228,6 +235,7 @@
+@@ -228,6 +235,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
static inline void icmp_xmit_unlock(struct sock *sk)
{
spin_unlock(&sk->sk_lock.slock);
}
int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/ipv4/tcp_ipv4.c linux-4.14/net/ipv4/tcp_ipv4.c
---- linux-4.14.orig/net/ipv4/tcp_ipv4.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/ipv4/tcp_ipv4.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 0e1a670dabd9..ca4507290102 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
@@ -62,6 +62,7 @@
#include <linux/init.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
-@@ -580,6 +581,7 @@
+@@ -580,6 +581,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_v4_send_check);
/*
* This routine will send an RST to the other tcp.
*
-@@ -710,6 +712,7 @@
+@@ -710,6 +712,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
arg.tos = ip_hdr(skb)->tos;
arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
local_bh_disable();
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-@@ -717,6 +720,7 @@
+@@ -717,6 +720,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
local_bh_enable();
#ifdef CONFIG_TCP_MD5SIG
-@@ -796,12 +800,14 @@
+@@ -796,12 +800,14 @@ static void tcp_v4_send_ack(const struct sock *sk,
arg.tos = tos;
arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
local_bh_disable();
local_bh_enable();
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/Kconfig linux-4.14/net/Kconfig
---- linux-4.14.orig/net/Kconfig 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/net/Kconfig 2018-09-05 11:05:07.000000000 +0200
-@@ -272,7 +272,7 @@
-
- config NET_RX_BUSY_POLL
- bool
-- default y
-+ default y if !PREEMPT_RT_FULL
-
- config BQL
- bool
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/mac80211/rx.c linux-4.14/net/mac80211/rx.c
---- linux-4.14.orig/net/mac80211/rx.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/mac80211/rx.c 2018-09-05 11:05:07.000000000 +0200
-@@ -4252,7 +4252,7 @@
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index dddd498e1338..8f39b8162df8 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4252,7 +4252,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
if (WARN_ON(status->band >= NUM_NL80211_BANDS))
goto drop;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/netfilter/core.c linux-4.14/net/netfilter/core.c
---- linux-4.14.orig/net/netfilter/core.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/net/netfilter/core.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index 52cd2901a097..c63e937b6676 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
@@ -21,6 +21,7 @@
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
static DEFINE_MUTEX(afinfo_mutex);
const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/packet/af_packet.c linux-4.14/net/packet/af_packet.c
---- linux-4.14.orig/net/packet/af_packet.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/packet/af_packet.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 8833a58ca3ee..1137bf87f944 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
@@ -63,6 +63,7 @@
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-@@ -707,7 +708,7 @@
+@@ -707,7 +708,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
}
}
-@@ -969,7 +970,7 @@
+@@ -969,7 +970,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
}
}
prb_close_block(pkc, pbd, po, status);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/rds/ib_rdma.c linux-4.14/net/rds/ib_rdma.c
---- linux-4.14.orig/net/rds/ib_rdma.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/net/rds/ib_rdma.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
+index 9a3c54e659e9..2a95f1d587ac 100644
+--- a/net/rds/ib_rdma.c
++++ b/net/rds/ib_rdma.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/rculist.h>
#include "rds_single_path.h"
#include "ib_mr.h"
-@@ -210,7 +211,7 @@
+@@ -210,7 +211,7 @@ static inline void wait_clean_list_grace(void)
for_each_online_cpu(cpu) {
flag = &per_cpu(clean_list_grace, cpu);
while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
}
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/rxrpc/security.c linux-4.14/net/rxrpc/security.c
---- linux-4.14.orig/net/rxrpc/security.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/net/rxrpc/security.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
+index e9f428351293..c4479afe8ae7 100644
+--- a/net/rxrpc/security.c
++++ b/net/rxrpc/security.c
@@ -19,9 +19,6 @@
#include <keys/rxrpc-type.h>
#include "ar-internal.h"
static const struct rxrpc_security *rxrpc_security_types[] = {
[RXRPC_SECURITY_NONE] = &rxrpc_no_security,
#ifdef CONFIG_RXKAD
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/sched/sch_api.c linux-4.14/net/sched/sch_api.c
---- linux-4.14.orig/net/sched/sch_api.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/net/sched/sch_api.c 2018-09-05 11:05:07.000000000 +0200
-@@ -1081,7 +1081,7 @@
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 22bc6fc48311..131aac4cf2e0 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1081,7 +1081,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
err = -EOPNOTSUPP;
if (sch->flags & TCQ_F_MQROOT)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/sched/sch_generic.c linux-4.14/net/sched/sch_generic.c
---- linux-4.14.orig/net/sched/sch_generic.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/sched/sch_generic.c 2018-09-05 11:05:07.000000000 +0200
-@@ -429,7 +429,11 @@
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 79549baf5804..341f7895659c 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -429,7 +429,11 @@ struct Qdisc noop_qdisc = {
.ops = &noop_qdisc_ops,
.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
.dev_queue = &noop_netdev_queue,
.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);
-@@ -628,9 +632,17 @@
+@@ -628,9 +632,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
lockdep_set_class(&sch->busylock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
sch->ops = ops;
sch->enqueue = ops->enqueue;
-@@ -933,7 +945,7 @@
+@@ -933,7 +945,7 @@ void dev_deactivate_many(struct list_head *head)
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list) {
while (some_qdisc_is_busy(dev))
/* The new qdisc is assigned at this point so we can safely
* unwind stale skb lists and qdisc statistics
*/
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/sunrpc/svc_xprt.c linux-4.14/net/sunrpc/svc_xprt.c
---- linux-4.14.orig/net/sunrpc/svc_xprt.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/net/sunrpc/svc_xprt.c 2018-09-05 11:05:07.000000000 +0200
-@@ -396,7 +396,7 @@
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index d16a8b423c20..cedaf909eb97 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -396,7 +396,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
goto out;
}
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
atomic_long_inc(&pool->sp_stats.packets);
-@@ -432,7 +432,7 @@
+@@ -432,7 +432,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
atomic_long_inc(&pool->sp_stats.threads_woken);
wake_up_process(rqstp->rq_task);
goto out;
}
rcu_read_unlock();
-@@ -453,7 +453,7 @@
+@@ -453,7 +453,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
goto redo_search;
}
rqstp = NULL;
out:
trace_svc_xprt_do_enqueue(xprt, rqstp);
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/net/xfrm/xfrm_state.c linux-4.14/net/xfrm/xfrm_state.c
---- linux-4.14.orig/net/xfrm/xfrm_state.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/net/xfrm/xfrm_state.c 2018-09-05 11:05:07.000000000 +0200
-@@ -427,7 +427,7 @@
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 6c4ec69e11a0..77f52dc790ec 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -427,7 +427,7 @@ static void xfrm_put_mode(struct xfrm_mode *mode)
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
del_timer_sync(&x->rtimer);
kfree(x->aead);
kfree(x->aalg);
-@@ -472,8 +472,8 @@
+@@ -472,8 +472,8 @@ static void xfrm_state_gc_task(struct work_struct *work)
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
unsigned long now = get_seconds();
long next = LONG_MAX;
int warn = 0;
-@@ -537,7 +537,8 @@
+@@ -537,7 +537,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
km_state_expired(x, 0, 0);
resched:
if (next != LONG_MAX) {
}
goto out;
-@@ -554,7 +555,7 @@
+@@ -554,7 +555,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
out:
spin_unlock(&x->lock);
}
static void xfrm_replay_timer_handler(unsigned long data);
-@@ -573,8 +574,8 @@
+@@ -573,8 +574,8 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
INIT_HLIST_NODE(&x->bydst);
INIT_HLIST_NODE(&x->bysrc);
INIT_HLIST_NODE(&x->byspi);
setup_timer(&x->rtimer, xfrm_replay_timer_handler,
(unsigned long)x);
x->curlft.add_time = get_seconds();
-@@ -1031,7 +1032,9 @@
+@@ -1031,7 +1032,9 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
}
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
-@@ -1142,7 +1145,7 @@
+@@ -1142,7 +1145,7 @@ static void __xfrm_state_insert(struct xfrm_state *x)
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
}
if (x->replay_maxage)
mod_timer(&x->rtimer, jiffies + x->replay_maxage);
-@@ -1246,7 +1249,9 @@
+@@ -1246,7 +1249,9 @@ static struct xfrm_state *__find_acq_core(struct net *net,
x->mark.m = m->m;
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
xfrm_state_hold(x);
list_add(&x->km.all, &net->xfrm.state_all);
hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
h = xfrm_src_hash(net, daddr, saddr, family);
-@@ -1546,7 +1551,8 @@
+@@ -1546,7 +1551,8 @@ int xfrm_state_update(struct xfrm_state *x)
memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
x1->km.dying = 0;
if (x1->curlft.use_time)
xfrm_state_check_expire(x1);
-@@ -1570,7 +1576,7 @@
+@@ -1570,7 +1576,7 @@ int xfrm_state_check_expire(struct xfrm_state *x)
if (x->curlft.bytes >= x->lft.hard_byte_limit ||
x->curlft.packets >= x->lft.hard_packet_limit) {
x->km.state = XFRM_STATE_EXPIRED;
return -EINVAL;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/samples/trace_events/trace-events-sample.c linux-4.14/samples/trace_events/trace-events-sample.c
---- linux-4.14.orig/samples/trace_events/trace-events-sample.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/samples/trace_events/trace-events-sample.c 2018-09-05 11:05:07.000000000 +0200
-@@ -33,7 +33,7 @@
+diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
+index 5522692100ba..8b4be8e1802a 100644
+--- a/samples/trace_events/trace-events-sample.c
++++ b/samples/trace_events/trace-events-sample.c
+@@ -33,7 +33,7 @@ static void simple_thread_func(int cnt)
/* Silly tracepoints */
trace_foo_bar("hello", cnt, array, random_strings[len],
trace_foo_with_template_simple("HELLO", cnt);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/scripts/mkcompile_h linux-4.14/scripts/mkcompile_h
---- linux-4.14.orig/scripts/mkcompile_h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/scripts/mkcompile_h 2018-09-05 11:05:07.000000000 +0200
-@@ -5,7 +5,8 @@
+diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
+index 959199c3147e..3e68004ed345 100755
+--- a/scripts/mkcompile_h
++++ b/scripts/mkcompile_h
+@@ -5,7 +5,8 @@ TARGET=$1
ARCH=$2
SMP=$3
PREEMPT=$4
vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
-@@ -58,6 +59,7 @@
+@@ -58,6 +59,7 @@ UTS_VERSION="#$VERSION"
CONFIG_FLAGS=""
if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
# Truncate to maximum length
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/security/apparmor/include/path.h linux-4.14/security/apparmor/include/path.h
---- linux-4.14.orig/security/apparmor/include/path.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/security/apparmor/include/path.h 2018-09-05 11:05:07.000000000 +0200
-@@ -39,9 +39,10 @@
+diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h
+index 05fb3305671e..b26c16b02662 100644
+--- a/security/apparmor/include/path.h
++++ b/security/apparmor/include/path.h
+@@ -39,9 +39,10 @@ struct aa_buffers {
};
#include <linux/percpu.h>
#define COUNT_ARGS(X...) COUNT_ARGS_HELPER(, ##X, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#define COUNT_ARGS_HELPER(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, n, X...) n
-@@ -55,12 +56,24 @@
+@@ -55,12 +56,24 @@ DECLARE_PER_CPU(struct aa_buffers, aa_buffers);
#define for_each_cpu_buffer(I) for ((I) = 0; (I) < MAX_PATH_BUFFERS; (I)++)
#define __get_buffer(N) ({ \
struct aa_buffers *__cpu_var; \
AA_BUG_PREEMPT_ENABLED("__get_buffer without preempt disabled"); \
-@@ -73,14 +86,14 @@
+@@ -73,14 +86,14 @@ DECLARE_PER_CPU(struct aa_buffers, aa_buffers);
#define get_buffers(X...) \
do { \
} while (0)
#endif /* __AA_PATH_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/security/apparmor/lsm.c linux-4.14/security/apparmor/lsm.c
---- linux-4.14.orig/security/apparmor/lsm.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/security/apparmor/lsm.c 2018-09-05 11:05:07.000000000 +0200
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index 1346ee5be04f..aa7e4dee107b 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
@@ -44,7 +44,7 @@
int apparmor_initialized;
/*
* LSM hook functions
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/sound/core/pcm_native.c linux-4.14/sound/core/pcm_native.c
---- linux-4.14.orig/sound/core/pcm_native.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/sound/core/pcm_native.c 2018-09-05 11:05:07.000000000 +0200
-@@ -148,7 +148,7 @@
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index ab3bf36786b6..f0bb7c9aa4be 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -148,7 +148,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
if (!substream->pcm->nonatomic)
snd_pcm_stream_lock(substream);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
-@@ -163,7 +163,7 @@
+@@ -163,7 +163,7 @@ void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
{
snd_pcm_stream_unlock(substream);
if (!substream->pcm->nonatomic)
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
-@@ -171,7 +171,7 @@
+@@ -171,7 +171,7 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
{
unsigned long flags = 0;
if (!substream->pcm->nonatomic)
snd_pcm_stream_lock(substream);
return flags;
}
-@@ -189,7 +189,7 @@
+@@ -189,7 +189,7 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
{
snd_pcm_stream_unlock(substream);
if (!substream->pcm->nonatomic)
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/sound/drivers/dummy.c linux-4.14/sound/drivers/dummy.c
---- linux-4.14.orig/sound/drivers/dummy.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/sound/drivers/dummy.c 2018-09-05 11:05:07.000000000 +0200
-@@ -376,17 +376,9 @@
+diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
+index c0939a0164a6..549e014ecc0d 100644
+--- a/sound/drivers/dummy.c
++++ b/sound/drivers/dummy.c
+@@ -376,17 +376,9 @@ struct dummy_hrtimer_pcm {
ktime_t period_time;
atomic_t running;
struct hrtimer timer;
static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer)
{
struct dummy_hrtimer_pcm *dpcm;
-@@ -394,7 +386,14 @@
+@@ -394,7 +386,14 @@ static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer)
dpcm = container_of(timer, struct dummy_hrtimer_pcm, timer);
if (!atomic_read(&dpcm->running))
return HRTIMER_NORESTART;
hrtimer_forward_now(timer, dpcm->period_time);
return HRTIMER_RESTART;
}
-@@ -404,7 +403,7 @@
+@@ -404,7 +403,7 @@ static int dummy_hrtimer_start(struct snd_pcm_substream *substream)
struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
dpcm->base_time = hrtimer_cb_get_time(&dpcm->timer);
atomic_set(&dpcm->running, 1);
return 0;
}
-@@ -414,14 +413,14 @@
+@@ -414,14 +413,14 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
atomic_set(&dpcm->running, 0);
}
static snd_pcm_uframes_t
-@@ -466,12 +465,10 @@
+@@ -466,12 +465,10 @@ static int dummy_hrtimer_create(struct snd_pcm_substream *substream)
if (!dpcm)
return -ENOMEM;
substream->runtime->private_data = dpcm;
return 0;
}
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/tools/testing/selftests/ftrace/test.d/functions linux-4.14/tools/testing/selftests/ftrace/test.d/functions
---- linux-4.14.orig/tools/testing/selftests/ftrace/test.d/functions 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/tools/testing/selftests/ftrace/test.d/functions 2018-09-05 11:05:07.000000000 +0200
-@@ -70,6 +70,13 @@
+diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
+index 6a4982d029bf..843c2b0d948e 100644
+--- a/tools/testing/selftests/ftrace/test.d/functions
++++ b/tools/testing/selftests/ftrace/test.d/functions
+@@ -70,6 +70,13 @@ disable_events() {
echo 0 > events/enable
}
initialize_ftrace() { # Reset ftrace to initial-state
# As the initial state, ftrace will be set to nop tracer,
# no events, no triggers, no filters, no function filters,
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
---- linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc 2018-09-05 11:05:07.000000000 +0200
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
+new file mode 100644
+index 000000000000..786dce7e48be
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
@@ -0,0 +1,39 @@
+#!/bin/sh
+# description: event trigger - test extended error support
+do_reset
+
+exit 0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
---- linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc 2018-09-05 11:05:07.000000000 +0200
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
+new file mode 100644
+index 000000000000..7fd5b4a8f060
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
@@ -0,0 +1,54 @@
+#!/bin/sh
+# description: event trigger - test field variable support
+do_reset
+
+exit 0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
---- linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc 2018-09-05 11:05:07.000000000 +0200
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
+new file mode 100644
+index 000000000000..c93dbe38b5df
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
@@ -0,0 +1,58 @@
+#!/bin/sh
+# description: event trigger - test inter-event combined histogram trigger
+do_reset
+
+exit 0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
---- linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc 2018-09-05 11:05:07.000000000 +0200
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
+new file mode 100644
+index 000000000000..e84e7d048566
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
@@ -0,0 +1,50 @@
+#!/bin/sh
+# description: event trigger - test inter-event histogram trigger onmatch action
+do_reset
+
+exit 0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
---- linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc 2018-09-05 11:05:07.000000000 +0200
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
+new file mode 100644
+index 000000000000..7907d8aacde3
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
@@ -0,0 +1,50 @@
+#!/bin/sh
+# description: event trigger - test inter-event histogram trigger onmatch-onmax action
+do_reset
+
+exit 0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
---- linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc 2018-09-05 11:05:07.000000000 +0200
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
+new file mode 100644
+index 000000000000..38b7ed6242b2
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
@@ -0,0 +1,48 @@
+#!/bin/sh
+# description: event trigger - test inter-event histogram trigger onmax action
+do_reset
+
+exit 0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
---- linux-4.14.orig/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc 2018-09-05 11:05:07.000000000 +0200
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
+new file mode 100644
+index 000000000000..cef11377dcbd
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
@@ -0,0 +1,54 @@
+#!/bin/sh
+# description: event trigger - test synthetic event create remove
+do_reset
+
+exit 0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/virt/kvm/arm/arm.c linux-4.14/virt/kvm/arm/arm.c
---- linux-4.14.orig/virt/kvm/arm/arm.c 2018-09-05 11:03:25.000000000 +0200
-+++ linux-4.14/virt/kvm/arm/arm.c 2018-09-05 11:05:07.000000000 +0200
-@@ -69,7 +69,6 @@
+diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
+index d5f1d8364571..c09e04130bfe 100644
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -69,7 +69,6 @@ static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
__this_cpu_write(kvm_arm_running_vcpu, vcpu);
}
-@@ -79,7 +78,6 @@
+@@ -79,7 +78,6 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
*/
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
return __this_cpu_read(kvm_arm_running_vcpu);
}
-@@ -653,7 +651,7 @@
+@@ -653,7 +651,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
kvm_pmu_flush_hwstate(vcpu);
-@@ -690,7 +688,7 @@
+@@ -690,7 +688,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_pmu_sync_hwstate(vcpu);
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
continue;
}
-@@ -745,7 +743,7 @@
+@@ -745,7 +743,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_vgic_sync_hwstate(vcpu);