[packages/kernel/LINUX_4_14] -rt updates

jajcus jajcus at pld-linux.org
Wed Sep 5 14:05:15 CEST 2018


commit e4b2b4a843c12a6e36d7fce14e4a2cdb0e3b39c4
Author: Jacek Konieczny <j.konieczny at eggsoft.pl>
Date:   Wed Sep 5 14:04:57 2018 +0200

    -rt updates

 kernel-rt.patch | 47612 +++++++++++++++++++++++++++++++++++-------------------
 kernel.spec     |     2 +-
 2 files changed, 30744 insertions(+), 16870 deletions(-)
---
diff --git a/kernel.spec b/kernel.spec
index 53cbaf93..51366ce4 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -209,7 +209,7 @@ Patch146:	kernel-aufs4+vserver.patch
 Patch250:	kernel-fix_256colors_menuconfig.patch
 
 # https://rt.wiki.kernel.org/
-# https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/patch-4.9.6-rt4.patch.xz
+# https://www.kernel.org/pub/linux/kernel/projects/rt/4.14/patch-4.14.63-rt40.patch.xz with small updates
 Patch500:	kernel-rt.patch
 
 Patch2000:	kernel-small_fixes.patch
diff --git a/kernel-rt.patch b/kernel-rt.patch
index 4ffbf4ba..445d59e4 100644
--- a/kernel-rt.patch
+++ b/kernel-rt.patch
@@ -1,265 +1,21 @@
-diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
-index 3a3b30ac2a75..9e0745cafbd8 100644
---- a/Documentation/sysrq.txt
-+++ b/Documentation/sysrq.txt
-@@ -59,10 +59,17 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
- On other - If you know of the key combos for other architectures, please
-            let me know so I can add them to this section.
- 
--On all -  write a character to /proc/sysrq-trigger.  e.g.:
--
-+On all -  write a character to /proc/sysrq-trigger, e.g.:
- 		echo t > /proc/sysrq-trigger
- 
-+On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
-+		echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
-+	 Send an ICMP echo request with this pattern plus the particular
-+	 SysRq command key. Example:
-+		# ping -c1 -s57 -p0102030468
-+	 will trigger the SysRq-H (help) command.
-+
-+
- *  What are the 'command' keys?
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 'b'     - Will immediately reboot the system without syncing or unmounting
-diff --git a/Documentation/trace/histograms.txt b/Documentation/trace/histograms.txt
-new file mode 100644
-index 000000000000..6f2aeabf7faa
---- /dev/null
-+++ b/Documentation/trace/histograms.txt
-@@ -0,0 +1,186 @@
-+		Using the Linux Kernel Latency Histograms
-+
-+
-+This document gives a short explanation of how to enable, configure and use
-+latency histograms. Latency histograms are primarily relevant in the
-+context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
-+and are used in the quality management of the Linux real-time
-+capabilities.
-+
-+
-+* Purpose of latency histograms
-+
-+A latency histogram continuously accumulates the frequencies of latency
-+data. There are two types of histograms:
-+- potential sources of latencies
-+- effective latencies
-+
-+
-+* Potential sources of latencies
-+
-+Potential sources of latencies are code segments where interrupts,
-+preemption or both are disabled (aka critical sections). To create
-+histograms of potential sources of latency, the kernel stores the time
-+stamp at the start of a critical section, determines the time elapsed
-+when the end of the section is reached, and increments the frequency
-+counter of that latency value - irrespective of whether any concurrently
-+running process is affected by latency or not.
-+- Configuration items (in the Kernel hacking/Tracers submenu)
-+  CONFIG_INTERRUPT_OFF_LATENCY
-+  CONFIG_PREEMPT_OFF_LATENCY
-+
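The bookkeeping just described (stamp the start of the critical section, compute the delta at its end, bump one frequency counter) can be sketched in a few lines of C. This is a minimal user-space illustration with made-up function names and a POSIX clock; the tracer's real code lives in the kernel:

#include <stdint.h>
#include <time.h>

#define MAX_LATENCY_US 10240	/* histogram range, per "Data format" below */

static uint64_t hist[MAX_LATENCY_US];	/* one counter per microsecond */
static uint64_t overflow;		/* samples beyond the range */
static uint64_t section_start_us;

static uint64_t now_us(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* called where the critical section begins, e.g. irqs get disabled */
void section_enter(void)
{
	section_start_us = now_us();
}

/* called where the critical section ends; bump the matching bucket */
void section_exit(void)
{
	uint64_t delta = now_us() - section_start_us;

	if (delta < MAX_LATENCY_US)
		hist[delta]++;
	else
		overflow++;	/* the data are only valid if this stays 0 */
}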
-+
-+* Effective latencies
-+
-+Effective latencies are those actually occurring during wakeup of a process. To
-+determine effective latencies, the kernel stores the time stamp when a
-+process is scheduled to be woken up, and determines the duration of the
-+wakeup time shortly before control is passed over to this process. Note
-+that the apparent latency in user space may be somewhat longer, since the
-+process may be interrupted after control is passed over to it but before
-+the execution in user space takes place. Simply measuring the interval
-+between enqueuing and wakeup may also not be appropriate in cases when a
-+process is scheduled as a result of a timer expiration. The timer may have
-+missed its deadline, e.g. due to disabled interrupts, but this latency
-+would not be registered. Therefore, the offsets of missed timers are
-+recorded in a separate histogram. If both wakeup latency and missed timer
-+offsets are configured and enabled, a third histogram may be enabled that
-+records the overall latency as a sum of the timer latency, if any, and the
-+wakeup latency. This histogram is called "timerandwakeup".
-+- Configuration items (in the Kernel hacking/Tracers submenu)
-+  CONFIG_WAKEUP_LATENCY
-+  CONFIG_MISSED_TIMER_OFFSETS
-+
-+
-+* Usage
-+
-+The interface to the administration of the latency histograms is located
-+in the debugfs file system. To mount it, either enter
-+
-+mount -t sysfs nodev /sys
-+mount -t debugfs nodev /sys/kernel/debug
-+
-+from shell command line level, or add
-+
-+nodev	/sys			sysfs	defaults	0 0
-+nodev	/sys/kernel/debug	debugfs	defaults	0 0
-+
-+to the file /etc/fstab. All latency histogram related files are then
-+available in the directory /sys/kernel/debug/tracing/latency_hist. A
-+particular histogram type is enabled by writing non-zero to the related
-+variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
-+Select "preemptirqsoff" for the histograms of potential sources of
-+latencies and "wakeup" for histograms of effective latencies etc. The
-+histogram data - one per CPU - are available in the files
-+
-+/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
-+/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
-+/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
-+/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
-+
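The enable/read cycle can also be shown as a small C program. A minimal sketch, assuming debugfs is mounted as described above and the wakeup histogram is configured in:

#include <stdio.h>

#define HIST "/sys/kernel/debug/tracing/latency_hist"

int main(void)
{
	char line[128];
	FILE *f;

	/* writing non-zero in the enable directory switches it on */
	f = fopen(HIST "/enable/wakeup", "w");
	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);

	/* data are kept per CPU; CPU0 shown here */
	f = fopen(HIST "/wakeup/CPU0", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
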
-+The histograms are reset by writing non-zero to the file "reset" in a
-+particular latency directory. To reset all latency data, use
-+
-+#!/bin/sh
-+
-+TRACINGDIR=/sys/kernel/debug/tracing
-+HISTDIR=$TRACINGDIR/latency_hist
-+
-+if test -d $HISTDIR
-+then
-+  cd $HISTDIR
-+  for i in `find . | grep /reset$`
-+  do
-+    echo 1 >$i
-+  done
-+fi
-+
-+
-+* Data format
-+
-+Latency data are stored with a resolution of one microsecond. The
-+maximum latency is 10,240 microseconds. The data are only valid if the
-+overflow register is empty. Every output line contains the latency in
-+microseconds in the first column and the number of samples in the
-+second column. To display only lines with a positive latency count,
-+use, for example,
-+
-+grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
-+
-+#Minimum latency: 0 microseconds.
-+#Average latency: 0 microseconds.
-+#Maximum latency: 25 microseconds.
-+#Total samples: 3104770694
-+#There are 0 samples greater or equal than 10240 microseconds
-+#usecs	         samples
-+    0	      2984486876
-+    1	        49843506
-+    2	        58219047
-+    3	         5348126
-+    4	         2187960
-+    5	         3388262
-+    6	          959289
-+    7	          208294
-+    8	           40420
-+    9	            4485
-+   10	           14918
-+   11	           18340
-+   12	           25052
-+   13	           19455
-+   14	            5602
-+   15	             969
-+   16	              47
-+   17	              18
-+   18	              14
-+   19	               1
-+   20	               3
-+   21	               2
-+   22	               5
-+   23	               2
-+   25	               1
-+
-+
-+* Wakeup latency of a selected process
-+
-+To only collect wakeup latency data of a particular process, write the
-+PID of the requested process to
-+
-+/sys/kernel/debug/tracing/latency_hist/wakeup/pid
-+
-+PIDs are not considered if this variable is set to 0.
-+
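For example, a test program could restrict collection to itself before starting its measurement loop (a sketch; error handling trimmed):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/latency_hist/wakeup/pid", "w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", getpid());	/* collect wakeup data for us only */
	fclose(f);
	/* ... run the latency-sensitive workload here ... */
	return 0;
}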
-+
-+* Details of the process with the highest wakeup latency so far
-+
-+Selected data of the process that suffered the highest wakeup
-+latency observed on a particular CPU are available in the file
-+
-+/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
-+
-+In addition, other relevant system data at the time when the
-+latency occurred are given.
-+
-+The format of the data is (all in one line):
-+<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
-+<- <PID> <Priority> <Command> <Timestamp>
-+
-+The value of <Timeroffset> is only relevant in the combined timer
-+and wakeup latency recording. In the wakeup recording, it is
-+always 0, in the missed_timer_offsets recording, it is the same
-+as <Latency>.
-+
-+When retrospectively searching for the origin of a latency while
-+tracing was not enabled, it may be helpful to know the name and
-+some basic data of the task that (finally) switched to the late
-+real-time task. In addition to the victim's data, the data of the
-+possible culprit are therefore displayed after the "<-" symbol.
-+
-+Finally, the timestamp of when the latency occurred, given as
-+<seconds>.<microseconds> since the most recent system boot,
-+is provided.
-+
-+These data are also reset when the wakeup histogram is reset.
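A small parser shows how the documented record fits together. The sample record and field widths are made up for illustration:

#include <stdio.h>

int main(void)
{
	/* hypothetical record in the format documented above */
	const char *rec =
		"1234 98 12 (0) cyclictest <- 5678 120 ksoftirqd/0 123.456789";
	int pid, prio, lat, toff, cpid, cprio;
	char comm[32], ccomm[32];
	double ts;

	if (sscanf(rec, "%d %d %d (%d) %31s <- %d %d %31s %lf",
		   &pid, &prio, &lat, &toff, comm,
		   &cpid, &cprio, ccomm, &ts) == 9)
		printf("victim %s (pid %d): %d us; culprit %s (pid %d) at %f\n",
		       comm, pid, lat, ccomm, cpid, ts);
	return 0;
}
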
-diff --git a/arch/Kconfig b/arch/Kconfig
-index 659bdd079277..099fc0f5155e 100644
---- a/arch/Kconfig
-+++ b/arch/Kconfig
-@@ -9,6 +9,7 @@ config OPROFILE
- 	tristate "OProfile system profiling"
- 	depends on PROFILING
- 	depends on HAVE_OPROFILE
-+	depends on !PREEMPT_RT_FULL
- 	select RING_BUFFER
- 	select RING_BUFFER_ALLOW_SWAP
- 	help
-@@ -52,6 +53,7 @@ config KPROBES
- config JUMP_LABEL
-        bool "Optimize very unlikely/likely branches"
-        depends on HAVE_ARCH_JUMP_LABEL
-+       depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
-        help
-          This option enables a transparent branch optimization that
- 	 makes certain almost-always-true or almost-always-false branch
-diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index b5d529fdffab..5715844e83e3 100644
---- a/arch/arm/Kconfig
-+++ b/arch/arm/Kconfig
-@@ -36,7 +36,7 @@ config ARM
- 	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
- 	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
- 	select HAVE_ARCH_HARDENED_USERCOPY
--	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
-+	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
- 	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
- 	select HAVE_ARCH_MMAP_RND_BITS if MMU
- 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
-@@ -75,6 +75,7 @@ config ARM
- 	select HAVE_PERF_EVENTS
- 	select HAVE_PERF_REGS
- 	select HAVE_PERF_USER_STACK_DUMP
-+	select HAVE_PREEMPT_LAZY
- 	select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
- 	select HAVE_REGS_AND_STACK_ACCESS_API
- 	select HAVE_SYSCALL_TRACEPOINTS
-diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
-index e53638c8ed8a..6095a1649865 100644
---- a/arch/arm/include/asm/irq.h
-+++ b/arch/arm/include/asm/irq.h
-@@ -22,6 +22,8 @@
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/alpha/include/asm/spinlock_types.h linux-4.14/arch/alpha/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/alpha/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/alpha/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -2,10 +2,6 @@
+ #ifndef _ALPHA_SPINLOCK_TYPES_H
+ #define _ALPHA_SPINLOCK_TYPES_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ 	volatile unsigned int lock;
+ } arch_spinlock_t;
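The effect of dropping the include guard is easiest to see from the include side. Presumably the -rt lock headers need the raw arch type without going through <linux/spinlock_types.h>; with the guard gone, a unit like this compiles (illustration only):

#include <asm/spinlock_types.h>	/* no longer errors out */

/* the bare arch type is now usable on its own */
arch_spinlock_t early_lock;
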
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/include/asm/irq.h linux-4.14/arch/arm/include/asm/irq.h
+--- linux-4.14.orig/arch/arm/include/asm/irq.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/include/asm/irq.h	2018-09-05 11:05:07.000000000 +0200
+@@ -23,6 +23,8 @@
  #endif
  
  #ifndef __ASSEMBLY__
@@ -268,11 +24,24 @@ index e53638c8ed8a..6095a1649865 100644
  struct irqaction;
  struct pt_regs;
  extern void migrate_irqs(void);
-diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
-index 12ebfcc1d539..c962084605bc 100644
---- a/arch/arm/include/asm/switch_to.h
-+++ b/arch/arm/include/asm/switch_to.h
-@@ -3,6 +3,13 @@
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/include/asm/spinlock_types.h linux-4.14/arch/arm/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/arm/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -2,10 +2,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ #define TICKET_SHIFT	16
+ 
+ typedef struct {
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/include/asm/switch_to.h linux-4.14/arch/arm/include/asm/switch_to.h
+--- linux-4.14.orig/arch/arm/include/asm/switch_to.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/include/asm/switch_to.h	2018-09-05 11:05:07.000000000 +0200
+@@ -4,6 +4,13 @@
  
  #include <linux/thread_info.h>
  
@@ -286,7 +55,7 @@ index 12ebfcc1d539..c962084605bc 100644
  /*
   * For v7 SMP cores running a preemptible kernel we may be pre-empted
   * during a TLB maintenance operation, so execute an inner-shareable dsb
-@@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
+@@ -26,6 +33,7 @@
  #define switch_to(prev,next,last)					\
  do {									\
  	__complete_pending_tlbi();					\
@@ -294,11 +63,10 @@ index 12ebfcc1d539..c962084605bc 100644
  	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
  } while (0)
  
-diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
-index 776757d1604a..1f36a4eccc72 100644
---- a/arch/arm/include/asm/thread_info.h
-+++ b/arch/arm/include/asm/thread_info.h
-@@ -49,6 +49,7 @@ struct cpu_context_save {
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/include/asm/thread_info.h linux-4.14/arch/arm/include/asm/thread_info.h
+--- linux-4.14.orig/arch/arm/include/asm/thread_info.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/include/asm/thread_info.h	2018-09-05 11:05:07.000000000 +0200
+@@ -49,6 +49,7 @@
  struct thread_info {
  	unsigned long		flags;		/* low level flags */
  	int			preempt_count;	/* 0 => preemptable, <0 => bug */
@@ -306,7 +74,7 @@ index 776757d1604a..1f36a4eccc72 100644
  	mm_segment_t		addr_limit;	/* address limit */
  	struct task_struct	*task;		/* main task structure */
  	__u32			cpu;		/* cpu */
-@@ -142,7 +143,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+@@ -142,7 +143,8 @@
  #define TIF_SYSCALL_TRACE	4	/* syscall trace active */
  #define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
  #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
@@ -316,7 +84,7 @@ index 776757d1604a..1f36a4eccc72 100644
  
  #define TIF_NOHZ		12	/* in adaptive nohz mode */
  #define TIF_USING_IWMMXT	17
-@@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+@@ -152,6 +154,7 @@
  #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
  #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
  #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -324,7 +92,7 @@ index 776757d1604a..1f36a4eccc72 100644
  #define _TIF_UPROBE		(1 << TIF_UPROBE)
  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
  #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
-@@ -167,7 +170,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+@@ -167,7 +170,8 @@
   * Change these and you break ASM code in entry-common.S
   */
  #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
@@ -334,11 +102,39 @@ index 776757d1604a..1f36a4eccc72 100644
  
  #endif /* __KERNEL__ */
  #endif /* __ASM_ARM_THREAD_INFO_H */
-diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
-index 608008229c7d..3866da3f7bb7 100644
---- a/arch/arm/kernel/asm-offsets.c
-+++ b/arch/arm/kernel/asm-offsets.c
-@@ -65,6 +65,7 @@ int main(void)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/Kconfig linux-4.14/arch/arm/Kconfig
+--- linux-4.14.orig/arch/arm/Kconfig	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/Kconfig	2018-09-05 11:05:07.000000000 +0200
+@@ -45,7 +45,7 @@
+ 	select HARDIRQS_SW_RESEND
+ 	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
+ 	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+-	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
++	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
+ 	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
+ 	select HAVE_ARCH_MMAP_RND_BITS if MMU
+ 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+@@ -85,6 +85,7 @@
+ 	select HAVE_PERF_EVENTS
+ 	select HAVE_PERF_REGS
+ 	select HAVE_PERF_USER_STACK_DUMP
++	select HAVE_PREEMPT_LAZY
+ 	select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
+ 	select HAVE_REGS_AND_STACK_ACCESS_API
+ 	select HAVE_SYSCALL_TRACEPOINTS
+@@ -2164,7 +2165,7 @@
+ 
+ config KERNEL_MODE_NEON
+ 	bool "Support for NEON in kernel mode"
+-	depends on NEON && AEABI
++	depends on NEON && AEABI && !PREEMPT_RT_BASE
+ 	help
+ 	  Say Y to include support for NEON in kernel mode.
+ 
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/asm-offsets.c linux-4.14/arch/arm/kernel/asm-offsets.c
+--- linux-4.14.orig/arch/arm/kernel/asm-offsets.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/kernel/asm-offsets.c	2018-09-05 11:05:07.000000000 +0200
+@@ -65,6 +65,7 @@
    BLANK();
    DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
    DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
@@ -346,11 +142,10 @@ index 608008229c7d..3866da3f7bb7 100644
    DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
    DEFINE(TI_TASK,		offsetof(struct thread_info, task));
    DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
-diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
-index 9f157e7c51e7..468e224d76aa 100644
---- a/arch/arm/kernel/entry-armv.S
-+++ b/arch/arm/kernel/entry-armv.S
-@@ -220,11 +220,18 @@ ENDPROC(__dabt_svc)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/entry-armv.S linux-4.14/arch/arm/kernel/entry-armv.S
+--- linux-4.14.orig/arch/arm/kernel/entry-armv.S	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/kernel/entry-armv.S	2018-09-05 11:05:07.000000000 +0200
+@@ -220,11 +220,18 @@
  
  #ifdef CONFIG_PREEMPT
  	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
@@ -371,7 +166,7 @@ index 9f157e7c51e7..468e224d76aa 100644
  #endif
  
  	svc_exit r5, irq = 1			@ return from exception
-@@ -239,8 +246,14 @@ ENDPROC(__irq_svc)
+@@ -239,8 +246,14 @@
  1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
  	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
  	tst	r0, #_TIF_NEED_RESCHED
@@ -387,13 +182,12 @@ index 9f157e7c51e7..468e224d76aa 100644
  #endif
  
  __und_fault:
-diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
-index 10c3283d6c19..8872937862cc 100644
---- a/arch/arm/kernel/entry-common.S
-+++ b/arch/arm/kernel/entry-common.S
-@@ -36,7 +36,9 @@
-  UNWIND(.cantunwind	)
- 	disable_irq_notrace			@ disable interrupts
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/entry-common.S linux-4.14/arch/arm/kernel/entry-common.S
+--- linux-4.14.orig/arch/arm/kernel/entry-common.S	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/kernel/entry-common.S	2018-09-05 11:05:07.000000000 +0200
+@@ -53,7 +53,9 @@
+ 	cmp	r2, #TASK_SIZE
+ 	blne	addr_limit_check_failed
  	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
 -	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
 +	tst	r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
@@ -401,25 +195,24 @@ index 10c3283d6c19..8872937862cc 100644
 +	tst	r1, #_TIF_SECCOMP
  	bne	fast_work_pending
  
- 	/* perform architecture specific actions before user return */
-@@ -62,8 +64,11 @@ ENDPROC(ret_fast_syscall)
- 	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
- 	disable_irq_notrace			@ disable interrupts
+ 
+@@ -83,8 +85,11 @@
+ 	cmp	r2, #TASK_SIZE
+ 	blne	addr_limit_check_failed
  	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
 -	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
 +	tst	r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
-+	bne 	do_slower_path
++	bne	do_slower_path
 +	tst	r1, #_TIF_SECCOMP
  	beq	no_work_pending
 +do_slower_path:
   UNWIND(.fnend		)
  ENDPROC(ret_fast_syscall)
  
-diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
-index 69bda1a5707e..1f665acaa6a9 100644
---- a/arch/arm/kernel/patch.c
-+++ b/arch/arm/kernel/patch.c
-@@ -15,7 +15,7 @@ struct patch {
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/patch.c linux-4.14/arch/arm/kernel/patch.c
+--- linux-4.14.orig/arch/arm/kernel/patch.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/kernel/patch.c	2018-09-05 11:05:07.000000000 +0200
+@@ -16,7 +16,7 @@
  	unsigned int insn;
  };
  
@@ -428,7 +221,7 @@ index 69bda1a5707e..1f665acaa6a9 100644
  
  static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
  	__acquires(&patch_lock)
-@@ -32,7 +32,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+@@ -33,7 +33,7 @@
  		return addr;
  
  	if (flags)
@@ -437,7 +230,7 @@ index 69bda1a5707e..1f665acaa6a9 100644
  	else
  		__acquire(&patch_lock);
  
-@@ -47,7 +47,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
+@@ -48,7 +48,7 @@
  	clear_fixmap(fixmap);
  
  	if (flags)
@@ -446,11 +239,10 @@ index 69bda1a5707e..1f665acaa6a9 100644
  	else
  		__release(&patch_lock);
  }
-diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 91d2d5b01414..750550098b59 100644
---- a/arch/arm/kernel/process.c
-+++ b/arch/arm/kernel/process.c
-@@ -322,6 +322,30 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/process.c linux-4.14/arch/arm/kernel/process.c
+--- linux-4.14.orig/arch/arm/kernel/process.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/kernel/process.c	2018-09-05 11:05:07.000000000 +0200
+@@ -325,6 +325,30 @@
  }
  
  #ifdef CONFIG_MMU
@@ -481,11 +273,10 @@ index 91d2d5b01414..750550098b59 100644
  #ifdef CONFIG_KUSER_HELPERS
  /*
   * The vectors page is always readable from user space for the
-diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
-index 7b8f2141427b..96541e00b74a 100644
---- a/arch/arm/kernel/signal.c
-+++ b/arch/arm/kernel/signal.c
-@@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/signal.c linux-4.14/arch/arm/kernel/signal.c
+--- linux-4.14.orig/arch/arm/kernel/signal.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/kernel/signal.c	2018-09-05 11:05:07.000000000 +0200
+@@ -615,7 +615,8 @@
  	 */
  	trace_hardirqs_off();
  	do {
@@ -495,11 +286,10 @@ index 7b8f2141427b..96541e00b74a 100644
  			schedule();
  		} else {
  			if (unlikely(!user_mode(regs)))
-diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
-index 7dd14e8395e6..4cd7e3d98035 100644
---- a/arch/arm/kernel/smp.c
-+++ b/arch/arm/kernel/smp.c
-@@ -234,8 +234,6 @@ int __cpu_disable(void)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/smp.c linux-4.14/arch/arm/kernel/smp.c
+--- linux-4.14.orig/arch/arm/kernel/smp.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/kernel/smp.c	2018-09-05 11:05:07.000000000 +0200
+@@ -236,8 +236,6 @@
  	flush_cache_louis();
  	local_flush_tlb_all();
  
@@ -508,21 +298,18 @@ index 7dd14e8395e6..4cd7e3d98035 100644
  	return 0;
  }
  
-@@ -251,6 +249,9 @@ void __cpu_die(unsigned int cpu)
- 		pr_err("CPU%u: cpu didn't die\n", cpu);
- 		return;
+@@ -255,6 +253,7 @@
  	}
-+
-+	clear_tasks_mm_cpumask(cpu);
-+
- 	pr_notice("CPU%u: shutdown\n", cpu);
+ 	pr_debug("CPU%u: shutdown\n", cpu);
  
++	clear_tasks_mm_cpumask(cpu);
  	/*
-diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
-index 0bee233fef9a..314cfb232a63 100644
---- a/arch/arm/kernel/unwind.c
-+++ b/arch/arm/kernel/unwind.c
-@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
+ 	 * platform_cpu_kill() is generally expected to do the powering off
+ 	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/unwind.c linux-4.14/arch/arm/kernel/unwind.c
+--- linux-4.14.orig/arch/arm/kernel/unwind.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/kernel/unwind.c	2018-09-05 11:05:07.000000000 +0200
+@@ -93,7 +93,7 @@
  static const struct unwind_idx *__origin_unwind_idx;
  extern const struct unwind_idx __stop_unwind_idx[];
  
@@ -531,7 +318,7 @@ index 0bee233fef9a..314cfb232a63 100644
  static LIST_HEAD(unwind_tables);
  
  /* Convert a prel31 symbol to an absolute address */
-@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
+@@ -201,7 +201,7 @@
  		/* module unwind tables */
  		struct unwind_table *table;
  
@@ -540,7 +327,7 @@ index 0bee233fef9a..314cfb232a63 100644
  		list_for_each_entry(table, &unwind_tables, list) {
  			if (addr >= table->begin_addr &&
  			    addr < table->end_addr) {
-@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
+@@ -213,7 +213,7 @@
  				break;
  			}
  		}
@@ -549,7 +336,7 @@ index 0bee233fef9a..314cfb232a63 100644
  	}
  
  	pr_debug("%s: idx = %p\n", __func__, idx);
-@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
+@@ -529,9 +529,9 @@
  	tab->begin_addr = text_addr;
  	tab->end_addr = text_addr + text_size;
  
@@ -561,7 +348,7 @@ index 0bee233fef9a..314cfb232a63 100644
  
  	return tab;
  }
-@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
+@@ -543,9 +543,9 @@
  	if (!tab)
  		return;
  
@@ -573,42 +360,10 @@ index 0bee233fef9a..314cfb232a63 100644
  
  	kfree(tab);
  }
-diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
-index 19b5f5c1c0ff..82aa639e6737 100644
---- a/arch/arm/kvm/arm.c
-+++ b/arch/arm/kvm/arm.c
-@@ -619,7 +619,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
- 		 * involves poking the GIC, which must be done in a
- 		 * non-preemptible context.
- 		 */
--		preempt_disable();
-+		migrate_disable();
- 		kvm_pmu_flush_hwstate(vcpu);
- 		kvm_timer_flush_hwstate(vcpu);
- 		kvm_vgic_flush_hwstate(vcpu);
-@@ -640,7 +640,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
- 			kvm_pmu_sync_hwstate(vcpu);
- 			kvm_timer_sync_hwstate(vcpu);
- 			kvm_vgic_sync_hwstate(vcpu);
--			preempt_enable();
-+			migrate_enable();
- 			continue;
- 		}
- 
-@@ -696,7 +696,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
- 
- 		kvm_vgic_sync_hwstate(vcpu);
- 
--		preempt_enable();
-+		migrate_enable();
- 
- 		ret = handle_exit(vcpu, run, ret);
- 	}
-diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
-index 98ffe1e62ad5..df9769ddece5 100644
---- a/arch/arm/mach-exynos/platsmp.c
-+++ b/arch/arm/mach-exynos/platsmp.c
-@@ -229,7 +229,7 @@ static void __iomem *scu_base_addr(void)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-exynos/platsmp.c linux-4.14/arch/arm/mach-exynos/platsmp.c
+--- linux-4.14.orig/arch/arm/mach-exynos/platsmp.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/mach-exynos/platsmp.c	2018-09-05 11:05:07.000000000 +0200
+@@ -229,7 +229,7 @@
  	return (void __iomem *)(S5P_VA_SCU);
  }
  
@@ -617,7 +372,7 @@ index 98ffe1e62ad5..df9769ddece5 100644
  
  static void exynos_secondary_init(unsigned int cpu)
  {
-@@ -242,8 +242,8 @@ static void exynos_secondary_init(unsigned int cpu)
+@@ -242,8 +242,8 @@
  	/*
  	 * Synchronise with the boot thread.
  	 */
@@ -628,7 +383,7 @@ index 98ffe1e62ad5..df9769ddece5 100644
  }
  
  int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
-@@ -307,7 +307,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -307,7 +307,7 @@
  	 * Set synchronisation state between this boot processor
  	 * and the secondary one
  	 */
@@ -637,7 +392,7 @@ index 98ffe1e62ad5..df9769ddece5 100644
  
  	/*
  	 * The secondary processor is waiting to be released from
-@@ -334,7 +334,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -334,7 +334,7 @@
  
  		if (timeout == 0) {
  			printk(KERN_ERR "cpu1 power enable failed");
@@ -646,7 +401,7 @@ index 98ffe1e62ad5..df9769ddece5 100644
  			return -ETIMEDOUT;
  		}
  	}
-@@ -380,7 +380,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -380,7 +380,7 @@
  	 * calibrations, then wait for it to finish
  	 */
  fail:
@@ -655,10 +410,9 @@ index 98ffe1e62ad5..df9769ddece5 100644
  
  	return pen_release != -1 ? ret : 0;
  }
-diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c
-index 4b653a8cb75c..b03d5a922cb1 100644
---- a/arch/arm/mach-hisi/platmcpm.c
-+++ b/arch/arm/mach-hisi/platmcpm.c
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-hisi/platmcpm.c linux-4.14/arch/arm/mach-hisi/platmcpm.c
+--- linux-4.14.orig/arch/arm/mach-hisi/platmcpm.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/mach-hisi/platmcpm.c	2018-09-05 11:05:07.000000000 +0200
 @@ -61,7 +61,7 @@
  
  static void __iomem *sysctrl, *fabric;
@@ -668,7 +422,7 @@ index 4b653a8cb75c..b03d5a922cb1 100644
  static u32 fabric_phys_addr;
  /*
   * [0]: bootwrapper physical address
-@@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
+@@ -113,7 +113,7 @@
  	if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
  		return -EINVAL;
  
@@ -677,7 +431,7 @@ index 4b653a8cb75c..b03d5a922cb1 100644
  
  	if (hip04_cpu_table[cluster][cpu])
  		goto out;
-@@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
+@@ -147,7 +147,7 @@
  
  out:
  	hip04_cpu_table[cluster][cpu]++;
@@ -686,7 +440,7 @@ index 4b653a8cb75c..b03d5a922cb1 100644
  
  	return 0;
  }
-@@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu)
+@@ -162,11 +162,11 @@
  	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
  	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
  
@@ -700,7 +454,7 @@ index 4b653a8cb75c..b03d5a922cb1 100644
  		return;
  	} else if (hip04_cpu_table[cluster][cpu] > 1) {
  		pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
-@@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu)
+@@ -174,7 +174,7 @@
  	}
  
  	last_man = hip04_cluster_is_down(cluster);
@@ -709,7 +463,7 @@ index 4b653a8cb75c..b03d5a922cb1 100644
  	if (last_man) {
  		/* Since it's Cortex A15, disable L2 prefetching. */
  		asm volatile(
-@@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu)
+@@ -203,7 +203,7 @@
  	       cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
  
  	count = TIMEOUT_MSEC / POLL_MSEC;
@@ -718,7 +472,7 @@ index 4b653a8cb75c..b03d5a922cb1 100644
  	for (tries = 0; tries < count; tries++) {
  		if (hip04_cpu_table[cluster][cpu])
  			goto err;
-@@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
+@@ -211,10 +211,10 @@
  		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
  		if (data & CORE_WFI_STATUS(cpu))
  			break;
@@ -731,7 +485,7 @@ index 4b653a8cb75c..b03d5a922cb1 100644
  	}
  	if (tries >= count)
  		goto err;
-@@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
+@@ -231,10 +231,10 @@
  		goto err;
  	if (hip04_cluster_is_down(cluster))
  		hip04_set_snoop_filter(cluster, 0);
@@ -744,11 +498,10 @@ index 4b653a8cb75c..b03d5a922cb1 100644
  	return 0;
  }
  #endif
-diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
-index b4de3da6dffa..b52893319d75 100644
---- a/arch/arm/mach-omap2/omap-smp.c
-+++ b/arch/arm/mach-omap2/omap-smp.c
-@@ -64,7 +64,7 @@ static const struct omap_smp_config omap5_cfg __initconst = {
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-omap2/omap-smp.c linux-4.14/arch/arm/mach-omap2/omap-smp.c
+--- linux-4.14.orig/arch/arm/mach-omap2/omap-smp.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/arm/mach-omap2/omap-smp.c	2018-09-05 11:05:07.000000000 +0200
+@@ -69,7 +69,7 @@
  	.startup_addr = omap5_secondary_startup,
  };
  
@@ -757,7 +510,7 @@ index b4de3da6dffa..b52893319d75 100644
  
  void __iomem *omap4_get_scu_base(void)
  {
-@@ -131,8 +131,8 @@ static void omap4_secondary_init(unsigned int cpu)
+@@ -177,8 +177,8 @@
  	/*
  	 * Synchronise with the boot thread.
  	 */
@@ -768,7 +521,7 @@ index b4de3da6dffa..b52893319d75 100644
  }
  
  static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -146,7 +146,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -191,7 +191,7 @@
  	 * Set synchronisation state between this boot processor
  	 * and the secondary one
  	 */
@@ -777,7 +530,7 @@ index b4de3da6dffa..b52893319d75 100644
  
  	/*
  	 * Update the AuxCoreBoot0 with boot state for secondary core.
-@@ -223,7 +223,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -270,7 +270,7 @@
  	 * Now the secondary core is starting up let it run its
  	 * calibrations, then wait for it to finish
  	 */
@@ -786,10 +539,9 @@ index b4de3da6dffa..b52893319d75 100644
  
  	return 0;
  }
-diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
-index 0875b99add18..18b6d98d2581 100644
---- a/arch/arm/mach-prima2/platsmp.c
-+++ b/arch/arm/mach-prima2/platsmp.c
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-prima2/platsmp.c linux-4.14/arch/arm/mach-prima2/platsmp.c
+--- linux-4.14.orig/arch/arm/mach-prima2/platsmp.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/mach-prima2/platsmp.c	2018-09-05 11:05:07.000000000 +0200
 @@ -22,7 +22,7 @@
  
  static void __iomem *clk_base;
@@ -799,7 +551,7 @@ index 0875b99add18..18b6d98d2581 100644
  
  static void sirfsoc_secondary_init(unsigned int cpu)
  {
-@@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu)
+@@ -36,8 +36,8 @@
  	/*
  	 * Synchronise with the boot thread.
  	 */
@@ -810,7 +562,7 @@ index 0875b99add18..18b6d98d2581 100644
  }
  
  static const struct of_device_id clk_ids[]  = {
-@@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -75,7 +75,7 @@
  	/* make sure write buffer is drained */
  	mb();
  
@@ -819,7 +571,7 @@ index 0875b99add18..18b6d98d2581 100644
  
  	/*
  	 * The secondary processor is waiting to be released from
-@@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -107,7 +107,7 @@
  	 * now the secondary core is starting up let it run its
  	 * calibrations, then wait for it to finish
  	 */
@@ -828,10 +580,9 @@ index 0875b99add18..18b6d98d2581 100644
  
  	return pen_release != -1 ? -ENOSYS : 0;
  }
-diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
-index 5494c9e0c909..e8ce157d3548 100644
---- a/arch/arm/mach-qcom/platsmp.c
-+++ b/arch/arm/mach-qcom/platsmp.c
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-qcom/platsmp.c linux-4.14/arch/arm/mach-qcom/platsmp.c
+--- linux-4.14.orig/arch/arm/mach-qcom/platsmp.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/mach-qcom/platsmp.c	2018-09-05 11:05:07.000000000 +0200
 @@ -46,7 +46,7 @@
  
  extern void secondary_startup_arm(void);
@@ -841,7 +592,7 @@ index 5494c9e0c909..e8ce157d3548 100644
  
  #ifdef CONFIG_HOTPLUG_CPU
  static void qcom_cpu_die(unsigned int cpu)
-@@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu)
+@@ -60,8 +60,8 @@
  	/*
  	 * Synchronise with the boot thread.
  	 */
@@ -852,7 +603,7 @@ index 5494c9e0c909..e8ce157d3548 100644
  }
  
  static int scss_release_secondary(unsigned int cpu)
-@@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
+@@ -284,7 +284,7 @@
  	 * set synchronisation state between this boot processor
  	 * and the secondary one
  	 */
@@ -861,7 +612,7 @@ index 5494c9e0c909..e8ce157d3548 100644
  
  	/*
  	 * Send the secondary CPU a soft interrupt, thereby causing
-@@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
+@@ -297,7 +297,7 @@
  	 * now the secondary core is starting up let it run its
  	 * calibrations, then wait for it to finish
  	 */
@@ -870,11 +621,10 @@ index 5494c9e0c909..e8ce157d3548 100644
  
  	return ret;
  }
-diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
-index 8d1e2d551786..7fa56cc78118 100644
---- a/arch/arm/mach-spear/platsmp.c
-+++ b/arch/arm/mach-spear/platsmp.c
-@@ -32,7 +32,7 @@ static void write_pen_release(int val)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-spear/platsmp.c linux-4.14/arch/arm/mach-spear/platsmp.c
+--- linux-4.14.orig/arch/arm/mach-spear/platsmp.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/mach-spear/platsmp.c	2018-09-05 11:05:07.000000000 +0200
+@@ -32,7 +32,7 @@
  	sync_cache_w(&pen_release);
  }
  
@@ -883,7 +633,7 @@ index 8d1e2d551786..7fa56cc78118 100644
  
  static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
  
-@@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu)
+@@ -47,8 +47,8 @@
  	/*
  	 * Synchronise with the boot thread.
  	 */
@@ -894,7 +644,7 @@ index 8d1e2d551786..7fa56cc78118 100644
  }
  
  static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -59,7 +59,7 @@
  	 * set synchronisation state between this boot processor
  	 * and the secondary one
  	 */
@@ -903,7 +653,7 @@ index 8d1e2d551786..7fa56cc78118 100644
  
  	/*
  	 * The secondary processor is waiting to be released from
-@@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -84,7 +84,7 @@
  	 * now the secondary core is starting up let it run its
  	 * calibrations, then wait for it to finish
  	 */
@@ -912,11 +662,10 @@ index 8d1e2d551786..7fa56cc78118 100644
  
  	return pen_release != -1 ? -ENOSYS : 0;
  }
-diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
-index ea5a2277ee46..b988e081ac79 100644
---- a/arch/arm/mach-sti/platsmp.c
-+++ b/arch/arm/mach-sti/platsmp.c
-@@ -35,7 +35,7 @@ static void write_pen_release(int val)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-sti/platsmp.c linux-4.14/arch/arm/mach-sti/platsmp.c
+--- linux-4.14.orig/arch/arm/mach-sti/platsmp.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/mach-sti/platsmp.c	2018-09-05 11:05:07.000000000 +0200
+@@ -35,7 +35,7 @@
  	sync_cache_w(&pen_release);
  }
  
@@ -925,7 +674,7 @@ index ea5a2277ee46..b988e081ac79 100644
  
  static void sti_secondary_init(unsigned int cpu)
  {
-@@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu)
+@@ -48,8 +48,8 @@
  	/*
  	 * Synchronise with the boot thread.
  	 */
@@ -936,7 +685,7 @@ index ea5a2277ee46..b988e081ac79 100644
  }
  
  static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -60,7 +60,7 @@
  	 * set synchronisation state between this boot processor
  	 * and the secondary one
  	 */
@@ -945,7 +694,7 @@ index ea5a2277ee46..b988e081ac79 100644
  
  	/*
  	 * The secondary processor is waiting to be released from
-@@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -91,7 +91,7 @@
  	 * now the secondary core is starting up let it run its
  	 * calibrations, then wait for it to finish
  	 */
@@ -954,11 +703,10 @@ index ea5a2277ee46..b988e081ac79 100644
  
  	return pen_release != -1 ? -ENOSYS : 0;
  }
-diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
-index 3a2e678b8d30..3ed1e9ba6a01 100644
---- a/arch/arm/mm/fault.c
-+++ b/arch/arm/mm/fault.c
-@@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mm/fault.c linux-4.14/arch/arm/mm/fault.c
+--- linux-4.14.orig/arch/arm/mm/fault.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/mm/fault.c	2018-09-05 11:05:07.000000000 +0200
+@@ -434,6 +434,9 @@
  	if (addr < TASK_SIZE)
  		return do_page_fault(addr, fsr, regs);
  
@@ -968,7 +716,7 @@ index 3a2e678b8d30..3ed1e9ba6a01 100644
  	if (user_mode(regs))
  		goto bad_area;
  
-@@ -497,6 +500,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
+@@ -501,6 +504,9 @@
  static int
  do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
  {
@@ -978,11 +726,10 @@ index 3a2e678b8d30..3ed1e9ba6a01 100644
  	do_bad_area(addr, fsr, regs);
  	return 0;
  }
-diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
-index d02f8187b1cc..542692dbd40a 100644
---- a/arch/arm/mm/highmem.c
-+++ b/arch/arm/mm/highmem.c
-@@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mm/highmem.c linux-4.14/arch/arm/mm/highmem.c
+--- linux-4.14.orig/arch/arm/mm/highmem.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/mm/highmem.c	2018-09-05 11:05:07.000000000 +0200
+@@ -34,6 +34,11 @@
  	return *ptep;
  }
  
@@ -994,7 +741,7 @@ index d02f8187b1cc..542692dbd40a 100644
  void *kmap(struct page *page)
  {
  	might_sleep();
-@@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap);
+@@ -54,12 +59,13 @@
  
  void *kmap_atomic(struct page *page)
  {
@@ -1009,7 +756,7 @@ index d02f8187b1cc..542692dbd40a 100644
  	pagefault_disable();
  	if (!PageHighMem(page))
  		return page_address(page);
-@@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page)
+@@ -79,7 +85,7 @@
  
  	type = kmap_atomic_idx_push();
  
@@ -1018,7 +765,7 @@ index d02f8187b1cc..542692dbd40a 100644
  	vaddr = __fix_to_virt(idx);
  #ifdef CONFIG_DEBUG_HIGHMEM
  	/*
-@@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page)
+@@ -93,7 +99,10 @@
  	 * in place, so the contained TLB flush ensures the TLB is updated
  	 * with the new mapping.
  	 */
@@ -1030,7 +777,7 @@ index d02f8187b1cc..542692dbd40a 100644
  
  	return (void *)vaddr;
  }
-@@ -106,44 +115,75 @@ void __kunmap_atomic(void *kvaddr)
+@@ -106,44 +115,75 @@
  
  	if (kvaddr >= (void *)FIXADDR_START) {
  		type = kmap_atomic_idx();
@@ -1112,11 +859,10 @@ index d02f8187b1cc..542692dbd40a 100644
 +	}
 +}
 +#endif
-diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
-index c2366510187a..6b60f582b738 100644
---- a/arch/arm/plat-versatile/platsmp.c
-+++ b/arch/arm/plat-versatile/platsmp.c
-@@ -32,7 +32,7 @@ static void write_pen_release(int val)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/plat-versatile/platsmp.c linux-4.14/arch/arm/plat-versatile/platsmp.c
+--- linux-4.14.orig/arch/arm/plat-versatile/platsmp.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm/plat-versatile/platsmp.c	2018-09-05 11:05:07.000000000 +0200
+@@ -32,7 +32,7 @@
  	sync_cache_w(&pen_release);
  }
  
@@ -1125,7 +871,7 @@ index c2366510187a..6b60f582b738 100644
  
  void versatile_secondary_init(unsigned int cpu)
  {
-@@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu)
+@@ -45,8 +45,8 @@
  	/*
  	 * Synchronise with the boot thread.
  	 */
@@ -1136,7 +882,7 @@ index c2366510187a..6b60f582b738 100644
  }
  
  int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -57,7 +57,7 @@
  	 * Set synchronisation state between this boot processor
  	 * and the secondary one
  	 */
@@ -1145,7 +891,7 @@ index c2366510187a..6b60f582b738 100644
  
  	/*
  	 * This is really belt and braces; we hold unintended secondary
-@@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -87,7 +87,7 @@
  	 * now the secondary core is starting up let it run its
  	 * calibrations, then wait for it to finish
  	 */
@@ -1154,48 +900,138 @@ index c2366510187a..6b60f582b738 100644
  
  	return pen_release != -1 ? -ENOSYS : 0;
  }
-diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index 969ef880d234..1182fe883771 100644
---- a/arch/arm64/Kconfig
-+++ b/arch/arm64/Kconfig
-@@ -91,6 +91,7 @@ config ARM64
- 	select HAVE_PERF_EVENTS
- 	select HAVE_PERF_REGS
- 	select HAVE_PERF_USER_STACK_DUMP
-+	select HAVE_PREEMPT_LAZY
- 	select HAVE_REGS_AND_STACK_ACCESS_API
- 	select HAVE_RCU_TABLE_FREE
- 	select HAVE_SYSCALL_TRACEPOINTS
-@@ -694,7 +695,7 @@ config XEN_DOM0
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/crypto/crc32-ce-glue.c linux-4.14/arch/arm64/crypto/crc32-ce-glue.c
+--- linux-4.14.orig/arch/arm64/crypto/crc32-ce-glue.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/arm64/crypto/crc32-ce-glue.c	2018-09-05 11:05:07.000000000 +0200
+@@ -208,7 +208,8 @@
+ 
+ static int __init crc32_pmull_mod_init(void)
+ {
+-	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_PMULL)) {
++	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
++	    !IS_ENABLED(CONFIG_PREEMPT_RT_BASE) && (elf_hwcap & HWCAP_PMULL)) {
+ 		crc32_pmull_algs[0].update = crc32_pmull_update;
+ 		crc32_pmull_algs[1].update = crc32c_pmull_update;
+ 
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/crypto/Kconfig linux-4.14/arch/arm64/crypto/Kconfig
+--- linux-4.14.orig/arch/arm64/crypto/Kconfig	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm64/crypto/Kconfig	2018-09-05 11:05:07.000000000 +0200
+@@ -19,19 +19,19 @@
+ 
+ config CRYPTO_SHA1_ARM64_CE
+ 	tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
+-	depends on KERNEL_MODE_NEON
++	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ 	select CRYPTO_HASH
+ 	select CRYPTO_SHA1
+ 
+ config CRYPTO_SHA2_ARM64_CE
+ 	tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
+-	depends on KERNEL_MODE_NEON
++	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ 	select CRYPTO_HASH
+ 	select CRYPTO_SHA256_ARM64
+ 
+ config CRYPTO_GHASH_ARM64_CE
+ 	tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
+-	depends on KERNEL_MODE_NEON
++	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ 	select CRYPTO_HASH
+ 	select CRYPTO_GF128MUL
+ 	select CRYPTO_AES
+@@ -39,7 +39,7 @@
+ 
+ config CRYPTO_CRCT10DIF_ARM64_CE
+ 	tristate "CRCT10DIF digest algorithm using PMULL instructions"
+-	depends on KERNEL_MODE_NEON && CRC_T10DIF
++	depends on KERNEL_MODE_NEON && CRC_T10DIF && !PREEMPT_RT_BASE
+ 	select CRYPTO_HASH
+ 
+ config CRYPTO_CRC32_ARM64_CE
+@@ -53,13 +53,13 @@
+ 
+ config CRYPTO_AES_ARM64_CE
+ 	tristate "AES core cipher using ARMv8 Crypto Extensions"
+-	depends on ARM64 && KERNEL_MODE_NEON
++	depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ 	select CRYPTO_ALGAPI
+ 	select CRYPTO_AES_ARM64
+ 
+ config CRYPTO_AES_ARM64_CE_CCM
+ 	tristate "AES in CCM mode using ARMv8 Crypto Extensions"
+-	depends on ARM64 && KERNEL_MODE_NEON
++	depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ 	select CRYPTO_ALGAPI
+ 	select CRYPTO_AES_ARM64_CE
+ 	select CRYPTO_AES_ARM64
+@@ -67,7 +67,7 @@
+ 
+ config CRYPTO_AES_ARM64_CE_BLK
+ 	tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
+-	depends on KERNEL_MODE_NEON
++	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ 	select CRYPTO_BLKCIPHER
+ 	select CRYPTO_AES_ARM64_CE
+ 	select CRYPTO_AES_ARM64
+@@ -75,7 +75,7 @@
+ 
+ config CRYPTO_AES_ARM64_NEON_BLK
+ 	tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
+-	depends on KERNEL_MODE_NEON
++	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ 	select CRYPTO_BLKCIPHER
+ 	select CRYPTO_AES_ARM64
+ 	select CRYPTO_AES
+@@ -83,13 +83,13 @@
+ 
+ config CRYPTO_CHACHA20_NEON
+ 	tristate "NEON accelerated ChaCha20 symmetric cipher"
+-	depends on KERNEL_MODE_NEON
++	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ 	select CRYPTO_BLKCIPHER
+ 	select CRYPTO_CHACHA20
+ 
+ config CRYPTO_AES_ARM64_BS
+ 	tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm"
+-	depends on KERNEL_MODE_NEON
++	depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
+ 	select CRYPTO_BLKCIPHER
+ 	select CRYPTO_AES_ARM64_NEON_BLK
+ 	select CRYPTO_AES_ARM64
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/include/asm/spinlock_types.h linux-4.14/arch/arm64/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/arm64/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/arm64/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -16,10 +16,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+ 
+-#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
+-# error "please don't include this file directly"
+-#endif
+-
+ #include <linux/types.h>
  
- config XEN
- 	bool "Xen guest support on ARM64"
--	depends on ARM64 && OF
-+	depends on ARM64 && OF && !PREEMPT_RT_FULL
- 	select SWIOTLB_XEN
- 	select PARAVIRT
- 	help
-diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
-index e9ea5a6bd449..6c500ad63c6a 100644
---- a/arch/arm64/include/asm/thread_info.h
-+++ b/arch/arm64/include/asm/thread_info.h
-@@ -49,6 +49,7 @@ struct thread_info {
- 	mm_segment_t		addr_limit;	/* address limit */
- 	struct task_struct	*task;		/* main task structure */
+ #define TICKET_SHIFT	16
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/include/asm/thread_info.h linux-4.14/arch/arm64/include/asm/thread_info.h
+--- linux-4.14.orig/arch/arm64/include/asm/thread_info.h	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/arm64/include/asm/thread_info.h	2018-09-05 11:05:07.000000000 +0200
+@@ -43,6 +43,7 @@
+ 	u64			ttbr0;		/* saved TTBR0_EL1 */
+ #endif
  	int			preempt_count;	/* 0 => preemptable, <0 => bug */
 +	int			preempt_lazy_count; /* 0 => preemptable, <0 => bug */
- 	int			cpu;		/* cpu */
  };
  
-@@ -112,6 +113,7 @@ static inline struct thread_info *current_thread_info(void)
- #define TIF_NEED_RESCHED	1
- #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
+ #define INIT_THREAD_INFO(tsk)						\
+@@ -82,6 +83,7 @@
  #define TIF_FOREIGN_FPSTATE	3	/* CPU's FP state is not current's */
-+#define TIF_NEED_RESCHED_LAZY	4
+ #define TIF_UPROBE		4	/* uprobe breakpoint or singlestep */
+ #define TIF_FSCHECK		5	/* Check FS is USER_DS on return */
++#define TIF_NEED_RESCHED_LAZY	6
  #define TIF_NOHZ		7
  #define TIF_SYSCALL_TRACE	8
  #define TIF_SYSCALL_AUDIT	9
-@@ -127,6 +129,7 @@ static inline struct thread_info *current_thread_info(void)
+@@ -98,6 +100,7 @@
  #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
  #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
  #define _TIF_FOREIGN_FPSTATE	(1 << TIF_FOREIGN_FPSTATE)
@@ -1203,45 +1039,63 @@ index e9ea5a6bd449..6c500ad63c6a 100644
  #define _TIF_NOHZ		(1 << TIF_NOHZ)
  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
  #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
-@@ -135,7 +138,9 @@ static inline struct thread_info *current_thread_info(void)
- #define _TIF_32BIT		(1 << TIF_32BIT)
+@@ -109,8 +112,9 @@
  
  #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
--				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
-+				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
-+				 _TIF_NEED_RESCHED_LAZY)
-+#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+ 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+-				 _TIF_UPROBE | _TIF_FSCHECK)
++				 _TIF_UPROBE | _TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY)
  
++#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
  #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
  				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
-diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
-index 4a2f0f0fef32..6bf2bc17c400 100644
---- a/arch/arm64/kernel/asm-offsets.c
-+++ b/arch/arm64/kernel/asm-offsets.c
-@@ -38,6 +38,7 @@ int main(void)
+ 				 _TIF_NOHZ)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/Kconfig linux-4.14/arch/arm64/Kconfig
+--- linux-4.14.orig/arch/arm64/Kconfig	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/arm64/Kconfig	2018-09-05 11:05:07.000000000 +0200
+@@ -103,6 +103,7 @@
+ 	select HAVE_PERF_EVENTS
+ 	select HAVE_PERF_REGS
+ 	select HAVE_PERF_USER_STACK_DUMP
++	select HAVE_PREEMPT_LAZY
+ 	select HAVE_REGS_AND_STACK_ACCESS_API
+ 	select HAVE_RCU_TABLE_FREE
+ 	select HAVE_SYSCALL_TRACEPOINTS
+@@ -791,7 +792,7 @@
+ 
+ config XEN
+ 	bool "Xen guest support on ARM64"
+-	depends on ARM64 && OF
++	depends on ARM64 && OF && !PREEMPT_RT_FULL
+ 	select SWIOTLB_XEN
+ 	select PARAVIRT
+ 	help
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/kernel/asm-offsets.c linux-4.14/arch/arm64/kernel/asm-offsets.c
+--- linux-4.14.orig/arch/arm64/kernel/asm-offsets.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/arm64/kernel/asm-offsets.c	2018-09-05 11:05:07.000000000 +0200
+@@ -39,6 +39,7 @@
    BLANK();
-   DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
-   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
-+  DEFINE(TI_PREEMPT_LAZY,	offsetof(struct thread_info, preempt_lazy_count));
-   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
-   DEFINE(TI_TASK,		offsetof(struct thread_info, task));
-   DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
-diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
-index 79b0fe24d5b7..f3c959ade308 100644
---- a/arch/arm64/kernel/entry.S
-+++ b/arch/arm64/kernel/entry.S
-@@ -428,11 +428,16 @@ ENDPROC(el1_sync)
+   DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
+   DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
++  DEFINE(TSK_TI_PREEMPT_LAZY,	offsetof(struct task_struct, thread_info.preempt_lazy_count));
+   DEFINE(TSK_TI_ADDR_LIMIT,	offsetof(struct task_struct, thread_info.addr_limit));
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+   DEFINE(TSK_TI_TTBR0,		offsetof(struct task_struct, thread_info.ttbr0));
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/kernel/entry.S linux-4.14/arch/arm64/kernel/entry.S
+--- linux-4.14.orig/arch/arm64/kernel/entry.S	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/arm64/kernel/entry.S	2018-09-05 11:05:07.000000000 +0200
+@@ -637,11 +637,16 @@
  
  #ifdef CONFIG_PREEMPT
- 	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
+ 	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
 -	cbnz	w24, 1f				// preempt count != 0
 +	cbnz	w24, 2f				// preempt count != 0
- 	ldr	x0, [tsk, #TI_FLAGS]		// get flags
+ 	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
 -	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
 -	bl	el1_preempt
 +	tbnz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
 +
-+	ldr	w24, [tsk, #TI_PREEMPT_LAZY]	// get preempt lazy count
++	ldr	w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count
 +	cbnz	w24, 2f				// preempt lazy count != 0
 +	tbz	x0, #TIF_NEED_RESCHED_LAZY, 2f	// needs rescheduling?
  1:
@@ -1250,32 +1104,144 @@ index 79b0fe24d5b7..f3c959ade308 100644
  #endif
  #ifdef CONFIG_TRACE_IRQFLAGS
  	bl	trace_hardirqs_on
-@@ -446,6 +451,7 @@ ENDPROC(el1_irq)
+@@ -655,6 +660,7 @@
  1:	bl	preempt_schedule_irq		// irq en/disable is done inside
- 	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
+ 	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new tasks TI_FLAGS
  	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
 +	tbnz	x0, #TIF_NEED_RESCHED_LAZY, 1b	// needs rescheduling?
  	ret	x24
  #endif
  
-diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
-index 404dd67080b9..639dc6d12e72 100644
---- a/arch/arm64/kernel/signal.c
-+++ b/arch/arm64/kernel/signal.c
-@@ -409,7 +409,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
- 	 */
- 	trace_hardirqs_off();
- 	do {
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/kernel/signal.c linux-4.14/arch/arm64/kernel/signal.c
+--- linux-4.14.orig/arch/arm64/kernel/signal.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/arm64/kernel/signal.c	2018-09-05 11:05:07.000000000 +0200
+@@ -756,7 +756,7 @@
+ 		/* Check valid user FS if needed */
+ 		addr_limit_user_check();
+ 
 -		if (thread_flags & _TIF_NEED_RESCHED) {
 +		if (thread_flags & _TIF_NEED_RESCHED_MASK) {
  			schedule();
  		} else {
  			local_irq_enable();
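
The arm64 hunks above carry the core of the -rt "lazy preemption" scheme: TIF_NEED_RESCHED still forces an immediate reschedule, while the new TIF_NEED_RESCHED_LAZY defers preemption of normal tasks for as long as the per-thread preempt_lazy_count is non-zero. On the way back to user space both bits have to be honoured, which is what _TIF_NEED_RESCHED_MASK expresses. A reduced C sketch of that decision (names follow the -rt series; this is a sketch, not literal kernel source):

    #define _TIF_NEED_RESCHED_MASK  (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

    do {
            if (thread_flags & _TIF_NEED_RESCHED_MASK)
                    schedule();             /* either bit triggers it here */
            else
                    local_irq_enable();     /* ...deliver signals etc. */
            thread_flags = READ_ONCE(current_thread_info()->flags);
    } while (thread_flags & _TIF_WORK_MASK);
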
-diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
-index b3c5bde43d34..8122bf058de0 100644
---- a/arch/mips/Kconfig
-+++ b/arch/mips/Kconfig
-@@ -2514,7 +2514,7 @@ config MIPS_ASID_BITS_VARIABLE
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/blackfin/include/asm/spinlock_types.h linux-4.14/arch/blackfin/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/blackfin/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/blackfin/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -7,10 +7,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ #include <asm/rwlock.h>
+ 
+ typedef struct {
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/hexagon/include/asm/spinlock_types.h linux-4.14/arch/hexagon/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/hexagon/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/hexagon/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -21,10 +21,6 @@
+ #ifndef _ASM_SPINLOCK_TYPES_H
+ #define _ASM_SPINLOCK_TYPES_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ 	volatile unsigned int lock;
+ } arch_spinlock_t;
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/ia64/include/asm/spinlock_types.h linux-4.14/arch/ia64/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/ia64/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/ia64/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -2,10 +2,6 @@
+ #ifndef _ASM_IA64_SPINLOCK_TYPES_H
+ #define _ASM_IA64_SPINLOCK_TYPES_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ 	volatile unsigned int lock;
+ } arch_spinlock_t;
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/ia64/kernel/mca.c linux-4.14/arch/ia64/kernel/mca.c
+--- linux-4.14.orig/arch/ia64/kernel/mca.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/ia64/kernel/mca.c	2018-09-05 11:05:07.000000000 +0200
+@@ -1824,7 +1824,7 @@
+ 	ti->cpu = cpu;
+ 	p->stack = ti;
+ 	p->state = TASK_UNINTERRUPTIBLE;
+-	cpumask_set_cpu(cpu, &p->cpus_allowed);
++	cpumask_set_cpu(cpu, &p->cpus_mask);
+ 	INIT_LIST_HEAD(&p->tasks);
+ 	p->parent = p->real_parent = p->group_leader = p;
+ 	INIT_LIST_HEAD(&p->children);
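
The cpus_allowed edits in this and the following hunks are one rename applied tree-wide: the -rt series splits task affinity into cpus_mask (the real, user-visible mask, written as in the mca.c hunk above) and cpus_ptr, a pointer that readers dereference instead. Normally cpus_ptr points at the task's own cpus_mask, but while migration is disabled it can be redirected to a single-CPU mask without destroying the user's setting. A sketch of the arrangement (field names taken from the hunks here; the surrounding struct is illustrative):

    struct task_struct {
            /* ... */
            const cpumask_t *cpus_ptr;      /* what readers look at */
            cpumask_t        cpus_mask;     /* the real affinity */
            int              nr_cpus_allowed;
            /* ... */
    };

    /* writers keep updating the mask: */
    cpumask_set_cpu(cpu, &p->cpus_mask);
    /* readers go through the pointer and stay correct even while the
     * task is temporarily pinned to one CPU: */
    if (cpumask_test_cpu(smp_processor_id(), p->cpus_ptr))
            /* ... */;
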
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/Kconfig linux-4.14/arch/Kconfig
+--- linux-4.14.orig/arch/Kconfig	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/Kconfig	2018-09-05 11:05:07.000000000 +0200
+@@ -20,6 +20,7 @@
+ 	tristate "OProfile system profiling"
+ 	depends on PROFILING
+ 	depends on HAVE_OPROFILE
++	depends on !PREEMPT_RT_FULL
+ 	select RING_BUFFER
+ 	select RING_BUFFER_ALLOW_SWAP
+ 	help
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/m32r/include/asm/spinlock_types.h linux-4.14/arch/m32r/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/m32r/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/m32r/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -2,10 +2,6 @@
+ #ifndef _ASM_M32R_SPINLOCK_TYPES_H
+ #define _ASM_M32R_SPINLOCK_TYPES_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ 	volatile int slock;
+ } arch_spinlock_t;
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/metag/include/asm/spinlock_types.h linux-4.14/arch/metag/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/metag/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/metag/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -2,10 +2,6 @@
+ #ifndef _ASM_METAG_SPINLOCK_TYPES_H
+ #define _ASM_METAG_SPINLOCK_TYPES_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ 	volatile unsigned int lock;
+ } arch_spinlock_t;
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mips/include/asm/switch_to.h linux-4.14/arch/mips/include/asm/switch_to.h
+--- linux-4.14.orig/arch/mips/include/asm/switch_to.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/mips/include/asm/switch_to.h	2018-09-05 11:05:07.000000000 +0200
+@@ -42,7 +42,7 @@
+  * inline to try to keep the overhead down. If we have been forced to run on
+  * a "CPU" with an FPU because of a previous high level of FP computation,
+  * but did not actually use the FPU during the most recent time-slice (CU1
+- * isn't set), we undo the restriction on cpus_allowed.
++ * isn't set), we undo the restriction on cpus_mask.
+  *
+  * We're not calling set_cpus_allowed() here, because we have no need to
+  * force prompt migration - we're already switching the current CPU to a
+@@ -57,7 +57,7 @@
+ 	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
+ 	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
+ 		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
+-		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
++		prev->cpus_mask = prev->thread.user_cpus_allowed;	\
+ 	}								\
+ 	next->thread.emulated_fp = 0;					\
+ } while(0)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mips/Kconfig linux-4.14/arch/mips/Kconfig
+--- linux-4.14.orig/arch/mips/Kconfig	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/mips/Kconfig	2018-09-05 11:05:07.000000000 +0200
+@@ -2519,7 +2519,7 @@
  #
  config HIGHMEM
  	bool "High Memory Support"
@@ -1284,45 +1250,69 @@ index b3c5bde43d34..8122bf058de0 100644
  
  config CPU_SUPPORTS_HIGHMEM
  	bool
-diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
-index 65fba4c34cd7..4b5ba68910e0 100644
---- a/arch/powerpc/Kconfig
-+++ b/arch/powerpc/Kconfig
-@@ -52,10 +52,11 @@ config LOCKDEP_SUPPORT
- 
- config RWSEM_GENERIC_SPINLOCK
- 	bool
-+	default y if PREEMPT_RT_FULL
- 
- config RWSEM_XCHGADD_ALGORITHM
- 	bool
--	default y
-+	default y if !PREEMPT_RT_FULL
- 
- config GENERIC_LOCKBREAK
- 	bool
-@@ -134,6 +135,7 @@ config PPC
- 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- 	select GENERIC_STRNCPY_FROM_USER
- 	select GENERIC_STRNLEN_USER
-+	select HAVE_PREEMPT_LAZY
- 	select HAVE_MOD_ARCH_SPECIFIC
- 	select MODULES_USE_ELF_RELA
- 	select CLONE_BACKWARDS
-@@ -321,7 +323,7 @@ menu "Kernel options"
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mips/kernel/mips-mt-fpaff.c linux-4.14/arch/mips/kernel/mips-mt-fpaff.c
+--- linux-4.14.orig/arch/mips/kernel/mips-mt-fpaff.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/mips/kernel/mips-mt-fpaff.c	2018-09-05 11:05:07.000000000 +0200
+@@ -177,7 +177,7 @@
+ 	if (retval)
+ 		goto out_unlock;
  
- config HIGHMEM
- 	bool "High memory support"
--	depends on PPC32
-+	depends on PPC32 && !PREEMPT_RT_FULL
+-	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
++	cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
+ 	cpumask_and(&mask, &allowed, cpu_active_mask);
  
- source kernel/Kconfig.hz
- source kernel/Kconfig.preempt
-diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
-index 87e4b2d8dcd4..981e501a4359 100644
---- a/arch/powerpc/include/asm/thread_info.h
-+++ b/arch/powerpc/include/asm/thread_info.h
-@@ -43,6 +43,8 @@ struct thread_info {
+ out_unlock:
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mips/kernel/traps.c linux-4.14/arch/mips/kernel/traps.c
+--- linux-4.14.orig/arch/mips/kernel/traps.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/mips/kernel/traps.c	2018-09-05 11:05:07.000000000 +0200
+@@ -1193,12 +1193,12 @@
+ 		 * restricted the allowed set to exclude any CPUs with FPUs,
+ 		 * we'll skip the procedure.
+ 		 */
+-		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
++		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
+ 			cpumask_t tmask;
+ 
+ 			current->thread.user_cpus_allowed
+-				= current->cpus_allowed;
+-			cpumask_and(&tmask, &current->cpus_allowed,
++				= current->cpus_mask;
++			cpumask_and(&tmask, &current->cpus_mask,
+ 				    &mt_fpu_cpumask);
+ 			set_cpus_allowed_ptr(current, &tmask);
+ 			set_thread_flag(TIF_FPUBOUND);
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mn10300/include/asm/spinlock_types.h linux-4.14/arch/mn10300/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/mn10300/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/mn10300/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -2,10 +2,6 @@
+ #ifndef _ASM_SPINLOCK_TYPES_H
+ #define _ASM_SPINLOCK_TYPES_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct arch_spinlock {
+ 	unsigned int slock;
+ } arch_spinlock_t;
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/include/asm/spinlock_types.h linux-4.14/arch/powerpc/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/powerpc/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/powerpc/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -2,10 +2,6 @@
+ #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
+ #define _ASM_POWERPC_SPINLOCK_TYPES_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ 	volatile unsigned int slock;
+ } arch_spinlock_t;
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/include/asm/thread_info.h linux-4.14/arch/powerpc/include/asm/thread_info.h
+--- linux-4.14.orig/arch/powerpc/include/asm/thread_info.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/powerpc/include/asm/thread_info.h	2018-09-05 11:05:07.000000000 +0200
+@@ -36,6 +36,8 @@
  	int		cpu;			/* cpu we're on */
  	int		preempt_count;		/* 0 => preemptable,
  						   <0 => BUG */
@@ -1331,7 +1321,7 @@ index 87e4b2d8dcd4..981e501a4359 100644
  	unsigned long	local_flags;		/* private flags for thread */
  #ifdef CONFIG_LIVEPATCH
  	unsigned long *livepatch_sp;
-@@ -88,8 +90,7 @@ static inline struct thread_info *current_thread_info(void)
+@@ -81,8 +83,7 @@
  #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
  #define TIF_SIGPENDING		1	/* signal pending */
  #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
@@ -1340,8 +1330,8 @@ index 87e4b2d8dcd4..981e501a4359 100644
 +#define TIF_NEED_RESCHED_LAZY	3	/* lazy rescheduling necessary */
  #define TIF_32BIT		4	/* 32 bit binary */
  #define TIF_RESTORE_TM		5	/* need to restore TM FP/VEC/VSX */
- #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
-@@ -107,6 +108,8 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_PATCH_PENDING	6	/* pending live patching update */
+@@ -101,6 +102,8 @@
  #if defined(CONFIG_PPC64)
  #define TIF_ELF2ABI		18	/* function descriptors must die! */
  #endif
@@ -1350,7 +1340,7 @@ index 87e4b2d8dcd4..981e501a4359 100644
  
  /* as above, but as bit values */
  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
-@@ -125,14 +128,16 @@ static inline struct thread_info *current_thread_info(void)
+@@ -120,14 +123,16 @@
  #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
  #define _TIF_EMULATE_STACK_STORE	(1<<TIF_EMULATE_STACK_STORE)
  #define _TIF_NOHZ		(1<<TIF_NOHZ)
@@ -1361,30 +1351,61 @@ index 87e4b2d8dcd4..981e501a4359 100644
  
  #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
  				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
--				 _TIF_RESTORE_TM)
-+				 _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY)
+-				 _TIF_RESTORE_TM | _TIF_PATCH_PENDING)
++				 _TIF_RESTORE_TM | _TIF_PATCH_PENDING | _TIF_NEED_RESCHED_LAZY)
  #define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)
 +#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
  
  /* Bits in local_flags */
  /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
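
Besides the extra TIF bit, powerpc's thread_info grows a preempt_lazy_count that parallels preempt_count: while it is non-zero, a lazy reschedule request is recorded but not acted upon. The helpers that manipulate it live in the generic -rt code rather than in this hunk; their intended pairing looks roughly like this (hedged sketch, not the patch's code):

    static inline void preempt_lazy_disable(void)       /* sketch */
    {
            current_thread_info()->preempt_lazy_count++;
            barrier();
    }

    static inline void preempt_lazy_enable(void)        /* sketch */
    {
            barrier();
            if (--current_thread_info()->preempt_lazy_count == 0 &&
                test_thread_flag(TIF_NEED_RESCHED_LAZY))
                    preempt_schedule();  /* act on the deferred request */
    }
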
-diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
-index c833d88c423d..96e9fbc3f684 100644
---- a/arch/powerpc/kernel/asm-offsets.c
-+++ b/arch/powerpc/kernel/asm-offsets.c
-@@ -156,6 +156,7 @@ int main(void)
- 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- 	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
- 	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-+	DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
- 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
- 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
- 
-diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
-index 3841d749a430..6dbaeff192b9 100644
---- a/arch/powerpc/kernel/entry_32.S
-+++ b/arch/powerpc/kernel/entry_32.S
-@@ -835,7 +835,14 @@ user_exc_return:		/* r10 contains MSR_KERNEL here */
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/Kconfig linux-4.14/arch/powerpc/Kconfig
+--- linux-4.14.orig/arch/powerpc/Kconfig	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/powerpc/Kconfig	2018-09-05 11:05:07.000000000 +0200
+@@ -111,10 +111,11 @@
+ 
+ config RWSEM_GENERIC_SPINLOCK
+ 	bool
++	default y if PREEMPT_RT_FULL
+ 
+ config RWSEM_XCHGADD_ALGORITHM
+ 	bool
+-	default y
++	default y if !PREEMPT_RT_FULL
+ 
+ config GENERIC_LOCKBREAK
+ 	bool
+@@ -215,6 +216,7 @@
+ 	select HAVE_HARDLOCKUP_DETECTOR_PERF	if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ 	select HAVE_PERF_REGS
+ 	select HAVE_PERF_USER_STACK_DUMP
++	select HAVE_PREEMPT_LAZY
+ 	select HAVE_RCU_TABLE_FREE		if SMP
+ 	select HAVE_REGS_AND_STACK_ACCESS_API
+ 	select HAVE_SYSCALL_TRACEPOINTS
+@@ -390,7 +392,7 @@
+ 
+ config HIGHMEM
+ 	bool "High memory support"
+-	depends on PPC32
++	depends on PPC32 && !PREEMPT_RT_FULL
+ 
+ source kernel/Kconfig.hz
+ source kernel/Kconfig.preempt
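
Flipping RWSEM_XCHGADD_ALGORITHM off in favour of RWSEM_GENERIC_SPINLOCK under PREEMPT_RT_FULL drops the lock-free xchg-add fast path and selects the lib/ implementation, whose entire state sits behind a raw spinlock, a shape the -rt locking rework can substitute more easily. For orientation, the generic flavour is approximately:

    struct rw_semaphore {                   /* generic/spinlock flavour, sketch */
            __s32            count;         /* >0 readers, -1 writer, 0 free */
            raw_spinlock_t   wait_lock;
            struct list_head wait_list;
    };
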
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/asm-offsets.c linux-4.14/arch/powerpc/kernel/asm-offsets.c
+--- linux-4.14.orig/arch/powerpc/kernel/asm-offsets.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/powerpc/kernel/asm-offsets.c	2018-09-05 11:05:07.000000000 +0200
+@@ -156,6 +156,7 @@
+ 	OFFSET(TI_FLAGS, thread_info, flags);
+ 	OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
+ 	OFFSET(TI_PREEMPT, thread_info, preempt_count);
++	OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
+ 	OFFSET(TI_TASK, thread_info, task);
+ 	OFFSET(TI_CPU, thread_info, cpu);
+ 
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/entry_32.S linux-4.14/arch/powerpc/kernel/entry_32.S
+--- linux-4.14.orig/arch/powerpc/kernel/entry_32.S	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/powerpc/kernel/entry_32.S	2018-09-05 11:05:07.000000000 +0200
+@@ -866,7 +866,14 @@
  	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
  	bne	restore
  	andi.	r8,r8,_TIF_NEED_RESCHED
@@ -1399,7 +1420,7 @@ index 3841d749a430..6dbaeff192b9 100644
  	lwz	r3,_MSR(r1)
  	andi.	r0,r3,MSR_EE	/* interrupts off? */
  	beq	restore		/* don't schedule if so */
-@@ -846,11 +853,11 @@ user_exc_return:		/* r10 contains MSR_KERNEL here */
+@@ -877,11 +884,11 @@
  	 */
  	bl	trace_hardirqs_off
  #endif
@@ -1414,7 +1435,7 @@ index 3841d749a430..6dbaeff192b9 100644
  #ifdef CONFIG_TRACE_IRQFLAGS
  	/* And now, to properly rebalance the above, we tell lockdep they
  	 * are being turned back on, which will happen when we return
-@@ -1171,7 +1178,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
+@@ -1204,7 +1211,7 @@
  #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
  
  do_work:			/* r10 contains MSR_KERNEL here */
@@ -1423,7 +1444,7 @@ index 3841d749a430..6dbaeff192b9 100644
  	beq	do_user_signal
  
  do_resched:			/* r10 contains MSR_KERNEL here */
-@@ -1192,7 +1199,7 @@ do_resched:			/* r10 contains MSR_KERNEL here */
+@@ -1225,7 +1232,7 @@
  	MTMSRD(r10)		/* disable interrupts */
  	CURRENT_THREAD_INFO(r9, r1)
  	lwz	r9,TI_FLAGS(r9)
@@ -1432,11 +1453,10 @@ index 3841d749a430..6dbaeff192b9 100644
  	bne-	do_resched
  	andi.	r0,r9,_TIF_USER_WORK_MASK
  	beq	restore_user
-diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
-index 6432d4bf08c8..5509a26f1070 100644
---- a/arch/powerpc/kernel/entry_64.S
-+++ b/arch/powerpc/kernel/entry_64.S
-@@ -656,7 +656,7 @@ _GLOBAL(ret_from_except_lite)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/entry_64.S linux-4.14/arch/powerpc/kernel/entry_64.S
+--- linux-4.14.orig/arch/powerpc/kernel/entry_64.S	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/powerpc/kernel/entry_64.S	2018-09-05 11:05:07.000000000 +0200
+@@ -690,7 +690,7 @@
  	bl	restore_math
  	b	restore
  #endif
@@ -1445,29 +1465,27 @@ index 6432d4bf08c8..5509a26f1070 100644
  	beq	2f
  	bl	restore_interrupts
  	SCHEDULE_USER
-@@ -718,10 +718,18 @@ _GLOBAL(ret_from_except_lite)
+@@ -752,10 +752,18 @@
  
  #ifdef CONFIG_PREEMPT
  	/* Check if we need to preempt */
--	andi.	r0,r4,_TIF_NEED_RESCHED
--	beq+	restore
--	/* Check that preempt_count() == 0 and interrupts are enabled */
- 	lwz	r8,TI_PREEMPT(r9)
++	lwz	r8,TI_PREEMPT(r9)
 +	cmpwi	0,r8,0		/* if non-zero, just restore regs and return */
 +	bne	restore
-+	andi.	r0,r4,_TIF_NEED_RESCHED
+ 	andi.	r0,r4,_TIF_NEED_RESCHED
 +	bne+	check_count
 +
 +	andi.	r0,r4,_TIF_NEED_RESCHED_LAZY
-+	beq+	restore
+ 	beq+	restore
 +	lwz	r8,TI_PREEMPT_LAZY(r9)
 +
-+	/* Check that preempt_count() == 0 and interrupts are enabled */
+ 	/* Check that preempt_count() == 0 and interrupts are enabled */
+-	lwz	r8,TI_PREEMPT(r9)
 +check_count:
  	cmpwi	cr1,r8,0
  	ld	r0,SOFTE(r1)
  	cmpdi	r0,0
-@@ -738,7 +746,7 @@ _GLOBAL(ret_from_except_lite)
+@@ -772,7 +780,7 @@
  	/* Re-test flags and eventually loop */
  	CURRENT_THREAD_INFO(r9, r1)
  	ld	r4,TI_FLAGS(r9)
@@ -1476,11 +1494,10 @@ index 6432d4bf08c8..5509a26f1070 100644
  	bne	1b
  
  	/*
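
The reworked preemption block in ret_from_except_lite above implements a two-level test. As C pseudocode (a paraphrase of the assembly for readability, with illustrative helper names):

    if (preempt_count())
            goto restore;                     /* hard preempt-off section */
    if (!(flags & _TIF_NEED_RESCHED)) {
            if (!(flags & _TIF_NEED_RESCHED_LAZY))
                    goto restore;             /* nothing requested */
            if (preempt_lazy_count())
                    goto restore;             /* lazy request stays deferred */
    }
    if (irqs_soft_enabled())                  /* the SOFTE(r1) check */
            preempt_schedule_irq();
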
-diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
-index 3c05c311e35e..f83f6ac1274d 100644
---- a/arch/powerpc/kernel/irq.c
-+++ b/arch/powerpc/kernel/irq.c
-@@ -638,6 +638,7 @@ void irq_ctx_init(void)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/irq.c linux-4.14/arch/powerpc/kernel/irq.c
+--- linux-4.14.orig/arch/powerpc/kernel/irq.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/powerpc/kernel/irq.c	2018-09-05 11:05:07.000000000 +0200
+@@ -693,6 +693,7 @@
  	}
  }
  
@@ -1488,7 +1505,7 @@ index 3c05c311e35e..f83f6ac1274d 100644
  void do_softirq_own_stack(void)
  {
  	struct thread_info *curtp, *irqtp;
-@@ -655,6 +656,7 @@ void do_softirq_own_stack(void)
+@@ -710,6 +711,7 @@
  	if (irqtp->flags)
  		set_bits(irqtp->flags, &curtp->flags);
  }
@@ -1496,10 +1513,9 @@ index 3c05c311e35e..f83f6ac1274d 100644
  
  irq_hw_number_t virq_to_hw(unsigned int virq)
  {
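
The #ifndef CONFIG_PREEMPT_RT_FULL brackets around call_do_softirq/do_softirq_own_stack (here and in the sh/sparc/x86 hunks below) exist because -rt does not run softirqs from hard-interrupt context on a separate stack; they execute in the context of the task that raised them, or in ksoftirqd. Compiling the helpers out leaves the core with plain task-context execution, roughly:

    /* sketch of the resulting split: */
    #ifndef CONFIG_PREEMPT_RT_FULL
    void do_softirq_own_stack(void);    /* arch stack-switching helper */
    #else
    /* softirqs run in task context; no stack switch, helper unused */
    #endif
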
-diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
-index 030d72df5dd5..b471a709e100 100644
---- a/arch/powerpc/kernel/misc_32.S
-+++ b/arch/powerpc/kernel/misc_32.S
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/misc_32.S linux-4.14/arch/powerpc/kernel/misc_32.S
+--- linux-4.14.orig/arch/powerpc/kernel/misc_32.S	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/powerpc/kernel/misc_32.S	2018-09-05 11:05:07.000000000 +0200
 @@ -41,6 +41,7 @@
   * We store the saved ksp_limit in the unused part
   * of the STACK_FRAME_OVERHEAD
@@ -1508,7 +1524,7 @@ index 030d72df5dd5..b471a709e100 100644
  _GLOBAL(call_do_softirq)
  	mflr	r0
  	stw	r0,4(r1)
-@@ -57,6 +58,7 @@ _GLOBAL(call_do_softirq)
+@@ -57,6 +58,7 @@
  	stw	r10,THREAD+KSP_LIMIT(r2)
  	mtlr	r0
  	blr
@@ -1516,10 +1532,9 @@ index 030d72df5dd5..b471a709e100 100644
  
  /*
   * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
-diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
-index 4f178671f230..39e7d84a3492 100644
---- a/arch/powerpc/kernel/misc_64.S
-+++ b/arch/powerpc/kernel/misc_64.S
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/misc_64.S linux-4.14/arch/powerpc/kernel/misc_64.S
+--- linux-4.14.orig/arch/powerpc/kernel/misc_64.S	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/powerpc/kernel/misc_64.S	2018-09-05 11:05:07.000000000 +0200
 @@ -31,6 +31,7 @@
  
  	.text
@@ -1528,7 +1543,7 @@ index 4f178671f230..39e7d84a3492 100644
  _GLOBAL(call_do_softirq)
  	mflr	r0
  	std	r0,16(r1)
-@@ -41,6 +42,7 @@ _GLOBAL(call_do_softirq)
+@@ -41,6 +42,7 @@
  	ld	r0,16(r1)
  	mtlr	r0
  	blr
@@ -1536,11 +1551,10 @@ index 4f178671f230..39e7d84a3492 100644
  
  _GLOBAL(call_do_irq)
  	mflr	r0
-diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
-index 029be26b5a17..9528089ea142 100644
---- a/arch/powerpc/kvm/Kconfig
-+++ b/arch/powerpc/kvm/Kconfig
-@@ -175,6 +175,7 @@ config KVM_E500MC
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kvm/Kconfig linux-4.14/arch/powerpc/kvm/Kconfig
+--- linux-4.14.orig/arch/powerpc/kvm/Kconfig	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/powerpc/kvm/Kconfig	2018-09-05 11:05:07.000000000 +0200
+@@ -177,6 +177,7 @@
  config KVM_MPIC
  	bool "KVM in-kernel MPIC emulation"
  	depends on KVM && E500
@@ -1548,11 +1562,22 @@ index 029be26b5a17..9528089ea142 100644
  	select HAVE_KVM_IRQCHIP
  	select HAVE_KVM_IRQFD
  	select HAVE_KVM_IRQ_ROUTING
-diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
-index e48462447ff0..2670cee66064 100644
---- a/arch/powerpc/platforms/ps3/device-init.c
-+++ b/arch/powerpc/platforms/ps3/device-init.c
-@@ -752,7 +752,7 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev,
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/platforms/cell/spufs/sched.c linux-4.14/arch/powerpc/platforms/cell/spufs/sched.c
+--- linux-4.14.orig/arch/powerpc/platforms/cell/spufs/sched.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/powerpc/platforms/cell/spufs/sched.c	2018-09-05 11:05:07.000000000 +0200
+@@ -141,7 +141,7 @@
+ 	 * runqueue. The context will be rescheduled on the proper node
+ 	 * if it is timesliced or preempted.
+ 	 */
+-	cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);
++	cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);
+ 
+ 	/* Save the current cpu id for spu interrupt routing. */
+ 	ctx->last_ran = raw_smp_processor_id();
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/platforms/ps3/device-init.c linux-4.14/arch/powerpc/platforms/ps3/device-init.c
+--- linux-4.14.orig/arch/powerpc/platforms/ps3/device-init.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/powerpc/platforms/ps3/device-init.c	2018-09-05 11:05:07.000000000 +0200
+@@ -752,7 +752,7 @@
  	}
  	pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
  
@@ -1561,11 +1586,38 @@ index e48462447ff0..2670cee66064 100644
  				       dev->done.done || kthread_should_stop());
  	if (kthread_should_stop())
  		res = -EINTR;
-diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
-index 6c0378c0b8b5..abd58b4dff97 100644
---- a/arch/sh/kernel/irq.c
-+++ b/arch/sh/kernel/irq.c
-@@ -147,6 +147,7 @@ void irq_ctx_exit(int cpu)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/s390/include/asm/spinlock_types.h linux-4.14/arch/s390/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/s390/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/s390/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -2,10 +2,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ 	int lock;
+ } __attribute__ ((aligned (4))) arch_spinlock_t;
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/sh/include/asm/spinlock_types.h linux-4.14/arch/sh/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/sh/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/sh/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -2,10 +2,6 @@
+ #ifndef __ASM_SH_SPINLOCK_TYPES_H
+ #define __ASM_SH_SPINLOCK_TYPES_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ 	volatile unsigned int lock;
+ } arch_spinlock_t;
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/sh/kernel/irq.c linux-4.14/arch/sh/kernel/irq.c
+--- linux-4.14.orig/arch/sh/kernel/irq.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/sh/kernel/irq.c	2018-09-05 11:05:07.000000000 +0200
+@@ -148,6 +148,7 @@
  	hardirq_ctx[cpu] = NULL;
  }
  
@@ -1573,7 +1625,7 @@ index 6c0378c0b8b5..abd58b4dff97 100644
  void do_softirq_own_stack(void)
  {
  	struct thread_info *curctx;
-@@ -174,6 +175,7 @@ void do_softirq_own_stack(void)
+@@ -175,6 +176,7 @@
  		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
  	);
  }
@@ -1581,11 +1633,10 @@ index 6c0378c0b8b5..abd58b4dff97 100644
  #else
  static inline void handle_one_irq(unsigned int irq)
  {
-diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
-index 165ecdd24d22..b68a464a22be 100644
---- a/arch/sparc/Kconfig
-+++ b/arch/sparc/Kconfig
-@@ -194,12 +194,10 @@ config NR_CPUS
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/sparc/Kconfig linux-4.14/arch/sparc/Kconfig
+--- linux-4.14.orig/arch/sparc/Kconfig	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/sparc/Kconfig	2018-09-05 11:05:07.000000000 +0200
+@@ -206,12 +206,10 @@
  source kernel/Kconfig.hz
  
  config RWSEM_GENERIC_SPINLOCK
@@ -1600,11 +1651,10 @@ index 165ecdd24d22..b68a464a22be 100644
  
  config GENERIC_HWEIGHT
  	bool
-diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
-index 34a7930b76ef..773740521008 100644
---- a/arch/sparc/kernel/irq_64.c
-+++ b/arch/sparc/kernel/irq_64.c
-@@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/sparc/kernel/irq_64.c linux-4.14/arch/sparc/kernel/irq_64.c
+--- linux-4.14.orig/arch/sparc/kernel/irq_64.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/sparc/kernel/irq_64.c	2018-09-05 11:05:07.000000000 +0200
+@@ -855,6 +855,7 @@
  	set_irq_regs(old_regs);
  }
  
@@ -1612,7 +1662,7 @@ index 34a7930b76ef..773740521008 100644
  void do_softirq_own_stack(void)
  {
  	void *orig_sp, *sp = softirq_stack[smp_processor_id()];
-@@ -868,6 +869,7 @@ void do_softirq_own_stack(void)
+@@ -869,6 +870,7 @@
  	__asm__ __volatile__("mov %0, %%sp"
  			     : : "r" (orig_sp));
  }
@@ -1620,65 +1670,94 @@ index 34a7930b76ef..773740521008 100644
  
  #ifdef CONFIG_HOTPLUG_CPU
  void fixup_irqs(void)
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index bada636d1065..f8a995c90c01 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -17,6 +17,7 @@ config X86_64
- ### Arch settings
- config X86
- 	def_bool y
-+	select HAVE_PREEMPT_LAZY
- 	select ACPI_LEGACY_TABLES_LOOKUP	if ACPI
- 	select ACPI_SYSTEM_POWER_STATES_SUPPORT	if ACPI
- 	select ANON_INODES
-@@ -232,8 +233,11 @@ config ARCH_MAY_HAVE_PC_FDC
- 	def_bool y
- 	depends on ISA_DMA_API
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/tile/include/asm/setup.h linux-4.14/arch/tile/include/asm/setup.h
+--- linux-4.14.orig/arch/tile/include/asm/setup.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/tile/include/asm/setup.h	2018-09-05 11:05:07.000000000 +0200
+@@ -49,7 +49,7 @@
+ 
+ /* Hook hardwall code into changes in affinity. */
+ #define arch_set_cpus_allowed(p, new_mask) do { \
+-	if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
++	if (!cpumask_equal(p->cpus_ptr, new_mask)) \
+ 		hardwall_deactivate_all(p); \
+ } while (0)
+ #endif
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/tile/include/asm/spinlock_types.h linux-4.14/arch/tile/include/asm/spinlock_types.h
+--- linux-4.14.orig/arch/tile/include/asm/spinlock_types.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/tile/include/asm/spinlock_types.h	2018-09-05 11:05:07.000000000 +0200
+@@ -15,10 +15,6 @@
+ #ifndef _ASM_TILE_SPINLOCK_TYPES_H
+ #define _ASM_TILE_SPINLOCK_TYPES_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ #ifdef __tilegx__
+ 
+ /* Low 15 bits are "next"; high 15 bits are "current". */
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/tile/kernel/hardwall.c linux-4.14/arch/tile/kernel/hardwall.c
+--- linux-4.14.orig/arch/tile/kernel/hardwall.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/tile/kernel/hardwall.c	2018-09-05 11:05:07.000000000 +0200
+@@ -590,12 +590,12 @@
+ 	 * Get our affinity; if we're not bound to this tile uniquely,
+ 	 * we can't access the network registers.
+ 	 */
+-	if (cpumask_weight(&p->cpus_allowed) != 1)
++	if (p->nr_cpus_allowed != 1)
+ 		return -EPERM;
  
-+config RWSEM_GENERIC_SPINLOCK
-+	def_bool PREEMPT_RT_FULL
-+
- config RWSEM_XCHGADD_ALGORITHM
--	def_bool y
-+	def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+ 	/* Make sure we are bound to a cpu assigned to this resource. */
+ 	cpu = smp_processor_id();
+-	BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
++	BUG_ON(cpumask_first(p->cpus_ptr) != cpu);
+ 	if (!cpumask_test_cpu(cpu, &info->cpumask))
+ 		return -EINVAL;
  
- config GENERIC_CALIBRATE_DELAY
- 	def_bool y
-@@ -897,7 +901,7 @@ config IOMMU_HELPER
- config MAXSMP
- 	bool "Enable Maximum number of SMP Processors and NUMA Nodes"
- 	depends on X86_64 && SMP && DEBUG_KERNEL
--	select CPUMASK_OFFSTACK
-+	select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
- 	---help---
- 	  Enable maximum number of CPUS and NUMA Nodes for this architecture.
- 	  If unsure, say N.
-diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
-index aa8b0672f87a..2429414bfc71 100644
---- a/arch/x86/crypto/aesni-intel_glue.c
-+++ b/arch/x86/crypto/aesni-intel_glue.c
-@@ -372,14 +372,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
- 	err = blkcipher_walk_virt(desc, &walk);
- 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -621,17 +621,17 @@
+  * Deactivate a task's hardwall.  Must hold lock for hardwall_type.
+  * This method may be called from exit_thread(), so we don't want to
+  * rely on too many fields of struct task_struct still being valid.
+- * We assume the cpus_allowed, pid, and comm fields are still valid.
++ * We assume the nr_cpus_allowed, pid, and comm fields are still valid.
+  */
+ static void _hardwall_deactivate(struct hardwall_type *hwt,
+ 				 struct task_struct *task)
+ {
+ 	struct thread_struct *ts = &task->thread;
+ 
+-	if (cpumask_weight(&task->cpus_allowed) != 1) {
++	if (task->nr_cpus_allowed != 1) {
+ 		pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
+ 		       task->pid, task->comm, hwt->name,
+-		       cpumask_weight(&task->cpus_allowed));
++		       task->nr_cpus_allowed);
+ 		BUG();
+ 	}
+ 
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.14/arch/x86/crypto/aesni-intel_glue.c
+--- linux-4.14.orig/arch/x86/crypto/aesni-intel_glue.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/crypto/aesni-intel_glue.c	2018-09-05 11:05:07.000000000 +0200
+@@ -387,14 +387,14 @@
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
  
 -	kernel_fpu_begin();
  	while ((nbytes = walk.nbytes)) {
 +		kernel_fpu_begin();
  		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
--			      nbytes & AES_BLOCK_MASK);
-+				nbytes & AES_BLOCK_MASK);
+ 			      nbytes & AES_BLOCK_MASK);
 +		kernel_fpu_end();
  		nbytes &= AES_BLOCK_SIZE - 1;
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 		err = skcipher_walk_done(&walk, nbytes);
  	}
 -	kernel_fpu_end();
  
  	return err;
  }
-@@ -396,14 +396,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
- 	err = blkcipher_walk_virt(desc, &walk);
- 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -409,14 +409,14 @@
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
  
 -	kernel_fpu_begin();
  	while ((nbytes = walk.nbytes)) {
@@ -1687,15 +1766,15 @@ index aa8b0672f87a..2429414bfc71 100644
  			      nbytes & AES_BLOCK_MASK);
 +		kernel_fpu_end();
  		nbytes &= AES_BLOCK_SIZE - 1;
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 		err = skcipher_walk_done(&walk, nbytes);
  	}
 -	kernel_fpu_end();
  
  	return err;
  }
-@@ -420,14 +420,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
- 	err = blkcipher_walk_virt(desc, &walk);
- 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -431,14 +431,14 @@
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
  
 -	kernel_fpu_begin();
  	while ((nbytes = walk.nbytes)) {
@@ -1704,15 +1783,15 @@ index aa8b0672f87a..2429414bfc71 100644
  			      nbytes & AES_BLOCK_MASK, walk.iv);
 +		kernel_fpu_end();
  		nbytes &= AES_BLOCK_SIZE - 1;
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 		err = skcipher_walk_done(&walk, nbytes);
  	}
 -	kernel_fpu_end();
  
  	return err;
  }
-@@ -444,14 +444,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
- 	err = blkcipher_walk_virt(desc, &walk);
- 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -453,14 +453,14 @@
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
  
 -	kernel_fpu_begin();
  	while ((nbytes = walk.nbytes)) {
@@ -1721,15 +1800,15 @@ index aa8b0672f87a..2429414bfc71 100644
  			      nbytes & AES_BLOCK_MASK, walk.iv);
 +		kernel_fpu_end();
  		nbytes &= AES_BLOCK_SIZE - 1;
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 		err = skcipher_walk_done(&walk, nbytes);
  	}
 -	kernel_fpu_end();
  
  	return err;
  }
-@@ -503,18 +503,20 @@ static int ctr_crypt(struct blkcipher_desc *desc,
- 	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
- 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -510,18 +510,20 @@
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
  
 -	kernel_fpu_begin();
  	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
@@ -1738,32 +1817,146 @@ index aa8b0672f87a..2429414bfc71 100644
  			              nbytes & AES_BLOCK_MASK, walk.iv);
 +		kernel_fpu_end();
  		nbytes &= AES_BLOCK_SIZE - 1;
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 		err = skcipher_walk_done(&walk, nbytes);
  	}
  	if (walk.nbytes) {
 +		kernel_fpu_begin();
  		ctr_crypt_final(ctx, &walk);
 +		kernel_fpu_end();
- 		err = blkcipher_walk_done(desc, &walk, 0);
+ 		err = skcipher_walk_done(&walk, 0);
  	}
 -	kernel_fpu_end();
  
  	return err;
  }
-diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
-index 8648158f3916..d7699130ee36 100644
---- a/arch/x86/crypto/cast5_avx_glue.c
-+++ b/arch/x86/crypto/cast5_avx_glue.c
-@@ -59,7 +59,7 @@ static inline void cast5_fpu_end(bool fpu_enabled)
- static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
- 		     bool enc)
- {
--	bool fpu_enabled = false;
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/camellia_aesni_avx2_glue.c linux-4.14/arch/x86/crypto/camellia_aesni_avx2_glue.c
+--- linux-4.14.orig/arch/x86/crypto/camellia_aesni_avx2_glue.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/crypto/camellia_aesni_avx2_glue.c	2018-09-05 11:05:07.000000000 +0200
+@@ -206,6 +206,20 @@
+ 	bool fpu_enabled;
+ };
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void camellia_fpu_end_rt(struct crypt_priv *ctx)
++{
++       bool fpu_enabled = ctx->fpu_enabled;
++
++       if (!fpu_enabled)
++               return;
++       camellia_fpu_end(fpu_enabled);
++       ctx->fpu_enabled = false;
++}
++#else
++static void camellia_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
++
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ 	const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
+@@ -221,16 +235,19 @@
+ 	}
+ 
+ 	if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
++		kernel_fpu_resched();
+ 		camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
+ 		srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
+ 		nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
+ 	}
+ 
+ 	while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
++		kernel_fpu_resched();
+ 		camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
+ 		srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
+ 		nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
+ 	}
++	camellia_fpu_end_rt(ctx);
+ 
+ 	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ 		camellia_enc_blk(ctx->ctx, srcdst, srcdst);
+@@ -251,16 +268,19 @@
+ 	}
+ 
+ 	if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
++		kernel_fpu_resched();
+ 		camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
+ 		srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
+ 		nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
+ 	}
+ 
+ 	while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
++		kernel_fpu_resched();
+ 		camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
+ 		srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
+ 		nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
+ 	}
++	camellia_fpu_end_rt(ctx);
+ 
+ 	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ 		camellia_dec_blk(ctx->ctx, srcdst, srcdst);
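
Two idioms recur through all the SIMD glue changes below: the *_fpu_end_rt() helpers close the FPU section early on PREEMPT_RT_FULL, and kernel_fpu_resched() inserts a preemption opportunity between batches of parallel blocks. Only the declaration of kernel_fpu_resched() appears in this patch (see the fpu/api.h hunk further down); in the -rt series its body is essentially the following. Treat this as a reconstruction, not as part of this patch:

    void kernel_fpu_resched(void)
    {
            WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));

            if (should_resched(PREEMPT_OFFSET)) {
                    kernel_fpu_end();
                    cond_resched();
                    kernel_fpu_begin();
            }
    }
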
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/camellia_aesni_avx_glue.c linux-4.14/arch/x86/crypto/camellia_aesni_avx_glue.c
+--- linux-4.14.orig/arch/x86/crypto/camellia_aesni_avx_glue.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/crypto/camellia_aesni_avx_glue.c	2018-09-05 11:05:07.000000000 +0200
+@@ -210,6 +210,21 @@
+ 	bool fpu_enabled;
+ };
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void camellia_fpu_end_rt(struct crypt_priv *ctx)
++{
++	bool fpu_enabled = ctx->fpu_enabled;
++
++	if (!fpu_enabled)
++		return;
++	camellia_fpu_end(fpu_enabled);
++	ctx->fpu_enabled = false;
++}
++
++#else
++static void camellia_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
++
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ 	const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
+@@ -225,10 +240,12 @@
+ 	}
+ 
+ 	while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
++		kernel_fpu_resched();
+ 		camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
+ 		srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
+ 		nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
+ 	}
++	camellia_fpu_end_rt(ctx);
+ 
+ 	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ 		camellia_enc_blk(ctx->ctx, srcdst, srcdst);
+@@ -249,10 +266,12 @@
+ 	}
+ 
+ 	while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
++		kernel_fpu_resched();
+ 		camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
+ 		srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
+ 		nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
+ 	}
++	camellia_fpu_end_rt(ctx);
+ 
+ 	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ 		camellia_dec_blk(ctx->ctx, srcdst, srcdst);
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.14/arch/x86/crypto/cast5_avx_glue.c
+--- linux-4.14.orig/arch/x86/crypto/cast5_avx_glue.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/crypto/cast5_avx_glue.c	2018-09-05 11:05:07.000000000 +0200
+@@ -59,7 +59,7 @@
+ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ 		     bool enc)
+ {
+-	bool fpu_enabled = false;
 +	bool fpu_enabled;
  	struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
  	const unsigned int bsize = CAST5_BLOCK_SIZE;
  	unsigned int nbytes;
-@@ -75,7 +75,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+@@ -73,7 +73,7 @@
  		u8 *wsrc = walk->src.virt.addr;
  		u8 *wdst = walk->dst.virt.addr;
  
@@ -1772,7 +1965,7 @@ index 8648158f3916..d7699130ee36 100644
  
  		/* Process multi-block batch */
  		if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
-@@ -103,10 +103,9 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+@@ -102,10 +102,9 @@
  		} while (nbytes >= bsize);
  
  done:
@@ -1784,7 +1977,7 @@ index 8648158f3916..d7699130ee36 100644
  	return err;
  }
  
-@@ -227,7 +226,7 @@ static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
+@@ -226,7 +225,7 @@
  static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  		       struct scatterlist *src, unsigned int nbytes)
  {
@@ -1793,7 +1986,7 @@ index 8648158f3916..d7699130ee36 100644
  	struct blkcipher_walk walk;
  	int err;
  
-@@ -236,12 +235,11 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+@@ -235,12 +234,11 @@
  	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  
  	while ((nbytes = walk.nbytes)) {
@@ -1808,7 +2001,7 @@ index 8648158f3916..d7699130ee36 100644
  	return err;
  }
  
-@@ -311,7 +309,7 @@ static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
+@@ -309,7 +307,7 @@
  static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  		     struct scatterlist *src, unsigned int nbytes)
  {
@@ -1817,7 +2010,7 @@ index 8648158f3916..d7699130ee36 100644
  	struct blkcipher_walk walk;
  	int err;
  
-@@ -320,13 +318,12 @@ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+@@ -318,13 +316,12 @@
  	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  
  	while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
@@ -1833,11 +2026,95 @@ index 8648158f3916..d7699130ee36 100644
  	if (walk.nbytes) {
  		ctr_crypt_final(desc, &walk);
  		err = blkcipher_walk_done(desc, &walk, 0);
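
The cast5 conversion shows the general pattern for the blkcipher loops: instead of one kernel_fpu_begin()/end() spanning the whole request, the FPU section is (re)opened per walk step and closed again before blkcipher_walk_done(), which may sleep. Reduced to its shape (a sketch of the loop above):

    while ((nbytes = walk.nbytes)) {
            fpu_enabled = cast5_fpu_begin(false, nbytes);   /* per chunk */
            /* ... process one walk chunk ... */
            cast5_fpu_end(fpu_enabled);                     /* close early */
            err = blkcipher_walk_done(desc, &walk, nbytes); /* can sleep */
    }
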
-diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
-index 6a85598931b5..3a506ce7ed93 100644
---- a/arch/x86/crypto/glue_helper.c
-+++ b/arch/x86/crypto/glue_helper.c
-@@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/cast6_avx_glue.c linux-4.14/arch/x86/crypto/cast6_avx_glue.c
+--- linux-4.14.orig/arch/x86/crypto/cast6_avx_glue.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/crypto/cast6_avx_glue.c	2018-09-05 11:05:07.000000000 +0200
+@@ -205,19 +205,33 @@
+ 	bool fpu_enabled;
+ };
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void cast6_fpu_end_rt(struct crypt_priv *ctx)
++{
++	bool fpu_enabled = ctx->fpu_enabled;
++
++	if (!fpu_enabled)
++		return;
++	cast6_fpu_end(fpu_enabled);
++	ctx->fpu_enabled = false;
++}
++
++#else
++static void cast6_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
++
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ 	const unsigned int bsize = CAST6_BLOCK_SIZE;
+ 	struct crypt_priv *ctx = priv;
+ 	int i;
+ 
+-	ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
+-
+ 	if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
++		ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
+ 		cast6_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
++		cast6_fpu_end_rt(ctx);
+ 		return;
+ 	}
+-
+ 	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ 		__cast6_encrypt(ctx->ctx, srcdst, srcdst);
+ }
+@@ -228,10 +242,10 @@
+ 	struct crypt_priv *ctx = priv;
+ 	int i;
+ 
+-	ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
+-
+ 	if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
++		ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
+ 		cast6_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
++		cast6_fpu_end_rt(ctx);
+ 		return;
+ 	}
+ 
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/chacha20_glue.c linux-4.14/arch/x86/crypto/chacha20_glue.c
+--- linux-4.14.orig/arch/x86/crypto/chacha20_glue.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/crypto/chacha20_glue.c	2018-09-05 11:05:07.000000000 +0200
+@@ -81,23 +81,24 @@
+ 
+ 	crypto_chacha20_init(state, ctx, walk.iv);
+ 
+-	kernel_fpu_begin();
+-
+ 	while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
++		kernel_fpu_begin();
++
+ 		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
+ 				rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
++		kernel_fpu_end();
+ 		err = skcipher_walk_done(&walk,
+ 					 walk.nbytes % CHACHA20_BLOCK_SIZE);
+ 	}
+ 
+ 	if (walk.nbytes) {
++		kernel_fpu_begin();
+ 		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
+ 				walk.nbytes);
++		kernel_fpu_end();
+ 		err = skcipher_walk_done(&walk, 0);
+ 	}
+ 
+-	kernel_fpu_end();
+-
+ 	return err;
+ }
+ 
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/glue_helper.c linux-4.14/arch/x86/crypto/glue_helper.c
+--- linux-4.14.orig/arch/x86/crypto/glue_helper.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/crypto/glue_helper.c	2018-09-05 11:05:07.000000000 +0200
+@@ -40,7 +40,7 @@
  	void *ctx = crypto_blkcipher_ctx(desc->tfm);
  	const unsigned int bsize = 128 / 8;
  	unsigned int nbytes, i, func_bytes;
@@ -1846,7 +2123,7 @@ index 6a85598931b5..3a506ce7ed93 100644
  	int err;
  
  	err = blkcipher_walk_virt(desc, walk);
-@@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+@@ -50,7 +50,7 @@
  		u8 *wdst = walk->dst.virt.addr;
  
  		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
@@ -1855,7 +2132,7 @@ index 6a85598931b5..3a506ce7ed93 100644
  
  		for (i = 0; i < gctx->num_funcs; i++) {
  			func_bytes = bsize * gctx->funcs[i].num_blocks;
-@@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+@@ -72,10 +72,10 @@
  		}
  
  done:
@@ -1867,7 +2144,7 @@ index 6a85598931b5..3a506ce7ed93 100644
  	return err;
  }
  
-@@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+@@ -192,7 +192,7 @@
  			    struct scatterlist *src, unsigned int nbytes)
  {
  	const unsigned int bsize = 128 / 8;
@@ -1876,7 +2153,7 @@ index 6a85598931b5..3a506ce7ed93 100644
  	struct blkcipher_walk walk;
  	int err;
  
-@@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+@@ -201,12 +201,12 @@
  
  	while ((nbytes = walk.nbytes)) {
  		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
@@ -1891,7 +2168,7 @@ index 6a85598931b5..3a506ce7ed93 100644
  	return err;
  }
  EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
-@@ -277,7 +277,7 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+@@ -275,7 +275,7 @@
  			  struct scatterlist *src, unsigned int nbytes)
  {
  	const unsigned int bsize = 128 / 8;
@@ -1900,7 +2177,7 @@ index 6a85598931b5..3a506ce7ed93 100644
  	struct blkcipher_walk walk;
  	int err;
  
-@@ -286,13 +286,12 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+@@ -284,13 +284,12 @@
  
  	while ((nbytes = walk.nbytes) >= bsize) {
  		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
@@ -1916,7 +2193,7 @@ index 6a85598931b5..3a506ce7ed93 100644
  	if (walk.nbytes) {
  		glue_ctr_crypt_final_128bit(
  			gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
-@@ -347,7 +346,7 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+@@ -380,7 +379,7 @@
  			  void *tweak_ctx, void *crypt_ctx)
  {
  	const unsigned int bsize = 128 / 8;
@@ -1925,7 +2202,7 @@ index 6a85598931b5..3a506ce7ed93 100644
  	struct blkcipher_walk walk;
  	int err;
  
-@@ -360,21 +359,21 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+@@ -393,21 +392,21 @@
  
  	/* set minimum length to bsize, for tweak_fn */
  	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
@@ -1952,20 +2229,230 @@ index 6a85598931b5..3a506ce7ed93 100644
  	return err;
  }
  EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
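
glue_helper.c applies the same discipline one layer down: the function-scoped "bool fpu_enabled = false" becomes loop-scoped, glue_fpu_begin() is entered fresh on each iteration, and glue_fpu_end() runs before the walk advances. The resulting loop shape, condensed from the hunks above:

    while ((nbytes = walk->nbytes)) {
            bool fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                              desc, false, nbytes);
            /* ... one batch of 128-bit blocks ... */
            glue_fpu_end(fpu_enabled);
            err = blkcipher_walk_done(desc, walk, nbytes);
    }
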
-diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
-index bdd9cc59d20f..56d01a339ba4 100644
---- a/arch/x86/entry/common.c
-+++ b/arch/x86/entry/common.c
-@@ -129,7 +129,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/serpent_avx2_glue.c linux-4.14/arch/x86/crypto/serpent_avx2_glue.c
+--- linux-4.14.orig/arch/x86/crypto/serpent_avx2_glue.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/crypto/serpent_avx2_glue.c	2018-09-05 11:05:07.000000000 +0200
+@@ -184,6 +184,21 @@
+ 	bool fpu_enabled;
+ };
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void serpent_fpu_end_rt(struct crypt_priv *ctx)
++{
++       bool fpu_enabled = ctx->fpu_enabled;
++
++       if (!fpu_enabled)
++               return;
++       serpent_fpu_end(fpu_enabled);
++       ctx->fpu_enabled = false;
++}
++
++#else
++static void serpent_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
++
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ 	const unsigned int bsize = SERPENT_BLOCK_SIZE;
+@@ -199,10 +214,12 @@
+ 	}
+ 
+ 	while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
++		kernel_fpu_resched();
+ 		serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
+ 		srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
+ 		nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
+ 	}
++	serpent_fpu_end_rt(ctx);
+ 
+ 	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ 		__serpent_encrypt(ctx->ctx, srcdst, srcdst);
+@@ -223,10 +240,12 @@
+ 	}
+ 
+ 	while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
++		kernel_fpu_resched();
+ 		serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
+ 		srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
+ 		nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
+ 	}
++	serpent_fpu_end_rt(ctx);
+ 
+ 	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ 		__serpent_decrypt(ctx->ctx, srcdst, srcdst);
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/serpent_avx_glue.c linux-4.14/arch/x86/crypto/serpent_avx_glue.c
+--- linux-4.14.orig/arch/x86/crypto/serpent_avx_glue.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/crypto/serpent_avx_glue.c	2018-09-05 11:05:07.000000000 +0200
+@@ -218,16 +218,31 @@
+ 	bool fpu_enabled;
+ };
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void serpent_fpu_end_rt(struct crypt_priv *ctx)
++{
++	bool fpu_enabled = ctx->fpu_enabled;
++
++	if (!fpu_enabled)
++		return;
++	serpent_fpu_end(fpu_enabled);
++	ctx->fpu_enabled = false;
++}
++
++#else
++static void serpent_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
++
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ 	const unsigned int bsize = SERPENT_BLOCK_SIZE;
+ 	struct crypt_priv *ctx = priv;
+ 	int i;
+ 
+-	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+-
+ 	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
++		ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+ 		serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
++		serpent_fpu_end_rt(ctx);
+ 		return;
+ 	}
+ 
+@@ -241,10 +256,10 @@
+ 	struct crypt_priv *ctx = priv;
+ 	int i;
+ 
+-	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+-
+ 	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
++		ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+ 		serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
++		serpent_fpu_end_rt(ctx);
+ 		return;
+ 	}
+ 
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/serpent_sse2_glue.c linux-4.14/arch/x86/crypto/serpent_sse2_glue.c
+--- linux-4.14.orig/arch/x86/crypto/serpent_sse2_glue.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/crypto/serpent_sse2_glue.c	2018-09-05 11:05:07.000000000 +0200
+@@ -187,16 +187,31 @@
+ 	bool fpu_enabled;
+ };
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void serpent_fpu_end_rt(struct crypt_priv *ctx)
++{
++	bool fpu_enabled = ctx->fpu_enabled;
++
++	if (!fpu_enabled)
++		return;
++	serpent_fpu_end(fpu_enabled);
++	ctx->fpu_enabled = false;
++}
++
++#else
++static void serpent_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
++
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ 	const unsigned int bsize = SERPENT_BLOCK_SIZE;
+ 	struct crypt_priv *ctx = priv;
+ 	int i;
+ 
+-	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+-
+ 	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
++		ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+ 		serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
++		serpent_fpu_end_rt(ctx);
+ 		return;
+ 	}
+ 
+@@ -210,10 +225,10 @@
+ 	struct crypt_priv *ctx = priv;
+ 	int i;
+ 
+-	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+-
+ 	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
++		ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+ 		serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
++		serpent_fpu_end_rt(ctx);
+ 		return;
+ 	}
+ 
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/twofish_avx_glue.c linux-4.14/arch/x86/crypto/twofish_avx_glue.c
+--- linux-4.14.orig/arch/x86/crypto/twofish_avx_glue.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/crypto/twofish_avx_glue.c	2018-09-05 11:05:07.000000000 +0200
+@@ -218,6 +218,21 @@
+ 	bool fpu_enabled;
+ };
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void twofish_fpu_end_rt(struct crypt_priv *ctx)
++{
++	bool fpu_enabled = ctx->fpu_enabled;
++
++	if (!fpu_enabled)
++		return;
++	twofish_fpu_end(fpu_enabled);
++	ctx->fpu_enabled = false;
++}
++
++#else
++static void twofish_fpu_end_rt(struct crypt_priv *ctx) { }
++#endif
++
+ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+ {
+ 	const unsigned int bsize = TF_BLOCK_SIZE;
+@@ -228,12 +243,16 @@
+ 
+ 	if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
+ 		twofish_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
++		twofish_fpu_end_rt(ctx);
+ 		return;
+ 	}
+ 
+-	for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
++	for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3) {
++		kernel_fpu_resched();
+ 		twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst);
++	}
+ 
++	twofish_fpu_end_rt(ctx);
+ 	nbytes %= bsize * 3;
+ 
+ 	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+@@ -250,11 +269,15 @@
+ 
+ 	if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
+ 		twofish_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
++		twofish_fpu_end_rt(ctx);
+ 		return;
+ 	}
+ 
+-	for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
++	for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3) {
++		kernel_fpu_resched();
+ 		twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst);
++	}
++	twofish_fpu_end_rt(ctx);
+ 
+ 	nbytes %= bsize * 3;
+ 
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/entry/common.c linux-4.14/arch/x86/entry/common.c
+--- linux-4.14.orig/arch/x86/entry/common.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/entry/common.c	2018-09-05 11:05:07.000000000 +0200
+@@ -133,7 +133,7 @@
  
  #define EXIT_TO_USERMODE_LOOP_FLAGS				\
  	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
--	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
-+	 _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY)
+-	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
++	 _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
  
  static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
  {
-@@ -145,9 +145,16 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
+@@ -148,9 +148,16 @@
  		/* We have work to do. */
  		local_irq_enable();
  
@@ -1983,14 +2470,13 @@ index bdd9cc59d20f..56d01a339ba4 100644
  		if (cached_flags & _TIF_UPROBE)
  			uprobe_notify_resume(regs);
  
-diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
-index edba8606b99a..4a3389535fc6 100644
---- a/arch/x86/entry/entry_32.S
-+++ b/arch/x86/entry/entry_32.S
-@@ -308,8 +308,25 @@ END(ret_from_exception)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/entry/entry_32.S linux-4.14/arch/x86/entry/entry_32.S
+--- linux-4.14.orig/arch/x86/entry/entry_32.S	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/entry/entry_32.S	2018-09-05 11:05:07.000000000 +0200
+@@ -350,8 +350,25 @@
  ENTRY(resume_kernel)
  	DISABLE_INTERRUPTS(CLBR_ANY)
- need_resched:
+ .Lneed_resched:
 +	# preempt count == 0 + NEED_RS set?
  	cmpl	$0, PER_CPU_VAR(__preempt_count)
 +#ifndef CONFIG_PREEMPT_LAZY
@@ -2002,27 +2488,26 @@ index edba8606b99a..4a3389535fc6 100644
 +	cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
 +	jne restore_all
 +
-+	movl    PER_CPU_VAR(current_task), %ebp
-+	cmpl $0,TASK_TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
-+	jnz restore_all
++	movl	PER_CPU_VAR(current_task), %ebp
++	cmpl	$0,TASK_TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
++	jnz	restore_all
 +
-+	testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
-+	jz restore_all
++	testl	$_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
++	jz	restore_all
 +test_int_off:
 +#endif
  	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
  	jz	restore_all
  	call	preempt_schedule_irq
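
The 32-bit resume_kernel rework above, rendered as C pseudocode (a paraphrase of the assembly; helper names are illustrative, not kernel API):

    bool want = false;
    if (!preempt_count() && need_resched())
            want = true;                          /* immediate request */
    else if (!current_thread_info()->preempt_lazy_count &&
             test_thread_flag(TIF_NEED_RESCHED_LAZY))
            want = true;                          /* lazy request allowed */
    if (want && interrupts_on_in_frame(regs))     /* the PT_EFLAGS test */
            preempt_schedule_irq();
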
-diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
-index ef766a358b37..28401f826ab1 100644
---- a/arch/x86/entry/entry_64.S
-+++ b/arch/x86/entry/entry_64.S
-@@ -546,7 +546,23 @@ GLOBAL(retint_user)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/entry/entry_64.S linux-4.14/arch/x86/entry/entry_64.S
+--- linux-4.14.orig/arch/x86/entry/entry_64.S	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/entry/entry_64.S	2018-09-05 11:05:07.000000000 +0200
+@@ -633,7 +633,23 @@
  	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
  	jnc	1f
  0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
 +#ifndef CONFIG_PREEMPT_LAZY
- 	jnz	1f
++	jnz	1f
 +#else
 +	jz	do_preempt_schedule_irq
 +
@@ -2032,7 +2517,7 @@ index ef766a358b37..28401f826ab1 100644
 +
 +	movq	PER_CPU_VAR(current_task), %rcx
 +	cmpl	$0, TASK_TI_preempt_lazy_count(%rcx)
-+	jnz	1f
+ 	jnz	1f
 +
 +	bt	$TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
 +	jnc	1f
@@ -2041,7 +2526,7 @@ index ef766a358b37..28401f826ab1 100644
  	call	preempt_schedule_irq
  	jmp	0b
  1:
-@@ -894,6 +910,7 @@ EXPORT_SYMBOL(native_load_gs_index)
+@@ -988,6 +1004,7 @@
  	jmp	2b
  	.previous
  
@@ -2049,19 +2534,29 @@ index ef766a358b37..28401f826ab1 100644
  /* Call softirq on interrupt stack. Interrupts are off. */
  ENTRY(do_softirq_own_stack)
  	pushq	%rbp
-@@ -906,6 +923,7 @@ ENTRY(do_softirq_own_stack)
- 	decl	PER_CPU_VAR(irq_count)
+@@ -998,6 +1015,7 @@
+ 	leaveq
  	ret
- END(do_softirq_own_stack)
+ ENDPROC(do_softirq_own_stack)
 +#endif
  
  #ifdef CONFIG_XEN
- idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
-index 17f218645701..11bd1b7ee6eb 100644
---- a/arch/x86/include/asm/preempt.h
-+++ b/arch/x86/include/asm/preempt.h
-@@ -79,17 +79,46 @@ static __always_inline void __preempt_count_sub(int val)
+ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
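
Both entry_32.S and entry_64.S grow the same decision before calling preempt_schedule_irq(). In C terms it reads roughly as below (a readability sketch, not code from the patch); PREEMPT_ENABLED is the __preempt_count value with only the inverted NEED_RESCHED bit set, which is why the patch exports it as the _PREEMPT_ENABLED asm-offset further down:

    /* C rendering of the lazy-preemption check in the asm above. */
    static bool irq_exit_should_preempt(void)
    {
        if (!__this_cpu_read(__preempt_count))
            return true;    /* preempt_count 0 and NEED_RESCHED set */
        if (__this_cpu_read(__preempt_count) != PREEMPT_ENABLED)
            return false;   /* real preempt_count is non-zero */
        if (current_thread_info()->preempt_lazy_count)
            return false;   /* inside a lazy-preemption-off section */
        return test_thread_flag(TIF_NEED_RESCHED_LAZY);
    }
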
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/fpu/api.h linux-4.14/arch/x86/include/asm/fpu/api.h
+--- linux-4.14.orig/arch/x86/include/asm/fpu/api.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/include/asm/fpu/api.h	2018-09-05 11:05:07.000000000 +0200
+@@ -25,6 +25,7 @@
+ extern void __kernel_fpu_end(void);
+ extern void kernel_fpu_begin(void);
+ extern void kernel_fpu_end(void);
++extern void kernel_fpu_resched(void);
+ extern bool irq_fpu_usable(void);
+ 
+ /*
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/preempt.h linux-4.14/arch/x86/include/asm/preempt.h
+--- linux-4.14.orig/arch/x86/include/asm/preempt.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/include/asm/preempt.h	2018-09-05 11:05:07.000000000 +0200
+@@ -86,17 +86,46 @@
   * a decrement which hits zero means we have no preempt_count and should
   * reschedule.
   */
@@ -2109,11 +2604,10 @@ index 17f218645701..11bd1b7ee6eb 100644
  }
  
  #ifdef CONFIG_PREEMPT
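
The outer diff collapses most of this 46-line preempt.h hunk; what it does is teach __preempt_count_dec_and_test() the same lazy rule, approximately as follows (reconstructed from the shape of the upstream 4.14-rt series, not copied from the hidden hunk body):

    /* Approximate shape of the hidden preempt.h change. */
    static __always_inline bool __preempt_count_dec_and_test(void)
    {
        if (____preempt_count_dec_and_test())   /* the old decl-and-test */
            return true;
    #ifdef CONFIG_PREEMPT_LAZY
        if (current_thread_info()->preempt_lazy_count)
            return false;
        return test_thread_flag(TIF_NEED_RESCHED_LAZY);
    #else
        return false;
    #endif
    }
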
-diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
-index 8af22be0fe61..d1328789b759 100644
---- a/arch/x86/include/asm/signal.h
-+++ b/arch/x86/include/asm/signal.h
-@@ -27,6 +27,19 @@ typedef struct {
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/signal.h linux-4.14/arch/x86/include/asm/signal.h
+--- linux-4.14.orig/arch/x86/include/asm/signal.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/include/asm/signal.h	2018-09-05 11:05:07.000000000 +0200
+@@ -28,6 +28,19 @@
  #define SA_IA32_ABI	0x02000000u
  #define SA_X32_ABI	0x01000000u
  
@@ -2133,11 +2627,10 @@ index 8af22be0fe61..d1328789b759 100644
  #ifndef CONFIG_COMPAT
  typedef sigset_t compat_sigset_t;
  #endif
-diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
-index 58505f01962f..02fa39652cd6 100644
---- a/arch/x86/include/asm/stackprotector.h
-+++ b/arch/x86/include/asm/stackprotector.h
-@@ -59,7 +59,7 @@
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/stackprotector.h linux-4.14/arch/x86/include/asm/stackprotector.h
+--- linux-4.14.orig/arch/x86/include/asm/stackprotector.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/include/asm/stackprotector.h	2018-09-05 11:05:07.000000000 +0200
+@@ -60,7 +60,7 @@
   */
  static __always_inline void boot_init_stack_canary(void)
  {
@@ -2146,11 +2639,10 @@ index 58505f01962f..02fa39652cd6 100644
  	u64 tsc;
  
  #ifdef CONFIG_X86_64
-@@ -70,8 +70,15 @@ static __always_inline void boot_init_stack_canary(void)
+@@ -71,8 +71,14 @@
  	 * of randomness. The TSC only matters for very early init,
  	 * there it already has some randomness on most systems. Later
  	 * on during the bootup the random pool has true entropy too.
-+	 *
 +	 * For preempt-rt we need to weaken the randomness a bit, as
 +	 * we can't call into the random generator from atomic context
 +	 * due to locking constraints. We just leave canary
@@ -2161,17 +2653,16 @@ index 58505f01962f..02fa39652cd6 100644
 +#endif
  	tsc = rdtsc();
  	canary += tsc + (tsc << 32UL);
- 
-diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
-index ad6f5eb07a95..5ceb3a1c2b1a 100644
---- a/arch/x86/include/asm/thread_info.h
-+++ b/arch/x86/include/asm/thread_info.h
-@@ -54,11 +54,14 @@ struct task_struct;
- 
+ 	canary &= CANARY_MASK;
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/thread_info.h linux-4.14/arch/x86/include/asm/thread_info.h
+--- linux-4.14.orig/arch/x86/include/asm/thread_info.h	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/include/asm/thread_info.h	2018-09-05 11:05:07.000000000 +0200
+@@ -56,11 +56,14 @@
  struct thread_info {
  	unsigned long		flags;		/* low level flags */
-+	int                     preempt_lazy_count;     /* 0 => lazy preemptable
-+							   <0 => BUG */
+ 	u32			status;		/* thread synchronous flags */
++	int			preempt_lazy_count;	/* 0 => lazy preemptable
++							  <0 => BUG */
  };
  
  #define INIT_THREAD_INFO(tsk)			\
@@ -2181,7 +2672,7 @@ index ad6f5eb07a95..5ceb3a1c2b1a 100644
  }
  
  #define init_stack		(init_thread_union.stack)
-@@ -67,6 +70,10 @@ struct thread_info {
+@@ -69,6 +72,10 @@
  
  #include <asm/asm-offsets.h>
  
@@ -2192,23 +2683,23 @@ index ad6f5eb07a95..5ceb3a1c2b1a 100644
  #endif
  
  /*
-@@ -85,6 +92,7 @@ struct thread_info {
+@@ -85,6 +92,7 @@
  #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
  #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
  #define TIF_SECCOMP		8	/* secure computing */
 +#define TIF_NEED_RESCHED_LAZY	9	/* lazy rescheduling necessary */
  #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
  #define TIF_UPROBE		12	/* breakpointed or singlestepping */
- #define TIF_NOTSC		16	/* TSC is not accessible in userland */
-@@ -108,6 +116,7 @@ struct thread_info {
+ #define TIF_PATCH_PENDING	13	/* pending live patching update */
+@@ -112,6 +120,7 @@
  #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
  #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
  #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 +#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
  #define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
  #define _TIF_UPROBE		(1 << TIF_UPROBE)
- #define _TIF_NOTSC		(1 << TIF_NOTSC)
-@@ -143,6 +152,8 @@ struct thread_info {
+ #define _TIF_PATCH_PENDING	(1 << TIF_PATCH_PENDING)
+@@ -153,6 +162,8 @@
  #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
  #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
  
@@ -2217,11 +2708,10 @@ index ad6f5eb07a95..5ceb3a1c2b1a 100644
  #define STACK_WARN		(THREAD_SIZE/8)
  
  /*
-diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
-index 57ab86d94d64..35d25e27180f 100644
---- a/arch/x86/include/asm/uv/uv_bau.h
-+++ b/arch/x86/include/asm/uv/uv_bau.h
-@@ -624,9 +624,9 @@ struct bau_control {
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.14/arch/x86/include/asm/uv/uv_bau.h
+--- linux-4.14.orig/arch/x86/include/asm/uv/uv_bau.h	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/include/asm/uv/uv_bau.h	2018-09-05 11:05:07.000000000 +0200
+@@ -643,9 +643,9 @@
  	cycles_t		send_message;
  	cycles_t		period_end;
  	cycles_t		period_time;
@@ -2234,7 +2724,7 @@ index 57ab86d94d64..35d25e27180f 100644
  	/* tunables */
  	int			max_concurr;
  	int			max_concurr_const;
-@@ -815,15 +815,15 @@ static inline int atom_asr(short i, struct atomic_short *v)
+@@ -847,15 +847,15 @@
   * to be lowered below the current 'v'.  atomic_add_unless can only stop
   * on equal.
   */
@@ -2254,25 +2744,43 @@ index 57ab86d94d64..35d25e27180f 100644
  	return 1;
  }
  
-diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
-index 931ced8ca345..167975ac8af7 100644
---- a/arch/x86/kernel/acpi/boot.c
-+++ b/arch/x86/kernel/acpi/boot.c
-@@ -87,7 +87,9 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
-  *		->ioapic_mutex
-  *			->ioapic_lock
-  */
-+#ifdef CONFIG_X86_IO_APIC
- static DEFINE_MUTEX(acpi_ioapic_lock);
-+#endif
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/Kconfig linux-4.14/arch/x86/Kconfig
+--- linux-4.14.orig/arch/x86/Kconfig	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/Kconfig	2018-09-05 11:05:07.000000000 +0200
+@@ -169,6 +169,7 @@
+ 	select HAVE_HARDLOCKUP_DETECTOR_PERF	if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
+ 	select HAVE_PERF_REGS
+ 	select HAVE_PERF_USER_STACK_DUMP
++	select HAVE_PREEMPT_LAZY
+ 	select HAVE_RCU_TABLE_FREE
+ 	select HAVE_REGS_AND_STACK_ACCESS_API
+ 	select HAVE_RELIABLE_STACKTRACE		if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
+@@ -256,8 +257,11 @@
+ 	def_bool y
+ 	depends on ISA_DMA_API
+ 
++config RWSEM_GENERIC_SPINLOCK
++	def_bool PREEMPT_RT_FULL
++
+ config RWSEM_XCHGADD_ALGORITHM
+-	def_bool y
++	def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
  
- /* --------------------------------------------------------------------------
-                               Boot-time Configuration
-diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
-index 3d8ff40ecc6f..2e96d4e0295b 100644
---- a/arch/x86/kernel/apic/io_apic.c
-+++ b/arch/x86/kernel/apic/io_apic.c
-@@ -1712,7 +1712,8 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
+ config GENERIC_CALIBRATE_DELAY
+ 	def_bool y
+@@ -932,7 +936,7 @@
+ config MAXSMP
+ 	bool "Enable Maximum number of SMP Processors and NUMA Nodes"
+ 	depends on X86_64 && SMP && DEBUG_KERNEL
+-	select CPUMASK_OFFSTACK
++	select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
+ 	---help---
+ 	  Enable maximum number of CPUS and NUMA Nodes for this architecture.
+ 	  If unsure, say N.
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/apic/io_apic.c linux-4.14/arch/x86/kernel/apic/io_apic.c
+--- linux-4.14.orig/arch/x86/kernel/apic/io_apic.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/kernel/apic/io_apic.c	2018-09-05 11:05:07.000000000 +0200
+@@ -1691,7 +1691,8 @@
  static inline bool ioapic_irqd_mask(struct irq_data *data)
  {
  	/* If we are moving the irq we need to mask it */
@@ -2282,11 +2790,10 @@ index 3d8ff40ecc6f..2e96d4e0295b 100644
  		mask_ioapic_irq(data);
  		return true;
  	}
-diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
-index c62e015b126c..0cc71257fca6 100644
---- a/arch/x86/kernel/asm-offsets.c
-+++ b/arch/x86/kernel/asm-offsets.c
-@@ -36,6 +36,7 @@ void common(void) {
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/asm-offsets.c linux-4.14/arch/x86/kernel/asm-offsets.c
+--- linux-4.14.orig/arch/x86/kernel/asm-offsets.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/kernel/asm-offsets.c	2018-09-05 11:05:07.000000000 +0200
+@@ -38,6 +38,7 @@
  
  	BLANK();
  	OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
@@ -2294,124 +2801,37 @@ index c62e015b126c..0cc71257fca6 100644
  	OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
  
  	BLANK();
-@@ -91,4 +92,5 @@ void common(void) {
+@@ -94,6 +95,7 @@
  
  	BLANK();
  	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
 +	DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
- }
-diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
-index a7fdf453d895..e3a0e969a66e 100644
---- a/arch/x86/kernel/cpu/mcheck/mce.c
-+++ b/arch/x86/kernel/cpu/mcheck/mce.c
-@@ -41,6 +41,8 @@
- #include <linux/debugfs.h>
- #include <linux/irq_work.h>
- #include <linux/export.h>
-+#include <linux/jiffies.h>
-+#include <linux/swork.h>
- #include <linux/jump_label.h>
  
- #include <asm/processor.h>
-@@ -1317,7 +1319,7 @@ void mce_log_therm_throt_event(__u64 status)
- static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
+ 	/* TLB state for the entry code */
+ 	OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/cpu/mcheck/dev-mcelog.c linux-4.14/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+--- linux-4.14.orig/arch/x86/kernel/cpu/mcheck/dev-mcelog.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/kernel/cpu/mcheck/dev-mcelog.c	2018-09-05 11:05:07.000000000 +0200
+@@ -14,6 +14,7 @@
+ #include <linux/slab.h>
+ #include <linux/kmod.h>
+ #include <linux/poll.h>
++#include <linux/swork.h>
  
- static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
--static DEFINE_PER_CPU(struct timer_list, mce_timer);
-+static DEFINE_PER_CPU(struct hrtimer, mce_timer);
+ #include "mce-internal.h"
  
- static unsigned long mce_adjust_timer_default(unsigned long interval)
- {
-@@ -1326,32 +1328,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
+@@ -86,13 +87,43 @@
  
- static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
+ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
  
--static void __restart_timer(struct timer_list *t, unsigned long interval)
-+static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval)
- {
--	unsigned long when = jiffies + interval;
--	unsigned long flags;
--
--	local_irq_save(flags);
 -
--	if (timer_pending(t)) {
--		if (time_before(when, t->expires))
--			mod_timer(t, when);
--	} else {
--		t->expires = round_jiffies(when);
--		add_timer_on(t, smp_processor_id());
--	}
--
--	local_irq_restore(flags);
-+	if (!interval)
-+		return HRTIMER_NORESTART;
-+	hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval)));
-+	return HRTIMER_RESTART;
+-void mce_work_trigger(void)
++static void __mce_work_trigger(struct swork_event *event)
+ {
+ 	if (mce_helper[0])
+ 		schedule_work(&mce_trigger_work);
  }
  
--static void mce_timer_fn(unsigned long data)
-+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
- {
--	struct timer_list *t = this_cpu_ptr(&mce_timer);
--	int cpu = smp_processor_id();
- 	unsigned long iv;
- 
--	WARN_ON(cpu != data);
--
- 	iv = __this_cpu_read(mce_next_interval);
- 
- 	if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1374,7 +1362,7 @@ static void mce_timer_fn(unsigned long data)
- 
- done:
- 	__this_cpu_write(mce_next_interval, iv);
--	__restart_timer(t, iv);
-+	return __restart_timer(timer, iv);
- }
- 
- /*
-@@ -1382,7 +1370,7 @@ static void mce_timer_fn(unsigned long data)
-  */
- void mce_timer_kick(unsigned long interval)
- {
--	struct timer_list *t = this_cpu_ptr(&mce_timer);
-+	struct hrtimer *t = this_cpu_ptr(&mce_timer);
- 	unsigned long iv = __this_cpu_read(mce_next_interval);
- 
- 	__restart_timer(t, interval);
-@@ -1397,7 +1385,7 @@ static void mce_timer_delete_all(void)
- 	int cpu;
- 
- 	for_each_online_cpu(cpu)
--		del_timer_sync(&per_cpu(mce_timer, cpu));
-+		hrtimer_cancel(&per_cpu(mce_timer, cpu));
- }
- 
- static void mce_do_trigger(struct work_struct *work)
-@@ -1407,6 +1395,56 @@ static void mce_do_trigger(struct work_struct *work)
- 
- static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
- 
-+static void __mce_notify_work(struct swork_event *event)
-+{
-+	/* Not more than two messages every minute */
-+	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
-+
-+	/* wake processes polling /dev/mcelog */
-+	wake_up_interruptible(&mce_chrdev_wait);
-+
-+	/*
-+	 * There is no risk of missing notifications because
-+	 * work_pending is always cleared before the function is
-+	 * executed.
-+	 */
-+	if (mce_helper[0] && !work_pending(&mce_trigger_work))
-+		schedule_work(&mce_trigger_work);
-+
-+	if (__ratelimit(&ratelimit))
-+		pr_info(HW_ERR "Machine check events logged\n");
-+}
-+
 +#ifdef CONFIG_PREEMPT_RT_FULL
 +static bool notify_work_ready __read_mostly;
 +static struct swork_event notify_work;
@@ -2424,132 +2844,205 @@ index a7fdf453d895..e3a0e969a66e 100644
 +	if (err)
 +		return err;
 +
-+	INIT_SWORK(&notify_work, __mce_notify_work);
++	INIT_SWORK(&notify_work, __mce_work_trigger);
 +	notify_work_ready = true;
 +	return 0;
 +}
 +
-+static void mce_notify_work(void)
++void mce_work_trigger(void)
 +{
 +	if (notify_work_ready)
 +		swork_queue(&notify_work);
 +}
++
 +#else
-+static void mce_notify_work(void)
++void mce_work_trigger(void)
 +{
-+	__mce_notify_work(NULL);
++	__mce_work_trigger(NULL);
 +}
 +static inline int mce_notify_work_init(void) { return 0; }
 +#endif
 +
- /*
-  * Notify the user(s) about new machine check events.
-  * Can be called from interrupt context, but not from machine check/NMI
-@@ -1414,19 +1452,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
-  */
- int mce_notify_irq(void)
+ static ssize_t
+ show_trigger(struct device *s, struct device_attribute *attr, char *buf)
  {
--	/* Not more than two messages every minute */
--	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+@@ -356,7 +387,7 @@
+ 
+ 		return err;
+ 	}
 -
- 	if (test_and_clear_bit(0, &mce_need_notify)) {
--		/* wake processes polling /dev/mcelog */
--		wake_up_interruptible(&mce_chrdev_wait);
++	mce_notify_work_init();
+ 	mce_register_decode_chain(&dev_mcelog_nb);
+ 	return 0;
+ }
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.14/arch/x86/kernel/cpu/mcheck/mce.c
+--- linux-4.14.orig/arch/x86/kernel/cpu/mcheck/mce.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/kernel/cpu/mcheck/mce.c	2018-09-05 11:05:07.000000000 +0200
+@@ -42,6 +42,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/irq_work.h>
+ #include <linux/export.h>
++#include <linux/jiffies.h>
+ #include <linux/jump_label.h>
+ 
+ #include <asm/intel-family.h>
+@@ -1365,7 +1366,7 @@
+ static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
+ 
+ static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
+-static DEFINE_PER_CPU(struct timer_list, mce_timer);
++static DEFINE_PER_CPU(struct hrtimer, mce_timer);
+ 
+ static unsigned long mce_adjust_timer_default(unsigned long interval)
+ {
+@@ -1374,27 +1375,19 @@
+ 
+ static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
+ 
+-static void __start_timer(struct timer_list *t, unsigned long interval)
++static void __start_timer(struct hrtimer *t, unsigned long iv)
+ {
+-	unsigned long when = jiffies + interval;
+-	unsigned long flags;
 -
--		if (mce_helper[0])
--			schedule_work(&mce_trigger_work);
+-	local_irq_save(flags);
 -
--		if (__ratelimit(&ratelimit))
--			pr_info(HW_ERR "Machine check events logged\n");
+-	if (!timer_pending(t) || time_before(when, t->expires))
+-		mod_timer(t, round_jiffies(when));
++	if (!iv)
++		return;
+ 
+-	local_irq_restore(flags);
++	hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
++			       0, HRTIMER_MODE_REL_PINNED);
+ }
+ 
+-static void mce_timer_fn(unsigned long data)
+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
+ {
+-	struct timer_list *t = this_cpu_ptr(&mce_timer);
+-	int cpu = smp_processor_id();
+ 	unsigned long iv;
+ 
+-	WARN_ON(cpu != data);
 -
-+		mce_notify_work();
- 		return 1;
- 	}
- 	return 0;
-@@ -1732,7 +1759,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
- 	}
+ 	iv = __this_cpu_read(mce_next_interval);
+ 
+ 	if (mce_available(this_cpu_ptr(&cpu_info))) {
+@@ -1417,7 +1410,11 @@
+ 
+ done:
+ 	__this_cpu_write(mce_next_interval, iv);
+-	__start_timer(t, iv);
++	if (!iv)
++		return HRTIMER_NORESTART;
++
++	hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(iv)));
++	return HRTIMER_RESTART;
  }
  
--static void mce_start_timer(unsigned int cpu, struct timer_list *t)
-+static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
+ /*
+@@ -1425,7 +1422,7 @@
+  */
+ void mce_timer_kick(unsigned long interval)
  {
- 	unsigned long iv = check_interval * HZ;
+-	struct timer_list *t = this_cpu_ptr(&mce_timer);
++	struct hrtimer *t = this_cpu_ptr(&mce_timer);
+ 	unsigned long iv = __this_cpu_read(mce_next_interval);
  
-@@ -1741,16 +1768,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+ 	__start_timer(t, interval);
+@@ -1440,7 +1437,7 @@
+ 	int cpu;
  
- 	per_cpu(mce_next_interval, cpu) = iv;
+ 	for_each_online_cpu(cpu)
+-		del_timer_sync(&per_cpu(mce_timer, cpu));
++		hrtimer_cancel(&per_cpu(mce_timer, cpu));
+ }
  
--	t->expires = round_jiffies(jiffies + iv);
--	add_timer_on(t, cpu);
-+	hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
-+			0, HRTIMER_MODE_REL_PINNED);
+ /*
+@@ -1769,7 +1766,7 @@
+ 	}
  }
  
- static void __mcheck_cpu_init_timer(void)
+-static void mce_start_timer(struct timer_list *t)
++static void mce_start_timer(struct hrtimer *t)
+ {
+ 	unsigned long iv = check_interval * HZ;
+ 
+@@ -1782,18 +1779,19 @@
+ 
+ static void __mcheck_cpu_setup_timer(void)
  {
 -	struct timer_list *t = this_cpu_ptr(&mce_timer);
+-	unsigned int cpu = smp_processor_id();
 +	struct hrtimer *t = this_cpu_ptr(&mce_timer);
- 	unsigned int cpu = smp_processor_id();
  
 -	setup_pinned_timer(t, mce_timer_fn, cpu);
 +	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 +	t->function = mce_timer_fn;
- 	mce_start_timer(cpu, t);
  }
  
-@@ -2475,6 +2503,8 @@ static void mce_disable_cpu(void *h)
- 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
- 		return;
- 
-+	hrtimer_cancel(this_cpu_ptr(&mce_timer));
+ static void __mcheck_cpu_init_timer(void)
+ {
+-	struct timer_list *t = this_cpu_ptr(&mce_timer);
+-	unsigned int cpu = smp_processor_id();
++	struct hrtimer *t = this_cpu_ptr(&mce_timer);
 +
- 	if (!(action & CPU_TASKS_FROZEN))
- 		cmci_clear();
++	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++	t->function = mce_timer_fn;
  
-@@ -2497,6 +2527,7 @@ static void mce_reenable_cpu(void *h)
- 		if (b->init)
- 			wrmsrl(msr_ops.ctl(i), b->ctl);
- 	}
-+	__mcheck_cpu_init_timer();
+-	setup_pinned_timer(t, mce_timer_fn, cpu);
+ 	mce_start_timer(t);
  }
  
- /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2504,7 +2535,6 @@ static int
- mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+@@ -2309,7 +2307,7 @@
+ 
+ static int mce_cpu_online(unsigned int cpu)
  {
- 	unsigned int cpu = (unsigned long)hcpu;
--	struct timer_list *t = &per_cpu(mce_timer, cpu);
+-	struct timer_list *t = this_cpu_ptr(&mce_timer);
++	struct hrtimer *t = this_cpu_ptr(&mce_timer);
+ 	int ret;
  
- 	switch (action & ~CPU_TASKS_FROZEN) {
- 	case CPU_ONLINE:
-@@ -2524,11 +2554,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
- 		break;
- 	case CPU_DOWN_PREPARE:
- 		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
--		del_timer_sync(t);
- 		break;
- 	case CPU_DOWN_FAILED:
- 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
--		mce_start_timer(cpu, t);
- 		break;
- 	}
+ 	mce_device_create(cpu);
+@@ -2326,10 +2324,10 @@
  
-@@ -2567,6 +2595,10 @@ static __init int mcheck_init_device(void)
- 		goto err_out;
- 	}
+ static int mce_cpu_pre_down(unsigned int cpu)
+ {
+-	struct timer_list *t = this_cpu_ptr(&mce_timer);
++	struct hrtimer *t = this_cpu_ptr(&mce_timer);
  
-+	err = mce_notify_work_init();
-+	if (err)
-+		goto err_out;
-+
- 	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
- 		err = -ENOMEM;
- 		goto err_out;
-diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
-index 1f38d9a4d9de..053bf3b2ef39 100644
---- a/arch/x86/kernel/irq_32.c
-+++ b/arch/x86/kernel/irq_32.c
-@@ -127,6 +127,7 @@ void irq_ctx_init(int cpu)
+ 	mce_disable_cpu();
+-	del_timer_sync(t);
++	hrtimer_cancel(t);
+ 	mce_threshold_remove_device(cpu);
+ 	mce_device_remove(cpu);
+ 	return 0;
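
The mce.c part is a straight timer_list-to-hrtimer conversion: re-arming moves out of __start_timer() into the handler's return value (hrtimer_forward_now() plus HRTIMER_RESTART), and del_timer_sync() becomes hrtimer_cancel(). Condensed to the bare pattern (a sketch; compute_next_jiffies() stands in for the MCE interval logic):

    static DEFINE_PER_CPU(struct hrtimer, poll_timer);

    static enum hrtimer_restart poll_timer_fn(struct hrtimer *timer)
    {
        unsigned long iv = compute_next_jiffies();  /* placeholder */

        if (!iv)
            return HRTIMER_NORESTART;
        hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(iv)));
        return HRTIMER_RESTART;
    }

    static void poll_timer_setup(unsigned long iv)
    {
        struct hrtimer *t = this_cpu_ptr(&poll_timer);

        hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        t->function = poll_timer_fn;
        hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
                               0, HRTIMER_MODE_REL_PINNED);
    }
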
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/fpu/core.c linux-4.14/arch/x86/kernel/fpu/core.c
+--- linux-4.14.orig/arch/x86/kernel/fpu/core.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/kernel/fpu/core.c	2018-09-05 11:05:07.000000000 +0200
+@@ -138,6 +138,18 @@
+ }
+ EXPORT_SYMBOL_GPL(kernel_fpu_end);
+ 
++void kernel_fpu_resched(void)
++{
++	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
++
++	if (should_resched(PREEMPT_OFFSET)) {
++		kernel_fpu_end();
++		cond_resched();
++		kernel_fpu_begin();
++	}
++}
++EXPORT_SYMBOL_GPL(kernel_fpu_resched);
++
+ /*
+  * Save the FPU state (mark it for reload if necessary):
+  *
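
kernel_fpu_resched() costs one compare when no reschedule is pending, so callers can place it inside long SIMD loops; only when the scheduler actually wants the CPU does it pay for the kernel_fpu_end()/begin() round trip. Typical caller shape (sketch; process_chunk_simd() is an illustrative name):

    kernel_fpu_begin();
    for (i = 0; i < nr_chunks; i++) {
        process_chunk_simd(&chunk[i]);  /* hypothetical SIMD worker */
        kernel_fpu_resched();           /* yields only if resched is pending */
    }
    kernel_fpu_end();
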
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/irq_32.c linux-4.14/arch/x86/kernel/irq_32.c
+--- linux-4.14.orig/arch/x86/kernel/irq_32.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/kernel/irq_32.c	2018-09-05 11:05:07.000000000 +0200
+@@ -130,6 +130,7 @@
  	       cpu, per_cpu(hardirq_stack, cpu),  per_cpu(softirq_stack, cpu));
  }
  
@@ -2557,7 +3050,7 @@ index 1f38d9a4d9de..053bf3b2ef39 100644
  void do_softirq_own_stack(void)
  {
  	struct irq_stack *irqstk;
-@@ -143,6 +144,7 @@ void do_softirq_own_stack(void)
+@@ -146,6 +147,7 @@
  
  	call_on_stack(__do_softirq, isp);
  }
@@ -2565,19 +3058,18 @@ index 1f38d9a4d9de..053bf3b2ef39 100644
  
  bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
  {
-diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
-index bd7be8efdc4c..b3b0a7f7b1ca 100644
---- a/arch/x86/kernel/process_32.c
-+++ b/arch/x86/kernel/process_32.c
-@@ -35,6 +35,7 @@
- #include <linux/uaccess.h>
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/process_32.c linux-4.14/arch/x86/kernel/process_32.c
+--- linux-4.14.orig/arch/x86/kernel/process_32.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/kernel/process_32.c	2018-09-05 11:05:07.000000000 +0200
+@@ -38,6 +38,7 @@
  #include <linux/io.h>
  #include <linux/kdebug.h>
+ #include <linux/syscalls.h>
 +#include <linux/highmem.h>
  
  #include <asm/pgtable.h>
  #include <asm/ldt.h>
-@@ -195,6 +196,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+@@ -198,6 +199,35 @@
  }
  EXPORT_SYMBOL_GPL(start_thread);
  
@@ -2613,7 +3105,7 @@ index bd7be8efdc4c..b3b0a7f7b1ca 100644
  
  /*
   *	switch_to(x,y) should switch tasks from x to y.
-@@ -271,6 +301,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -273,6 +303,8 @@
  		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
  		__switch_to_xtra(prev_p, next_p, tss);
  
@@ -2622,23 +3114,22 @@ index bd7be8efdc4c..b3b0a7f7b1ca 100644
  	/*
  	 * Leave lazy mode, flushing any hypercalls made here.
  	 * This must be done before restoring TLS segments so
-diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 3f05c044720b..fe68afd37162 100644
---- a/arch/x86/kvm/lapic.c
-+++ b/arch/x86/kvm/lapic.c
-@@ -1939,6 +1939,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kvm/lapic.c linux-4.14/arch/x86/kvm/lapic.c
+--- linux-4.14.orig/arch/x86/kvm/lapic.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/kvm/lapic.c	2018-09-05 11:05:07.000000000 +0200
+@@ -2120,7 +2120,7 @@
+ 	apic->vcpu = vcpu;
+ 
  	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
- 		     HRTIMER_MODE_ABS_PINNED);
+-		     HRTIMER_MODE_ABS_PINNED);
++		     HRTIMER_MODE_ABS_PINNED_HARD);
  	apic->lapic_timer.timer.function = apic_timer_fn;
-+	apic->lapic_timer.timer.irqsafe = 1;
  
  	/*
- 	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 487b957e7802..a144b8cb358b 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -5932,6 +5932,13 @@ int kvm_arch_init(void *opaque)
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kvm/x86.c linux-4.14/arch/x86/kvm/x86.c
+--- linux-4.14.orig/arch/x86/kvm/x86.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/kvm/x86.c	2018-09-05 11:05:07.000000000 +0200
+@@ -6285,6 +6285,13 @@
  		goto out;
  	}
  
@@ -2652,11 +3143,10 @@ index 487b957e7802..a144b8cb358b 100644
  	r = kvm_mmu_module_init();
  	if (r)
  		goto out_free_percpu;
-diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
-index 6d18b70ed5a9..f752724c22e8 100644
---- a/arch/x86/mm/highmem_32.c
-+++ b/arch/x86/mm/highmem_32.c
-@@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap);
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/mm/highmem_32.c linux-4.14/arch/x86/mm/highmem_32.c
+--- linux-4.14.orig/arch/x86/mm/highmem_32.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/mm/highmem_32.c	2018-09-05 11:05:07.000000000 +0200
+@@ -32,10 +32,11 @@
   */
  void *kmap_atomic_prot(struct page *page, pgprot_t prot)
  {
@@ -2669,7 +3159,7 @@ index 6d18b70ed5a9..f752724c22e8 100644
  	pagefault_disable();
  
  	if (!PageHighMem(page))
-@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+@@ -45,7 +46,10 @@
  	idx = type + KM_TYPE_NR*smp_processor_id();
  	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
  	BUG_ON(!pte_none(*(kmap_pte-idx)));
@@ -2681,7 +3171,7 @@ index 6d18b70ed5a9..f752724c22e8 100644
  	arch_flush_lazy_mmu_mode();
  
  	return (void *)vaddr;
-@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr)
+@@ -88,6 +92,9 @@
  		 * is a bad idea also, in case the page changes cacheability
  		 * attributes or becomes a protected page in a hypervisor.
  		 */
@@ -2691,7 +3181,7 @@ index 6d18b70ed5a9..f752724c22e8 100644
  		kpte_clear_flush(kmap_pte-idx, vaddr);
  		kmap_atomic_idx_pop();
  		arch_flush_lazy_mmu_mode();
-@@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr)
+@@ -100,7 +107,7 @@
  #endif
  
  	pagefault_enable();
@@ -2700,11 +3190,10 @@ index 6d18b70ed5a9..f752724c22e8 100644
  }
  EXPORT_SYMBOL(__kunmap_atomic);
  
-diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
-index ada98b39b8ad..585f6829653b 100644
---- a/arch/x86/mm/iomap_32.c
-+++ b/arch/x86/mm/iomap_32.c
-@@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free);
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/mm/iomap_32.c linux-4.14/arch/x86/mm/iomap_32.c
+--- linux-4.14.orig/arch/x86/mm/iomap_32.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/mm/iomap_32.c	2018-09-05 11:05:07.000000000 +0200
+@@ -56,6 +56,7 @@
  
  void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
  {
@@ -2712,7 +3201,7 @@ index ada98b39b8ad..585f6829653b 100644
  	unsigned long vaddr;
  	int idx, type;
  
-@@ -65,7 +66,12 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+@@ -65,7 +66,12 @@
  	type = kmap_atomic_idx_push();
  	idx = type + KM_TYPE_NR * smp_processor_id();
  	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -2726,7 +3215,7 @@ index ada98b39b8ad..585f6829653b 100644
  	arch_flush_lazy_mmu_mode();
  
  	return (void *)vaddr;
-@@ -113,6 +119,9 @@ iounmap_atomic(void __iomem *kvaddr)
+@@ -113,6 +119,9 @@
  		 * is a bad idea also, in case the page changes cacheability
  		 * attributes or becomes a protected page in a hypervisor.
  		 */
@@ -2736,31 +3225,10 @@ index ada98b39b8ad..585f6829653b 100644
  		kpte_clear_flush(kmap_pte-idx, vaddr);
  		kmap_atomic_idx_pop();
  	}
-diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-index e3353c97d086..01664968555c 100644
---- a/arch/x86/mm/pageattr.c
-+++ b/arch/x86/mm/pageattr.c
-@@ -214,7 +214,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
- 			    int in_flags, struct page **pages)
- {
- 	unsigned int i, level;
-+#ifdef CONFIG_PREEMPT
-+	/*
-+	 * Avoid wbinvd() because it causes latencies on all CPUs,
-+	 * regardless of any CPU isolation that may be in effect.
-+	 */
-+	unsigned long do_wbinvd = 0;
-+#else
- 	unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
-+#endif
- 
- 	BUG_ON(irqs_disabled());
- 
-diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
-index 9e42842e924a..5398f97172f9 100644
---- a/arch/x86/platform/uv/tlb_uv.c
-+++ b/arch/x86/platform/uv/tlb_uv.c
-@@ -748,9 +748,9 @@ static void destination_plugged(struct bau_desc *bau_desc,
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/platform/uv/tlb_uv.c linux-4.14/arch/x86/platform/uv/tlb_uv.c
+--- linux-4.14.orig/arch/x86/platform/uv/tlb_uv.c	2018-09-05 11:03:20.000000000 +0200
++++ linux-4.14/arch/x86/platform/uv/tlb_uv.c	2018-09-05 11:05:07.000000000 +0200
+@@ -740,9 +740,9 @@
  
  		quiesce_local_uvhub(hmaster);
  
@@ -2772,7 +3240,7 @@ index 9e42842e924a..5398f97172f9 100644
  
  		end_uvhub_quiesce(hmaster);
  
-@@ -770,9 +770,9 @@ static void destination_timeout(struct bau_desc *bau_desc,
+@@ -762,9 +762,9 @@
  
  		quiesce_local_uvhub(hmaster);
  
@@ -2784,7 +3252,7 @@ index 9e42842e924a..5398f97172f9 100644
  
  		end_uvhub_quiesce(hmaster);
  
-@@ -793,7 +793,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
+@@ -785,7 +785,7 @@
  	cycles_t tm1;
  
  	hmaster = bcp->uvhub_master;
@@ -2793,7 +3261,7 @@ index 9e42842e924a..5398f97172f9 100644
  	if (!bcp->baudisabled) {
  		stat->s_bau_disabled++;
  		tm1 = get_cycles();
-@@ -806,7 +806,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
+@@ -798,7 +798,7 @@
  			}
  		}
  	}
@@ -2802,7 +3270,7 @@ index 9e42842e924a..5398f97172f9 100644
  }
  
  static void count_max_concurr(int stat, struct bau_control *bcp,
-@@ -869,7 +869,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
+@@ -861,7 +861,7 @@
   */
  static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
  {
@@ -2811,7 +3279,7 @@ index 9e42842e924a..5398f97172f9 100644
  	atomic_t *v;
  
  	v = &hmaster->active_descriptor_count;
-@@ -1002,7 +1002,7 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
+@@ -995,7 +995,7 @@
  	struct bau_control *hmaster;
  
  	hmaster = bcp->uvhub_master;
@@ -2820,7 +3288,7 @@ index 9e42842e924a..5398f97172f9 100644
  	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
  		stat->s_bau_reenabled++;
  		for_each_present_cpu(tcpu) {
-@@ -1014,10 +1014,10 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
+@@ -1007,10 +1007,10 @@
  				tbcp->period_giveups = 0;
  			}
  		}
@@ -2833,7 +3301,7 @@ index 9e42842e924a..5398f97172f9 100644
  	return -1;
  }
  
-@@ -1940,9 +1940,9 @@ static void __init init_per_cpu_tunables(void)
+@@ -1942,9 +1942,9 @@
  		bcp->cong_reps			= congested_reps;
  		bcp->disabled_period		= sec_2_cycles(disabled_period);
  		bcp->giveup_limit		= giveup_limit;
@@ -2846,11 +3314,10 @@ index 9e42842e924a..5398f97172f9 100644
  	}
  }
  
-diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
-index b333fc45f9ec..8b85916e6986 100644
---- a/arch/x86/platform/uv/uv_time.c
-+++ b/arch/x86/platform/uv/uv_time.c
-@@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
+diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/platform/uv/uv_time.c linux-4.14/arch/x86/platform/uv/uv_time.c
+--- linux-4.14.orig/arch/x86/platform/uv/uv_time.c	2017-11-12 19:46:13.000000000 +0100
++++ linux-4.14/arch/x86/platform/uv/uv_time.c	2018-09-05 11:05:07.000000000 +0200
+@@ -57,7 +57,7 @@
  
  /* There is one of these allocated per node */
  struct uv_rtc_timer_head {
@@ -2859,7 +3326,7 @@ index b333fc45f9ec..8b85916e6986 100644
  	/* next cpu waiting for timer, local node relative: */
  	int		next_cpu;
  	/* number of cpus on this node: */
-@@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers(void)
+@@ -177,7 +177,7 @@
  				uv_rtc_deallocate_timers();
  				return -ENOMEM;
  			}
@@ -2868,7 +3335,7 @@ index b333fc45f9ec..8b85916e6986 100644
  			head->ncpus = uv_blade_nr_possible_cpus(bid);
  			head->next_cpu = -1;
  			blade_info[bid] = head;
-@@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
+@@ -231,7 +231,7 @@
  	unsigned long flags;
  	int next_cpu;
  
@@ -2877,7 +3344,7 @@ index b333fc45f9ec..8b85916e6986 100644
  
  	next_cpu = head->next_cpu;
  	*t = expires;
-@@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
+@@ -243,12 +243,12 @@
  		if (uv_setup_intr(cpu, expires)) {
  			*t = ULLONG_MAX;
  			uv_rtc_find_next_timer(head, pnode);
@@ -2892,7 +3359,7 @@ index b333fc45f9ec..8b85916e6986 100644
  	return 0;
  }
  
-@@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
+@@ -267,7 +267,7 @@
  	unsigned long flags;
  	int rc = 0;
  
@@ -2901,7 +3368,7 @@ index b333fc45f9ec..8b85916e6986 100644
  
  	if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
  		rc = 1;
-@@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
+@@ -279,7 +279,7 @@
  			uv_rtc_find_next_timer(head, pnode);
  	}
  
@@ -2910,11 +3377,11 @@ index b333fc45f9ec..8b85916e6986 100644
  
  	return rc;
  }
-@@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, int force)
- static cycle_t uv_read_rtc(struct clocksource *cs)
+@@ -299,13 +299,17 @@
+ static u64 uv_read_rtc(struct clocksource *cs)
  {
  	unsigned long offset;
-+	cycle_t cycles;
++	u64 cycles;
  
 +	preempt_disable();
  	if (uv_get_min_hub_revision_id() == 1)
@@ -2922,19 +3389,31 @@ index b333fc45f9ec..8b85916e6986 100644
  	else
  		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
  
--	return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
-+	cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+-	return (u64)uv_read_local_mmr(UVH_RTC | offset);
++	cycles = (u64)uv_read_local_mmr(UVH_RTC | offset);
 +	preempt_enable();
<Skipped 49792 lines>
================================================================

---- gitweb:

http://git.pld-linux.org/gitweb.cgi/packages/kernel.git/commitdiff/e4b2b4a843c12a6e36d7fce14e4a2cdb0e3b39c4


