[packages/kernel/LINUX_4_9] RT patch updated

jajcus jajcus at pld-linux.org
Mon Mar 20 15:43:30 CET 2017


commit 5c015b7c57615866aa5a55c9fc9e5fa3b008eb01
Author: Jacek Konieczny <j.konieczny at eggsoft.pl>
Date:   Mon Mar 20 13:53:38 2017 +0100

    RT patch updated

 kernel-rt.patch | 709 +++++++++++++++++++++++++++++++++++++++++++++-----------
 kernel.spec     |   2 +-
 2 files changed, 576 insertions(+), 135 deletions(-)
---
diff --git a/kernel.spec b/kernel.spec
index 38f969e..6d95d54 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -213,7 +213,7 @@ Patch146:	kernel-aufs4+vserver.patch
 Patch250:	kernel-fix_256colors_menuconfig.patch
 
 # https://rt.wiki.kernel.org/
-# https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/patch-4.9.6-rt4.patch.xz
+# https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/patch-4.9.13-rt12.patch.xz
 Patch500:	kernel-rt.patch
 
 Patch2000:	kernel-small_fixes.patch
diff --git a/kernel-rt.patch b/kernel-rt.patch
index 4ffbf4b..bef16fc 100644
--- a/kernel-rt.patch
+++ b/kernel-rt.patch
@@ -955,7 +955,7 @@ index ea5a2277ee46..b988e081ac79 100644
  	return pen_release != -1 ? -ENOSYS : 0;
  }
 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
-index 3a2e678b8d30..3ed1e9ba6a01 100644
+index 0122ad1a6027..926b1be48043 100644
 --- a/arch/arm/mm/fault.c
 +++ b/arch/arm/mm/fault.c
 @@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
@@ -2269,7 +2269,7 @@ index 931ced8ca345..167975ac8af7 100644
  /* --------------------------------------------------------------------------
                                Boot-time Configuration
 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
-index 3d8ff40ecc6f..2e96d4e0295b 100644
+index d1e25564b3c1..67e585fa801f 100644
 --- a/arch/x86/kernel/apic/io_apic.c
 +++ b/arch/x86/kernel/apic/io_apic.c
 @@ -1712,7 +1712,8 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
@@ -2635,10 +2635,10 @@ index 3f05c044720b..fe68afd37162 100644
  	/*
  	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 487b957e7802..a144b8cb358b 100644
+index 731044efb195..d2905d9881f0 100644
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -5932,6 +5932,13 @@ int kvm_arch_init(void *opaque)
+@@ -5933,6 +5933,13 @@ int kvm_arch_init(void *opaque)
  		goto out;
  	}
  
@@ -3235,10 +3235,10 @@ index 1cb5dd3a5da1..2f1ec8a67cbe 100644
  
  #else /* CONFIG_HIGHMEM */
 diff --git a/crypto/algapi.c b/crypto/algapi.c
-index df939b54b09f..efe5e06adcf7 100644
+index 1fad2a6b3bbb..ecb7315426a9 100644
 --- a/crypto/algapi.c
 +++ b/crypto/algapi.c
-@@ -718,13 +718,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
+@@ -719,13 +719,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
  
  int crypto_register_notifier(struct notifier_block *nb)
  {
@@ -4059,7 +4059,7 @@ index adbd1de1cea5..1fac5074f2cf 100644
  	  This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
  	  Support for K10 and newer processors is now in acpi-cpufreq.
 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-index a218c2e395e7..5273d8f1d5dd 100644
+index 0c400f852a76..97d5f6193751 100644
 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
 @@ -1537,7 +1537,9 @@ execbuf_submit(struct i915_execbuffer_params *params,
@@ -4106,10 +4106,10 @@ index 3fc286cd1157..252a1117b103 100644
  	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  
 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 869b29fe9ec4..c8b8788d9d36 100644
+index b9be8a6141d8..3162feddabe8 100644
 --- a/drivers/gpu/drm/i915/intel_display.c
 +++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -12131,7 +12131,7 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
+@@ -12141,7 +12141,7 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	struct intel_flip_work *work;
  
@@ -4610,10 +4610,10 @@ index 4d200883c505..98b64ed5cb81 100644
  	Allows a block device to be used as cache for other devices; uses
  	a btree for indexing and the layout is optimized for SSDs.
 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
-index 31a89c8832c0..c3a7e8a9f761 100644
+index 2c965424d383..2c8877f50626 100644
 --- a/drivers/md/dm-rq.c
 +++ b/drivers/md/dm-rq.c
-@@ -838,7 +838,7 @@ static void dm_old_request_fn(struct request_queue *q)
+@@ -842,7 +842,7 @@ static void dm_old_request_fn(struct request_queue *q)
  		/* Establish tio->ti before queuing work (map_tio_request) */
  		tio->ti = ti;
  		kthread_queue_work(&md->kworker, &tio->work);
@@ -5412,7 +5412,7 @@ index 479e223f9cff..3418a54b4131 100644
  	usb_anchor_resume_wakeups(anchor);
  	atomic_dec(&urb->use_count);
 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
-index 17989b72cdae..88c6574b5992 100644
+index 8d412d8b1f29..176491dd739e 100644
 --- a/drivers/usb/gadget/function/f_fs.c
 +++ b/drivers/usb/gadget/function/f_fs.c
 @@ -1593,7 +1593,7 @@ static void ffs_data_put(struct ffs_data *ffs)
@@ -5622,7 +5622,7 @@ index b205a629001d..5646afc022ba 100644
  		__this_cpu_inc(bh_accounting.nr);
  		recalc_bh_state();
 diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
-index 8f6a2a5863b9..4217828d0b68 100644
+index a27fc8791551..791aecb7c1ac 100644
 --- a/fs/cifs/readdir.c
 +++ b/fs/cifs/readdir.c
 @@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
@@ -6093,10 +6093,10 @@ index 1452177c822d..f43b01d54c59 100644
  };
  
 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
-index 241da19b7da4..8f9636cc298f 100644
+index 78ff8b63d5f7..3573653fd5cc 100644
 --- a/fs/nfs/nfs4proc.c
 +++ b/fs/nfs/nfs4proc.c
-@@ -2697,7 +2697,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+@@ -2698,7 +2698,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
  	unsigned int seq;
  	int ret;
  
@@ -6105,7 +6105,7 @@ index 241da19b7da4..8f9636cc298f 100644
  
  	ret = _nfs4_proc_open(opendata);
  	if (ret != 0)
-@@ -2735,7 +2735,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+@@ -2736,7 +2736,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
  	ctx->state = state;
  	if (d_inode(dentry) == state->inode) {
  		nfs_inode_attach_open_context(ctx);
@@ -6767,7 +6767,7 @@ index bb3f3297062a..a117a33ef72c 100644
  }
  
 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
-index 5e00f80b1535..65d0671f20b4 100644
+index 5e00f80b1535..a34e10b55cde 100644
 --- a/include/linux/hrtimer.h
 +++ b/include/linux/hrtimer.h
 @@ -87,6 +87,9 @@ enum hrtimer_restart {
@@ -6852,15 +6852,24 @@ index 5e00f80b1535..65d0671f20b4 100644
  /* Query timers: */
  extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
  
-@@ -436,7 +453,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
+@@ -436,9 +453,15 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
   * Helper function to check, whether the timer is running the callback
   * function
   */
 -static inline int hrtimer_callback_running(struct hrtimer *timer)
 +static inline int hrtimer_callback_running(const struct hrtimer *timer)
  {
- 	return timer->base->cpu_base->running == timer;
+-	return timer->base->cpu_base->running == timer;
++	if (timer->base->cpu_base->running == timer)
++		return 1;
++#ifdef CONFIG_PREEMPT_RT_BASE
++	if (timer->base->cpu_base->running_soft == timer)
++		return 1;
++#endif
++	return 0;
  }
+ 
+ /* Forward a hrtimer so it expires after now: */
 diff --git a/include/linux/idr.h b/include/linux/idr.h
 index 083d61e92706..5899796f50cb 100644
 --- a/include/linux/idr.h
@@ -7098,7 +7107,7 @@ index 72f0721f75e7..480972ae47d3 100644
   * Autoprobing for irqs:
   *
 diff --git a/include/linux/irq.h b/include/linux/irq.h
-index e79875574b39..177cee0c3305 100644
+index 39e3254e5769..8ebac94fbb9f 100644
 --- a/include/linux/irq.h
 +++ b/include/linux/irq.h
 @@ -72,6 +72,7 @@ enum irqchip_irq_state;
@@ -7697,6 +7706,30 @@ index 08d947fc4c59..705fb564a605 100644
  #ifdef CONFIG_X86_INTEL_MPX
  	/* address of the bounds directory */
  	void __user *bd_addr;
+diff --git a/include/linux/module.h b/include/linux/module.h
+index 0c3207d26ac0..5944baaa3f28 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -496,6 +496,7 @@ static inline int module_is_live(struct module *mod)
+ struct module *__module_text_address(unsigned long addr);
+ struct module *__module_address(unsigned long addr);
+ bool is_module_address(unsigned long addr);
++bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
+ bool is_module_percpu_address(unsigned long addr);
+ bool is_module_text_address(unsigned long addr);
+ 
+@@ -663,6 +664,11 @@ static inline bool is_module_percpu_address(unsigned long addr)
+ 	return false;
+ }
+ 
++static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
++{
++	return false;
++}
++
+ static inline bool is_module_text_address(unsigned long addr)
+ {
+ 	return false;
 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
 index 2cb7531e7d7a..b3fdfc820216 100644
 --- a/include/linux/mutex.h
@@ -7744,10 +7777,10 @@ index 2cb7531e7d7a..b3fdfc820216 100644
  #endif /* __LINUX_MUTEX_H */
 diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
 new file mode 100644
-index 000000000000..c38a44b14da5
+index 000000000000..e0284edec655
 --- /dev/null
 +++ b/include/linux/mutex_rt.h
-@@ -0,0 +1,84 @@
+@@ -0,0 +1,89 @@
 +#ifndef __LINUX_MUTEX_RT_H
 +#define __LINUX_MUTEX_RT_H
 +
@@ -7793,7 +7826,12 @@ index 000000000000..c38a44b14da5
 +#define mutex_lock_killable(l)		_mutex_lock_killable(l)
 +#define mutex_trylock(l)		_mutex_trylock(l)
 +#define mutex_unlock(l)			_mutex_unlock(l)
++
++#ifdef CONFIG_DEBUG_MUTEXES
 +#define mutex_destroy(l)		rt_mutex_destroy(&(l)->lock)
++#else
++static inline void mutex_destroy(struct mutex *lock) {}
++#endif
 +
 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
 +# define mutex_lock_nested(l, s)	_mutex_lock_nested(l, s)
@@ -7833,7 +7871,7 @@ index 000000000000..c38a44b14da5
 +
 +#endif
 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index d83590ef74a1..0ae3b6cf430c 100644
+index bb9b102c15cd..a5b12b8ad196 100644
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
 @@ -396,7 +396,19 @@ typedef enum rx_handler_result rx_handler_result_t;
@@ -7856,7 +7894,7 @@ index d83590ef74a1..0ae3b6cf430c 100644
  
  static inline bool napi_disable_pending(struct napi_struct *n)
  {
-@@ -2461,14 +2473,53 @@ void netdev_freemem(struct net_device *dev);
+@@ -2463,14 +2475,53 @@ void netdev_freemem(struct net_device *dev);
  void synchronize_net(void);
  int init_dummy_netdev(struct net_device *dev);
  
@@ -7911,7 +7949,7 @@ index d83590ef74a1..0ae3b6cf430c 100644
  struct net_device *dev_get_by_index(struct net *net, int ifindex);
  struct net_device *__dev_get_by_index(struct net *net, int ifindex);
  struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-@@ -2851,6 +2902,7 @@ struct softnet_data {
+@@ -2855,6 +2906,7 @@ struct softnet_data {
  	unsigned int		dropped;
  	struct sk_buff_head	input_pkt_queue;
  	struct napi_struct	backlog;
@@ -8156,7 +8194,7 @@ index 5b2e6159b744..ea940f451606 100644
  extern void percpu_up_write(struct percpu_rw_semaphore *);
  
 diff --git a/include/linux/percpu.h b/include/linux/percpu.h
-index 56939d3f6e53..1c7e33fc83e4 100644
+index 56939d3f6e53..b988bf40ad3e 100644
 --- a/include/linux/percpu.h
 +++ b/include/linux/percpu.h
 @@ -18,6 +18,35 @@
@@ -8195,6 +8233,14 @@ index 56939d3f6e53..1c7e33fc83e4 100644
  /* minimum unit size, also is the maximum supported allocation size */
  #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
  
+@@ -110,6 +139,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
+ #endif
+ 
+ extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
++extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
+ extern bool is_kernel_percpu_address(unsigned long addr);
+ 
+ #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
 diff --git a/include/linux/pid.h b/include/linux/pid.h
 index 23705a53abba..2cc64b779f03 100644
 --- a/include/linux/pid.h
@@ -10953,7 +10999,7 @@ index 3f9c97419f02..11dbe26a8279 100644
  
  config PREEMPT_COUNT
 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
-index 85bc9beb046d..3b8da75ba2e0 100644
+index 4e2f3de0e40b..6401eb5fe140 100644
 --- a/kernel/cgroup.c
 +++ b/kernel/cgroup.c
 @@ -5040,10 +5040,10 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
@@ -10980,7 +11026,7 @@ index 85bc9beb046d..3b8da75ba2e0 100644
  }
  
  static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5742,6 +5742,7 @@ static int __init cgroup_wq_init(void)
+@@ -5739,6 +5739,7 @@ static int __init cgroup_wq_init(void)
  	 */
  	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
  	BUG_ON(!cgroup_destroy_wq);
@@ -11638,7 +11684,7 @@ index fc1ef736253c..83c666537a7a 100644
  	return r;
  }
 diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 02c8421f8c01..3748cb7b2d6e 100644
+index 4b3323151a2f..e89a1a4d59cd 100644
 --- a/kernel/events/core.c
 +++ b/kernel/events/core.c
 @@ -1050,6 +1050,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
@@ -11649,7 +11695,7 @@ index 02c8421f8c01..3748cb7b2d6e 100644
  }
  
  static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
-@@ -8335,6 +8336,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
+@@ -8363,6 +8364,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
  
  	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	hwc->hrtimer.function = perf_swevent_hrtimer;
@@ -11766,7 +11812,7 @@ index ba8a01564985..47784f8aed37 100644
  	p->utime = p->stime = p->gtime = 0;
  	p->utimescaled = p->stimescaled = 0;
 diff --git a/kernel/futex.c b/kernel/futex.c
-index 2c4be467fecd..064917c2d9a5 100644
+index 38b68c2735c5..6450a8d81667 100644
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
 @@ -904,7 +904,9 @@ void exit_pi_state_list(struct task_struct *curr)
@@ -12361,10 +12407,86 @@ index 6f88e352cd4f..5e27fb1079e7 100644
  obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
  obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
 diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
-index 4d7ffc0a0d00..9e52009c192e 100644
+index 4d7ffc0a0d00..3d157b3128eb 100644
 --- a/kernel/locking/lockdep.c
 +++ b/kernel/locking/lockdep.c
-@@ -3689,6 +3689,7 @@ static void check_flags(unsigned long flags)
+@@ -658,6 +658,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+ 	struct lockdep_subclass_key *key;
+ 	struct hlist_head *hash_head;
+ 	struct lock_class *class;
++	bool is_static = false;
+ 
+ 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+ 		debug_locks_off();
+@@ -671,10 +672,23 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+ 
+ 	/*
+ 	 * Static locks do not have their class-keys yet - for them the key
+-	 * is the lock object itself:
++	 * is the lock object itself. If the lock is in the per cpu area,
++	 * the canonical address of the lock (per cpu offset removed) is
++	 * used.
+ 	 */
+-	if (unlikely(!lock->key))
+-		lock->key = (void *)lock;
++	if (unlikely(!lock->key)) {
++		unsigned long can_addr, addr = (unsigned long)lock;
++
++		if (__is_kernel_percpu_address(addr, &can_addr))
++			lock->key = (void *)can_addr;
++		else if (__is_module_percpu_address(addr, &can_addr))
++			lock->key = (void *)can_addr;
++		else if (static_obj(lock))
++			lock->key = (void *)lock;
++		else
++			return ERR_PTR(-EINVAL);
++		is_static = true;
++	}
+ 
+ 	/*
+ 	 * NOTE: the class-key must be unique. For dynamic locks, a static
+@@ -706,7 +720,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+ 		}
+ 	}
+ 
+-	return NULL;
++	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+ }
+ 
+ /*
+@@ -724,19 +738,18 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
+ 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ 
+ 	class = look_up_lock_class(lock, subclass);
+-	if (likely(class))
++	if (likely(!IS_ERR_OR_NULL(class)))
+ 		goto out_set_class_cache;
+ 
+ 	/*
+ 	 * Debug-check: all keys must be persistent!
+- 	 */
+-	if (!static_obj(lock->key)) {
++	 */
++	if (IS_ERR(class)) {
+ 		debug_locks_off();
+ 		printk("INFO: trying to register non-static key.\n");
+ 		printk("the code is fine but needs lockdep annotation.\n");
+ 		printk("turning off the locking correctness validator.\n");
+ 		dump_stack();
+-
+ 		return NULL;
+ 	}
+ 
+@@ -3410,7 +3423,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+ 		 * Clearly if the lock hasn't been acquired _ever_, we're not
+ 		 * holding it either, so report failure.
+ 		 */
+-		if (!class)
++		if (IS_ERR_OR_NULL(class))
+ 			return 0;
+ 
+ 		/*
+@@ -3689,6 +3702,7 @@ static void check_flags(unsigned long flags)
  		}
  	}
  
@@ -12372,7 +12494,7 @@ index 4d7ffc0a0d00..9e52009c192e 100644
  	/*
  	 * We dont accurately track softirq state in e.g.
  	 * hardirq contexts (such as on 4KSTACKS), so only
-@@ -3703,6 +3704,7 @@ static void check_flags(unsigned long flags)
+@@ -3703,6 +3717,7 @@ static void check_flags(unsigned long flags)
  			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
  		}
  	}
@@ -12380,6 +12502,15 @@ index 4d7ffc0a0d00..9e52009c192e 100644
  
  	if (!debug_locks)
  		print_irqtrace_events(current);
+@@ -4159,7 +4174,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
+ 		 * If the class exists we look it up and zap it:
+ 		 */
+ 		class = look_up_lock_class(lock, j);
+-		if (class)
++		if (!IS_ERR_OR_NULL(class))
+ 			zap_class(class);
+ 	}
+ 	/*
 diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
 index f8c5af52a131..788068773e61 100644
 --- a/kernel/locking/locktorture.c
@@ -14223,6 +14354,75 @@ index 0374a596cffa..94970338d518 100644
  }
 +
 +#endif
+diff --git a/kernel/module.c b/kernel/module.c
+index 0e54d5bf0097..3483a3743b44 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -660,16 +660,7 @@ static void percpu_modcopy(struct module *mod,
+ 		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
+ }
+ 
+-/**
+- * is_module_percpu_address - test whether address is from module static percpu
+- * @addr: address to test
+- *
+- * Test whether @addr belongs to module static percpu area.
+- *
+- * RETURNS:
+- * %true if @addr is from module static percpu area
+- */
+-bool is_module_percpu_address(unsigned long addr)
++bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
+ {
+ 	struct module *mod;
+ 	unsigned int cpu;
+@@ -683,9 +674,11 @@ bool is_module_percpu_address(unsigned long addr)
+ 			continue;
+ 		for_each_possible_cpu(cpu) {
+ 			void *start = per_cpu_ptr(mod->percpu, cpu);
++			void *va = (void *)addr;
+ 
+-			if ((void *)addr >= start &&
+-			    (void *)addr < start + mod->percpu_size) {
++			if (va >= start && va < start + mod->percpu_size) {
++				if (can_addr)
++					*can_addr = (unsigned long) (va - start);
+ 				preempt_enable();
+ 				return true;
+ 			}
+@@ -696,6 +689,20 @@ bool is_module_percpu_address(unsigned long addr)
+ 	return false;
+ }
+ 
++/**
++ * is_module_percpu_address - test whether address is from module static percpu
++ * @addr: address to test
++ *
++ * Test whether @addr belongs to module static percpu area.
++ *
++ * RETURNS:
++ * %true if @addr is from module static percpu area
++ */
++bool is_module_percpu_address(unsigned long addr)
++{
++	return __is_module_percpu_address(addr, NULL);
++}
++
+ #else /* ... !CONFIG_SMP */
+ 
+ static inline void __percpu *mod_percpu(struct module *mod)
+@@ -727,6 +734,11 @@ bool is_module_percpu_address(unsigned long addr)
+ 	return false;
+ }
+ 
++bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
++{
++	return false;
++}
++
+ #endif /* CONFIG_SMP */
+ 
+ #define MODINFO_ATTR(field)	\
 diff --git a/kernel/panic.c b/kernel/panic.c
 index e6480e20379e..7e9c1918a94e 100644
 --- a/kernel/panic.c
@@ -14369,7 +14569,7 @@ index 6ccb08f57fcb..c8cbb5ed2fe3 100644
  }
  EXPORT_SYMBOL(pm_suspend);
 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-index f7a55e9ff2f7..9277ee033271 100644
+index 9c5b231684d0..cf15bdb6855b 100644
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
 @@ -351,6 +351,65 @@ __packed __aligned(4)
@@ -16144,7 +16344,7 @@ index 154fd689fe02..a6aa5801b21e 100644
  	return (nested == preempt_offset);
  }
 diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index 37e2449186c4..26dcaabde8b3 100644
+index 37e2449186c4..e00accf92a4b 100644
 --- a/kernel/sched/deadline.c
 +++ b/kernel/sched/deadline.c
 @@ -687,6 +687,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
@@ -16155,6 +16355,20 @@ index 37e2449186c4..26dcaabde8b3 100644
  }
  
  static
+@@ -1729,12 +1730,11 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
+ #ifdef CONFIG_SMP
+ 		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
+ 			queue_push_tasks(rq);
+-#else
++#endif
+ 		if (dl_task(rq->curr))
+ 			check_preempt_curr_dl(rq, p, 0);
+ 		else
+ 			resched_curr(rq);
+-#endif
+ 	}
+ }
+ 
 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
 index fa178b62ea79..935224123441 100644
 --- a/kernel/sched/debug.c
@@ -16281,7 +16495,7 @@ index 69631fa46c2f..6d28fcd08872 100644
  #ifdef HAVE_RT_PUSH_IPI
  /*
 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
-index 2516b8df6dbb..2556baa0a97e 100644
+index 2516b8df6dbb..b0691f4e7d49 100644
 --- a/kernel/sched/rt.c
 +++ b/kernel/sched/rt.c
 @@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
@@ -16300,6 +16514,18 @@ index 2516b8df6dbb..2556baa0a97e 100644
  #endif
  #endif /* CONFIG_SMP */
  	/* We start is dequeued state, because no RT tasks are queued */
+@@ -2198,10 +2200,9 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
+ #ifdef CONFIG_SMP
+ 		if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
+ 			queue_push_tasks(rq);
+-#else
++#endif /* CONFIG_SMP */
+ 		if (p->prio < rq->curr->prio)
+ 			resched_curr(rq);
+-#endif /* CONFIG_SMP */
+ 	}
+ }
+ 
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
 index 055f935d4421..19324ac27026 100644
 --- a/kernel/sched/sched.h
@@ -18876,7 +19102,7 @@ index 704f595ce83f..763a3e5121ff 100644
  #define CS_NAME_LEN	32
  
 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index c611c47de884..08a5ab762495 100644
+index c611c47de884..cdff4411f8f6 100644
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
 @@ -193,8 +193,11 @@ EXPORT_SYMBOL(jiffies_64);
@@ -18892,7 +19118,16 @@ index c611c47de884..08a5ab762495 100644
  	unsigned long		clk;
  	unsigned long		next_expiry;
  	unsigned int		cpu;
-@@ -948,10 +951,10 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
+@@ -203,6 +206,8 @@ struct timer_base {
+ 	bool			is_idle;
+ 	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
+ 	struct hlist_head	vectors[WHEEL_SIZE];
++	struct hlist_head	expired_lists[LVL_DEPTH];
++	int			expired_count;
+ } ____cacheline_aligned;
+ 
+ static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
+@@ -948,10 +953,10 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
  
  		if (!(tf & TIMER_MIGRATING)) {
  			base = get_timer_base(tf);
@@ -18905,7 +19140,7 @@ index c611c47de884..08a5ab762495 100644
  		}
  		cpu_relax();
  	}
-@@ -1023,9 +1026,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+@@ -1023,9 +1028,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
  			/* See the comment in lock_timer_base() */
  			timer->flags |= TIMER_MIGRATING;
  
@@ -18917,7 +19152,7 @@ index c611c47de884..08a5ab762495 100644
  			WRITE_ONCE(timer->flags,
  				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
  		}
-@@ -1050,7 +1053,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+@@ -1050,7 +1055,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
  	}
  
  out_unlock:
@@ -18926,7 +19161,7 @@ index c611c47de884..08a5ab762495 100644
  
  	return ret;
  }
-@@ -1144,19 +1147,46 @@ void add_timer_on(struct timer_list *timer, int cpu)
+@@ -1144,19 +1149,46 @@ void add_timer_on(struct timer_list *timer, int cpu)
  	if (base != new_base) {
  		timer->flags |= TIMER_MIGRATING;
  
@@ -18976,7 +19211,7 @@ index c611c47de884..08a5ab762495 100644
  /**
   * del_timer - deactive a timer.
   * @timer: the timer to be deactivated
-@@ -1180,7 +1210,7 @@ int del_timer(struct timer_list *timer)
+@@ -1180,7 +1212,7 @@ int del_timer(struct timer_list *timer)
  	if (timer_pending(timer)) {
  		base = lock_timer_base(timer, &flags);
  		ret = detach_if_pending(timer, base, true);
@@ -18985,7 +19220,7 @@ index c611c47de884..08a5ab762495 100644
  	}
  
  	return ret;
-@@ -1208,13 +1238,13 @@ int try_to_del_timer_sync(struct timer_list *timer)
+@@ -1208,13 +1240,13 @@ int try_to_del_timer_sync(struct timer_list *timer)
  		timer_stats_timer_clear_start_info(timer);
  		ret = detach_if_pending(timer, base, true);
  	}
@@ -19001,7 +19236,7 @@ index c611c47de884..08a5ab762495 100644
  /**
   * del_timer_sync - deactivate a timer and wait for the handler to finish.
   * @timer: the timer to be deactivated
-@@ -1274,7 +1304,7 @@ int del_timer_sync(struct timer_list *timer)
+@@ -1274,7 +1306,7 @@ int del_timer_sync(struct timer_list *timer)
  		int ret = try_to_del_timer_sync(timer);
  		if (ret >= 0)
  			return ret;
@@ -19010,7 +19245,17 @@ index c611c47de884..08a5ab762495 100644
  	}
  }
  EXPORT_SYMBOL(del_timer_sync);
-@@ -1339,14 +1369,17 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
+@@ -1323,7 +1355,8 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
+ 	}
+ }
+ 
+-static void expire_timers(struct timer_base *base, struct hlist_head *head)
++static inline void __expire_timers(struct timer_base *base,
++				   struct hlist_head *head)
+ {
+ 	while (!hlist_empty(head)) {
+ 		struct timer_list *timer;
+@@ -1339,33 +1372,53 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
  		fn = timer->function;
  		data = timer->data;
  
@@ -19033,7 +19278,56 @@ index c611c47de884..08a5ab762495 100644
  		}
  	}
  }
-@@ -1515,7 +1548,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+ 
+-static int __collect_expired_timers(struct timer_base *base,
+-				    struct hlist_head *heads)
++static void expire_timers(struct timer_base *base)
++{
++	struct hlist_head *head;
++
++	while (base->expired_count--) {
++		head = base->expired_lists + base->expired_count;
++		__expire_timers(base, head);
++	}
++	base->expired_count = 0;
++}
++
++static void __collect_expired_timers(struct timer_base *base)
+ {
+ 	unsigned long clk = base->clk;
+ 	struct hlist_head *vec;
+-	int i, levels = 0;
++	int i;
+ 	unsigned int idx;
+ 
++	/*
++	 * expire_timers() must be called at least once before we can
++	 * collect more timers
++	 */
++	if (WARN_ON(base->expired_count))
++		return;
++
+ 	for (i = 0; i < LVL_DEPTH; i++) {
+ 		idx = (clk & LVL_MASK) + i * LVL_SIZE;
+ 
+ 		if (__test_and_clear_bit(idx, base->pending_map)) {
+ 			vec = base->vectors + idx;
+-			hlist_move_list(vec, heads++);
+-			levels++;
++			hlist_move_list(vec,
++				&base->expired_lists[base->expired_count++]);
+ 		}
+ 		/* Is it time to look at the next level? */
+ 		if (clk & LVL_CLK_MASK)
+@@ -1373,7 +1426,6 @@ static int __collect_expired_timers(struct timer_base *base,
+ 		/* Shift clock for the next level granularity */
+ 		clk >>= LVL_CLK_SHIFT;
+ 	}
+-	return levels;
+ }
+ 
+ #ifdef CONFIG_NO_HZ_COMMON
+@@ -1515,7 +1567,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
  	if (cpu_is_offline(smp_processor_id()))
  		return expires;
  
@@ -19042,7 +19336,7 @@ index c611c47de884..08a5ab762495 100644
  	nextevt = __next_timer_interrupt(base);
  	is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
  	base->next_expiry = nextevt;
-@@ -1543,7 +1576,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+@@ -1543,7 +1595,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
  		if ((expires - basem) > TICK_NSEC)
  			base->is_idle = true;
  	}
@@ -19051,7 +19345,72 @@ index c611c47de884..08a5ab762495 100644
  
  	return cmp_next_hrtimer_event(basem, expires);
  }
-@@ -1608,13 +1641,13 @@ void update_process_times(int user_tick)
+@@ -1566,8 +1618,7 @@ void timer_clear_idle(void)
+ 	base->is_idle = false;
+ }
+ 
+-static int collect_expired_timers(struct timer_base *base,
+-				  struct hlist_head *heads)
++static void collect_expired_timers(struct timer_base *base)
+ {
+ 	/*
+ 	 * NOHZ optimization. After a long idle sleep we need to forward the
+@@ -1584,20 +1635,49 @@ static int collect_expired_timers(struct timer_base *base,
+ 		if (time_after(next, jiffies)) {
+ 			/* The call site will increment clock! */
+ 			base->clk = jiffies - 1;
+-			return 0;
++			return;
+ 		}
+ 		base->clk = next;
+ 	}
+-	return __collect_expired_timers(base, heads);
++	__collect_expired_timers(base);
+ }
+ #else
+-static inline int collect_expired_timers(struct timer_base *base,
+-					 struct hlist_head *heads)
++static inline void collect_expired_timers(struct timer_base *base)
+ {
+-	return __collect_expired_timers(base, heads);
++	__collect_expired_timers(base);
+ }
+ #endif
+ 
++static int find_expired_timers(struct timer_base *base)
++{
++	const unsigned long int end_clk = jiffies;
++
++	while (!base->expired_count && time_after_eq(end_clk, base->clk)) {
++		collect_expired_timers(base);
++		base->clk++;
++	}
++
++	return base->expired_count;
++}
++
++/* Called from CPU tick routine to quickly collect expired timers */
++static int tick_find_expired(struct timer_base *base)
++{
++	int count;
++
++	raw_spin_lock(&base->lock);
++
++	if (unlikely(time_after(jiffies, base->clk + HZ))) {
++		/* defer to ktimersoftd; don't spend too long in irq context */
++		count = -1;
++	} else
++		count = find_expired_timers(base);
++
++	raw_spin_unlock(&base->lock);
++
++	return count;
++}
++
+ /*
+  * Called from the timer interrupt handler to charge one tick to the current
+  * process.  user_tick is 1 if the tick is user time, 0 for system.
+@@ -1608,13 +1688,13 @@ void update_process_times(int user_tick)
  
  	/* Note: this timer irq context must be accounted for as well. */
  	account_process_tick(p, user_tick);
@@ -19067,19 +19426,29 @@ index c611c47de884..08a5ab762495 100644
  	run_posix_cpu_timers(p);
  }
  
-@@ -1630,7 +1663,7 @@ static inline void __run_timers(struct timer_base *base)
- 	if (!time_after_eq(jiffies, base->clk))
- 		return;
- 
--	spin_lock_irq(&base->lock);
+@@ -1624,24 +1704,13 @@ void update_process_times(int user_tick)
+  */
+ static inline void __run_timers(struct timer_base *base)
+ {
+-	struct hlist_head heads[LVL_DEPTH];
+-	int levels;
 +	raw_spin_lock_irq(&base->lock);
  
- 	while (time_after_eq(jiffies, base->clk)) {
+-	if (!time_after_eq(jiffies, base->clk))
+-		return;
++	while (find_expired_timers(base))
++		expire_timers(base);
  
-@@ -1640,8 +1673,8 @@ static inline void __run_timers(struct timer_base *base)
- 		while (levels--)
- 			expire_timers(base, heads + levels);
- 	}
+-	spin_lock_irq(&base->lock);
+-
+-	while (time_after_eq(jiffies, base->clk)) {
+-
+-		levels = collect_expired_timers(base, heads);
+-		base->clk++;
+-
+-		while (levels--)
+-			expire_timers(base, heads + levels);
+-	}
 -	base->running_timer = NULL;
 -	spin_unlock_irq(&base->lock);
 +	raw_spin_unlock_irq(&base->lock);
@@ -19087,7 +19456,7 @@ index c611c47de884..08a5ab762495 100644
  }
  
  /*
-@@ -1651,6 +1684,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
+@@ -1651,6 +1720,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
  {
  	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
  
@@ -19096,7 +19465,22 @@ index c611c47de884..08a5ab762495 100644
  	__run_timers(base);
  	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
  		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
-@@ -1836,16 +1871,16 @@ int timers_dead_cpu(unsigned int cpu)
+@@ -1665,12 +1736,12 @@ void run_local_timers(void)
+ 
+ 	hrtimer_run_queues();
+ 	/* Raise the softirq only if required. */
+-	if (time_before(jiffies, base->clk)) {
++	if (time_before(jiffies, base->clk) || !tick_find_expired(base)) {
+ 		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+ 			return;
+ 		/* CPU is awake, so check the deferrable base. */
+ 		base++;
+-		if (time_before(jiffies, base->clk))
++		if (time_before(jiffies, base->clk) || !tick_find_expired(base))
+ 			return;
+ 	}
+ 	raise_softirq(TIMER_SOFTIRQ);
+@@ -1836,16 +1907,17 @@ int timers_dead_cpu(unsigned int cpu)
  		 * The caller is globally serialized and nobody else
  		 * takes two locks at once, deadlock is not possible.
  		 */
@@ -19106,6 +19490,7 @@ index c611c47de884..08a5ab762495 100644
 +		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
  
  		BUG_ON(old_base->running_timer);
++		BUG_ON(old_base->expired_count);
  
  		for (i = 0; i < WHEEL_SIZE; i++)
  			migrate_timer_list(new_base, old_base->vectors + i);
@@ -19117,7 +19502,7 @@ index c611c47de884..08a5ab762495 100644
  		put_cpu_ptr(&timer_bases);
  	}
  	return 0;
-@@ -1861,8 +1896,11 @@ static void __init init_timer_cpu(int cpu)
+@@ -1861,8 +1933,12 @@ static void __init init_timer_cpu(int cpu)
  	for (i = 0; i < NR_BASES; i++) {
  		base = per_cpu_ptr(&timer_bases[i], cpu);
  		base->cpu = cpu;
@@ -19127,6 +19512,7 @@ index c611c47de884..08a5ab762495 100644
 +#ifdef CONFIG_PREEMPT_RT_FULL
 +		init_swait_queue_head(&base->wait_for_running_timer);
 +#endif
++		base->expired_count = 0;
  	}
  }
  
@@ -21975,11 +22361,11 @@ index 1afec32de6f2..11fa431046a8 100644
  	dump_stack();
 diff --git a/localversion-rt b/localversion-rt
 new file mode 100644
-index 000000000000..ad3da1bcab7e
+index 000000000000..6e44e540b927
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt4
++-rt12
 diff --git a/mm/Kconfig b/mm/Kconfig
 index 86e3e0e74d20..77e5862a1ed2 100644
 --- a/mm/Kconfig
@@ -21994,7 +22380,7 @@ index 86e3e0e74d20..77e5862a1ed2 100644
  	select RADIX_TREE_MULTIORDER
  	help
 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
-index 8fde443f36d7..d7a863b0ec20 100644
+index 6ff2d7744223..b5a91dd53b5f 100644
 --- a/mm/backing-dev.c
 +++ b/mm/backing-dev.c
 @@ -457,9 +457,9 @@ void wb_congested_put(struct bdi_writeback_congested *congested)
@@ -22029,7 +22415,7 @@ index 70e6bec46dc2..6678ed58b7c6 100644
  				cc->last_migrated_pfn = 0;
  			}
 diff --git a/mm/filemap.c b/mm/filemap.c
-index 779801092ef1..554e1b4d0fc5 100644
+index d8d7df82c69a..0eac87a07892 100644
 --- a/mm/filemap.c
 +++ b/mm/filemap.c
 @@ -159,9 +159,12 @@ static int page_cache_tree_insert(struct address_space *mapping,
@@ -22089,7 +22475,7 @@ index 50b4ca6787f0..77518a3b35a1 100644
  unsigned int nr_free_highpages (void)
  {
 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index d536a9daa511..70ac8827ee8c 100644
+index 4c6ade54d833..ba29283aa43d 100644
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
 @@ -67,6 +67,7 @@
@@ -22291,7 +22677,7 @@ index 6f4d27c5bb32..5cd25c745a8f 100644
  #ifdef finish_arch_post_lock_switch
  	finish_arch_post_lock_switch();
 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 34ada718ef47..21f0dc3fe2aa 100644
+index f4a02e240fb6..2e73f8cfde74 100644
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
 @@ -61,6 +61,7 @@
@@ -22606,7 +22992,7 @@ index 34ada718ef47..21f0dc3fe2aa 100644
  	return NULL;
  }
  
-@@ -6505,7 +6567,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
+@@ -6531,7 +6593,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
  	int cpu = (unsigned long)hcpu;
  
  	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
@@ -22616,7 +23002,7 @@ index 34ada718ef47..21f0dc3fe2aa 100644
  		drain_pages(cpu);
  
  		/*
-@@ -6531,6 +6595,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
+@@ -6557,6 +6621,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
  void __init page_alloc_init(void)
  {
  	hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -22624,7 +23010,7 @@ index 34ada718ef47..21f0dc3fe2aa 100644
  }
  
  /*
-@@ -7359,7 +7424,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7385,7 +7450,7 @@ void zone_pcp_reset(struct zone *zone)
  	struct per_cpu_pageset *pset;
  
  	/* avoid races with drain_pages()  */
@@ -22633,7 +23019,7 @@ index 34ada718ef47..21f0dc3fe2aa 100644
  	if (zone->pageset != &boot_pageset) {
  		for_each_online_cpu(cpu) {
  			pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7368,7 +7433,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7394,7 +7459,7 @@ void zone_pcp_reset(struct zone *zone)
  		free_percpu(zone->pageset);
  		zone->pageset = &boot_pageset;
  	}
@@ -22642,6 +23028,61 @@ index 34ada718ef47..21f0dc3fe2aa 100644
  }
  
  #ifdef CONFIG_MEMORY_HOTREMOVE
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 255714302394..59b529b944a9 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1280,6 +1280,28 @@ void free_percpu(void __percpu *ptr)
+ }
+ EXPORT_SYMBOL_GPL(free_percpu);
+ 
++bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
++{
++#ifdef CONFIG_SMP
++	const size_t static_size = __per_cpu_end - __per_cpu_start;
++	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
++	unsigned int cpu;
++
++	for_each_possible_cpu(cpu) {
++		void *start = per_cpu_ptr(base, cpu);
++		void *va = (void *)addr;
++
++		if (va >= start && va < start + static_size) {
++			if (can_addr)
++				*can_addr = (unsigned long) (va - start);
++			return true;
++		}
++	}
++#endif
++	/* on UP, can't distinguish from other static vars, always false */
++	return false;
++}
++
+ /**
+  * is_kernel_percpu_address - test whether address is from static percpu area
+  * @addr: address to test
+@@ -1293,20 +1315,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
+  */
+ bool is_kernel_percpu_address(unsigned long addr)
+ {
+-#ifdef CONFIG_SMP
+-	const size_t static_size = __per_cpu_end - __per_cpu_start;
+-	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
+-	unsigned int cpu;
+-
+-	for_each_possible_cpu(cpu) {
+-		void *start = per_cpu_ptr(base, cpu);
+-
+-		if ((void *)addr >= start && (void *)addr < start + static_size)
+-			return true;
+-        }
+-#endif
+-	/* on UP, can't distinguish from other static vars, always false */
+-	return false;
++	return __is_kernel_percpu_address(addr, NULL);
+ }
+ 
+ /**
 diff --git a/mm/slab.h b/mm/slab.h
 index bc05fdc3edce..610cf61634f0 100644
 --- a/mm/slab.h
@@ -22659,7 +23100,7 @@ index bc05fdc3edce..610cf61634f0 100644
  #ifdef CONFIG_SLAB
  	struct list_head slabs_partial;	/* partial list first, better asm code */
 diff --git a/mm/slub.c b/mm/slub.c
-index 2b3e740609e9..1732f9c5d31f 100644
+index 7aa0e97af928..35c873f09201 100644
 --- a/mm/slub.c
 +++ b/mm/slub.c
 @@ -1141,7 +1141,7 @@ static noinline int free_debug_processing(
@@ -22693,7 +23134,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  /*
   * Hooks for other subsystems that check memory allocations. In a typical
   * production configuration these hooks all should produce no code at all.
-@@ -1523,10 +1529,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+@@ -1527,10 +1533,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
  	void *start, *p;
  	int idx, order;
  	bool shuffle;
@@ -22711,7 +23152,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  		local_irq_enable();
  
  	flags |= s->allocflags;
-@@ -1601,7 +1614,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+@@ -1605,7 +1618,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
  	page->frozen = 1;
  
  out:
@@ -22720,7 +23161,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  		local_irq_disable();
  	if (!page)
  		return NULL;
-@@ -1660,6 +1673,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
+@@ -1664,6 +1677,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
  	__free_pages(page, order);
  }
  
@@ -22737,7 +23178,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  #define need_reserve_slab_rcu						\
  	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
  
-@@ -1691,6 +1714,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
+@@ -1695,6 +1718,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
  		}
  
  		call_rcu(head, rcu_free_slab);
@@ -22750,7 +23191,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	} else
  		__free_slab(s, page);
  }
-@@ -1798,7 +1827,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+@@ -1802,7 +1831,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
  	if (!n || !n->nr_partial)
  		return NULL;
  
@@ -22759,7 +23200,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	list_for_each_entry_safe(page, page2, &n->partial, lru) {
  		void *t;
  
-@@ -1823,7 +1852,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+@@ -1827,7 +1856,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
  			break;
  
  	}
@@ -22768,7 +23209,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	return object;
  }
  
-@@ -2069,7 +2098,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+@@ -2073,7 +2102,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
  			 * that acquire_slab() will see a slab page that
  			 * is frozen
  			 */
@@ -22777,7 +23218,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  		}
  	} else {
  		m = M_FULL;
-@@ -2080,7 +2109,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+@@ -2084,7 +2113,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
  			 * slabs from diagnostic functions will not see
  			 * any frozen slabs.
  			 */
@@ -22786,7 +23227,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  		}
  	}
  
-@@ -2115,7 +2144,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+@@ -2119,7 +2148,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
  		goto redo;
  
  	if (lock)
@@ -22795,7 +23236,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  
  	if (m == M_FREE) {
  		stat(s, DEACTIVATE_EMPTY);
-@@ -2147,10 +2176,10 @@ static void unfreeze_partials(struct kmem_cache *s,
+@@ -2151,10 +2180,10 @@ static void unfreeze_partials(struct kmem_cache *s,
  		n2 = get_node(s, page_to_nid(page));
  		if (n != n2) {
  			if (n)
@@ -22808,7 +23249,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  		}
  
  		do {
-@@ -2179,7 +2208,7 @@ static void unfreeze_partials(struct kmem_cache *s,
+@@ -2183,7 +2212,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  	}
  
  	if (n)
@@ -22817,7 +23258,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  
  	while (discard_page) {
  		page = discard_page;
-@@ -2218,14 +2247,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+@@ -2222,14 +2251,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
  			pobjects = oldpage->pobjects;
  			pages = oldpage->pages;
  			if (drain && pobjects > s->cpu_partial) {
@@ -22839,7 +23280,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  				oldpage = NULL;
  				pobjects = 0;
  				pages = 0;
-@@ -2297,7 +2333,22 @@ static bool has_cpu_slab(int cpu, void *info)
+@@ -2301,7 +2337,22 @@ static bool has_cpu_slab(int cpu, void *info)
  
  static void flush_all(struct kmem_cache *s)
  {
@@ -22862,7 +23303,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  }
  
  /*
-@@ -2352,10 +2403,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
+@@ -2356,10 +2407,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
  	unsigned long x = 0;
  	struct page *page;
  
@@ -22875,7 +23316,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	return x;
  }
  #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2493,8 +2544,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+@@ -2497,8 +2548,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
   * already disabled (which is the case for bulk allocation).
   */
  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -22887,7 +23328,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	void *freelist;
  	struct page *page;
  
-@@ -2554,6 +2607,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2558,6 +2611,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  	VM_BUG_ON(!c->page->frozen);
  	c->freelist = get_freepointer(s, freelist);
  	c->tid = next_tid(c->tid);
@@ -22901,7 +23342,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	return freelist;
  
  new_slab:
-@@ -2585,7 +2645,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2589,7 +2649,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  	deactivate_slab(s, page, get_freepointer(s, freelist));
  	c->page = NULL;
  	c->freelist = NULL;
@@ -22910,7 +23351,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  }
  
  /*
-@@ -2597,6 +2657,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2601,6 +2661,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  {
  	void *p;
  	unsigned long flags;
@@ -22918,7 +23359,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  
  	local_irq_save(flags);
  #ifdef CONFIG_PREEMPT
-@@ -2608,8 +2669,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2612,8 +2673,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  	c = this_cpu_ptr(s->cpu_slab);
  #endif
  
@@ -22929,7 +23370,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	return p;
  }
  
-@@ -2795,7 +2857,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+@@ -2799,7 +2861,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
  
  	do {
  		if (unlikely(n)) {
@@ -22938,7 +23379,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  			n = NULL;
  		}
  		prior = page->freelist;
-@@ -2827,7 +2889,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+@@ -2831,7 +2893,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
  				 * Otherwise the list_lock will synchronize with
  				 * other processors updating the list of slabs.
  				 */
@@ -22947,7 +23388,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  
  			}
  		}
-@@ -2869,7 +2931,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+@@ -2873,7 +2935,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
  		add_partial(n, page, DEACTIVATE_TO_TAIL);
  		stat(s, FREE_ADD_PARTIAL);
  	}
@@ -22956,7 +23397,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	return;
  
  slab_empty:
-@@ -2884,7 +2946,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+@@ -2888,7 +2950,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
  		remove_full(s, n, page);
  	}
  
@@ -22965,7 +23406,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	stat(s, FREE_SLAB);
  	discard_slab(s, page);
  }
-@@ -3089,6 +3151,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+@@ -3093,6 +3155,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
  			  void **p)
  {
  	struct kmem_cache_cpu *c;
@@ -22973,7 +23414,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	int i;
  
  	/* memcg and kmem_cache debug support */
-@@ -3112,7 +3175,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+@@ -3116,7 +3179,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
  			 * of re-populating per CPU c->freelist
  			 */
  			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -22982,7 +23423,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  			if (unlikely(!p[i]))
  				goto error;
  
-@@ -3124,6 +3187,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+@@ -3128,6 +3191,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
  	}
  	c->tid = next_tid(c->tid);
  	local_irq_enable();
@@ -22990,7 +23431,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  
  	/* Clear memory outside IRQ disabled fastpath loop */
  	if (unlikely(flags & __GFP_ZERO)) {
-@@ -3271,7 +3335,7 @@ static void
+@@ -3275,7 +3339,7 @@ static void
  init_kmem_cache_node(struct kmem_cache_node *n)
  {
  	n->nr_partial = 0;
@@ -22999,7 +23440,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	INIT_LIST_HEAD(&n->partial);
  #ifdef CONFIG_SLUB_DEBUG
  	atomic_long_set(&n->nr_slabs, 0);
-@@ -3615,6 +3679,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
+@@ -3619,6 +3683,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
  							const char *text)
  {
  #ifdef CONFIG_SLUB_DEBUG
@@ -23010,7 +23451,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	void *addr = page_address(page);
  	void *p;
  	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
-@@ -3635,6 +3703,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
+@@ -3639,6 +3707,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
  	slab_unlock(page);
  	kfree(map);
  #endif
@@ -23018,7 +23459,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  }
  
  /*
-@@ -3648,7 +3717,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+@@ -3652,7 +3721,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
  	struct page *page, *h;
  
  	BUG_ON(irqs_disabled());
@@ -23027,7 +23468,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	list_for_each_entry_safe(page, h, &n->partial, lru) {
  		if (!page->inuse) {
  			remove_partial(n, page);
-@@ -3658,7 +3727,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+@@ -3662,7 +3731,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
  			"Objects remaining in %s on __kmem_cache_shutdown()");
  		}
  	}
@@ -23036,7 +23477,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  
  	list_for_each_entry_safe(page, h, &discard, lru)
  		discard_slab(s, page);
-@@ -3916,7 +3985,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+@@ -3920,7 +3989,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
  		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
  			INIT_LIST_HEAD(promote + i);
  
@@ -23045,7 +23486,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  
  		/*
  		 * Build lists of slabs to discard or promote.
-@@ -3947,7 +4016,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+@@ -3951,7 +4020,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
  		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
  			list_splice(promote + i, &n->partial);
  
@@ -23054,7 +23495,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  
  		/* Release empty slabs */
  		list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4123,6 +4192,12 @@ void __init kmem_cache_init(void)
+@@ -4127,6 +4196,12 @@ void __init kmem_cache_init(void)
  {
  	static __initdata struct kmem_cache boot_kmem_cache,
  		boot_kmem_cache_node;
@@ -23067,7 +23508,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  
  	if (debug_guardpage_minorder())
  		slub_max_order = 0;
-@@ -4331,7 +4406,7 @@ static int validate_slab_node(struct kmem_cache *s,
+@@ -4335,7 +4410,7 @@ static int validate_slab_node(struct kmem_cache *s,
  	struct page *page;
  	unsigned long flags;
  
@@ -23076,7 +23517,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  
  	list_for_each_entry(page, &n->partial, lru) {
  		validate_slab_slab(s, page, map);
-@@ -4353,7 +4428,7 @@ static int validate_slab_node(struct kmem_cache *s,
+@@ -4357,7 +4432,7 @@ static int validate_slab_node(struct kmem_cache *s,
  		       s->name, count, atomic_long_read(&n->nr_slabs));
  
  out:
@@ -23085,7 +23526,7 @@ index 2b3e740609e9..1732f9c5d31f 100644
  	return count;
  }
  
-@@ -4541,12 +4616,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
+@@ -4545,12 +4620,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
  		if (!atomic_long_read(&n->nr_slabs))
  			continue;
  
@@ -23732,7 +24173,7 @@ index b0bc023d25c5..5af6426fbcbe 100644
  	migrate_read_unlock(zspage);
  	unpin_tag(handle);
 diff --git a/net/core/dev.c b/net/core/dev.c
-index e1d731fdc72c..6ab4b7863755 100644
+index 60b0a6049e72..660ca3b9c60b 100644
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
 @@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPUS;
@@ -23835,7 +24276,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  }
  
  /**
-@@ -2263,6 +2269,7 @@ static void __netif_reschedule(struct Qdisc *q)
+@@ -2258,6 +2264,7 @@ static void __netif_reschedule(struct Qdisc *q)
  	sd->output_queue_tailp = &q->next_sched;
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_restore(flags);
@@ -23843,7 +24284,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  }
  
  void __netif_schedule(struct Qdisc *q)
-@@ -2344,6 +2351,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+@@ -2339,6 +2346,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
  	__this_cpu_write(softnet_data.completion_queue, skb);
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_restore(flags);
@@ -23851,7 +24292,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  }
  EXPORT_SYMBOL(__dev_kfree_skb_irq);
  
-@@ -3078,7 +3086,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+@@ -3073,7 +3081,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
  	 * This permits qdisc->running owner to get the lock more
  	 * often and dequeue packets faster.
  	 */
@@ -23863,7 +24304,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  	if (unlikely(contended))
  		spin_lock(&q->busylock);
  
-@@ -3141,8 +3153,10 @@ static void skb_update_prio(struct sk_buff *skb)
+@@ -3136,8 +3148,10 @@ static void skb_update_prio(struct sk_buff *skb)
  #define skb_update_prio(skb)
  #endif
  
@@ -23874,7 +24315,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  
  /**
   *	dev_loopback_xmit - loop back @skb
-@@ -3376,8 +3390,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+@@ -3371,8 +3385,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
  		int cpu = smp_processor_id(); /* ok because BHs are off */
  
  		if (txq->xmit_lock_owner != cpu) {
@@ -23884,7 +24325,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  				goto recursion_alert;
  
  			skb = validate_xmit_skb(skb, dev);
-@@ -3387,9 +3400,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+@@ -3382,9 +3395,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
  			HARD_TX_LOCK(dev, txq, cpu);
  
  			if (!netif_xmit_stopped(txq)) {
@@ -23896,7 +24337,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  				if (dev_xmit_complete(rc)) {
  					HARD_TX_UNLOCK(dev, txq);
  					goto out;
-@@ -3763,6 +3776,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+@@ -3758,6 +3771,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
  	rps_unlock(sd);
  
  	local_irq_restore(flags);
@@ -23904,7 +24345,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  
  	atomic_long_inc(&skb->dev->rx_dropped);
  	kfree_skb(skb);
-@@ -3781,7 +3795,7 @@ static int netif_rx_internal(struct sk_buff *skb)
+@@ -3776,7 +3790,7 @@ static int netif_rx_internal(struct sk_buff *skb)
  		struct rps_dev_flow voidflow, *rflow = &voidflow;
  		int cpu;
  
@@ -23913,7 +24354,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  		rcu_read_lock();
  
  		cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3791,13 +3805,13 @@ static int netif_rx_internal(struct sk_buff *skb)
+@@ -3786,13 +3800,13 @@ static int netif_rx_internal(struct sk_buff *skb)
  		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  
  		rcu_read_unlock();
@@ -23930,7 +24371,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  	}
  	return ret;
  }
-@@ -3831,11 +3845,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3826,11 +3840,9 @@ int netif_rx_ni(struct sk_buff *skb)
  
  	trace_netif_rx_ni_entry(skb);
  
@@ -23944,7 +24385,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  
  	return err;
  }
-@@ -4314,7 +4326,7 @@ static void flush_backlog(struct work_struct *work)
+@@ -4309,7 +4321,7 @@ static void flush_backlog(struct work_struct *work)
  	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
  		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  			__skb_unlink(skb, &sd->input_pkt_queue);
@@ -23953,7 +24394,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  			input_queue_head_incr(sd);
  		}
  	}
-@@ -4324,11 +4336,14 @@ static void flush_backlog(struct work_struct *work)
+@@ -4319,11 +4331,14 @@ static void flush_backlog(struct work_struct *work)
  	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
  		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  			__skb_unlink(skb, &sd->process_queue);
@@ -23969,7 +24410,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  }
  
  static void flush_all_backlogs(void)
-@@ -4809,6 +4824,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+@@ -4804,6 +4819,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
  		sd->rps_ipi_list = NULL;
  
  		local_irq_enable();
@@ -23977,7 +24418,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  
  		/* Send pending IPI's to kick RPS processing on remote cpus. */
  		while (remsd) {
-@@ -4822,6 +4838,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+@@ -4817,6 +4833,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
  	} else
  #endif
  		local_irq_enable();
@@ -23985,7 +24426,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  }
  
  static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4851,7 +4868,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
+@@ -4846,7 +4863,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
  	while (again) {
  		struct sk_buff *skb;
  
@@ -23995,7 +24436,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  			rcu_read_lock();
  			__netif_receive_skb(skb);
  			rcu_read_unlock();
-@@ -4859,9 +4878,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
+@@ -4854,9 +4873,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
  			if (++work >= quota)
  				return work;
  
@@ -24006,7 +24447,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  		rps_lock(sd);
  		if (skb_queue_empty(&sd->input_pkt_queue)) {
  			/*
-@@ -4899,9 +4918,11 @@ void __napi_schedule(struct napi_struct *n)
+@@ -4894,9 +4913,11 @@ void __napi_schedule(struct napi_struct *n)
  	local_irq_save(flags);
  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
  	local_irq_restore(flags);
@@ -24018,7 +24459,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  /**
   * __napi_schedule_irqoff - schedule for receive
   * @n: entry to schedule
-@@ -4913,6 +4934,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
+@@ -4908,6 +4929,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
  }
  EXPORT_SYMBOL(__napi_schedule_irqoff);
@@ -24026,7 +24467,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  
  void __napi_complete(struct napi_struct *n)
  {
-@@ -5202,13 +5224,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
+@@ -5197,13 +5219,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
  	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
  	unsigned long time_limit = jiffies + 2;
  	int budget = netdev_budget;
@@ -24048,7 +24489,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  	for (;;) {
  		struct napi_struct *n;
  
-@@ -5239,7 +5269,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
+@@ -5234,7 +5264,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
  	list_splice_tail(&repoll, &list);
  	list_splice(&list, &sd->poll_list);
  	if (!list_empty(&sd->poll_list))
@@ -24057,7 +24498,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  
  	net_rps_action_and_irq_enable(sd);
  }
-@@ -8000,16 +8030,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
+@@ -7995,16 +8025,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_enable();
@@ -24079,7 +24520,7 @@ index e1d731fdc72c..6ab4b7863755 100644
  
  	return NOTIFY_OK;
  }
-@@ -8314,8 +8348,9 @@ static int __init net_dev_init(void)
+@@ -8309,8 +8343,9 @@ static int __init net_dev_init(void)
  
  		INIT_WORK(flush, flush_backlog);
  
@@ -24554,7 +24995,7 @@ index 004af030ef1a..b64f751bda45 100644
  
  const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index dd2332390c45..f6a703b25b6c 100644
+index 34de326b4f09..fe9597af0840 100644
 --- a/net/packet/af_packet.c
 +++ b/net/packet/af_packet.c
 @@ -63,6 +63,7 @@
================================================================

---- gitweb:

http://git.pld-linux.org/gitweb.cgi/packages/kernel.git/commitdiff/5c015b7c57615866aa5a55c9fc9e5fa3b008eb01


