[packages/kernel/LINUX_4_9] RT patch updated

jajcus jajcus at pld-linux.org
Mon Apr 10 09:21:28 CEST 2017


commit 33c7bf0f76b919741a771f7bfd52648f868b04f9
Author: Jacek Konieczny <j.konieczny at eggsoft.pl>
Date:   Mon Apr 10 09:21:04 2017 +0200

    RT patch updated

 kernel-rt.patch | 2877 +++++++++++++++++++++++++++++++++++++++----------------
 kernel.spec     |    2 +-
 2 files changed, 2059 insertions(+), 820 deletions(-)
---
diff --git a/kernel.spec b/kernel.spec
index 55497d5..c89d8f9 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -213,7 +213,7 @@ Patch146:	kernel-aufs4+vserver.patch
 Patch250:	kernel-fix_256colors_menuconfig.patch
 
 # https://rt.wiki.kernel.org/
-# https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/patch-4.9.13-rt12.patch.xz
+# https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/patch-4.9.20-rt16.patch.xz
 Patch500:	kernel-rt.patch
 
 Patch2000:	kernel-small_fixes.patch
diff --git a/kernel-rt.patch b/kernel-rt.patch
index bef16fc..be82622 100644
--- a/kernel-rt.patch
+++ b/kernel-rt.patch
@@ -1155,7 +1155,7 @@ index c2366510187a..6b60f582b738 100644
  	return pen_release != -1 ? -ENOSYS : 0;
  }
 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index 969ef880d234..1182fe883771 100644
+index cf57a7799a0f..78d1b49fbed5 100644
 --- a/arch/arm64/Kconfig
 +++ b/arch/arm64/Kconfig
 @@ -91,6 +91,7 @@ config ARM64
@@ -1166,7 +1166,7 @@ index 969ef880d234..1182fe883771 100644
  	select HAVE_REGS_AND_STACK_ACCESS_API
  	select HAVE_RCU_TABLE_FREE
  	select HAVE_SYSCALL_TRACEPOINTS
-@@ -694,7 +695,7 @@ config XEN_DOM0
+@@ -704,7 +705,7 @@ config XEN_DOM0
  
  config XEN
  	bool "Xen guest support on ARM64"
@@ -2635,7 +2635,7 @@ index 3f05c044720b..fe68afd37162 100644
  	/*
  	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 731044efb195..d2905d9881f0 100644
+index e5bc139d1ba7..fa0aa5931a4b 100644
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
 @@ -5933,6 +5933,13 @@ int kvm_arch_init(void *opaque)
@@ -3058,7 +3058,7 @@ index 381cb50a673c..dc8785233d94 100644
  		}
  	}
 diff --git a/block/blk-mq.c b/block/blk-mq.c
-index 81caceb96c3c..b12b0ab005a9 100644
+index ee54ad01f7ac..1a428fe7bbe1 100644
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
 @@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
@@ -3151,7 +3151,7 @@ index 81caceb96c3c..b12b0ab005a9 100644
  }
  
  static void __blk_mq_complete_request(struct request *rq)
-@@ -915,14 +935,14 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+@@ -906,14 +926,14 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
  		return;
  
  	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
@@ -4494,7 +4494,7 @@ index 11a13b5be73a..baaed0ac274b 100644
  	if (WARN_ON(!dev_data->domain))
  		return;
 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
-index d82637ab09fd..ebe41d30c093 100644
+index b9e50c10213b..fd3b4657723f 100644
 --- a/drivers/iommu/intel-iommu.c
 +++ b/drivers/iommu/intel-iommu.c
 @@ -479,7 +479,7 @@ struct deferred_flush_data {
@@ -4506,7 +4506,7 @@ index d82637ab09fd..ebe41d30c093 100644
  
  /* bitmap for indexing intel_iommus */
  static int g_num_of_iommus;
-@@ -3715,10 +3715,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
+@@ -3716,10 +3716,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
  	struct intel_iommu *iommu;
  	struct deferred_flush_entry *entry;
  	struct deferred_flush_data *flush_data;
@@ -4518,7 +4518,7 @@ index d82637ab09fd..ebe41d30c093 100644
  
  	/* Flush all CPUs' entries to avoid deferring too much.  If
  	 * this becomes a bottleneck, can just flush us, and rely on
-@@ -3751,8 +3749,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
+@@ -3752,8 +3750,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
  	}
  	flush_data->size++;
  	spin_unlock_irqrestore(&flush_data->lock, flags);
@@ -4813,7 +4813,7 @@ index d11cdbb8fba3..223bbb9acb03 100644
  }
  EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
 diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
-index 775c88303017..f8e9e1c2b2f6 100644
+index bedce3453dd3..faf038978650 100644
 --- a/drivers/pinctrl/qcom/pinctrl-msm.c
 +++ b/drivers/pinctrl/qcom/pinctrl-msm.c
 @@ -61,7 +61,7 @@ struct msm_pinctrl {
@@ -4951,9 +4951,9 @@ index 775c88303017..f8e9e1c2b2f6 100644
 -	spin_lock_irqsave(&pctrl->lock, flags);
 +	raw_spin_lock_irqsave(&pctrl->lock, flags);
  
- 	val = readl(pctrl->regs + g->intr_status_reg);
- 	val &= ~BIT(g->intr_status_bit);
-@@ -604,7 +604,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
+ 	val = readl(pctrl->regs + g->intr_cfg_reg);
+ 	val |= BIT(g->intr_enable_bit);
+@@ -600,7 +600,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
  
  	set_bit(d->hwirq, pctrl->enabled_irqs);
  
@@ -4962,7 +4962,7 @@ index 775c88303017..f8e9e1c2b2f6 100644
  }
  
  static void msm_gpio_irq_ack(struct irq_data *d)
-@@ -617,7 +617,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
+@@ -613,7 +613,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
  
  	g = &pctrl->soc->groups[d->hwirq];
  
@@ -4971,7 +4971,7 @@ index 775c88303017..f8e9e1c2b2f6 100644
  
  	val = readl(pctrl->regs + g->intr_status_reg);
  	if (g->intr_ack_high)
-@@ -629,7 +629,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
+@@ -625,7 +625,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
  	if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
  		msm_gpio_update_dual_edge_pos(pctrl, g, d);
  
@@ -4980,7 +4980,7 @@ index 775c88303017..f8e9e1c2b2f6 100644
  }
  
  static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
-@@ -642,7 +642,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+@@ -638,7 +638,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
  
  	g = &pctrl->soc->groups[d->hwirq];
  
@@ -4989,7 +4989,7 @@ index 775c88303017..f8e9e1c2b2f6 100644
  
  	/*
  	 * For hw without possibility of detecting both edges
-@@ -716,7 +716,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+@@ -712,7 +712,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
  	if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
  		msm_gpio_update_dual_edge_pos(pctrl, g, d);
  
@@ -4998,7 +4998,7 @@ index 775c88303017..f8e9e1c2b2f6 100644
  
  	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
  		irq_set_handler_locked(d, handle_level_irq);
-@@ -732,11 +732,11 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+@@ -728,11 +728,11 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
  	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
  	unsigned long flags;
  
@@ -5012,7 +5012,7 @@ index 775c88303017..f8e9e1c2b2f6 100644
  
  	return 0;
  }
-@@ -882,7 +882,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
+@@ -878,7 +878,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
  	pctrl->soc = soc_data;
  	pctrl->chip = msm_gpio_template;
  
@@ -5412,7 +5412,7 @@ index 479e223f9cff..3418a54b4131 100644
  	usb_anchor_resume_wakeups(anchor);
  	atomic_dec(&urb->use_count);
 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
-index 8d412d8b1f29..176491dd739e 100644
+index 89081b834615..90b231b7ad0a 100644
 --- a/drivers/usb/gadget/function/f_fs.c
 +++ b/drivers/usb/gadget/function/f_fs.c
 @@ -1593,7 +1593,7 @@ static void ffs_data_put(struct ffs_data *ffs)
@@ -5952,10 +5952,10 @@ index 22c5b4aa4961..269c6a44449a 100644
  	locks_dispose_list(&dispose);
  }
 diff --git a/fs/namei.c b/fs/namei.c
-index 5b4eed221530..9c8dd3c83a80 100644
+index d5e5140c1045..150fbdd8e04c 100644
 --- a/fs/namei.c
 +++ b/fs/namei.c
-@@ -1629,7 +1629,7 @@ static struct dentry *lookup_slow(const struct qstr *name,
+@@ -1626,7 +1626,7 @@ static struct dentry *lookup_slow(const struct qstr *name,
  {
  	struct dentry *dentry = ERR_PTR(-ENOENT), *old;
  	struct inode *inode = dir->d_inode;
@@ -5964,7 +5964,7 @@ index 5b4eed221530..9c8dd3c83a80 100644
  
  	inode_lock_shared(inode);
  	/* Don't go there if it's already dead */
-@@ -3086,7 +3086,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -3083,7 +3083,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
  	struct dentry *dentry;
  	int error, create_error = 0;
  	umode_t mode = op->mode;
@@ -5974,7 +5974,7 @@ index 5b4eed221530..9c8dd3c83a80 100644
  	if (unlikely(IS_DEADDIR(dir_inode)))
  		return -ENOENT;
 diff --git a/fs/namespace.c b/fs/namespace.c
-index 7cea503ae06d..cb15f5397991 100644
+index 5e35057f07ac..843d274ba167 100644
 --- a/fs/namespace.c
 +++ b/fs/namespace.c
 @@ -14,6 +14,7 @@
@@ -6093,7 +6093,7 @@ index 1452177c822d..f43b01d54c59 100644
  };
  
 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
-index 78ff8b63d5f7..3573653fd5cc 100644
+index 1536aeb0abab..0a8bc7eab083 100644
 --- a/fs/nfs/nfs4proc.c
 +++ b/fs/nfs/nfs4proc.c
 @@ -2698,7 +2698,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
@@ -6106,7 +6106,7 @@ index 78ff8b63d5f7..3573653fd5cc 100644
  	ret = _nfs4_proc_open(opendata);
  	if (ret != 0)
 @@ -2736,7 +2736,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
- 	ctx->state = state;
+ 
  	if (d_inode(dentry) == state->inode) {
  		nfs_inode_attach_open_context(ctx);
 -		if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
@@ -8764,7 +8764,7 @@ index 63a4e4cf40a5..08ab12df2863 100644
  
  /* RCUtree hotplug events */
 diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
-index 1abba5ce2a2f..30211c627511 100644
+index 1abba5ce2a2f..294a8b4875f1 100644
 --- a/include/linux/rtmutex.h
 +++ b/include/linux/rtmutex.h
 @@ -13,11 +13,15 @@
@@ -8833,9 +8833,11 @@ index 1abba5ce2a2f..30211c627511 100644
  
  #define DEFINE_RT_MUTEX(mutexname) \
  	struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
-@@ -91,6 +106,7 @@ extern void rt_mutex_destroy(struct rt_mutex *lock);
+@@ -90,7 +105,9 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
+ extern void rt_mutex_destroy(struct rt_mutex *lock);
  
  extern void rt_mutex_lock(struct rt_mutex *lock);
++extern int rt_mutex_lock_state(struct rt_mutex *lock, int state);
  extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
 +extern int rt_mutex_lock_killable(struct rt_mutex *lock);
  extern int rt_mutex_timed_lock(struct rt_mutex *lock,
@@ -9001,7 +9003,7 @@ index 000000000000..51b28d775fe1
 +
 +#endif
 diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
-index dd1d14250340..8e1f44ff1f2f 100644
+index dd1d14250340..aa2ac1f65c2d 100644
 --- a/include/linux/rwsem.h
 +++ b/include/linux/rwsem.h
 @@ -19,6 +19,10 @@
@@ -9015,19 +9017,26 @@ index dd1d14250340..8e1f44ff1f2f 100644
  struct rw_semaphore;
  
  #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-@@ -184,4 +188,6 @@ extern void up_read_non_owner(struct rw_semaphore *sem);
- # define up_read_non_owner(sem)			up_read(sem)
- #endif
+@@ -106,6 +110,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
+ 	return !list_empty(&sem->wait_list);
+ }
  
 +#endif /* !PREEMPT_RT_FULL */
 +
- #endif /* _LINUX_RWSEM_H */
++/*
++ * The functions below are the same for all rwsem implementations including
++ * the RT specific variant.
++ */
++
+ /*
+  * lock for reading
+  */
 diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
 new file mode 100644
-index 000000000000..e26bd95a57c3
+index 000000000000..2ffbf093ae92
 --- /dev/null
 +++ b/include/linux/rwsem_rt.h
-@@ -0,0 +1,167 @@
+@@ -0,0 +1,67 @@
 +#ifndef _LINUX_RWSEM_RT_H
 +#define _LINUX_RWSEM_RT_H
 +
@@ -9035,165 +9044,65 @@ index 000000000000..e26bd95a57c3
 +#error "Include rwsem.h"
 +#endif
 +
-+/*
-+ * RW-semaphores are a spinlock plus a reader-depth count.
-+ *
-+ * Note that the semantics are different from the usual
-+ * Linux rw-sems, in PREEMPT_RT mode we do not allow
-+ * multiple readers to hold the lock at once, we only allow
-+ * a read-lock owner to read-lock recursively. This is
-+ * better for latency, makes the implementation inherently
-+ * fair and makes it simpler as well.
-+ */
-+
 +#include <linux/rtmutex.h>
++#include <linux/swait.h>
++
++#define READER_BIAS		(1U << 31)
++#define WRITER_BIAS		(1U << 30)
 +
 +struct rw_semaphore {
-+	struct rt_mutex		lock;
-+	int			read_depth;
++	atomic_t		readers;
++	struct rt_mutex		rtmutex;
 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
 +	struct lockdep_map	dep_map;
 +#endif
 +};
 +
-+#define __RWSEM_INITIALIZER(name) \
-+	{ .lock = __RT_MUTEX_INITIALIZER(name.lock), \
-+	  RW_DEP_MAP_INIT(name) }
++#define __RWSEM_INITIALIZER(name)				\
++{								\
++	.readers = ATOMIC_INIT(READER_BIAS),			\
++	.rtmutex = __RT_MUTEX_INITIALIZER(name.rtmutex),	\
++	RW_DEP_MAP_INIT(name)					\
++}
 +
 +#define DECLARE_RWSEM(lockname) \
 +	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
 +
-+extern void  __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
-+				     struct lock_class_key *key);
++extern void  __rwsem_init(struct rw_semaphore *rwsem, const char *name,
++			  struct lock_class_key *key);
 +
-+#define __rt_init_rwsem(sem, name, key)			\
-+	do {						\
-+		rt_mutex_init(&(sem)->lock);		\
-+		__rt_rwsem_init((sem), (name), (key));\
-+	} while (0)
-+
-+#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
++#define __init_rwsem(sem, name, key)			\
++do {							\
++		rt_mutex_init(&(sem)->rtmutex);		\
++		__rwsem_init((sem), (name), (key));	\
++} while (0)
 +
-+# define rt_init_rwsem(sem)				\
++#define init_rwsem(sem)					\
 +do {							\
 +	static struct lock_class_key __key;		\
 +							\
-+	__rt_init_rwsem((sem), #sem, &__key);		\
++	__init_rwsem((sem), #sem, &__key);		\
 +} while (0)
 +
-+extern void rt_down_write(struct rw_semaphore *rwsem);
-+extern int  rt_down_write_killable(struct rw_semaphore *rwsem);
-+extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
-+extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
-+extern int  rt_down_write_killable_nested(struct rw_semaphore *rwsem,
-+					  int subclass);
-+extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
-+				      struct lockdep_map *nest);
-+extern void rt__down_read(struct rw_semaphore *rwsem);
-+extern void rt_down_read(struct rw_semaphore *rwsem);
-+extern int  rt_down_write_trylock(struct rw_semaphore *rwsem);
-+extern int  rt__down_read_trylock(struct rw_semaphore *rwsem);
-+extern int  rt_down_read_trylock(struct rw_semaphore *rwsem);
-+extern void __rt_up_read(struct rw_semaphore *rwsem);
-+extern void rt_up_read(struct rw_semaphore *rwsem);
-+extern void rt_up_write(struct rw_semaphore *rwsem);
-+extern void rt_downgrade_write(struct rw_semaphore *rwsem);
-+
-+#define init_rwsem(sem)		rt_init_rwsem(sem)
-+#define rwsem_is_locked(s)	rt_mutex_is_locked(&(s)->lock)
-+
-+static inline int rwsem_is_contended(struct rw_semaphore *sem)
-+{
-+	/* rt_mutex_has_waiters() */
-+	return !RB_EMPTY_ROOT(&sem->lock.waiters);
-+}
-+
-+static inline void __down_read(struct rw_semaphore *sem)
-+{
-+	rt__down_read(sem);
-+}
-+
-+static inline void down_read(struct rw_semaphore *sem)
-+{
-+	rt_down_read(sem);
-+}
-+
-+static inline int __down_read_trylock(struct rw_semaphore *sem)
-+{
-+	return rt__down_read_trylock(sem);
-+}
-+
-+static inline int down_read_trylock(struct rw_semaphore *sem)
-+{
-+	return rt_down_read_trylock(sem);
-+}
-+
-+static inline void down_write(struct rw_semaphore *sem)
-+{
-+	rt_down_write(sem);
-+}
-+
-+static inline int down_write_killable(struct rw_semaphore *sem)
-+{
-+	return rt_down_write_killable(sem);
-+}
-+
-+static inline int down_write_trylock(struct rw_semaphore *sem)
-+{
-+	return rt_down_write_trylock(sem);
-+}
-+
-+static inline void __up_read(struct rw_semaphore *sem)
-+{
-+	__rt_up_read(sem);
-+}
-+
-+static inline void up_read(struct rw_semaphore *sem)
-+{
-+	rt_up_read(sem);
-+}
-+
-+static inline void up_write(struct rw_semaphore *sem)
-+{
-+	rt_up_write(sem);
-+}
-+
-+static inline void downgrade_write(struct rw_semaphore *sem)
-+{
-+	rt_downgrade_write(sem);
-+}
-+
-+static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
-+{
-+	return rt_down_read_nested(sem, subclass);
-+}
-+
-+static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
-+{
-+	rt_down_write_nested(sem, subclass);
-+}
-+
-+static inline int down_write_killable_nested(struct rw_semaphore *sem,
-+					     int subclass)
++static inline int rwsem_is_locked(struct rw_semaphore *sem)
 +{
-+	return rt_down_write_killable_nested(sem, subclass);
++	return atomic_read(&sem->readers) != READER_BIAS;
 +}
 +
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+static inline void down_write_nest_lock(struct rw_semaphore *sem,
-+		struct rw_semaphore *nest_lock)
++static inline int rwsem_is_contended(struct rw_semaphore *sem)
 +{
-+	rt_down_write_nested_lock(sem, &nest_lock->dep_map);
++	return atomic_read(&sem->readers) > 0;
 +}
 +
-+#else
++extern void __down_read(struct rw_semaphore *sem);
++extern int __down_read_trylock(struct rw_semaphore *sem);
++extern void __down_write(struct rw_semaphore *sem);
++extern int __must_check __down_write_killable(struct rw_semaphore *sem);
++extern int __down_write_trylock(struct rw_semaphore *sem);
++extern void __up_read(struct rw_semaphore *sem);
++extern void __up_write(struct rw_semaphore *sem);
++extern void __downgrade_write(struct rw_semaphore *sem);
 +
-+static inline void down_write_nest_lock(struct rw_semaphore *sem,
-+		struct rw_semaphore *nest_lock)
-+{
-+	rt_down_write_nested_lock(sem, NULL);
-+}
-+#endif
 +#endif
 diff --git a/include/linux/sched.h b/include/linux/sched.h
 index 75d9a57e212e..8cb7df0f56e3 100644
@@ -9754,10 +9663,36 @@ index 32810f279f8e..0db6e31161f6 100644
  		struct lock_class_key *class)
  {
 diff --git a/include/linux/smp.h b/include/linux/smp.h
-index 8e0cb7a0f836..b16ca967ad80 100644
+index 8e0cb7a0f836..891c533724f5 100644
 --- a/include/linux/smp.h
 +++ b/include/linux/smp.h
-@@ -185,6 +185,9 @@ static inline void smp_init(void) { }
+@@ -120,6 +120,13 @@ extern unsigned int setup_max_cpus;
+ extern void __init setup_nr_cpu_ids(void);
+ extern void __init smp_init(void);
+ 
++extern int __boot_cpu_id;
++
++static inline int get_boot_cpu_id(void)
++{
++	return __boot_cpu_id;
++}
++
+ #else /* !SMP */
+ 
+ static inline void smp_send_stop(void) { }
+@@ -158,6 +165,11 @@ static inline void smp_init(void) { up_late_init(); }
+ static inline void smp_init(void) { }
+ #endif
+ 
++static inline int get_boot_cpu_id(void)
++{
++	return 0;
++}
++
+ #endif /* !SMP */
+ 
+ /*
+@@ -185,6 +197,9 @@ static inline void smp_init(void) { }
  #define get_cpu()		({ preempt_disable(); smp_processor_id(); })
  #define put_cpu()		preempt_enable()
  
@@ -9768,7 +9703,7 @@ index 8e0cb7a0f836..b16ca967ad80 100644
   * Callback to arch code if there's nosmp or maxcpus=0 on the
   * boot command line:
 diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
-index 47dd0cebd204..02928fa5499d 100644
+index 47dd0cebd204..b241cc044bd3 100644
 --- a/include/linux/spinlock.h
 +++ b/include/linux/spinlock.h
 @@ -271,7 +271,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
@@ -9795,20 +9730,7 @@ index 47dd0cebd204..02928fa5499d 100644
  /*
   * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
   */
-@@ -347,6 +355,12 @@ static __always_inline void spin_unlock(spinlock_t *lock)
- 	raw_spin_unlock(&lock->rlock);
- }
- 
-+static __always_inline int spin_unlock_no_deboost(spinlock_t *lock)
-+{
-+	raw_spin_unlock(&lock->rlock);
-+	return 0;
-+}
-+
- static __always_inline void spin_unlock_bh(spinlock_t *lock)
- {
- 	raw_spin_unlock_bh(&lock->rlock);
-@@ -416,4 +430,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+@@ -416,4 +424,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  #define atomic_dec_and_lock(atomic, lock) \
  		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
  
@@ -9831,10 +9753,10 @@ index 5344268e6e62..043263f30e81 100644
  #endif /* __LINUX_SPINLOCK_API_SMP_H */
 diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
 new file mode 100644
-index 000000000000..3534cff3dd08
+index 000000000000..43ca841b913a
 --- /dev/null
 +++ b/include/linux/spinlock_rt.h
-@@ -0,0 +1,164 @@
+@@ -0,0 +1,162 @@
 +#ifndef __LINUX_SPINLOCK_RT_H
 +#define __LINUX_SPINLOCK_RT_H
 +
@@ -9863,7 +9785,6 @@ index 000000000000..3534cff3dd08
 +extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
 +extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
 +extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
-+extern int __lockfunc rt_spin_unlock_no_deboost(spinlock_t *lock);
 +extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
 +extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
 +extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
@@ -9949,7 +9870,6 @@ index 000000000000..3534cff3dd08
 +#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
 +
 +#define spin_unlock(lock)			rt_spin_unlock(lock)
-+#define spin_unlock_no_deboost(lock)		rt_spin_unlock_no_deboost(lock)
 +
 +#define spin_unlock_bh(lock)				\
 +	do {						\
@@ -11035,7 +10955,7 @@ index 4e2f3de0e40b..6401eb5fe140 100644
  	/*
  	 * Used to destroy pidlists and separate to serve as flush domain.
 diff --git a/kernel/cpu.c b/kernel/cpu.c
-index 217fd2e7f435..69444f1bc924 100644
+index 217fd2e7f435..c23676e58dfd 100644
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
 @@ -239,6 +239,289 @@ static struct {
@@ -11404,6 +11324,26 @@ index 217fd2e7f435..69444f1bc924 100644
  	return ret;
  }
  
+@@ -1240,6 +1562,8 @@ core_initcall(cpu_hotplug_pm_sync_init);
+ 
+ #endif /* CONFIG_PM_SLEEP_SMP */
+ 
++int __boot_cpu_id;
++
+ #endif /* CONFIG_SMP */
+ 
+ /* Boot processor state steps */
+@@ -1923,6 +2247,10 @@ void __init boot_cpu_init(void)
+ 	set_cpu_active(cpu, true);
+ 	set_cpu_present(cpu, true);
+ 	set_cpu_possible(cpu, true);
++
++#ifdef CONFIG_SMP
++	__boot_cpu_id = cpu;
++#endif
+ }
+ 
+ /*
 diff --git a/kernel/cpuset.c b/kernel/cpuset.c
 index 29f815d2ef7e..341b17f24f95 100644
 --- a/kernel/cpuset.c
@@ -11684,7 +11624,7 @@ index fc1ef736253c..83c666537a7a 100644
  	return r;
  }
 diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 4b3323151a2f..e89a1a4d59cd 100644
+index 07c0dc806dfc..baf1a2867d74 100644
 --- a/kernel/events/core.c
 +++ b/kernel/events/core.c
 @@ -1050,6 +1050,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
@@ -11812,10 +11752,40 @@ index ba8a01564985..47784f8aed37 100644
  	p->utime = p->stime = p->gtime = 0;
  	p->utimescaled = p->stimescaled = 0;
 diff --git a/kernel/futex.c b/kernel/futex.c
-index 38b68c2735c5..6450a8d81667 100644
+index 4c6b6e697b73..a01d203939cb 100644
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -904,7 +904,9 @@ void exit_pi_state_list(struct task_struct *curr)
+@@ -800,7 +800,7 @@ static int refill_pi_state_cache(void)
+ 	return 0;
+ }
+ 
+-static struct futex_pi_state * alloc_pi_state(void)
++static struct futex_pi_state *alloc_pi_state(void)
+ {
+ 	struct futex_pi_state *pi_state = current->pi_state_cache;
+ 
+@@ -810,6 +810,11 @@ static struct futex_pi_state * alloc_pi_state(void)
+ 	return pi_state;
+ }
+ 
++static void get_pi_state(struct futex_pi_state *pi_state)
++{
++	WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
++}
++
+ /*
+  * Drops a reference to the pi_state object and frees or caches it
+  * when the last reference is gone.
+@@ -854,7 +859,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
+  * Look up the task based on what TID userspace gave us.
+  * We dont trust it.
+  */
+-static struct task_struct * futex_find_get_task(pid_t pid)
++static struct task_struct *futex_find_get_task(pid_t pid)
+ {
+ 	struct task_struct *p;
+ 
+@@ -904,7 +909,9 @@ void exit_pi_state_list(struct task_struct *curr)
  		 * task still owns the PI-state:
  		 */
  		if (head->next != next) {
@@ -11825,128 +11795,935 @@ index 38b68c2735c5..6450a8d81667 100644
  			continue;
  		}
  
-@@ -1299,6 +1301,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
- 	struct futex_pi_state *pi_state = this->pi_state;
- 	u32 uninitialized_var(curval), newval;
- 	WAKE_Q(wake_q);
-+	WAKE_Q(wake_sleeper_q);
- 	bool deboost;
- 	int ret = 0;
+@@ -914,10 +921,12 @@ void exit_pi_state_list(struct task_struct *curr)
+ 		pi_state->owner = NULL;
+ 		raw_spin_unlock_irq(&curr->pi_lock);
  
-@@ -1365,7 +1368,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+-		rt_mutex_unlock(&pi_state->pi_mutex);
+-
++		get_pi_state(pi_state);
+ 		spin_unlock(&hb->lock);
  
- 	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
++		rt_mutex_futex_unlock(&pi_state->pi_mutex);
++		put_pi_state(pi_state);
++
+ 		raw_spin_lock_irq(&curr->pi_lock);
+ 	}
+ 	raw_spin_unlock_irq(&curr->pi_lock);
+@@ -971,6 +980,39 @@ void exit_pi_state_list(struct task_struct *curr)
+  *
+  * [10] There is no transient state which leaves owner and user space
+  *	TID out of sync.
++ *
++ *
++ * Serialization and lifetime rules:
++ *
++ * hb->lock:
++ *
++ *	hb -> futex_q, relation
++ *	futex_q -> pi_state, relation
++ *
++ *	(cannot be raw because hb can contain arbitrary amount
++ *	 of futex_q's)
++ *
++ * pi_mutex->wait_lock:
++ *
++ *	{uval, pi_state}
++ *
++ *	(and pi_mutex 'obviously')
++ *
++ * p->pi_lock:
++ *
++ *	p->pi_state_list -> pi_state->list, relation
++ *
++ * pi_state->refcount:
++ *
++ *	pi_state lifetime
++ *
++ *
++ * Lock order:
++ *
++ *   hb->lock
++ *     pi_mutex->wait_lock
++ *       p->pi_lock
++ *
+  */
  
--	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
-+	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
-+					&wake_sleeper_q);
+ /*
+@@ -978,10 +1020,12 @@ void exit_pi_state_list(struct task_struct *curr)
+  * the pi_state against the user space value. If correct, attach to
+  * it.
+  */
+-static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
++static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
++			      struct futex_pi_state *pi_state,
+ 			      struct futex_pi_state **ps)
+ {
+ 	pid_t pid = uval & FUTEX_TID_MASK;
++	int ret, uval2;
  
  	/*
- 	 * First unlock HB so the waiter does not spin on it once he got woken
-@@ -1373,8 +1377,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
- 	 * deboost first (and lose our higher priority), then the task might get
- 	 * scheduled away before the wake up can take place.
- 	 */
--	spin_unlock(&hb->lock);
-+	deboost |= spin_unlock_no_deboost(&hb->lock);
- 	wake_up_q(&wake_q);
-+	wake_up_q_sleeper(&wake_sleeper_q);
- 	if (deboost)
- 		rt_mutex_adjust_prio(current);
- 
-@@ -1924,6 +1929,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
- 				requeue_pi_wake_futex(this, &key2, hb2);
- 				drop_count++;
- 				continue;
-+			} else if (ret == -EAGAIN) {
-+				/*
-+				 * Waiter was woken by timeout or
-+				 * signal and has set pi_blocked_on to
-+				 * PI_WAKEUP_INPROGRESS before we
-+				 * tried to enqueue it on the rtmutex.
-+				 */
-+				this->pi_state = NULL;
-+				put_pi_state(pi_state);
-+				continue;
- 			} else if (ret) {
- 				/*
- 				 * rt_mutex_start_proxy_lock() detected a
-@@ -2814,7 +2829,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- 	struct hrtimer_sleeper timeout, *to = NULL;
- 	struct rt_mutex_waiter rt_waiter;
- 	struct rt_mutex *pi_mutex = NULL;
--	struct futex_hash_bucket *hb;
-+	struct futex_hash_bucket *hb, *hb2;
- 	union futex_key key2 = FUTEX_KEY_INIT;
- 	struct futex_q q = futex_q_init;
- 	int res, ret;
-@@ -2839,10 +2854,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- 	 * The waiter is allocated on our stack, manipulated by the requeue
- 	 * code while we sleep on uaddr.
- 	 */
--	debug_rt_mutex_init_waiter(&rt_waiter);
--	RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
--	RB_CLEAR_NODE(&rt_waiter.tree_entry);
--	rt_waiter.task = NULL;
-+	rt_mutex_init_waiter(&rt_waiter, false);
- 
- 	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
- 	if (unlikely(ret != 0))
-@@ -2873,20 +2885,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
- 	futex_wait_queue_me(hb, &q, to);
+ 	 * Userspace might have messed up non-PI and PI futexes [3]
+@@ -989,9 +1033,39 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
+ 	if (unlikely(!pi_state))
+ 		return -EINVAL;
  
--	spin_lock(&hb->lock);
--	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
--	spin_unlock(&hb->lock);
--	if (ret)
--		goto out_put_keys;
 +	/*
-+	 * On RT we must avoid races with requeue and trying to block
-+	 * on two mutexes (hb->lock and uaddr2's rtmutex) by
-+	 * serializing access to pi_blocked_on with pi_lock.
++	 * We get here with hb->lock held, and having found a
++	 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
++	 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
++	 * which in turn means that futex_lock_pi() still has a reference on
++	 * our pi_state.
++	 *
++	 * The waiter holding a reference on @pi_state also protects against
++	 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
++	 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
++	 * free pi_state before we can take a reference ourselves.
 +	 */
-+	raw_spin_lock_irq(&current->pi_lock);
-+	if (current->pi_blocked_on) {
-+		/*
-+		 * We have been requeued or are in the process of
-+		 * being requeued.
-+		 */
-+		raw_spin_unlock_irq(&current->pi_lock);
-+	} else {
-+		/*
-+		 * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
-+		 * prevents a concurrent requeue from moving us to the
-+		 * uaddr2 rtmutex. After that we can safely acquire
-+		 * (and possibly block on) hb->lock.
-+		 */
-+		current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
-+		raw_spin_unlock_irq(&current->pi_lock);
+ 	WARN_ON(!atomic_read(&pi_state->refcount));
+ 
+ 	/*
++	 * Now that we have a pi_state, we can acquire wait_lock
++	 * and do the state validation.
++	 */
++	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 +
-+		spin_lock(&hb->lock);
++	/*
++	 * Since {uval, pi_state} is serialized by wait_lock, and our current
++	 * uval was read without holding it, it can have changed. Verify it
++	 * still is what we expect it to be, otherwise retry the entire
++	 * operation.
++	 */
++	if (get_futex_value_locked(&uval2, uaddr))
++		goto out_efault;
 +
-+		/*
-+		 * Clean up pi_blocked_on. We might leak it otherwise
-+		 * when we succeeded with the hb->lock in the fast
-+		 * path.
-+		 */
-+		raw_spin_lock_irq(&current->pi_lock);
-+		current->pi_blocked_on = NULL;
-+		raw_spin_unlock_irq(&current->pi_lock);
++	if (uval != uval2)
++		goto out_eagain;
 +
-+		ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-+		spin_unlock(&hb->lock);
-+		if (ret)
-+			goto out_put_keys;
-+	}
++	/*
+ 	 * Handle the owner died case:
+ 	 */
+ 	if (uval & FUTEX_OWNER_DIED) {
+@@ -1006,11 +1080,11 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
+ 			 * is not 0. Inconsistent state. [5]
+ 			 */
+ 			if (pid)
+-				return -EINVAL;
++				goto out_einval;
+ 			/*
+ 			 * Take a ref on the state and return success. [4]
+ 			 */
+-			goto out_state;
++			goto out_attach;
+ 		}
+ 
+ 		/*
+@@ -1022,14 +1096,14 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
+ 		 * Take a ref on the state and return success. [6]
+ 		 */
+ 		if (!pid)
+-			goto out_state;
++			goto out_attach;
+ 	} else {
+ 		/*
+ 		 * If the owner died bit is not set, then the pi_state
+ 		 * must have an owner. [7]
+ 		 */
+ 		if (!pi_state->owner)
+-			return -EINVAL;
++			goto out_einval;
+ 	}
  
  	/*
--	 * In order for us to be here, we know our q.key == key2, and since
--	 * we took the hb->lock above, we also know that futex_requeue() has
--	 * completed and we no longer have to concern ourselves with a wakeup
--	 * race with the atomic proxy lock acquisition by the requeue code. The
--	 * futex_requeue dropped our key1 reference and incremented our key2
--	 * reference count.
+@@ -1038,11 +1112,29 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
+ 	 * user space TID. [9/10]
+ 	 */
+ 	if (pid != task_pid_vnr(pi_state->owner))
+-		return -EINVAL;
+-out_state:
+-	atomic_inc(&pi_state->refcount);
++		goto out_einval;
++
++out_attach:
++	get_pi_state(pi_state);
++	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ 	*ps = pi_state;
+ 	return 0;
++
++out_einval:
++	ret = -EINVAL;
++	goto out_error;
++
++out_eagain:
++	ret = -EAGAIN;
++	goto out_error;
++
++out_efault:
++	ret = -EFAULT;
++	goto out_error;
++
++out_error:
++	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
++	return ret;
+ }
+ 
+ /*
+@@ -1093,6 +1185,9 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
+ 
+ 	/*
+ 	 * No existing pi state. First waiter. [2]
++	 *
++	 * This creates pi_state, we have hb->lock held, this means nothing can
++	 * observe this state, wait_lock is irrelevant.
+ 	 */
+ 	pi_state = alloc_pi_state();
+ 
+@@ -1117,17 +1212,18 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
+ 	return 0;
+ }
+ 
+-static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
++static int lookup_pi_state(u32 __user *uaddr, u32 uval,
++			   struct futex_hash_bucket *hb,
+ 			   union futex_key *key, struct futex_pi_state **ps)
+ {
+-	struct futex_q *match = futex_top_waiter(hb, key);
++	struct futex_q *top_waiter = futex_top_waiter(hb, key);
+ 
+ 	/*
+ 	 * If there is a waiter on that futex, validate it and
+ 	 * attach to the pi_state when the validation succeeds.
+ 	 */
+-	if (match)
+-		return attach_to_pi_state(uval, match->pi_state, ps);
++	if (top_waiter)
++		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
+ 
+ 	/*
+ 	 * We are the first waiter - try to look up the owner based on
+@@ -1146,7 +1242,7 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
+ 	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
+ 		return -EFAULT;
+ 
+-	/*If user space value changed, let the caller retry */
++	/* If user space value changed, let the caller retry */
+ 	return curval != uval ? -EAGAIN : 0;
+ }
+ 
+@@ -1174,7 +1270,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+ 				struct task_struct *task, int set_waiters)
+ {
+ 	u32 uval, newval, vpid = task_pid_vnr(task);
+-	struct futex_q *match;
++	struct futex_q *top_waiter;
+ 	int ret;
+ 
+ 	/*
+@@ -1200,9 +1296,9 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+ 	 * Lookup existing state first. If it exists, try to attach to
+ 	 * its pi_state.
+ 	 */
+-	match = futex_top_waiter(hb, key);
+-	if (match)
+-		return attach_to_pi_state(uval, match->pi_state, ps);
++	top_waiter = futex_top_waiter(hb, key);
++	if (top_waiter)
++		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
+ 
+ 	/*
+ 	 * No waiter and user TID is 0. We are here because the
+@@ -1288,45 +1384,39 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
+ 	 * memory barrier is required here to prevent the following
+ 	 * store to lock_ptr from getting ahead of the plist_del.
+ 	 */
+-	smp_wmb();
+-	q->lock_ptr = NULL;
++	smp_store_release(&q->lock_ptr, NULL);
+ }
+ 
+-static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+-			 struct futex_hash_bucket *hb)
++/*
++ * Caller must hold a reference on @pi_state.
++ */
++static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
+ {
+-	struct task_struct *new_owner;
+-	struct futex_pi_state *pi_state = this->pi_state;
+ 	u32 uninitialized_var(curval), newval;
++	struct task_struct *new_owner;
++	bool deboost = false;
+ 	WAKE_Q(wake_q);
+-	bool deboost;
++	WAKE_Q(wake_sleeper_q);
+ 	int ret = 0;
+ 
+-	if (!pi_state)
+-		return -EINVAL;
+-
+-	/*
+-	 * If current does not own the pi_state then the futex is
+-	 * inconsistent and user space fiddled with the futex value.
+-	 */
+-	if (pi_state->owner != current)
+-		return -EINVAL;
+-
+-	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
++	if (WARN_ON_ONCE(!new_owner)) {
++		/*
++		 * As per the comment in futex_unlock_pi() this should not happen.
++		 *
++		 * When this happens, give up our locks and try again, giving
++		 * the futex_lock_pi() instance time to complete, either by
++		 * waiting on the rtmutex or removing itself from the futex
++		 * queue.
++		 */
++		ret = -EAGAIN;
++		goto out_unlock;
++	}
+ 
+ 	/*
+-	 * It is possible that the next waiter (the one that brought
+-	 * this owner to the kernel) timed out and is no longer
+-	 * waiting on the lock.
+-	 */
+-	if (!new_owner)
+-		new_owner = this->task;
+-
+-	/*
+-	 * We pass it to the next owner. The WAITERS bit is always
+-	 * kept enabled while there is PI state around. We cleanup the
+-	 * owner died bit, because we are the owner.
++	 * We pass it to the next owner. The WAITERS bit is always kept
++	 * enabled while there is PI state around. We cleanup the owner
++	 * died bit, because we are the owner.
+ 	 */
+ 	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+ 
+@@ -1335,6 +1425,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+ 
+ 	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
+ 		ret = -EFAULT;
++
+ 	} else if (curval != uval) {
+ 		/*
+ 		 * If a unconditional UNLOCK_PI operation (user space did not
+@@ -1347,10 +1438,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+ 		else
+ 			ret = -EINVAL;
+ 	}
+-	if (ret) {
+-		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+-		return ret;
+-	}
++
++	if (ret)
++		goto out_unlock;
+ 
+ 	raw_spin_lock(&pi_state->owner->pi_lock);
+ 	WARN_ON(list_empty(&pi_state->list));
+@@ -1363,22 +1453,22 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+ 	pi_state->owner = new_owner;
+ 	raw_spin_unlock(&new_owner->pi_lock);
+ 
++	/*
++	 * We've updated the uservalue, this unlock cannot fail.
++	 */
++	deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
++					  &wake_sleeper_q);
++
++out_unlock:
+ 	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ 
+-	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+-
+-	/*
+-	 * First unlock HB so the waiter does not spin on it once he got woken
+-	 * up. Second wake up the waiter before the priority is adjusted. If we
+-	 * deboost first (and lose our higher priority), then the task might get
+-	 * scheduled away before the wake up can take place.
+-	 */
+-	spin_unlock(&hb->lock);
+-	wake_up_q(&wake_q);
+-	if (deboost)
++	if (deboost) {
++		wake_up_q(&wake_q);
++		wake_up_q_sleeper(&wake_sleeper_q);
+ 		rt_mutex_adjust_prio(current);
++	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ /*
+@@ -1824,7 +1914,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ 			 * If that call succeeds then we have pi_state and an
+ 			 * initial refcount on it.
+ 			 */
+-			ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
++			ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
+ 		}
+ 
+ 		switch (ret) {
+@@ -1907,7 +1997,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ 			 * refcount on the pi_state and store the pointer in
+ 			 * the futex_q object of the waiter.
+ 			 */
+-			atomic_inc(&pi_state->refcount);
++			get_pi_state(pi_state);
+ 			this->pi_state = pi_state;
+ 			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
+ 							this->rt_waiter,
+@@ -1924,6 +2014,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ 				requeue_pi_wake_futex(this, &key2, hb2);
+ 				drop_count++;
+ 				continue;
++			} else if (ret == -EAGAIN) {
++				/*
++				 * Waiter was woken by timeout or
++				 * signal and has set pi_blocked_on to
++				 * PI_WAKEUP_INPROGRESS before we
++				 * tried to enqueue it on the rtmutex.
++				 */
++				this->pi_state = NULL;
++				put_pi_state(pi_state);
++				continue;
+ 			} else if (ret) {
+ 				/*
+ 				 * rt_mutex_start_proxy_lock() detected a
+@@ -2007,20 +2107,7 @@ queue_unlock(struct futex_hash_bucket *hb)
+ 	hb_waiters_dec(hb);
+ }
+ 
+-/**
+- * queue_me() - Enqueue the futex_q on the futex_hash_bucket
+- * @q:	The futex_q to enqueue
+- * @hb:	The destination hash bucket
+- *
+- * The hb->lock must be held by the caller, and is released here. A call to
+- * queue_me() is typically paired with exactly one call to unqueue_me().  The
+- * exceptions involve the PI related operations, which may use unqueue_me_pi()
+- * or nothing if the unqueue is done as part of the wake process and the unqueue
+- * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
+- * an example).
+- */
+-static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+-	__releases(&hb->lock)
++static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+ {
+ 	int prio;
+ 
+@@ -2037,6 +2124,24 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+ 	plist_node_init(&q->list, prio);
+ 	plist_add(&q->list, &hb->chain);
+ 	q->task = current;
++}
++
++/**
++ * queue_me() - Enqueue the futex_q on the futex_hash_bucket
++ * @q:	The futex_q to enqueue
++ * @hb:	The destination hash bucket
++ *
++ * The hb->lock must be held by the caller, and is released here. A call to
++ * queue_me() is typically paired with exactly one call to unqueue_me().  The
++ * exceptions involve the PI related operations, which may use unqueue_me_pi()
++ * or nothing if the unqueue is done as part of the wake process and the unqueue
++ * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
++ * an example).
++ */
++static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
++	__releases(&hb->lock)
++{
++	__queue_me(q, hb);
+ 	spin_unlock(&hb->lock);
+ }
+ 
+@@ -2123,10 +2228,13 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ {
+ 	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
+ 	struct futex_pi_state *pi_state = q->pi_state;
+-	struct task_struct *oldowner = pi_state->owner;
+ 	u32 uval, uninitialized_var(curval), newval;
++	struct task_struct *oldowner;
+ 	int ret;
+ 
++	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++
++	oldowner = pi_state->owner;
+ 	/* Owner died? */
+ 	if (!pi_state->owner)
+ 		newtid |= FUTEX_OWNER_DIED;
+@@ -2134,7 +2242,8 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ 	/*
+ 	 * We are here either because we stole the rtmutex from the
+ 	 * previous highest priority waiter or we are the highest priority
+-	 * waiter but failed to get the rtmutex the first time.
++	 * waiter but have failed to get the rtmutex the first time.
++	 *
+ 	 * We have to replace the newowner TID in the user space variable.
+ 	 * This must be atomic as we have to preserve the owner died bit here.
+ 	 *
+@@ -2142,17 +2251,16 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ 	 * because we can fault here. Imagine swapped out pages or a fork
+ 	 * that marked all the anonymous memory readonly for cow.
+ 	 *
+-	 * Modifying pi_state _before_ the user space value would
+-	 * leave the pi_state in an inconsistent state when we fault
+-	 * here, because we need to drop the hash bucket lock to
+-	 * handle the fault. This might be observed in the PID check
+-	 * in lookup_pi_state.
++	 * Modifying pi_state _before_ the user space value would leave the
++	 * pi_state in an inconsistent state when we fault here, because we
++	 * need to drop the locks to handle the fault. This might be observed
++	 * in the PID check in lookup_pi_state.
+ 	 */
+ retry:
+ 	if (get_futex_value_locked(&uval, uaddr))
+ 		goto handle_fault;
+ 
+-	while (1) {
++	for (;;) {
+ 		newval = (uval & FUTEX_OWNER_DIED) | newtid;
+ 
+ 		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+@@ -2167,47 +2275,60 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ 	 * itself.
+ 	 */
+ 	if (pi_state->owner != NULL) {
+-		raw_spin_lock_irq(&pi_state->owner->pi_lock);
++		raw_spin_lock(&pi_state->owner->pi_lock);
+ 		WARN_ON(list_empty(&pi_state->list));
+ 		list_del_init(&pi_state->list);
+-		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
++		raw_spin_unlock(&pi_state->owner->pi_lock);
+ 	}
+ 
+ 	pi_state->owner = newowner;
+ 
+-	raw_spin_lock_irq(&newowner->pi_lock);
++	raw_spin_lock(&newowner->pi_lock);
+ 	WARN_ON(!list_empty(&pi_state->list));
+ 	list_add(&pi_state->list, &newowner->pi_state_list);
+-	raw_spin_unlock_irq(&newowner->pi_lock);
++	raw_spin_unlock(&newowner->pi_lock);
++	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
++
+ 	return 0;
+ 
+ 	/*
+-	 * To handle the page fault we need to drop the hash bucket
+-	 * lock here. That gives the other task (either the highest priority
+-	 * waiter itself or the task which stole the rtmutex) the
+-	 * chance to try the fixup of the pi_state. So once we are
+-	 * back from handling the fault we need to check the pi_state
+-	 * after reacquiring the hash bucket lock and before trying to
+-	 * do another fixup. When the fixup has been done already we
+-	 * simply return.
++	 * To handle the page fault we need to drop the locks here. That gives
++	 * the other task (either the highest priority waiter itself or the
++	 * task which stole the rtmutex) the chance to try the fixup of the
++	 * pi_state. So once we are back from handling the fault we need to
++	 * check the pi_state after reacquiring the locks and before trying to
++	 * do another fixup. When the fixup has been done already we simply
++	 * return.
++	 *
++	 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
++	 * drop hb->lock since the caller owns the hb -> futex_q relation.
++	 * Dropping the pi_mutex->wait_lock requires the state revalidate.
+ 	 */
+ handle_fault:
++	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ 	spin_unlock(q->lock_ptr);
+ 
+ 	ret = fault_in_user_writeable(uaddr);
+ 
+ 	spin_lock(q->lock_ptr);
++	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ 
+ 	/*
+ 	 * Check if someone else fixed it for us:
+ 	 */
+-	if (pi_state->owner != oldowner)
+-		return 0;
++	if (pi_state->owner != oldowner) {
++		ret = 0;
++		goto out_unlock;
++	}
+ 
+ 	if (ret)
+-		return ret;
++		goto out_unlock;
+ 
+ 	goto retry;
++
++out_unlock:
++	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
++	return ret;
+ }
+ 
+ static long futex_wait_restart(struct restart_block *restart);
+@@ -2229,13 +2350,16 @@ static long futex_wait_restart(struct restart_block *restart);
+  */
+ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ {
+-	struct task_struct *owner;
+ 	int ret = 0;
+ 
+ 	if (locked) {
+ 		/*
+ 		 * Got the lock. We might not be the anticipated owner if we
+ 		 * did a lock-steal - fix up the PI-state in that case:
++		 *
++		 * We can safely read pi_state->owner without holding wait_lock
++		 * because we now own the rt_mutex, only the owner will attempt
++		 * to change it.
+ 		 */
+ 		if (q->pi_state->owner != current)
+ 			ret = fixup_pi_state_owner(uaddr, q, current);
+@@ -2243,43 +2367,15 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ 	}
+ 
+ 	/*
+-	 * Catch the rare case, where the lock was released when we were on the
+-	 * way back before we locked the hash bucket.
+-	 */
+-	if (q->pi_state->owner == current) {
+-		/*
+-		 * Try to get the rt_mutex now. This might fail as some other
+-		 * task acquired the rt_mutex after we removed ourself from the
+-		 * rt_mutex waiters list.
+-		 */
+-		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
+-			locked = 1;
+-			goto out;
+-		}
+-
+-		/*
+-		 * pi_state is incorrect, some other task did a lock steal and
+-		 * we returned due to timeout or signal without taking the
+-		 * rt_mutex. Too late.
+-		 */
+-		raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
+-		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
+-		if (!owner)
+-			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
+-		raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
+-		ret = fixup_pi_state_owner(uaddr, q, owner);
+-		goto out;
+-	}
+-
+-	/*
+ 	 * Paranoia check. If we did not take the lock, then we should not be
+ 	 * the owner of the rt_mutex.
+ 	 */
+-	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
++	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
+ 		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
+ 				"pi-state %p\n", ret,
+ 				q->pi_state->pi_mutex.owner,
+ 				q->pi_state->owner);
++	}
+ 
+ out:
+ 	return ret ? ret : locked;
+@@ -2503,6 +2599,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+ 			 ktime_t *time, int trylock)
+ {
+ 	struct hrtimer_sleeper timeout, *to = NULL;
++	struct futex_pi_state *pi_state = NULL;
++	struct rt_mutex_waiter rt_waiter;
+ 	struct futex_hash_bucket *hb;
+ 	struct futex_q q = futex_q_init;
+ 	int res, ret;
+@@ -2555,25 +2653,77 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+ 		}
+ 	}
+ 
++	WARN_ON(!q.pi_state);
++
+ 	/*
+ 	 * Only actually queue now that the atomic ops are done:
+ 	 */
+-	queue_me(&q, hb);
++	__queue_me(&q, hb);
+ 
+-	WARN_ON(!q.pi_state);
+-	/*
+-	 * Block on the PI mutex:
+-	 */
+-	if (!trylock) {
+-		ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
+-	} else {
+-		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
++	if (trylock) {
++		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
+ 		/* Fixup the trylock return value: */
+ 		ret = ret ? 0 : -EWOULDBLOCK;
++		goto no_block;
+ 	}
+ 
++	rt_mutex_init_waiter(&rt_waiter, false);
++
++	/*
++	 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
++	 * hold it while doing rt_mutex_start_proxy(), because then it will
++	 * include hb->lock in the blocking chain, even through we'll not in
++	 * fact hold it while blocking. This will lead it to report -EDEADLK
++	 * and BUG when futex_unlock_pi() interleaves with this.
++	 *
++	 * Therefore acquire wait_lock while holding hb->lock, but drop the
++	 * latter before calling rt_mutex_start_proxy_lock(). This still fully
++	 * serializes against futex_unlock_pi() as that does the exact same
++	 * lock handoff sequence.
++	 */
++	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
++	/*
++	 * the migrate_disable() here disables migration in the in_atomic() fast
++	 * path which is enabled again in the following spin_unlock(). We have
++	 * one migrate_disable() pending in the slow-path which is reversed
++	 * after the raw_spin_unlock_irq() where we leave the atomic context.
++	 */
++	migrate_disable();
++
++	spin_unlock(q.lock_ptr);
++	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
++	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
++	migrate_enable();
++
++	if (ret) {
++		if (ret == 1)
++			ret = 0;
++
++		spin_lock(q.lock_ptr);
++		goto no_block;
++	}
++
++
++	if (unlikely(to))
++		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
++
++	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
++
+ 	spin_lock(q.lock_ptr);
+ 	/*
++	 * If we failed to acquire the lock (signal/timeout), we must
++	 * first acquire the hb->lock before removing the lock from the
++	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
++	 * wait lists consistent.
++	 *
++	 * In particular; it is important that futex_unlock_pi() can not
++	 * observe this inconsistency.
++	 */
++	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
++		ret = 0;
++
++no_block:
++	/*
+ 	 * Fixup the pi_state owner and possibly acquire the lock if we
+ 	 * haven't already.
+ 	 */
+@@ -2589,12 +2739,19 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+ 	 * If fixup_owner() faulted and was unable to handle the fault, unlock
+ 	 * it and return the fault to userspace.
+ 	 */
+-	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
+-		rt_mutex_unlock(&q.pi_state->pi_mutex);
++	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
++		pi_state = q.pi_state;
++		get_pi_state(pi_state);
++	}
+ 
+ 	/* Unqueue and drop the lock */
+ 	unqueue_me_pi(&q);
+ 
++	if (pi_state) {
++		rt_mutex_futex_unlock(&pi_state->pi_mutex);
++		put_pi_state(pi_state);
++	}
++
+ 	goto out_put_key;
+ 
+ out_unlock_put_key:
+@@ -2631,7 +2788,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+ 	u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
+ 	union futex_key key = FUTEX_KEY_INIT;
+ 	struct futex_hash_bucket *hb;
+-	struct futex_q *match;
++	struct futex_q *top_waiter;
+ 	int ret;
+ 
+ retry:
+@@ -2655,12 +2812,48 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+ 	 * all and we at least want to know if user space fiddled
+ 	 * with the futex value instead of blindly unlocking.
+ 	 */
+-	match = futex_top_waiter(hb, &key);
+-	if (match) {
+-		ret = wake_futex_pi(uaddr, uval, match, hb);
++	top_waiter = futex_top_waiter(hb, &key);
++	if (top_waiter) {
++		struct futex_pi_state *pi_state = top_waiter->pi_state;
++
++		ret = -EINVAL;
++		if (!pi_state)
++			goto out_unlock;
++
+ 		/*
+-		 * In case of success wake_futex_pi dropped the hash
+-		 * bucket lock.
++		 * If current does not own the pi_state then the futex is
++		 * inconsistent and user space fiddled with the futex value.
++		 */
++		if (pi_state->owner != current)
++			goto out_unlock;
++
++		get_pi_state(pi_state);
++		/*
++		 * By taking wait_lock while still holding hb->lock, we ensure
++		 * there is no point where we hold neither; and therefore
++		 * wake_futex_pi() must observe a state consistent with what we
++		 * observed.
++		 */
++		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++		/*
++		 * Magic trickery for now to make the RT migrate disable
++		 * logic happy. The following spin_unlock() happens with
++		 * interrupts disabled so the internal migrate_enable()
++		 * won't undo the migrate_disable() which was issued when
++		 * locking hb->lock.
++		 */
++		migrate_disable();
++		spin_unlock(&hb->lock);
++
++		/* Drops pi_state->pi_mutex.wait_lock */
++		ret = wake_futex_pi(uaddr, uval, pi_state);
++
++		migrate_enable();
++
++		put_pi_state(pi_state);
++
++		/*
++		 * Success, we're done! No tricky corner cases.
+ 		 */
+ 		if (!ret)
+ 			goto out_putkey;
+@@ -2675,7 +2868,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+ 		 * setting the FUTEX_WAITERS bit. Try again.
+ 		 */
+ 		if (ret == -EAGAIN) {
+-			spin_unlock(&hb->lock);
+ 			put_futex_key(&key);
+ 			goto retry;
+ 		}
+@@ -2683,7 +2875,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+ 		 * wake_futex_pi has detected invalid state. Tell user
+ 		 * space.
+ 		 */
+-		goto out_unlock;
++		goto out_putkey;
+ 	}
+ 
+ 	/*
+@@ -2693,8 +2885,10 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+ 	 * preserve the WAITERS bit not the OWNER_DIED one. We are the
+ 	 * owner.
+ 	 */
+-	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
++	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
++		spin_unlock(&hb->lock);
+ 		goto pi_faulted;
++	}
+ 
+ 	/*
+ 	 * If uval has changed, let user space handle it.
+@@ -2708,7 +2902,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+ 	return ret;
+ 
+ pi_faulted:
+-	spin_unlock(&hb->lock);
+ 	put_futex_key(&key);
+ 
+ 	ret = fault_in_user_writeable(uaddr);
+@@ -2812,8 +3005,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 				 u32 __user *uaddr2)
+ {
+ 	struct hrtimer_sleeper timeout, *to = NULL;
++	struct futex_pi_state *pi_state = NULL;
+ 	struct rt_mutex_waiter rt_waiter;
+-	struct futex_hash_bucket *hb;
++	struct futex_hash_bucket *hb, *hb2;
+ 	union futex_key key2 = FUTEX_KEY_INIT;
+ 	struct futex_q q = futex_q_init;
+ 	int res, ret;
+@@ -2838,10 +3032,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 	 * The waiter is allocated on our stack, manipulated by the requeue
+ 	 * code while we sleep on uaddr.
+ 	 */
+-	debug_rt_mutex_init_waiter(&rt_waiter);
+-	RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
+-	RB_CLEAR_NODE(&rt_waiter.tree_entry);
+-	rt_waiter.task = NULL;
++	rt_mutex_init_waiter(&rt_waiter, false);
+ 
+ 	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
+ 	if (unlikely(ret != 0))
+@@ -2872,20 +3063,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ 	futex_wait_queue_me(hb, &q, to);
+ 
+-	spin_lock(&hb->lock);
+-	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+-	spin_unlock(&hb->lock);
+-	if (ret)
+-		goto out_put_keys;
++	/*
++	 * On RT we must avoid races with requeue and trying to block
++	 * on two mutexes (hb->lock and uaddr2's rtmutex) by
++	 * serializing access to pi_blocked_on with pi_lock.
++	 */
++	raw_spin_lock_irq(&current->pi_lock);
++	if (current->pi_blocked_on) {
++		/*
++		 * We have been requeued or are in the process of
++		 * being requeued.
++		 */
++		raw_spin_unlock_irq(&current->pi_lock);
++	} else {
++		/*
++		 * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
++		 * prevents a concurrent requeue from moving us to the
++		 * uaddr2 rtmutex. After that we can safely acquire
++		 * (and possibly block on) hb->lock.
++		 */
++		current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
++		raw_spin_unlock_irq(&current->pi_lock);
++
++		spin_lock(&hb->lock);
++
++		/*
++		 * Clean up pi_blocked_on. We might leak it otherwise
++		 * when we succeeded with the hb->lock in the fast
++		 * path.
++		 */
++		raw_spin_lock_irq(&current->pi_lock);
++		current->pi_blocked_on = NULL;
++		raw_spin_unlock_irq(&current->pi_lock);
++
++		ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
++		spin_unlock(&hb->lock);
++		if (ret)
++			goto out_put_keys;
++	}
+ 
+ 	/*
+-	 * In order for us to be here, we know our q.key == key2, and since
+-	 * we took the hb->lock above, we also know that futex_requeue() has
+-	 * completed and we no longer have to concern ourselves with a wakeup
+-	 * race with the atomic proxy lock acquisition by the requeue code. The
+-	 * futex_requeue dropped our key1 reference and incremented our key2
+-	 * reference count.
 +	 * In order to be here, we have either been requeued, are in
 +	 * the process of being requeued, or requeue successfully
 +	 * acquired uaddr2 on our behalf.  If pi_blocked_on was
@@ -11959,7 +12736,7 @@ index 38b68c2735c5..6450a8d81667 100644
  
  	/* Check if the requeue code acquired the second futex for us. */
  	if (!q.rt_waiter) {
-@@ -2895,14 +2942,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2894,16 +3120,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
  		 * did a lock-steal - fix up the PI-state in that case.
  		 */
  		if (q.pi_state && (q.pi_state->owner != current)) {
@@ -11967,6 +12744,12 @@ index 38b68c2735c5..6450a8d81667 100644
 +			spin_lock(&hb2->lock);
 +			BUG_ON(&hb2->lock != q.lock_ptr);
  			ret = fixup_pi_state_owner(uaddr2, &q, current);
+-			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+-				rt_mutex_unlock(&q.pi_state->pi_mutex);
++			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
++				pi_state = q.pi_state;
++				get_pi_state(pi_state);
++			}
  			/*
  			 * Drop the reference to the pi state which
  			 * the requeue_pi() code acquired for us.
@@ -11976,17 +12759,48 @@ index 38b68c2735c5..6450a8d81667 100644
 +			spin_unlock(&hb2->lock);
  		}
  	} else {
- 		/*
-@@ -2915,7 +2963,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- 		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
- 		debug_rt_mutex_free_waiter(&rt_waiter);
+ 		struct rt_mutex *pi_mutex;
+@@ -2915,10 +3144,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 		 */
+ 		WARN_ON(!q.pi_state);
+ 		pi_mutex = &q.pi_state->pi_mutex;
+-		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
+-		debug_rt_mutex_free_waiter(&rt_waiter);
++		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
  
 -		spin_lock(q.lock_ptr);
 +		spin_lock(&hb2->lock);
 +		BUG_ON(&hb2->lock != q.lock_ptr);
++		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
++			ret = 0;
++
++		debug_rt_mutex_free_waiter(&rt_waiter);
  		/*
  		 * Fixup the pi_state owner and possibly acquire the lock if we
  		 * haven't already.
+@@ -2936,13 +3169,20 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 		 * the fault, unlock the rt_mutex and return the fault to
+ 		 * userspace.
+ 		 */
+-		if (ret && rt_mutex_owner(pi_mutex) == current)
+-			rt_mutex_unlock(pi_mutex);
++		if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
++			pi_state = q.pi_state;
++			get_pi_state(pi_state);
++		}
+ 
+ 		/* Unqueue and drop the lock. */
+ 		unqueue_me_pi(&q);
+ 	}
+ 
++	if (pi_state) {
++		rt_mutex_futex_unlock(&pi_state->pi_mutex);
++		put_pi_state(pi_state);
++	}
++
+ 	if (ret == -EINTR) {
+ 		/*
+ 		 * We've already been requeued, but cannot restart by calling
 diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
 index d3f24905852c..f87aa8fdcc51 100644
 --- a/kernel/irq/handle.c
@@ -12371,7 +13185,7 @@ index ee1bc1bb8feb..ddef07958840 100644
  };
  
 diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
-index 6f88e352cd4f..5e27fb1079e7 100644
+index 6f88e352cd4f..6ff9e8011dd0 100644
 --- a/kernel/locking/Makefile
 +++ b/kernel/locking/Makefile
 @@ -2,7 +2,7 @@
@@ -12390,8 +13204,8 @@ index 6f88e352cd4f..5e27fb1079e7 100644
 +ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
 +obj-y += mutex.o
  obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
-+obj-y += rwsem.o
 +endif
++obj-y += rwsem.o
  obj-$(CONFIG_LOCKDEP) += lockdep.o
  ifeq ($(CONFIG_PROC_FS),y)
  obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
@@ -12403,7 +13217,7 @@ index 6f88e352cd4f..5e27fb1079e7 100644
  obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
  obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
 +endif
-+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o rwsem-rt.o
  obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
  obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
 diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
@@ -12556,10 +13370,10 @@ index ce182599cf2e..2ad3a1e8344c 100644
  
 diff --git a/kernel/locking/rt.c b/kernel/locking/rt.c
 new file mode 100644
-index 000000000000..665754c00e1e
+index 000000000000..6284e3b15091
 --- /dev/null
 +++ b/kernel/locking/rt.c
-@@ -0,0 +1,498 @@
+@@ -0,0 +1,331 @@
 +/*
 + * kernel/rt.c
 + *
@@ -12801,239 +13615,72 @@ index 000000000000..665754c00e1e
 +}
 +EXPORT_SYMBOL(rt_write_lock);
 +
-+void __lockfunc rt_read_lock(rwlock_t *rwlock)
-+{
-+	struct rt_mutex *lock = &rwlock->lock;
-+
-+
-+	/*
-+	 * recursive read locks succeed when current owns the lock
-+	 */
-+	if (rt_mutex_owner(lock) != current) {
-+		rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
-+		__rt_spin_lock(lock);
-+	}
-+	rwlock->read_depth++;
-+}
-+
-+EXPORT_SYMBOL(rt_read_lock);
-+
-+void __lockfunc rt_write_unlock(rwlock_t *rwlock)
-+{
-+	/* NOTE: we always pass in '1' for nested, for simplicity */
-+	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
-+	__rt_spin_unlock(&rwlock->lock);
-+	migrate_enable();
-+}
-+EXPORT_SYMBOL(rt_write_unlock);
-+
-+void __lockfunc rt_read_unlock(rwlock_t *rwlock)
-+{
-+	/* Release the lock only when read_depth is down to 0 */
-+	if (--rwlock->read_depth == 0) {
-+		rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
-+		__rt_spin_unlock(&rwlock->lock);
-+		migrate_enable();
-+	}
-+}
-+EXPORT_SYMBOL(rt_read_unlock);
-+
-+unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
-+{
-+	rt_write_lock(rwlock);
-+
-+	return 0;
-+}
-+EXPORT_SYMBOL(rt_write_lock_irqsave);
-+
-+unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
-+{
-+	rt_read_lock(rwlock);
-+
-+	return 0;
-+}
-+EXPORT_SYMBOL(rt_read_lock_irqsave);
-+
-+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
-+{
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+	/*
-+	 * Make sure we are not reinitializing a held lock:
-+	 */
-+	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
-+	lockdep_init_map(&rwlock->dep_map, name, key, 0);
-+#endif
-+	rwlock->lock.save_state = 1;
-+	rwlock->read_depth = 0;
-+}
-+EXPORT_SYMBOL(__rt_rwlock_init);
-+
-+/*
-+ * rw_semaphores
-+ */
-+
-+void  rt_up_write(struct rw_semaphore *rwsem)
-+{
-+	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
-+	rt_mutex_unlock(&rwsem->lock);
-+}
-+EXPORT_SYMBOL(rt_up_write);
-+
-+void __rt_up_read(struct rw_semaphore *rwsem)
-+{
-+	if (--rwsem->read_depth == 0)
-+		rt_mutex_unlock(&rwsem->lock);
-+}
-+
-+void  rt_up_read(struct rw_semaphore *rwsem)
-+{
-+	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
-+	__rt_up_read(rwsem);
-+}
-+EXPORT_SYMBOL(rt_up_read);
-+
-+/*
-+ * downgrade a write lock into a read lock
-+ * - just wake up any readers at the front of the queue
-+ */
-+void  rt_downgrade_write(struct rw_semaphore *rwsem)
-+{
-+	BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
-+	rwsem->read_depth = 1;
-+}
-+EXPORT_SYMBOL(rt_downgrade_write);
-+
-+int  rt_down_write_trylock(struct rw_semaphore *rwsem)
-+{
-+	int ret = rt_mutex_trylock(&rwsem->lock);
-+
-+	if (ret)
-+		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
-+	return ret;
-+}
-+EXPORT_SYMBOL(rt_down_write_trylock);
-+
-+void  rt_down_write(struct rw_semaphore *rwsem)
-+{
-+	rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
-+	rt_mutex_lock(&rwsem->lock);
-+}
-+EXPORT_SYMBOL(rt_down_write);
-+
-+int rt_down_write_killable(struct rw_semaphore *rwsem)
-+{
-+	int ret;
-+
-+	rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
-+	ret = rt_mutex_lock_killable(&rwsem->lock);
-+	if (ret)
-+		rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
-+	return ret;
-+}
-+EXPORT_SYMBOL(rt_down_write_killable);
-+
-+int rt_down_write_killable_nested(struct rw_semaphore *rwsem, int subclass)
-+{
-+	int ret;
-+
-+	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
-+	ret = rt_mutex_lock_killable(&rwsem->lock);
-+	if (ret)
-+		rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
-+	return ret;
-+}
-+EXPORT_SYMBOL(rt_down_write_killable_nested);
-+
-+void  rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
-+{
-+	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
-+	rt_mutex_lock(&rwsem->lock);
-+}
-+EXPORT_SYMBOL(rt_down_write_nested);
-+
-+void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
-+			       struct lockdep_map *nest)
-+{
-+	rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_);
-+	rt_mutex_lock(&rwsem->lock);
-+}
-+EXPORT_SYMBOL(rt_down_write_nested_lock);
-+
-+int rt__down_read_trylock(struct rw_semaphore *rwsem)
-+{
-+	struct rt_mutex *lock = &rwsem->lock;
-+	int ret = 1;
-+
-+	/*
-+	 * recursive read locks succeed when current owns the rwsem,
-+	 * but not when read_depth == 0 which means that the rwsem is
-+	 * write locked.
-+	 */
-+	if (rt_mutex_owner(lock) != current)
-+		ret = rt_mutex_trylock(&rwsem->lock);
-+	else if (!rwsem->read_depth)
-+		ret = 0;
-+
-+	if (ret)
-+		rwsem->read_depth++;
-+	return ret;
-+
-+}
-+
-+int  rt_down_read_trylock(struct rw_semaphore *rwsem)
++void __lockfunc rt_read_lock(rwlock_t *rwlock)
 +{
-+	int ret;
++	struct rt_mutex *lock = &rwlock->lock;
 +
-+	ret = rt__down_read_trylock(rwsem);
-+	if (ret)
-+		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
 +
-+	return ret;
++	/*
++	 * recursive read locks succeed when current owns the lock
++	 */
++	if (rt_mutex_owner(lock) != current) {
++		rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
++		__rt_spin_lock(lock);
++	}
++	rwlock->read_depth++;
 +}
-+EXPORT_SYMBOL(rt_down_read_trylock);
 +
-+void rt__down_read(struct rw_semaphore *rwsem)
-+{
-+	struct rt_mutex *lock = &rwsem->lock;
++EXPORT_SYMBOL(rt_read_lock);
 +
-+	if (rt_mutex_owner(lock) != current)
-+		rt_mutex_lock(&rwsem->lock);
-+	rwsem->read_depth++;
++void __lockfunc rt_write_unlock(rwlock_t *rwlock)
++{
++	/* NOTE: we always pass in '1' for nested, for simplicity */
++	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++	__rt_spin_unlock(&rwlock->lock);
++	migrate_enable();
 +}
-+EXPORT_SYMBOL(rt__down_read);
++EXPORT_SYMBOL(rt_write_unlock);
 +
-+static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
++void __lockfunc rt_read_unlock(rwlock_t *rwlock)
 +{
-+	rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
-+	rt__down_read(rwsem);
++	/* Release the lock only when read_depth is down to 0 */
++	if (--rwlock->read_depth == 0) {
++		rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++		__rt_spin_unlock(&rwlock->lock);
++		migrate_enable();
++	}
 +}
++EXPORT_SYMBOL(rt_read_unlock);
 +
-+void  rt_down_read(struct rw_semaphore *rwsem)
++unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
 +{
-+	__rt_down_read(rwsem, 0);
++	rt_write_lock(rwlock);
++
++	return 0;
 +}
-+EXPORT_SYMBOL(rt_down_read);
++EXPORT_SYMBOL(rt_write_lock_irqsave);
 +
-+void  rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
++unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
 +{
-+	__rt_down_read(rwsem, subclass);
++	rt_read_lock(rwlock);
++
++	return 0;
 +}
-+EXPORT_SYMBOL(rt_down_read_nested);
++EXPORT_SYMBOL(rt_read_lock_irqsave);
 +
-+void  __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
-+			      struct lock_class_key *key)
++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
 +{
 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
 +	/*
 +	 * Make sure we are not reinitializing a held lock:
 +	 */
-+	debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
-+	lockdep_init_map(&rwsem->dep_map, name, key, 0);
++	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
++	lockdep_init_map(&rwlock->dep_map, name, key, 0);
 +#endif
-+	rwsem->read_depth = 0;
-+	rwsem->lock.save_state = 0;
++	rwlock->lock.save_state = 1;
++	rwlock->read_depth = 0;
 +}
-+EXPORT_SYMBOL(__rt_rwsem_init);
++EXPORT_SYMBOL(__rt_rwlock_init);
 +
 +/**
 + * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
@@ -13058,8 +13705,39 @@ index 000000000000..665754c00e1e
 +	return 1;
 +}
 +EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
+diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
+index 62b6cee8ea7f..0613c4b1d059 100644
+--- a/kernel/locking/rtmutex-debug.c
++++ b/kernel/locking/rtmutex-debug.c
+@@ -173,12 +173,3 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
+ 	lock->name = name;
+ }
+ 
+-void
+-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
+-{
+-}
+-
+-void rt_mutex_deadlock_account_unlock(struct task_struct *task)
+-{
+-}
+-
+diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
+index d0519c3432b6..b585af9a1b50 100644
+--- a/kernel/locking/rtmutex-debug.h
++++ b/kernel/locking/rtmutex-debug.h
+@@ -9,9 +9,6 @@
+  * This file contains macros used solely by rtmutex.c. Debug version.
+  */
+ 
+-extern void
+-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
+-extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
+ extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
+ extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
+ extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
 diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index 2c49d76f96c3..4f1a7663c34d 100644
+index 2c49d76f96c3..674ad9087eb5 100644
 --- a/kernel/locking/rtmutex.c
 +++ b/kernel/locking/rtmutex.c
 @@ -7,6 +7,11 @@
@@ -13216,7 +13894,12 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  			/*
  			 * The current top waiter stays enqueued. We
  			 * don't have to change anything in the lock
-@@ -941,6 +983,433 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+@@ -936,11 +978,395 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ 	 */
+ 	rt_mutex_set_owner(lock, task);
+ 
+-	rt_mutex_deadlock_account_lock(lock, task);
+-
  	return 1;
  }
  
@@ -13235,19 +13918,18 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 +		migrate_disable();
 +
 +	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-+		rt_mutex_deadlock_account_lock(lock, current);
++		return;
 +	else
 +		slowfn(lock, do_mig_dis);
 +}
 +
-+static inline int rt_spin_lock_fastunlock(struct rt_mutex *lock,
-+					  int (*slowfn)(struct rt_mutex *lock))
++static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
++					   void  (*slowfn)(struct rt_mutex *lock))
 +{
-+	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
-+		rt_mutex_deadlock_account_unlock(current);
-+		return 0;
-+	}
-+	return slowfn(lock);
++	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
++		return;
++	else
++		slowfn(lock);
 +}
 +#ifdef CONFIG_SMP
 +/*
@@ -13388,7 +14070,7 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 +/*
 + * Slow path to release a rt_mutex spin_lock style
 + */
-+static int noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
++static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
 +{
 +	unsigned long flags;
 +	WAKE_Q(wake_q);
@@ -13398,12 +14080,10 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 +
 +	debug_rt_mutex_unlock(lock);
 +
-+	rt_mutex_deadlock_account_unlock(current);
-+
 +	if (!rt_mutex_has_waiters(lock)) {
 +		lock->owner = NULL;
 +		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-+		return 0;
++		return;
 +	}
 +
 +	mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
@@ -13414,33 +14094,6 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 +
 +	/* Undo pi boosting.when necessary */
 +	rt_mutex_adjust_prio(current);
-+	return 0;
-+}
-+
-+static int noinline __sched rt_spin_lock_slowunlock_no_deboost(struct rt_mutex *lock)
-+{
-+	unsigned long flags;
-+	WAKE_Q(wake_q);
-+	WAKE_Q(wake_sleeper_q);
-+
-+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
-+
-+	debug_rt_mutex_unlock(lock);
-+
-+	rt_mutex_deadlock_account_unlock(current);
-+
-+	if (!rt_mutex_has_waiters(lock)) {
-+		lock->owner = NULL;
-+		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-+		return 0;
-+	}
-+
-+	mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
-+
-+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-+	wake_up_q(&wake_q);
-+	wake_up_q_sleeper(&wake_sleeper_q);
-+	return 1;
 +}
 +
 +void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
@@ -13495,17 +14148,6 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 +}
 +EXPORT_SYMBOL(rt_spin_unlock);
 +
-+int __lockfunc rt_spin_unlock_no_deboost(spinlock_t *lock)
-+{
-+	int ret;
-+
-+	/* NOTE: we always pass in '1' for nested, for simplicity */
-+	spin_release(&lock->dep_map, 1, _RET_IP_);
-+	ret = rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_no_deboost);
-+	migrate_enable();
-+	return ret;
-+}
-+
 +void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
 +{
 +	rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
@@ -13650,7 +14292,7 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  /*
   * Task blocks on lock.
   *
-@@ -971,6 +1440,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+@@ -971,6 +1397,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
  		return -EDEADLK;
  
  	raw_spin_lock(&task->pi_lock);
@@ -13674,7 +14316,7 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  	__rt_mutex_adjust_prio(task);
  	waiter->task = task;
  	waiter->lock = lock;
-@@ -994,7 +1480,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+@@ -994,7 +1437,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
  		rt_mutex_enqueue_pi(owner, waiter);
  
  		__rt_mutex_adjust_prio(owner);
@@ -13683,7 +14325,7 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  			chain_walk = 1;
  	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
  		chain_walk = 1;
-@@ -1036,6 +1522,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+@@ -1036,6 +1479,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
   * Called with lock->wait_lock held and interrupts disabled.
   */
  static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
@@ -13691,7 +14333,7 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  				    struct rt_mutex *lock)
  {
  	struct rt_mutex_waiter *waiter;
-@@ -1064,7 +1551,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+@@ -1064,7 +1508,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
  
  	raw_spin_unlock(&current->pi_lock);
  
@@ -13703,7 +14345,7 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  }
  
  /*
-@@ -1078,7 +1568,7 @@ static void remove_waiter(struct rt_mutex *lock,
+@@ -1078,7 +1525,7 @@ static void remove_waiter(struct rt_mutex *lock,
  {
  	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
  	struct task_struct *owner = rt_mutex_owner(lock);
@@ -13712,7 +14354,7 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  
  	raw_spin_lock(&current->pi_lock);
  	rt_mutex_dequeue(lock, waiter);
-@@ -1102,7 +1592,8 @@ static void remove_waiter(struct rt_mutex *lock,
+@@ -1102,7 +1549,8 @@ static void remove_waiter(struct rt_mutex *lock,
  	__rt_mutex_adjust_prio(owner);
  
  	/* Store the lock on which owner is blocked or NULL */
@@ -13722,7 +14364,7 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  
  	raw_spin_unlock(&owner->pi_lock);
  
-@@ -1138,17 +1629,17 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+@@ -1138,21 +1586,30 @@ void rt_mutex_adjust_pi(struct task_struct *task)
  	raw_spin_lock_irqsave(&task->pi_lock, flags);
  
  	waiter = task->pi_blocked_on;
@@ -13742,7 +14384,20 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
  				   next_lock, NULL, task);
  }
-@@ -1166,7 +1657,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+ 
++void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
++{
++	debug_rt_mutex_init_waiter(waiter);
++	RB_CLEAR_NODE(&waiter->pi_tree_entry);
++	RB_CLEAR_NODE(&waiter->tree_entry);
++	waiter->task = NULL;
++	waiter->savestate = savestate;
++}
++
+ /**
+  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+  * @lock:		 the rt_mutex to take
+@@ -1166,7 +1623,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
  static int __sched
  __rt_mutex_slowlock(struct rt_mutex *lock, int state,
  		    struct hrtimer_sleeper *timeout,
@@ -13752,20 +14407,35 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  {
  	int ret = 0;
  
-@@ -1189,6 +1681,12 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
- 				break;
- 		}
+@@ -1175,16 +1633,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ 		if (try_to_take_rt_mutex(lock, current, waiter))
+ 			break;
  
-+		if (ww_ctx && ww_ctx->acquired > 0) {
-+			ret = __mutex_lock_check_stamp(lock, ww_ctx);
-+			if (ret)
-+				break;
+-		/*
+-		 * TASK_INTERRUPTIBLE checks for signals and
+-		 * timeout. Ignored otherwise.
+-		 */
+-		if (unlikely(state == TASK_INTERRUPTIBLE)) {
+-			/* Signal pending? */
+-			if (signal_pending(current))
+-				ret = -EINTR;
+-			if (timeout && !timeout->task)
+-				ret = -ETIMEDOUT;
++		if (timeout && !timeout->task) {
++			ret = -ETIMEDOUT;
++			break;
++		}
++		if (signal_pending_state(state, current)) {
++			ret = -EINTR;
++			break;
 +		}
 +
- 		raw_spin_unlock_irq(&lock->wait_lock);
- 
- 		debug_rt_mutex_print_deadlock(waiter);
-@@ -1223,21 +1721,96 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
++		if (ww_ctx && ww_ctx->acquired > 0) {
++			ret = __mutex_lock_check_stamp(lock, ww_ctx);
+ 			if (ret)
+ 				break;
+ 		}
+@@ -1223,21 +1682,148 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
  	}
  }
  
@@ -13845,6 +14515,58 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 +}
 +#endif
 +
++int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
++				     struct hrtimer_sleeper *timeout,
++				     enum rtmutex_chainwalk chwalk,
++				     struct ww_acquire_ctx *ww_ctx,
++				     struct rt_mutex_waiter *waiter)
++{
++	int ret;
++
++	/* Try to acquire the lock again: */
++	if (try_to_take_rt_mutex(lock, current, NULL)) {
++		if (ww_ctx)
++			ww_mutex_account_lock(lock, ww_ctx);
++		return 0;
++	}
++
++	set_current_state(state);
++
++	/* Setup the timer, when timeout != NULL */
++	if (unlikely(timeout))
++		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
++
++	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
++
++	if (likely(!ret)) {
++		/* sleep on the mutex */
++		ret = __rt_mutex_slowlock(lock, state, timeout, waiter,
++					  ww_ctx);
++	} else if (ww_ctx) {
++		/* ww_mutex received EDEADLK, let it become EALREADY */
++		ret = __mutex_lock_check_stamp(lock, ww_ctx);
++		BUG_ON(!ret);
++	}
++
++	if (unlikely(ret)) {
++		__set_current_state(TASK_RUNNING);
++		if (rt_mutex_has_waiters(lock))
++			remove_waiter(lock, waiter);
++		/* ww_mutex want to report EDEADLK/EALREADY, let them */
++		if (!ww_ctx)
++			rt_mutex_handle_deadlock(ret, chwalk, waiter);
++	} else if (ww_ctx) {
++		ww_mutex_account_lock(lock, ww_ctx);
++	}
++
++	/*
++	 * try_to_take_rt_mutex() sets the waiter bit
++	 * unconditionally. We might have to fix that up.
++	 */
++	fixup_rt_mutex_waiters(lock);
++	return ret;
++}
++
  /*
   * Slow path lock function:
   */
@@ -13866,42 +14588,46 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  
  	/*
  	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
-@@ -1251,6 +1824,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
- 
- 	/* Try to acquire the lock again: */
- 	if (try_to_take_rt_mutex(lock, current, NULL)) {
-+		if (ww_ctx)
-+			ww_mutex_account_lock(lock, ww_ctx);
- 		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
- 		return 0;
- 	}
-@@ -1265,13 +1840,23 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+@@ -1249,36 +1835,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ 	 */
+ 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
  
- 	if (likely(!ret))
- 		/* sleep on the mutex */
+-	/* Try to acquire the lock again: */
+-	if (try_to_take_rt_mutex(lock, current, NULL)) {
+-		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+-		return 0;
+-	}
+-
+-	set_current_state(state);
+-
+-	/* Setup the timer, when timeout != NULL */
+-	if (unlikely(timeout))
+-		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
+-
+-	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
+-
+-	if (likely(!ret))
+-		/* sleep on the mutex */
 -		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
-+		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
-+					  ww_ctx);
-+	else if (ww_ctx) {
-+		/* ww_mutex received EDEADLK, let it become EALREADY */
-+		ret = __mutex_lock_check_stamp(lock, ww_ctx);
-+		BUG_ON(!ret);
-+	}
- 
- 	if (unlikely(ret)) {
- 		__set_current_state(TASK_RUNNING);
- 		if (rt_mutex_has_waiters(lock))
- 			remove_waiter(lock, &waiter);
+-
+-	if (unlikely(ret)) {
+-		__set_current_state(TASK_RUNNING);
+-		if (rt_mutex_has_waiters(lock))
+-			remove_waiter(lock, &waiter);
 -		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
-+		/* ww_mutex want to report EDEADLK/EALREADY, let them */
-+		if (!ww_ctx)
-+			rt_mutex_handle_deadlock(ret, chwalk, &waiter);
-+	} else if (ww_ctx) {
-+		ww_mutex_account_lock(lock, ww_ctx);
- 	}
+-	}
+-
+-	/*
+-	 * try_to_take_rt_mutex() sets the waiter bit
+-	 * unconditionally. We might have to fix that up.
+-	 */
+-	fixup_rt_mutex_waiters(lock);
++	ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx,
++				       &waiter);
  
- 	/*
-@@ -1331,7 +1916,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
+ 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ 
+@@ -1331,7 +1889,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
   * Return whether the current task needs to undo a potential priority boosting.
   */
  static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
@@ -13911,7 +14637,16 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  {
  	unsigned long flags;
  
-@@ -1387,7 +1973,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+@@ -1340,8 +1899,6 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+ 
+ 	debug_rt_mutex_unlock(lock);
+ 
+-	rt_mutex_deadlock_account_unlock(current);
+-
+ 	/*
+ 	 * We must be careful here if the fast path is enabled. If we
+ 	 * have no waiters queued we cannot set owner to NULL here
+@@ -1387,7 +1944,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
  	 *
  	 * Queue the next waiter for wakeup once we release the wait_lock.
  	 */
@@ -13920,7 +14655,7 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  
  	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
  
-@@ -1403,31 +1989,36 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+@@ -1403,63 +1960,79 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
   */
  static inline int
  rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -13931,13 +14666,14 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 +				enum rtmutex_chainwalk chwalk,
 +				struct ww_acquire_ctx *ww_ctx))
  {
- 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
- 		rt_mutex_deadlock_account_lock(lock, current);
+-	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
+-		rt_mutex_deadlock_account_lock(lock, current);
++	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
  		return 0;
- 	} else
+-	} else
 -		return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
-+		return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK,
-+			      ww_ctx);
++
++	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
  }
  
  static inline int
@@ -13952,16 +14688,29 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 +				      struct ww_acquire_ctx *ww_ctx))
  {
  	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
- 	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
- 		rt_mutex_deadlock_account_lock(lock, current);
+-	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
+-		rt_mutex_deadlock_account_lock(lock, current);
++	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
  		return 0;
- 	} else
+-	} else
 -		return slowfn(lock, state, timeout, chwalk);
-+		return slowfn(lock, state, timeout, chwalk, ww_ctx);
++
++	return slowfn(lock, state, timeout, chwalk, ww_ctx);
  }
  
  static inline int
-@@ -1444,17 +2035,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
+ rt_mutex_fasttrylock(struct rt_mutex *lock,
+ 		     int (*slowfn)(struct rt_mutex *lock))
+ {
+-	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
+-		rt_mutex_deadlock_account_lock(lock, current);
++	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ 		return 1;
+-	}
++
+ 	return slowfn(lock);
+ }
+ 
  static inline void
  rt_mutex_fastunlock(struct rt_mutex *lock,
  		    bool (*slowfn)(struct rt_mutex *lock,
@@ -13971,47 +14720,76 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  {
  	WAKE_Q(wake_q);
 +	WAKE_Q(wake_sleeper_q);
++	bool deboost;
  
- 	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
- 		rt_mutex_deadlock_account_unlock(current);
+-	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+-		rt_mutex_deadlock_account_unlock(current);
++	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
++		return;
  
- 	} else {
+-	} else {
 -		bool deboost = slowfn(lock, &wake_q);
-+		bool deboost = slowfn(lock, &wake_q, &wake_sleeper_q);
++	deboost = slowfn(lock, &wake_q, &wake_sleeper_q);
  
- 		wake_up_q(&wake_q);
-+		wake_up_q_sleeper(&wake_sleeper_q);
+-		wake_up_q(&wake_q);
++	wake_up_q(&wake_q);
++	wake_up_q_sleeper(&wake_sleeper_q);
  
- 		/* Undo pi boosting if necessary: */
- 		if (deboost)
-@@ -1471,7 +2065,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
- {
- 	might_sleep();
+-		/* Undo pi boosting if necessary: */
+-		if (deboost)
+-			rt_mutex_adjust_prio(current);
+-	}
++	/* Undo pi boosting if necessary: */
++	if (deboost)
++		rt_mutex_adjust_prio(current);
++}
++
++/**
++ * rt_mutex_lock_state - lock a rt_mutex with a given state
++ *
++ * @lock:	The rt_mutex to be locked
++ * @state:	The state to set when blocking on the rt_mutex
++ */
++int __sched rt_mutex_lock_state(struct rt_mutex *lock, int state)
++{
++	might_sleep();
++
++	return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);
+ }
  
+ /**
+@@ -1469,15 +2042,13 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
+  */
+ void __sched rt_mutex_lock(struct rt_mutex *lock)
+ {
+-	might_sleep();
+-
 -	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
-+	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
++	rt_mutex_lock_state(lock, TASK_UNINTERRUPTIBLE);
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock);
  
-@@ -1488,7 +2082,7 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
+ /**
+  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
+- *
++ **
+  * @lock:		the rt_mutex to be locked
+  *
+  * Returns:
+@@ -1486,23 +2057,32 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+  */
+ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
  {
- 	might_sleep();
- 
+-	might_sleep();
+-
 -	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
-+	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock);
++	return rt_mutex_lock_state(lock, TASK_INTERRUPTIBLE);
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
  
-@@ -1501,11 +2095,30 @@ int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
- 	might_sleep();
- 
- 	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
--				       RT_MUTEX_FULL_CHAINWALK,
-+				       RT_MUTEX_FULL_CHAINWALK, NULL,
- 				       rt_mutex_slowlock);
- }
- 
- /**
+-/*
+- * Futex variant with full deadlock detection.
++/**
 + * rt_mutex_lock_killable - lock a rt_mutex killable
 + *
 + * @lock:              the rt_mutex to be locked
@@ -14020,21 +14798,29 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 + * Returns:
 + *  0          on success
 + * -EINTR      when interrupted by a signal
-+ * -EDEADLK    when the lock would deadlock (when deadlock detection is on)
-+ */
+  */
+-int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
+-			      struct hrtimer_sleeper *timeout)
 +int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
-+{
-+	might_sleep();
-+
-+	return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock);
+ {
+-	might_sleep();
++	return rt_mutex_lock_state(lock, TASK_KILLABLE);
 +}
 +EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-+
-+/**
-  * rt_mutex_timed_lock - lock a rt_mutex interruptible
-  *			the timeout structure is provided
-  *			by the caller
-@@ -1525,6 +2138,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
+ 
+-	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+-				       RT_MUTEX_FULL_CHAINWALK,
+-				       rt_mutex_slowlock);
++/*
++ * Futex variant, must not use fastpath.
++ */
++int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
++{
++	return rt_mutex_slowtrylock(lock);
+ }
+ 
+ /**
+@@ -1525,6 +2105,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
  
  	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
  				       RT_MUTEX_MIN_CHAINWALK,
@@ -14042,7 +14828,7 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  				       rt_mutex_slowlock);
  }
  EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -1542,7 +2156,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+@@ -1542,7 +2123,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
   */
  int __sched rt_mutex_trylock(struct rt_mutex *lock)
  {
@@ -14054,24 +14840,60 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  		return 0;
  
  	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
-@@ -1568,13 +2186,14 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
-  * required or not.
+@@ -1561,20 +2146,41 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
+ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+ 
+ /**
+- * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
+- * @lock: the rt_mutex to be unlocked
+- *
+- * Returns: true/false indicating whether priority adjustment is
+- * required or not.
++ * Futex variant, that since futex variants do not use the fast-path, can be
++ * simple and will not need to retry.
   */
- bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
+-bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
 -				   struct wake_q_head *wqh)
-+				   struct wake_q_head *wqh,
-+				   struct wake_q_head *wq_sleeper)
++bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
++				    struct wake_q_head *wake_q,
++				    struct wake_q_head *wq_sleeper)
  {
- 	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
- 		rt_mutex_deadlock_account_unlock(current);
- 		return false;
+-	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+-		rt_mutex_deadlock_account_unlock(current);
+-		return false;
++	lockdep_assert_held(&lock->wait_lock);
++
++	debug_rt_mutex_unlock(lock);
++
++	if (!rt_mutex_has_waiters(lock)) {
++		lock->owner = NULL;
++		return false; /* done */
++	}
++
++	mark_wakeup_next_waiter(wake_q, wq_sleeper, lock);
++	return true; /* deboost and wakeups */
++}
++
++void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
++{
++	WAKE_Q(wake_q);
++	WAKE_Q(wake_sleeper_q);
++	bool deboost;
++
++	raw_spin_lock_irq(&lock->wait_lock);
++	deboost = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q);
++	raw_spin_unlock_irq(&lock->wait_lock);
++
++	if (deboost) {
++		wake_up_q(&wake_q);
++		wake_up_q_sleeper(&wake_sleeper_q);
++		rt_mutex_adjust_prio(current);
  	}
 -	return rt_mutex_slowunlock(lock, wqh);
-+	return rt_mutex_slowunlock(lock, wqh, wq_sleeper);
  }
  
  /**
-@@ -1607,13 +2226,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+@@ -1607,13 +2213,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
  void __rt_mutex_init(struct rt_mutex *lock, const char *name)
  {
  	lock->owner = NULL;
@@ -14086,7 +14908,7 @@ index 2c49d76f96c3..4f1a7663c34d 100644
  
  /**
   * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1628,7 +2246,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+@@ -1628,10 +2233,9 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
  void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
  				struct task_struct *proxy_owner)
  {
@@ -14094,11 +14916,26 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 +	rt_mutex_init(lock);
  	debug_rt_mutex_proxy_lock(lock, proxy_owner);
  	rt_mutex_set_owner(lock, proxy_owner);
- 	rt_mutex_deadlock_account_lock(lock, proxy_owner);
-@@ -1676,6 +2294,35 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
- 		return 1;
- 	}
+-	rt_mutex_deadlock_account_lock(lock, proxy_owner);
+ }
  
+ /**
+@@ -1647,7 +2251,66 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+ {
+ 	debug_rt_mutex_proxy_unlock(lock);
+ 	rt_mutex_set_owner(lock, NULL);
+-	rt_mutex_deadlock_account_unlock(proxy_owner);
++}
++
++int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
++			      struct rt_mutex_waiter *waiter,
++			      struct task_struct *task)
++{
++	int ret;
++
++	if (try_to_take_rt_mutex(lock, task, NULL))
++		return 1;
++
 +#ifdef CONFIG_PREEMPT_RT_FULL
 +	/*
 +	 * In PREEMPT_RT there's an added race.
@@ -14128,31 +14965,148 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 +	raw_spin_unlock(&task->pi_lock);
 +#endif
 +
- 	/* We enforce deadlock detection for futexes */
- 	ret = task_blocks_on_rt_mutex(lock, waiter, task,
- 				      RT_MUTEX_FULL_CHAINWALK);
-@@ -1690,7 +2337,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
- 		ret = 0;
- 	}
- 
--	if (unlikely(ret))
++	/* We enforce deadlock detection for futexes */
++	ret = task_blocks_on_rt_mutex(lock, waiter, task,
++				      RT_MUTEX_FULL_CHAINWALK);
++
++	if (ret && !rt_mutex_owner(lock)) {
++		/*
++		 * Reset the return value. We might have
++		 * returned with -EDEADLK and the owner
++		 * released the lock while we were walking the
++		 * pi chain.  Let the waiter sort it out.
++		 */
++		ret = 0;
++	}
++
 +	if (ret && rt_mutex_has_waiters(lock))
- 		remove_waiter(lock, waiter);
++		remove_waiter(lock, waiter);
++
++	debug_rt_mutex_print_deadlock(waiter);
++
++	return ret;
+ }
+ 
+ /**
+@@ -1670,33 +2333,9 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ 	int ret;
  
+ 	raw_spin_lock_irq(&lock->wait_lock);
+-
+-	if (try_to_take_rt_mutex(lock, task, NULL)) {
+-		raw_spin_unlock_irq(&lock->wait_lock);
+-		return 1;
+-	}
+-
+-	/* We enforce deadlock detection for futexes */
+-	ret = task_blocks_on_rt_mutex(lock, waiter, task,
+-				      RT_MUTEX_FULL_CHAINWALK);
+-
+-	if (ret && !rt_mutex_owner(lock)) {
+-		/*
+-		 * Reset the return value. We might have
+-		 * returned with -EDEADLK and the owner
+-		 * released the lock while we were walking the
+-		 * pi chain.  Let the waiter sort it out.
+-		 */
+-		ret = 0;
+-	}
+-
+-	if (unlikely(ret))
+-		remove_waiter(lock, waiter);
+-
++	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
  	raw_spin_unlock_irq(&lock->wait_lock);
-@@ -1746,7 +2393,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+ 
+-	debug_rt_mutex_print_deadlock(waiter);
+-
+ 	return ret;
+ }
+ 
+@@ -1721,21 +2360,23 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
+ }
+ 
+ /**
+- * rt_mutex_finish_proxy_lock() - Complete lock acquisition
++ * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
+  * @lock:		the rt_mutex we were woken on
+  * @to:			the timeout, null if none. hrtimer should already have
+  *			been started.
+  * @waiter:		the pre-initialized rt_mutex_waiter
+  *
+- * Complete the lock acquisition started our behalf by another thread.
++ * Wait for the lock acquisition started on our behalf by
++ * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
++ * rt_mutex_cleanup_proxy_lock().
+  *
+  * Returns:
+  *  0 - success
+  * <0 - error, one of -EINTR, -ETIMEDOUT
+  *
+- * Special API call for PI-futex requeue support
++ * Special API call for PI-futex support
+  */
+-int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
++int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
+ 			       struct hrtimer_sleeper *to,
+ 			       struct rt_mutex_waiter *waiter)
+ {
+@@ -1746,10 +2387,47 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
  	set_current_state(TASK_INTERRUPTIBLE);
  
  	/* sleep on the mutex */
 -	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
 +	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
  
- 	if (unlikely(ret))
+-	if (unlikely(ret))
++	raw_spin_unlock_irq(&lock->wait_lock);
++
++	return ret;
++}
++
++/**
++ * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
++ * @lock:		the rt_mutex we were woken on
++ * @waiter:		the pre-initialized rt_mutex_waiter
++ *
++ * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
++ *
++ * Unless we acquired the lock, we're still enqueued on the wait-list and can
++ * in fact still be granted ownership until we're removed. Therefore we can
++ * find we are in fact the owner and must disregard the
++ * rt_mutex_wait_proxy_lock() failure.
++ *
++ * Returns:
++ *  true  - did the cleanup, we are done.
++ *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
++ *          caller should disregard its return value.
++ *
++ * Special API call for PI-futex support
++ */
++bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
++				 struct rt_mutex_waiter *waiter)
++{
++	bool cleanup = false;
++
++	raw_spin_lock_irq(&lock->wait_lock);
++	/*
++	 * Unless we're the owner, we're still enqueued on the wait_list.
++	 * So check if we became owner, if not, take us off the wait_list.
++	 */
++	if (rt_mutex_owner(lock) != current) {
  		remove_waiter(lock, waiter);
-@@ -1761,3 +2408,89 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
++		fixup_rt_mutex_waiters(lock);
++		cleanup = true;
++	}
  
- 	return ret;
- }
+ 	/*
+ 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+@@ -1759,5 +2437,91 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+ 
+ 	raw_spin_unlock_irq(&lock->wait_lock);
+ 
++	return cleanup;
++}
 +
 +static inline int
 +ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
@@ -14195,8 +15149,8 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 +	else if (!ret && ww_ctx->acquired > 1)
 +		return ww_mutex_deadlock_injection(lock, ww_ctx);
 +
-+	return ret;
-+}
+ 	return ret;
+ }
 +EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
 +
 +int __sched
@@ -14239,8 +15193,21 @@ index 2c49d76f96c3..4f1a7663c34d 100644
 +}
 +EXPORT_SYMBOL(ww_mutex_unlock);
 +#endif
+diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
+index c4060584c407..6607802efa8b 100644
+--- a/kernel/locking/rtmutex.h
++++ b/kernel/locking/rtmutex.h
+@@ -11,8 +11,6 @@
+  */
+ 
+ #define rt_mutex_deadlock_check(l)			(0)
+-#define rt_mutex_deadlock_account_lock(m, t)		do { } while (0)
+-#define rt_mutex_deadlock_account_unlock(l)		do { } while (0)
+ #define debug_rt_mutex_init_waiter(w)			do { } while (0)
+ #define debug_rt_mutex_free_waiter(w)			do { } while (0)
+ #define debug_rt_mutex_lock(l)				do { } while (0)
 diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
-index e317e1cbb3eb..f457c7574920 100644
+index e317e1cbb3eb..819826407462 100644
 --- a/kernel/locking/rtmutex_common.h
 +++ b/kernel/locking/rtmutex_common.h
 @@ -27,6 +27,7 @@ struct rt_mutex_waiter {
@@ -14251,7 +15218,7 @@ index e317e1cbb3eb..f457c7574920 100644
  #ifdef CONFIG_DEBUG_RT_MUTEXES
  	unsigned long		ip;
  	struct pid		*deadlock_task_pid;
-@@ -98,6 +99,9 @@ enum rtmutex_chainwalk {
+@@ -98,22 +99,45 @@ enum rtmutex_chainwalk {
  /*
   * PI-futex support (proxy locking functions, etc.):
   */
@@ -14261,31 +15228,322 @@ index e317e1cbb3eb..f457c7574920 100644
  extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
  extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
  				       struct task_struct *proxy_owner);
-@@ -111,7 +115,8 @@ extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
- 				      struct rt_mutex_waiter *waiter);
- extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
- extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
+ extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+ 				  struct task_struct *proxy_owner);
++extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate);
++extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
++				     struct rt_mutex_waiter *waiter,
++				     struct task_struct *task);
+ extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ 				     struct rt_mutex_waiter *waiter,
+ 				     struct task_struct *task);
+-extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+-				      struct hrtimer_sleeper *to,
+-				      struct rt_mutex_waiter *waiter);
+-extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
+-extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
 -				  struct wake_q_head *wqh);
-+				  struct wake_q_head *wqh,
-+				  struct wake_q_head *wq_sleeper);
++extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
++			       struct hrtimer_sleeper *to,
++			       struct rt_mutex_waiter *waiter);
++extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
++				 struct rt_mutex_waiter *waiter);
++
++extern int rt_mutex_futex_trylock(struct rt_mutex *l);
++
++extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
++extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
++				 struct wake_q_head *wqh,
++				 struct wake_q_head *wq_sleeper);
++
  extern void rt_mutex_adjust_prio(struct task_struct *task);
  
++/* RW semaphore special interface */
++struct ww_acquire_ctx;
++
++int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
++				     struct hrtimer_sleeper *timeout,
++				     enum rtmutex_chainwalk chwalk,
++				     struct ww_acquire_ctx *ww_ctx,
++				     struct rt_mutex_waiter *waiter);
++
  #ifdef CONFIG_DEBUG_RT_MUTEXES
-@@ -120,4 +125,14 @@ extern void rt_mutex_adjust_prio(struct task_struct *task);
- # include "rtmutex.h"
- #endif
- 
-+static inline void
-+rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
+ # include "rtmutex-debug.h"
+ #else
+diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c
+new file mode 100644
+index 000000000000..4a708ffcded6
+--- /dev/null
++++ b/kernel/locking/rwsem-rt.c
+@@ -0,0 +1,268 @@
++/*
++ */
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/export.h>
++
++#include "rtmutex_common.h"
++
++/*
++ * RT-specific reader/writer semaphores
++ *
++ * down_write()
++ *  1) Lock sem->rtmutex
++ *  2) Remove the reader BIAS to force readers into the slow path
++ *  3) Wait until all readers have left the critical region
++ *  4) Mark it write locked
++ *
++ * up_write()
++ *  1) Remove the write locked marker
++ *  2) Set the reader BIAS so readers can use the fast path again
++ *  3) Unlock sem->rtmutex to release blocked readers
++ *
++ * down_read()
++ *  1) Try fast path acquisition (reader BIAS is set)
++ *  2) Take sem->rtmutex.wait_lock which protects the writelocked flag
++ *  3) If !writelocked, acquire it for read
++ *  4) If writelocked, block on sem->rtmutex
++ *  5) unlock sem->rtmutex, goto 1)
++ *
++ * up_read()
++ *  1) Try fast path release (reader count != 1)
++ *  2) Wake the writer waiting in down_write()#3
++ *
++ * down_read()#3 has the consequence that rw semaphores on RT are not writer
++ * fair, but writers, which should be avoided in RT tasks (think mmap_sem),
++ * are subject to the rtmutex priority/DL inheritance mechanism.
++ *
++ * It's possible to make the rw semaphores writer fair by keeping a list of
++ * active readers. A blocked writer would force all newly incoming readers to
++ * block on the rtmutex, but the rtmutex would have to be proxy locked for one
++ * reader after the other. We can't use multi-reader inheritance because there
++ * is no way to support that with SCHED_DEADLINE. Implementing the one by one
++ * reader boosting/handover mechanism is a major surgery for a very dubious
++ * value.
++ *
++ * The risk of writer starvation is there, but the pathological use cases
++ * which trigger it are not necessarily the typical RT workloads.
++ */
++
++void __rwsem_init(struct rw_semaphore *sem, const char *name,
++		  struct lock_class_key *key)
 +{
-+	debug_rt_mutex_init_waiter(waiter);
-+	waiter->task = NULL;
-+	waiter->savestate = savestate;
-+	RB_CLEAR_NODE(&waiter->pi_tree_entry);
-+	RB_CLEAR_NODE(&waiter->tree_entry);
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	/*
++	 * Make sure we are not reinitializing a held semaphore:
++	 */
++	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
++	lockdep_init_map(&sem->dep_map, name, key, 0);
++#endif
++	atomic_set(&sem->readers, READER_BIAS);
 +}
++EXPORT_SYMBOL(__rwsem_init);
 +
- #endif
++int __down_read_trylock(struct rw_semaphore *sem)
++{
++	int r, old;
++
++	/*
++	 * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is
++	 * set.
++	 */
++	for (r = atomic_read(&sem->readers); r < 0;) {
++		old = atomic_cmpxchg(&sem->readers, r, r + 1);
++		if (likely(old == r))
++			return 1;
++		r = old;
++	}
++	return 0;
++}
++
++void __sched __down_read(struct rw_semaphore *sem)
++{
++	struct rt_mutex *m = &sem->rtmutex;
++	struct rt_mutex_waiter waiter;
++
++	if (__down_read_trylock(sem))
++		return;
++
++	might_sleep();
++	raw_spin_lock_irq(&m->wait_lock);
++	/*
++	 * Allow readers as long as the writer has not completely
++	 * acquired the semaphore for write.
++	 */
++	if (atomic_read(&sem->readers) != WRITER_BIAS) {
++		atomic_inc(&sem->readers);
++		raw_spin_unlock_irq(&m->wait_lock);
++		return;
++	}
++
++	/*
++	 * Call into the slow lock path with the rtmutex->wait_lock
++	 * held, so this can't result in the following race:
++	 *
++	 * Reader1		Reader2		Writer
++	 *			down_read()
++	 *					down_write()
++	 *					rtmutex_lock(m)
++	 *					swait()
++	 * down_read()
++	 * unlock(m->wait_lock)
++	 *			up_read()
++	 *			swake()
++	 *					lock(m->wait_lock)
++	 *					sem->writelocked=true
++	 *					unlock(m->wait_lock)
++	 *
++	 *					up_write()
++	 *					sem->writelocked=false
++	 *					rtmutex_unlock(m)
++	 *			down_read()
++	 *					down_write()
++	 *					rtmutex_lock(m)
++	 *					swait()
++	 * rtmutex_lock(m)
++	 *
++	 * That would put Reader1 behind the writer waiting on
++	 * Reader2 to call up_read() which might be unbound.
++	 */
++	rt_mutex_init_waiter(&waiter, false);
++	rt_mutex_slowlock_locked(m, TASK_UNINTERRUPTIBLE, NULL,
++				 RT_MUTEX_MIN_CHAINWALK, NULL,
++				 &waiter);
++	/*
++	 * The slowlock() above is guaranteed to return with the rtmutex
++	 * now held, so there can't be a writer active. Increment the reader
++	 * count and immediately drop the rtmutex again.
++	 */
++	atomic_inc(&sem->readers);
++	raw_spin_unlock_irq(&m->wait_lock);
++	rt_mutex_unlock(m);
++
++	debug_rt_mutex_free_waiter(&waiter);
++}
++
++void __up_read(struct rw_semaphore *sem)
++{
++	struct rt_mutex *m = &sem->rtmutex;
++	struct task_struct *tsk;
++
++	/*
++	 * sem->readers can only hit 0 when a writer is waiting for the
++	 * active readers to leave the critical region.
++	 */
++	if (!atomic_dec_and_test(&sem->readers))
++		return;
++
++	might_sleep();
++	raw_spin_lock_irq(&m->wait_lock);
++	/*
++	 * Wake the writer, i.e. the rtmutex owner. It might release the
++	 * rtmutex concurrently in the fast path (due to a signal), but to
++	 * clean up the rwsem it needs to acquire m->wait_lock. The worst
++	 * case which can happen is a spurious wakeup.
++	 */
++	tsk = rt_mutex_owner(m);
++	if (tsk)
++		wake_up_process(tsk);
++
++	raw_spin_unlock_irq(&m->wait_lock);
++}
++
++static void __up_write_unlock(struct rw_semaphore *sem, int bias,
++			      unsigned long flags)
++{
++	struct rt_mutex *m = &sem->rtmutex;
++
++	atomic_add(READER_BIAS - bias, &sem->readers);
++	raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++	rt_mutex_unlock(m);
++}
++
++static int __sched __down_write_common(struct rw_semaphore *sem, int state)
++{
++	struct rt_mutex *m = &sem->rtmutex;
++	unsigned long flags;
++
++	/* Take the rtmutex as a first step */
++	if (rt_mutex_lock_state(m, state))
++		return -EINTR;
++
++	/* Force readers into slow path */
++	atomic_sub(READER_BIAS, &sem->readers);
++	might_sleep();
++
++	set_current_state(state);
++	for (;;) {
++		raw_spin_lock_irqsave(&m->wait_lock, flags);
++		/* Have all readers left the critical region? */
++		if (!atomic_read(&sem->readers)) {
++			atomic_set(&sem->readers, WRITER_BIAS);
++			__set_current_state(TASK_RUNNING);
++			raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++			return 0;
++		}
++
++		if (signal_pending_state(state, current)) {
++			__set_current_state(TASK_RUNNING);
++			__up_write_unlock(sem, 0, flags);
++			return -EINTR;
++		}
++		raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++
++		if (atomic_read(&sem->readers) != 0) {
++			schedule();
++			set_current_state(state);
++		}
++	}
++}
++
++void __sched __down_write(struct rw_semaphore *sem)
++{
++	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
++}
++
++int __sched __down_write_killable(struct rw_semaphore *sem)
++{
++	return __down_write_common(sem, TASK_KILLABLE);
++}
++
++int __down_write_trylock(struct rw_semaphore *sem)
++{
++	struct rt_mutex *m = &sem->rtmutex;
++	unsigned long flags;
++
++	if (!rt_mutex_trylock(m))
++		return 0;
++
++	atomic_sub(READER_BIAS, &sem->readers);
++
++	raw_spin_lock_irqsave(&m->wait_lock, flags);
++	if (!atomic_read(&sem->readers)) {
++		atomic_set(&sem->readers, WRITER_BIAS);
++		raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++		return 1;
++	}
++	__up_write_unlock(sem, 0, flags);
++	return 0;
++}
++
++void __up_write(struct rw_semaphore *sem)
++{
++	struct rt_mutex *m = &sem->rtmutex;
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&m->wait_lock, flags);
++	__up_write_unlock(sem, WRITER_BIAS, flags);
++}
++
++void __downgrade_write(struct rw_semaphore *sem)
++{
++	struct rt_mutex *m = &sem->rtmutex;
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&m->wait_lock, flags);
++	/* Release it and account current as reader */
++	__up_write_unlock(sem, WRITER_BIAS - 1, flags);
++}
 diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
 index db3ccb1dd614..909779647bd1 100644
 --- a/kernel/locking/spinlock.c
@@ -14355,7 +15613,7 @@ index 0374a596cffa..94970338d518 100644
 +
 +#endif
 diff --git a/kernel/module.c b/kernel/module.c
-index 0e54d5bf0097..3483a3743b44 100644
+index 0e54d5bf0097..f27764fbfa24 100644
 --- a/kernel/module.c
 +++ b/kernel/module.c
 @@ -660,16 +660,7 @@ static void percpu_modcopy(struct module *mod,
@@ -14376,7 +15634,7 @@ index 0e54d5bf0097..3483a3743b44 100644
  {
  	struct module *mod;
  	unsigned int cpu;
-@@ -683,9 +674,11 @@ bool is_module_percpu_address(unsigned long addr)
+@@ -683,9 +674,15 @@ bool is_module_percpu_address(unsigned long addr)
  			continue;
  		for_each_possible_cpu(cpu) {
  			void *start = per_cpu_ptr(mod->percpu, cpu);
@@ -14385,12 +15643,16 @@ index 0e54d5bf0097..3483a3743b44 100644
 -			if ((void *)addr >= start &&
 -			    (void *)addr < start + mod->percpu_size) {
 +			if (va >= start && va < start + mod->percpu_size) {
-+				if (can_addr)
++				if (can_addr) {
 +					*can_addr = (unsigned long) (va - start);
++					*can_addr += (unsigned long)
++						per_cpu_ptr(mod->percpu,
++							    get_boot_cpu_id());
++				}
  				preempt_enable();
  				return true;
  			}
-@@ -696,6 +689,20 @@ bool is_module_percpu_address(unsigned long addr)
+@@ -696,6 +693,20 @@ bool is_module_percpu_address(unsigned long addr)
  	return false;
  }
  
@@ -14411,7 +15673,7 @@ index 0e54d5bf0097..3483a3743b44 100644
  #else /* ... !CONFIG_SMP */
  
  static inline void __percpu *mod_percpu(struct module *mod)
-@@ -727,6 +734,11 @@ bool is_module_percpu_address(unsigned long addr)
+@@ -727,6 +738,11 @@ bool is_module_percpu_address(unsigned long addr)
  	return false;
  }
  
@@ -16344,7 +17606,7 @@ index 154fd689fe02..a6aa5801b21e 100644
  	return (nested == preempt_offset);
  }
 diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index 37e2449186c4..e00accf92a4b 100644
+index c95c5122b105..e00accf92a4b 100644
 --- a/kernel/sched/deadline.c
 +++ b/kernel/sched/deadline.c
 @@ -687,6 +687,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
@@ -16355,20 +17617,6 @@ index 37e2449186c4..e00accf92a4b 100644
  }
  
  static
-@@ -1729,12 +1730,11 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
- #ifdef CONFIG_SMP
- 		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
- 			queue_push_tasks(rq);
--#else
-+#endif
- 		if (dl_task(rq->curr))
- 			check_preempt_curr_dl(rq, p, 0);
- 		else
- 			resched_curr(rq);
--#endif
- 	}
- }
- 
 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
 index fa178b62ea79..935224123441 100644
 --- a/kernel/sched/debug.c
@@ -16495,7 +17743,7 @@ index 69631fa46c2f..6d28fcd08872 100644
  #ifdef HAVE_RT_PUSH_IPI
  /*
 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
-index 2516b8df6dbb..b0691f4e7d49 100644
+index f139f22ce30d..b0691f4e7d49 100644
 --- a/kernel/sched/rt.c
 +++ b/kernel/sched/rt.c
 @@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
@@ -16514,18 +17762,6 @@ index 2516b8df6dbb..b0691f4e7d49 100644
  #endif
  #endif /* CONFIG_SMP */
  	/* We start is dequeued state, because no RT tasks are queued */
-@@ -2198,10 +2200,9 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
- #ifdef CONFIG_SMP
- 		if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
- 			queue_push_tasks(rq);
--#else
-+#endif /* CONFIG_SMP */
- 		if (p->prio < rq->curr->prio)
- 			resched_curr(rq);
--#endif /* CONFIG_SMP */
- 	}
- }
- 
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
 index 055f935d4421..19324ac27026 100644
 --- a/kernel/sched/sched.h
@@ -16779,7 +18015,7 @@ index 000000000000..1950f40ca725
 +}
 +EXPORT_SYMBOL_GPL(swork_put);
 diff --git a/kernel/signal.c b/kernel/signal.c
-index 75761acc77cf..ae0773c76bb0 100644
+index 0b1415720a15..c884647951f7 100644
 --- a/kernel/signal.c
 +++ b/kernel/signal.c
 @@ -14,6 +14,7 @@
@@ -20845,7 +22081,7 @@ index 000000000000..7f6ee70dea41
 +
 +device_initcall(latency_hist_init);
 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 8696ce6bf2f6..277f048a4695 100644
+index 90b66ed6f0e2..7d9897e41ded 100644
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
 @@ -1897,6 +1897,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
@@ -22361,11 +23597,11 @@ index 1afec32de6f2..11fa431046a8 100644
  	dump_stack();
 diff --git a/localversion-rt b/localversion-rt
 new file mode 100644
-index 000000000000..6e44e540b927
+index 000000000000..1199ebade17b
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt12
++-rt16
 diff --git a/mm/Kconfig b/mm/Kconfig
 index 86e3e0e74d20..77e5862a1ed2 100644
 --- a/mm/Kconfig
@@ -22415,7 +23651,7 @@ index 70e6bec46dc2..6678ed58b7c6 100644
  				cc->last_migrated_pfn = 0;
  			}
 diff --git a/mm/filemap.c b/mm/filemap.c
-index d8d7df82c69a..0eac87a07892 100644
+index edfb90e3830c..a8d2c7a73d54 100644
 --- a/mm/filemap.c
 +++ b/mm/filemap.c
 @@ -159,9 +159,12 @@ static int page_cache_tree_insert(struct address_space *mapping,
@@ -22475,7 +23711,7 @@ index 50b4ca6787f0..77518a3b35a1 100644
  unsigned int nr_free_highpages (void)
  {
 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index 4c6ade54d833..ba29283aa43d 100644
+index 0de26691f0f5..db6fe1ba7b34 100644
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
 @@ -67,6 +67,7 @@
@@ -22573,7 +23809,7 @@ index 4c6ade54d833..ba29283aa43d 100644
  	put_online_cpus();
  	mutex_unlock(&percpu_charge_mutex);
  }
-@@ -4548,12 +4552,12 @@ static int mem_cgroup_move_account(struct page *page,
+@@ -4553,12 +4557,12 @@ static int mem_cgroup_move_account(struct page *page,
  
  	ret = 0;
  
@@ -22588,7 +23824,7 @@ index 4c6ade54d833..ba29283aa43d 100644
  out_unlock:
  	unlock_page(page);
  out:
-@@ -5428,10 +5432,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
+@@ -5433,10 +5437,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
  
  	commit_charge(page, memcg, lrucare);
  
@@ -22601,7 +23837,7 @@ index 4c6ade54d833..ba29283aa43d 100644
  
  	if (do_memsw_account() && PageSwapCache(page)) {
  		swp_entry_t entry = { .val = page_private(page) };
-@@ -5487,14 +5491,14 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
+@@ -5492,14 +5496,14 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
  		memcg_oom_recover(memcg);
  	}
  
@@ -22618,7 +23854,7 @@ index 4c6ade54d833..ba29283aa43d 100644
  
  	if (!mem_cgroup_is_root(memcg))
  		css_put_many(&memcg->css, nr_pages);
-@@ -5649,10 +5653,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
+@@ -5654,10 +5658,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
  
  	commit_charge(newpage, memcg, false);
  
@@ -22631,7 +23867,7 @@ index 4c6ade54d833..ba29283aa43d 100644
  }
  
  DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -5832,6 +5836,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+@@ -5837,6 +5841,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
  {
  	struct mem_cgroup *memcg, *swap_memcg;
  	unsigned short oldid;
@@ -22639,7 +23875,7 @@ index 4c6ade54d833..ba29283aa43d 100644
  
  	VM_BUG_ON_PAGE(PageLRU(page), page);
  	VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5872,12 +5877,16 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+@@ -5877,12 +5882,16 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
  	 * important here to have the interrupts disabled because it is the
  	 * only synchronisation we have for udpating the per-CPU variables.
  	 */
@@ -22677,7 +23913,7 @@ index 6f4d27c5bb32..5cd25c745a8f 100644
  #ifdef finish_arch_post_lock_switch
  	finish_arch_post_lock_switch();
 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index f4a02e240fb6..2e73f8cfde74 100644
+index 1460e6ad5e14..dc4eac895340 100644
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
 @@ -61,6 +61,7 @@
@@ -23029,10 +24265,10 @@ index f4a02e240fb6..2e73f8cfde74 100644
  
  #ifdef CONFIG_MEMORY_HOTREMOVE
 diff --git a/mm/percpu.c b/mm/percpu.c
-index 255714302394..59b529b944a9 100644
+index f014cebbf405..4e739fcf91bf 100644
 --- a/mm/percpu.c
 +++ b/mm/percpu.c
-@@ -1280,6 +1280,28 @@ void free_percpu(void __percpu *ptr)
+@@ -1283,6 +1283,31 @@ void free_percpu(void __percpu *ptr)
  }
  EXPORT_SYMBOL_GPL(free_percpu);
  
@@ -23048,8 +24284,11 @@ index 255714302394..59b529b944a9 100644
 +		void *va = (void *)addr;
 +
 +		if (va >= start && va < start + static_size) {
-+			if (can_addr)
++			if (can_addr) {
 +				*can_addr = (unsigned long) (va - start);
++				*can_addr += (unsigned long)
++					per_cpu_ptr(base, get_boot_cpu_id());
++			}
 +			return true;
 +		}
 +	}
@@ -23061,7 +24300,7 @@ index 255714302394..59b529b944a9 100644
  /**
   * is_kernel_percpu_address - test whether address is from static percpu area
   * @addr: address to test
-@@ -1293,20 +1315,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
+@@ -1296,20 +1321,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
   */
  bool is_kernel_percpu_address(unsigned long addr)
  {
@@ -23084,7 +24323,7 @@ index 255714302394..59b529b944a9 100644
  
  /**
 diff --git a/mm/slab.h b/mm/slab.h
-index bc05fdc3edce..610cf61634f0 100644
+index ceb7d70cdb76..dfd281e43fbe 100644
 --- a/mm/slab.h
 +++ b/mm/slab.h
 @@ -426,7 +426,11 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
@@ -23100,7 +24339,7 @@ index bc05fdc3edce..610cf61634f0 100644
  #ifdef CONFIG_SLAB
  	struct list_head slabs_partial;	/* partial list first, better asm code */
 diff --git a/mm/slub.c b/mm/slub.c
-index 7aa0e97af928..35c873f09201 100644
+index 58c7526f8de2..6d72b7f87129 100644
 --- a/mm/slub.c
 +++ b/mm/slub.c
 @@ -1141,7 +1141,7 @@ static noinline int free_debug_processing(
@@ -23477,7 +24716,7 @@ index 7aa0e97af928..35c873f09201 100644
  
  	list_for_each_entry_safe(page, h, &discard, lru)
  		discard_slab(s, page);
-@@ -3920,7 +3989,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+@@ -3905,7 +3974,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
  		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
  			INIT_LIST_HEAD(promote + i);
  
@@ -23486,7 +24725,7 @@ index 7aa0e97af928..35c873f09201 100644
  
  		/*
  		 * Build lists of slabs to discard or promote.
-@@ -3951,7 +4020,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+@@ -3936,7 +4005,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
  		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
  			list_splice(promote + i, &n->partial);
  
@@ -23495,7 +24734,7 @@ index 7aa0e97af928..35c873f09201 100644
  
  		/* Release empty slabs */
  		list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4127,6 +4196,12 @@ void __init kmem_cache_init(void)
+@@ -4112,6 +4181,12 @@ void __init kmem_cache_init(void)
  {
  	static __initdata struct kmem_cache boot_kmem_cache,
  		boot_kmem_cache_node;
@@ -23508,7 +24747,7 @@ index 7aa0e97af928..35c873f09201 100644
  
  	if (debug_guardpage_minorder())
  		slub_max_order = 0;
-@@ -4335,7 +4410,7 @@ static int validate_slab_node(struct kmem_cache *s,
+@@ -4320,7 +4395,7 @@ static int validate_slab_node(struct kmem_cache *s,
  	struct page *page;
  	unsigned long flags;
  
@@ -23517,7 +24756,7 @@ index 7aa0e97af928..35c873f09201 100644
  
  	list_for_each_entry(page, &n->partial, lru) {
  		validate_slab_slab(s, page, map);
-@@ -4357,7 +4432,7 @@ static int validate_slab_node(struct kmem_cache *s,
+@@ -4342,7 +4417,7 @@ static int validate_slab_node(struct kmem_cache *s,
  		       s->name, count, atomic_long_read(&n->nr_slabs));
  
  out:
@@ -23526,7 +24765,7 @@ index 7aa0e97af928..35c873f09201 100644
  	return count;
  }
  
-@@ -4545,12 +4620,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
+@@ -4530,12 +4605,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
  		if (!atomic_long_read(&n->nr_slabs))
  			continue;
  
@@ -23921,7 +25160,7 @@ index 604f26a4f696..312006d2db50 100644
  
  void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 diff --git a/mm/workingset.c b/mm/workingset.c
-index fb1f9183d89a..7e6ef1a48cd3 100644
+index 33f6f4db32fd..f4ff55f4b60e 100644
 --- a/mm/workingset.c
 +++ b/mm/workingset.c
 @@ -334,7 +334,8 @@ void workingset_activation(struct page *page)
@@ -24173,7 +25412,7 @@ index b0bc023d25c5..5af6426fbcbe 100644
  	migrate_read_unlock(zspage);
  	unpin_tag(handle);
 diff --git a/net/core/dev.c b/net/core/dev.c
-index 60b0a6049e72..660ca3b9c60b 100644
+index 2e04fd188081..3ba60ef8c79e 100644
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
 @@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPUS;
@@ -24276,7 +25515,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  }
  
  /**
-@@ -2258,6 +2264,7 @@ static void __netif_reschedule(struct Qdisc *q)
+@@ -2285,6 +2291,7 @@ static void __netif_reschedule(struct Qdisc *q)
  	sd->output_queue_tailp = &q->next_sched;
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_restore(flags);
@@ -24284,7 +25523,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  }
  
  void __netif_schedule(struct Qdisc *q)
-@@ -2339,6 +2346,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+@@ -2366,6 +2373,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
  	__this_cpu_write(softnet_data.completion_queue, skb);
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_restore(flags);
@@ -24292,7 +25531,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  }
  EXPORT_SYMBOL(__dev_kfree_skb_irq);
  
-@@ -3073,7 +3081,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+@@ -3100,7 +3108,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
  	 * This permits qdisc->running owner to get the lock more
  	 * often and dequeue packets faster.
  	 */
@@ -24304,7 +25543,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  	if (unlikely(contended))
  		spin_lock(&q->busylock);
  
-@@ -3136,8 +3148,10 @@ static void skb_update_prio(struct sk_buff *skb)
+@@ -3163,8 +3175,10 @@ static void skb_update_prio(struct sk_buff *skb)
  #define skb_update_prio(skb)
  #endif
  
@@ -24315,7 +25554,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  
  /**
   *	dev_loopback_xmit - loop back @skb
-@@ -3371,8 +3385,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+@@ -3398,8 +3412,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
  		int cpu = smp_processor_id(); /* ok because BHs are off */
  
  		if (txq->xmit_lock_owner != cpu) {
@@ -24325,7 +25564,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  				goto recursion_alert;
  
  			skb = validate_xmit_skb(skb, dev);
-@@ -3382,9 +3395,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+@@ -3409,9 +3422,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
  			HARD_TX_LOCK(dev, txq, cpu);
  
  			if (!netif_xmit_stopped(txq)) {
@@ -24337,7 +25576,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  				if (dev_xmit_complete(rc)) {
  					HARD_TX_UNLOCK(dev, txq);
  					goto out;
-@@ -3758,6 +3771,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+@@ -3785,6 +3798,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
  	rps_unlock(sd);
  
  	local_irq_restore(flags);
@@ -24345,7 +25584,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  
  	atomic_long_inc(&skb->dev->rx_dropped);
  	kfree_skb(skb);
-@@ -3776,7 +3790,7 @@ static int netif_rx_internal(struct sk_buff *skb)
+@@ -3803,7 +3817,7 @@ static int netif_rx_internal(struct sk_buff *skb)
  		struct rps_dev_flow voidflow, *rflow = &voidflow;
  		int cpu;
  
@@ -24354,7 +25593,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  		rcu_read_lock();
  
  		cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3786,13 +3800,13 @@ static int netif_rx_internal(struct sk_buff *skb)
+@@ -3813,13 +3827,13 @@ static int netif_rx_internal(struct sk_buff *skb)
  		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  
  		rcu_read_unlock();
@@ -24371,7 +25610,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  	}
  	return ret;
  }
-@@ -3826,11 +3840,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3853,11 +3867,9 @@ int netif_rx_ni(struct sk_buff *skb)
  
  	trace_netif_rx_ni_entry(skb);
  
@@ -24385,7 +25624,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  
  	return err;
  }
-@@ -4309,7 +4321,7 @@ static void flush_backlog(struct work_struct *work)
+@@ -4336,7 +4348,7 @@ static void flush_backlog(struct work_struct *work)
  	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
  		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  			__skb_unlink(skb, &sd->input_pkt_queue);
@@ -24394,7 +25633,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  			input_queue_head_incr(sd);
  		}
  	}
-@@ -4319,11 +4331,14 @@ static void flush_backlog(struct work_struct *work)
+@@ -4346,11 +4358,14 @@ static void flush_backlog(struct work_struct *work)
  	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
  		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  			__skb_unlink(skb, &sd->process_queue);
@@ -24410,7 +25649,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  }
  
  static void flush_all_backlogs(void)
-@@ -4804,6 +4819,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+@@ -4831,6 +4846,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
  		sd->rps_ipi_list = NULL;
  
  		local_irq_enable();
@@ -24418,7 +25657,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  
  		/* Send pending IPI's to kick RPS processing on remote cpus. */
  		while (remsd) {
-@@ -4817,6 +4833,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+@@ -4844,6 +4860,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
  	} else
  #endif
  		local_irq_enable();
@@ -24426,7 +25665,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  }
  
  static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4846,7 +4863,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
+@@ -4873,7 +4890,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
  	while (again) {
  		struct sk_buff *skb;
  
@@ -24436,7 +25675,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  			rcu_read_lock();
  			__netif_receive_skb(skb);
  			rcu_read_unlock();
-@@ -4854,9 +4873,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
+@@ -4881,9 +4900,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
  			if (++work >= quota)
  				return work;
  
@@ -24447,7 +25686,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  		rps_lock(sd);
  		if (skb_queue_empty(&sd->input_pkt_queue)) {
  			/*
-@@ -4894,9 +4913,11 @@ void __napi_schedule(struct napi_struct *n)
+@@ -4921,9 +4940,11 @@ void __napi_schedule(struct napi_struct *n)
  	local_irq_save(flags);
  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
  	local_irq_restore(flags);
@@ -24459,7 +25698,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  /**
   * __napi_schedule_irqoff - schedule for receive
   * @n: entry to schedule
-@@ -4908,6 +4929,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
+@@ -4935,6 +4956,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
  }
  EXPORT_SYMBOL(__napi_schedule_irqoff);
@@ -24467,7 +25706,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  
  void __napi_complete(struct napi_struct *n)
  {
-@@ -5197,13 +5219,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
+@@ -5224,13 +5246,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
  	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
  	unsigned long time_limit = jiffies + 2;
  	int budget = netdev_budget;
@@ -24489,7 +25728,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  	for (;;) {
  		struct napi_struct *n;
  
-@@ -5234,7 +5264,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
+@@ -5261,7 +5291,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
  	list_splice_tail(&repoll, &list);
  	list_splice(&list, &sd->poll_list);
  	if (!list_empty(&sd->poll_list))
@@ -24498,7 +25737,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  
  	net_rps_action_and_irq_enable(sd);
  }
-@@ -7995,16 +8025,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
+@@ -8022,16 +8052,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_enable();
@@ -24520,7 +25759,7 @@ index 60b0a6049e72..660ca3b9c60b 100644
  
  	return NOTIFY_OK;
  }
-@@ -8309,8 +8343,9 @@ static int __init net_dev_init(void)
+@@ -8336,8 +8370,9 @@ static int __init net_dev_init(void)
  
<Skipped 99 lines>
================================================================

---- gitweb:

http://git.pld-linux.org/gitweb.cgi/packages/kernel.git/commitdiff/33c7bf0f76b919741a771f7bfd52648f868b04f9


