[packages/kernel/LINUX_4_9] rt patch up to 4.9.68-rt60

jajcus jajcus at pld-linux.org
Sun Dec 17 15:49:21 CET 2017


commit f9907f71e2570b474fae7ca7ad3406e99c9b7736
Author: Jacek Konieczny <jajcus at jajcus.net>
Date:   Sun Dec 17 14:45:25 2017 +0100

    rt patch up to 4.9.68-rt60
    
    Nothing had to be removed from the patch this time :)
    
    Release: 2

 kernel-rt.patch | 166 +++++++++++++++++++++++++++++---------------------------
 kernel.spec     |   4 +-
 2 files changed, 89 insertions(+), 81 deletions(-)
---
diff --git a/kernel.spec b/kernel.spec
index 4e057629..3389c3ff 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -71,7 +71,7 @@
 %define		have_pcmcia	0
 %endif
 
-%define		rel		1
+%define		rel		2
 %define		basever		4.9
 %define		postver		.68
 
@@ -213,7 +213,7 @@ Patch146:	kernel-aufs4+vserver.patch
 Patch250:	kernel-fix_256colors_menuconfig.patch
 
 # https://rt.wiki.kernel.org/
-# http://www.kernel.org/pub/linux/kernel/projects/rt/4.9/patch-4.9.65-rt57.patch.xz
+# http://www.kernel.org/pub/linux/kernel/projects/rt/4.9/patch-4.9.68-rt60.patch.xz
 Patch500:	kernel-rt.patch
 
 Patch2000:	kernel-small_fixes.patch
diff --git a/kernel-rt.patch b/kernel-rt.patch
index f620c485..840aada4 100644
--- a/kernel-rt.patch
+++ b/kernel-rt.patch
@@ -2649,10 +2649,10 @@ index bd7be8efdc4c..b3b0a7f7b1ca 100644
  	 * Leave lazy mode, flushing any hypercalls made here.
  	 * This must be done before restoring TLS segments so
 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 3f05c044720b..fe68afd37162 100644
+index b24b3c6d686e..02a062b0de5d 100644
 --- a/arch/x86/kvm/lapic.c
 +++ b/arch/x86/kvm/lapic.c
-@@ -1939,6 +1939,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
+@@ -1944,6 +1944,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
  	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
  		     HRTIMER_MODE_ABS_PINNED);
  	apic->lapic_timer.timer.function = apic_timer_fn;
@@ -2661,10 +2661,10 @@ index 3f05c044720b..fe68afd37162 100644
  	/*
  	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 595f8149c0d9..31b15149f412 100644
+index 02d45296a97c..4963bd51d20b 100644
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -5961,6 +5961,13 @@ int kvm_arch_init(void *opaque)
+@@ -5966,6 +5966,13 @@ int kvm_arch_init(void *opaque)
  		goto out;
  	}
  
@@ -2957,7 +2957,7 @@ index b333fc45f9ec..8b85916e6986 100644
  
  /*
 diff --git a/block/blk-core.c b/block/blk-core.c
-index 95379fc83805..e531da0c9232 100644
+index b1c76aa73492..5808a85a7974 100644
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
 @@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
@@ -2979,7 +2979,7 @@ index 95379fc83805..e531da0c9232 100644
  
  	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
  	__blk_run_queue(q);
-@@ -659,7 +662,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
+@@ -660,7 +663,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
  		if (nowait)
  			return -EBUSY;
  
@@ -2988,7 +2988,7 @@ index 95379fc83805..e531da0c9232 100644
  				!atomic_read(&q->mq_freeze_depth) ||
  				blk_queue_dying(q));
  		if (blk_queue_dying(q))
-@@ -679,7 +682,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+@@ -680,7 +683,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
  	struct request_queue *q =
  		container_of(ref, struct request_queue, q_usage_counter);
  
@@ -2997,7 +2997,7 @@ index 95379fc83805..e531da0c9232 100644
  }
  
  static void blk_rq_timed_out_timer(unsigned long data)
-@@ -748,7 +751,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+@@ -750,7 +753,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
  	q->bypass_depth = 1;
  	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
  
@@ -3006,7 +3006,7 @@ index 95379fc83805..e531da0c9232 100644
  
  	/*
  	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
-@@ -3200,7 +3203,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
+@@ -3202,7 +3205,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
  		blk_run_queue_async(q);
  	else
  		__blk_run_queue(q);
@@ -3015,7 +3015,7 @@ index 95379fc83805..e531da0c9232 100644
  }
  
  static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3248,7 +3251,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3250,7 +3253,6 @@ EXPORT_SYMBOL(blk_check_plugged);
  void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  {
  	struct request_queue *q;
@@ -3023,7 +3023,7 @@ index 95379fc83805..e531da0c9232 100644
  	struct request *rq;
  	LIST_HEAD(list);
  	unsigned int depth;
-@@ -3268,11 +3270,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+@@ -3270,11 +3272,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  	q = NULL;
  	depth = 0;
  
@@ -3035,7 +3035,7 @@ index 95379fc83805..e531da0c9232 100644
  	while (!list_empty(&list)) {
  		rq = list_entry_rq(list.next);
  		list_del_init(&rq->queuelist);
-@@ -3285,7 +3282,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+@@ -3287,7 +3284,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  				queue_unplugged(q, depth, from_schedule);
  			q = rq->q;
  			depth = 0;
@@ -3044,7 +3044,7 @@ index 95379fc83805..e531da0c9232 100644
  		}
  
  		/*
-@@ -3312,8 +3309,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+@@ -3314,8 +3311,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  	 */
  	if (q)
  		queue_unplugged(q, depth, from_schedule);
@@ -5465,7 +5465,7 @@ index e8819aa20415..dd7f9bf45d6c 100644
  #include <asm/serial.h>
  /*
 diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
-index 1ef31e3ee4a1..ff9f4e50563c 100644
+index f6e4373a8850..4620b51b0e7c 100644
 --- a/drivers/tty/serial/8250/8250_port.c
 +++ b/drivers/tty/serial/8250/8250_port.c
 @@ -35,6 +35,7 @@
@@ -5476,7 +5476,7 @@ index 1ef31e3ee4a1..ff9f4e50563c 100644
  #include <linux/uaccess.h>
  #include <linux/pm_runtime.h>
  #include <linux/timer.h>
-@@ -3140,9 +3141,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+@@ -3143,9 +3144,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
  
  	serial8250_rpm_get(up);
  
@@ -5573,7 +5573,7 @@ index fcc7aa248ce7..fb2c38d875f9 100644
  	usb_anchor_resume_wakeups(anchor);
  	atomic_dec(&urb->use_count);
 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
-index 273320fa30ae..f2a125841653 100644
+index 4fce83266926..5e902a89d8e6 100644
 --- a/drivers/usb/gadget/function/f_fs.c
 +++ b/drivers/usb/gadget/function/f_fs.c
 @@ -1593,7 +1593,7 @@ static void ffs_data_put(struct ffs_data *ffs)
@@ -6031,7 +6031,7 @@ index 920aa0b1c6b0..3d6b5fd1bf06 100644
  	inode->dirtied_when = 0;
  
 diff --git a/fs/libfs.c b/fs/libfs.c
-index 48826d4da189..3ea54d1fc431 100644
+index 9588780ad43e..9b37abd354c9 100644
 --- a/fs/libfs.c
 +++ b/fs/libfs.c
 @@ -89,7 +89,7 @@ static struct dentry *next_positive(struct dentry *parent,
@@ -6273,7 +6273,7 @@ index dff600ae0d74..d726d2e09353 100644
  		mutex_unlock(&sp->so_delegreturn_mutex);
  		put_nfs_open_context(ctx);
 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
-index d04ec3814779..ba90d41d3c34 100644
+index 65566d5fcf39..5f08183dddcd 100644
 --- a/fs/nfs/dir.c
 +++ b/fs/nfs/dir.c
 @@ -485,7 +485,7 @@ static
@@ -6285,7 +6285,7 @@ index d04ec3814779..ba90d41d3c34 100644
  	struct dentry *dentry;
  	struct dentry *alias;
  	struct inode *dir = d_inode(parent);
-@@ -1491,7 +1491,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
+@@ -1492,7 +1492,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
  		    struct file *file, unsigned open_flags,
  		    umode_t mode, int *opened)
  {
@@ -6294,7 +6294,7 @@ index d04ec3814779..ba90d41d3c34 100644
  	struct nfs_open_context *ctx;
  	struct dentry *res;
  	struct iattr attr = { .ia_valid = ATTR_OPEN };
-@@ -1806,7 +1806,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
+@@ -1807,7 +1807,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
  
  	trace_nfs_rmdir_enter(dir, dentry);
  	if (d_really_is_positive(dentry)) {
@@ -6306,7 +6306,7 @@ index d04ec3814779..ba90d41d3c34 100644
  		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
  		/* Ensure the VFS deletes this inode */
  		switch (error) {
-@@ -1816,7 +1820,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
+@@ -1817,7 +1821,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
  		case -ENOENT:
  			nfs_dentry_handle_enoent(dentry);
  		}
@@ -6348,10 +6348,10 @@ index 1452177c822d..f43b01d54c59 100644
  };
  
 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
-index a53b8e0c896a..da0a483c5442 100644
+index 4638654e26f3..5dd6fd555c72 100644
 --- a/fs/nfs/nfs4proc.c
 +++ b/fs/nfs/nfs4proc.c
-@@ -2695,7 +2695,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+@@ -2691,7 +2691,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
  	unsigned int seq;
  	int ret;
  
@@ -6360,7 +6360,7 @@ index a53b8e0c896a..da0a483c5442 100644
  
  	ret = _nfs4_proc_open(opendata);
  	if (ret != 0)
-@@ -2733,7 +2733,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+@@ -2729,7 +2729,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
  
  	if (d_inode(dentry) == state->inode) {
  		nfs_inode_attach_open_context(ctx);
@@ -6370,7 +6370,7 @@ index a53b8e0c896a..da0a483c5442 100644
  	}
  out:
 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
-index 92671914067f..44038480c88c 100644
+index 71deeae6eefd..4be6999299dc 100644
 --- a/fs/nfs/nfs4state.c
 +++ b/fs/nfs/nfs4state.c
 @@ -488,7 +488,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
@@ -6781,7 +6781,7 @@ index 8fdcb783197d..d07dbeec7bc1 100644
  
  #endif /* _LINUX_BH_H */
 diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
-index 447a915db25d..e187a3356345 100644
+index 4431ea2c8802..0744157a97ca 100644
 --- a/include/linux/buffer_head.h
 +++ b/include/linux/buffer_head.h
 @@ -75,8 +75,50 @@ struct buffer_head {
@@ -12202,7 +12202,7 @@ index fc1ef736253c..83c666537a7a 100644
  	return r;
  }
 diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 36ff2d93f222..554aebf7e88b 100644
+index 13b9784427b0..f74fbfe5465c 100644
 --- a/kernel/events/core.c
 +++ b/kernel/events/core.c
 @@ -1050,6 +1050,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
@@ -12213,7 +12213,7 @@ index 36ff2d93f222..554aebf7e88b 100644
  }
  
  static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
-@@ -8390,6 +8391,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
+@@ -8405,6 +8406,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
  
  	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	hwc->hrtimer.function = perf_swevent_hrtimer;
@@ -18056,7 +18056,7 @@ index 8d0f35debf35..b62cf6400fe0 100644
  }
  EXPORT_SYMBOL(completion_done);
 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 78181c03d9c7..034a738f1bf7 100644
+index e5066955cc3a..ed1ebcc2ff3d 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
 @@ -129,7 +129,11 @@ const_debug unsigned int sysctl_sched_features =
@@ -18174,7 +18174,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  void resched_cpu(int cpu)
  {
  	struct rq *rq = cpu_rq(cpu);
-@@ -525,11 +577,14 @@ void resched_cpu(int cpu)
+@@ -524,11 +576,14 @@ void resched_cpu(int cpu)
   */
  int get_nohz_timer_target(void)
  {
@@ -18191,7 +18191,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  
  	rcu_read_lock();
  	for_each_domain(cpu, sd) {
-@@ -548,6 +603,8 @@ int get_nohz_timer_target(void)
+@@ -547,6 +602,8 @@ int get_nohz_timer_target(void)
  		cpu = housekeeping_any_cpu();
  unlock:
  	rcu_read_unlock();
@@ -18200,7 +18200,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  	return cpu;
  }
  /*
-@@ -1093,7 +1150,8 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
+@@ -1092,7 +1149,8 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
  	p->nr_cpus_allowed = cpumask_weight(new_mask);
  }
  
@@ -18210,7 +18210,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  {
  	struct rq *rq = task_rq(p);
  	bool queued, running;
-@@ -1122,6 +1180,98 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+@@ -1121,6 +1179,98 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  		set_curr_task(rq, p);
  }
  
@@ -18309,7 +18309,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  /*
   * Change a given task's CPU affinity. Migrate the thread to a
   * proper CPU and schedule it away if the CPU it's executing on
-@@ -1180,7 +1330,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+@@ -1179,7 +1329,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
  	}
  
  	/* Can the task run on the task's current CPU? If so, we're done */
@@ -18318,7 +18318,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  		goto out;
  
  	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
-@@ -1367,6 +1517,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
+@@ -1366,6 +1516,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
  	return ret;
  }
  
@@ -18337,7 +18337,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  /*
   * wait_task_inactive - wait for a thread to unschedule.
   *
-@@ -1411,7 +1573,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+@@ -1410,7 +1572,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  		 * is actually now running somewhere else!
  		 */
  		while (task_running(rq, p)) {
@@ -18346,7 +18346,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  				return 0;
  			cpu_relax();
  		}
-@@ -1426,7 +1588,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+@@ -1425,7 +1587,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  		running = task_running(rq, p);
  		queued = task_on_rq_queued(p);
  		ncsw = 0;
@@ -18356,7 +18356,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
  		task_rq_unlock(rq, p, &rf);
  
-@@ -1681,10 +1844,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
+@@ -1680,10 +1843,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
  {
  	activate_task(rq, p, en_flags);
  	p->on_rq = TASK_ON_RQ_QUEUED;
@@ -18367,7 +18367,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  }
  
  /*
-@@ -2019,8 +2178,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+@@ -2018,8 +2177,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
  	 */
  	smp_mb__before_spinlock();
  	raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -18396,7 +18396,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  
  	trace_sched_waking(p);
  
-@@ -2103,53 +2281,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+@@ -2102,53 +2280,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
  }
  
  /**
@@ -18450,7 +18450,7 @@ index 78181c03d9c7..034a738f1bf7 100644
   * wake_up_process - Wake up a specific process
   * @p: The process to be woken up.
   *
-@@ -2167,6 +2298,18 @@ int wake_up_process(struct task_struct *p)
+@@ -2166,6 +2297,18 @@ int wake_up_process(struct task_struct *p)
  }
  EXPORT_SYMBOL(wake_up_process);
  
@@ -18469,7 +18469,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  int wake_up_state(struct task_struct *p, unsigned int state)
  {
  	return try_to_wake_up(p, state, 0);
-@@ -2443,6 +2586,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -2442,6 +2585,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
  	p->on_cpu = 0;
  #endif
  	init_task_preempt_count(p);
@@ -18479,7 +18479,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  #ifdef CONFIG_SMP
  	plist_node_init(&p->pushable_tasks, MAX_PRIO);
  	RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -2771,21 +2917,16 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+@@ -2770,21 +2916,16 @@ static struct rq *finish_task_switch(struct task_struct *prev)
  	finish_arch_post_lock_switch();
  
  	fire_sched_in_preempt_notifiers(current);
@@ -18506,7 +18506,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  		put_task_struct(prev);
  	}
  
-@@ -3253,6 +3394,114 @@ static inline void schedule_debug(struct task_struct *prev)
+@@ -3252,6 +3393,114 @@ static inline void schedule_debug(struct task_struct *prev)
  	schedstat_inc(this_rq()->sched_count);
  }
  
@@ -18621,7 +18621,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  /*
   * Pick up the highest-prio task:
   */
-@@ -3369,19 +3618,6 @@ static void __sched notrace __schedule(bool preempt)
+@@ -3368,19 +3617,6 @@ static void __sched notrace __schedule(bool preempt)
  		} else {
  			deactivate_task(rq, prev, DEQUEUE_SLEEP);
  			prev->on_rq = 0;
@@ -18641,7 +18641,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  		}
  		switch_count = &prev->nvcsw;
  	}
-@@ -3391,6 +3627,7 @@ static void __sched notrace __schedule(bool preempt)
+@@ -3390,6 +3626,7 @@ static void __sched notrace __schedule(bool preempt)
  
  	next = pick_next_task(rq, prev, cookie);
  	clear_tsk_need_resched(prev);
@@ -18649,7 +18649,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  	clear_preempt_need_resched();
  	rq->clock_skip_update = 0;
  
-@@ -3438,9 +3675,20 @@ void __noreturn do_task_dead(void)
+@@ -3437,9 +3674,20 @@ void __noreturn do_task_dead(void)
  
  static inline void sched_submit_work(struct task_struct *tsk)
  {
@@ -18671,7 +18671,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  	 * If we are going to sleep and we have plugged IO queued,
  	 * make sure to submit it to avoid deadlocks.
  	 */
-@@ -3448,6 +3696,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
+@@ -3447,6 +3695,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
  		blk_schedule_flush_plug(tsk);
  }
  
@@ -18684,7 +18684,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  asmlinkage __visible void __sched schedule(void)
  {
  	struct task_struct *tsk = current;
-@@ -3458,6 +3712,7 @@ asmlinkage __visible void __sched schedule(void)
+@@ -3457,6 +3711,7 @@ asmlinkage __visible void __sched schedule(void)
  		__schedule(false);
  		sched_preempt_enable_no_resched();
  	} while (need_resched());
@@ -18692,7 +18692,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  }
  EXPORT_SYMBOL(schedule);
  
-@@ -3521,6 +3776,30 @@ static void __sched notrace preempt_schedule_common(void)
+@@ -3520,6 +3775,30 @@ static void __sched notrace preempt_schedule_common(void)
  	} while (need_resched());
  }
  
@@ -18723,7 +18723,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  #ifdef CONFIG_PREEMPT
  /*
   * this is the entry point to schedule() from in-kernel preemption
-@@ -3535,7 +3814,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
+@@ -3534,7 +3813,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
  	 */
  	if (likely(!preemptible()))
  		return;
@@ -18733,7 +18733,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  	preempt_schedule_common();
  }
  NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3562,6 +3842,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+@@ -3561,6 +3841,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
  	if (likely(!preemptible()))
  		return;
  
@@ -18743,7 +18743,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  	do {
  		/*
  		 * Because the function tracer can trace preempt_count_sub()
-@@ -3584,7 +3867,16 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+@@ -3583,7 +3866,16 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
  		 * an infinite recursion.
  		 */
  		prev_ctx = exception_enter();
@@ -18760,7 +18760,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  		exception_exit(prev_ctx);
  
  		preempt_latency_stop(1);
-@@ -3630,10 +3922,25 @@ EXPORT_SYMBOL(default_wake_function);
+@@ -3629,10 +3921,25 @@ EXPORT_SYMBOL(default_wake_function);
  
  #ifdef CONFIG_RT_MUTEXES
  
@@ -18788,7 +18788,7 @@ index 78181c03d9c7..034a738f1bf7 100644
   *
   * This function changes the 'effective' priority of a task. It does
   * not touch ->normal_prio like __setscheduler().
-@@ -3641,16 +3948,40 @@ EXPORT_SYMBOL(default_wake_function);
+@@ -3640,16 +3947,40 @@ EXPORT_SYMBOL(default_wake_function);
   * Used by the rt_mutex code to implement priority inheritance
   * logic. Call site only calls if the priority of the task changed.
   */
@@ -18832,7 +18832,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  
  	/*
  	 * Idle task boosting is a nono in general. There is one
-@@ -3670,7 +4001,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+@@ -3669,7 +4000,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
  		goto out_unlock;
  	}
  
@@ -18841,7 +18841,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  	oldprio = p->prio;
  
  	if (oldprio == prio)
-@@ -3694,7 +4025,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+@@ -3693,7 +4024,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
  	 *          running task
  	 */
  	if (dl_prio(prio)) {
@@ -18849,7 +18849,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  		if (!dl_prio(p->normal_prio) ||
  		    (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
  			p->dl.dl_boosted = 1;
-@@ -3731,6 +4061,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+@@ -3730,6 +4060,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
  	balance_callback(rq);
  	preempt_enable();
  }
@@ -18861,7 +18861,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  #endif
  
  void set_user_nice(struct task_struct *p, long nice)
-@@ -3975,10 +4310,9 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
+@@ -3974,10 +4309,9 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
  	 * Keep a potential priority boosting if called from
  	 * sched_setscheduler().
  	 */
@@ -18874,7 +18874,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  
  	if (dl_prio(p->prio))
  		p->sched_class = &dl_sched_class;
-@@ -4265,7 +4599,7 @@ static int __sched_setscheduler(struct task_struct *p,
+@@ -4264,7 +4598,7 @@ static int __sched_setscheduler(struct task_struct *p,
  		 * the runqueue. This will be done when the task deboost
  		 * itself.
  		 */
@@ -18883,7 +18883,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  		if (new_effective_prio == oldprio)
  			queue_flags &= ~DEQUEUE_MOVE;
  	}
-@@ -4940,6 +5274,7 @@ int __cond_resched_lock(spinlock_t *lock)
+@@ -4939,6 +5273,7 @@ int __cond_resched_lock(spinlock_t *lock)
  }
  EXPORT_SYMBOL(__cond_resched_lock);
  
@@ -18891,7 +18891,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  int __sched __cond_resched_softirq(void)
  {
  	BUG_ON(!in_softirq());
-@@ -4953,6 +5288,7 @@ int __sched __cond_resched_softirq(void)
+@@ -4952,6 +5287,7 @@ int __sched __cond_resched_softirq(void)
  	return 0;
  }
  EXPORT_SYMBOL(__cond_resched_softirq);
@@ -18899,7 +18899,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  
  /**
   * yield - yield the current processor to other threads.
-@@ -5316,7 +5652,9 @@ void init_idle(struct task_struct *idle, int cpu)
+@@ -5315,7 +5651,9 @@ void init_idle(struct task_struct *idle, int cpu)
  
  	/* Set the preempt count _outside_ the spinlocks! */
  	init_idle_preempt_count(idle, cpu);
@@ -18910,7 +18910,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  	/*
  	 * The idle tasks have their own, simple scheduling class:
  	 */
-@@ -5459,6 +5797,8 @@ void sched_setnuma(struct task_struct *p, int nid)
+@@ -5458,6 +5796,8 @@ void sched_setnuma(struct task_struct *p, int nid)
  #endif /* CONFIG_NUMA_BALANCING */
  
  #ifdef CONFIG_HOTPLUG_CPU
@@ -18919,7 +18919,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  /*
   * Ensures that the idle task is using init_mm right before its cpu goes
   * offline.
-@@ -5473,7 +5813,12 @@ void idle_task_exit(void)
+@@ -5472,7 +5812,12 @@ void idle_task_exit(void)
  		switch_mm(mm, &init_mm, current);
  		finish_arch_post_lock_switch();
  	}
@@ -18933,7 +18933,15 @@ index 78181c03d9c7..034a738f1bf7 100644
  }
  
  /*
-@@ -7434,6 +7779,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -5881,6 +6226,7 @@ static int init_rootdomain(struct root_domain *rd)
+ 	rd->rto_cpu = -1;
+ 	raw_spin_lock_init(&rd->rto_lock);
+ 	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
++	rd->rto_push_work.flags |= IRQ_WORK_HARD_IRQ;
+ #endif
+ 
+ 	init_dl_bw(&rd->dl_bw);
+@@ -7439,6 +7785,10 @@ int sched_cpu_dying(unsigned int cpu)
  	update_max_interval();
  	nohz_balance_exit_idle(cpu);
  	hrtick_clear(rq);
@@ -18944,7 +18952,7 @@ index 78181c03d9c7..034a738f1bf7 100644
  	return 0;
  }
  #endif
-@@ -7695,7 +8044,7 @@ void __init sched_init(void)
+@@ -7700,7 +8050,7 @@ void __init sched_init(void)
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  static inline int preempt_count_equals(int preempt_offset)
  {
@@ -19091,7 +19099,7 @@ index 69631fa46c2f..6d28fcd08872 100644
  #ifdef HAVE_RT_PUSH_IPI
  /*
 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
-index f139f22ce30d..b0691f4e7d49 100644
+index 9c131168d933..340a0a5d435c 100644
 --- a/kernel/sched/rt.c
 +++ b/kernel/sched/rt.c
 @@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
@@ -19103,10 +19111,10 @@ index f139f22ce30d..b0691f4e7d49 100644
  }
  
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index ad77d666583c..16df04073b57 100644
+index cff985feb6e7..280c7d5a7657 100644
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -1152,6 +1152,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+@@ -1162,6 +1162,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
  #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
  #define WF_FORK		0x02		/* child wakeup after fork */
  #define WF_MIGRATED	0x4		/* internal use, task got migrated */
@@ -19114,7 +19122,7 @@ index ad77d666583c..16df04073b57 100644
  
  /*
   * To aid in avoiding the subversion of "niceness" due to uneven distribution
-@@ -1335,6 +1336,15 @@ extern void init_sched_fair_class(void);
+@@ -1345,6 +1346,15 @@ extern void init_sched_fair_class(void);
  extern void resched_curr(struct rq *rq);
  extern void resched_cpu(int cpu);
  
@@ -24873,11 +24881,11 @@ index 1afec32de6f2..11fa431046a8 100644
  	dump_stack();
 diff --git a/localversion-rt b/localversion-rt
 new file mode 100644
-index 000000000000..c06cc4356292
+index 000000000000..66fa05e70f29
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt57
++-rt60
 diff --git a/mm/Kconfig b/mm/Kconfig
 index 86e3e0e74d20..77e5862a1ed2 100644
 --- a/mm/Kconfig
@@ -25129,7 +25137,7 @@ index 6f4d27c5bb32..5cd25c745a8f 100644
  #ifdef finish_arch_post_lock_switch
  	finish_arch_post_lock_switch();
 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 4a044134ce84..3f58b5571c8c 100644
+index fbc38888252b..1cb08e1406ea 100644
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
 @@ -61,6 +61,7 @@
@@ -25397,7 +25405,7 @@ index 4a044134ce84..3f58b5571c8c 100644
  }
  
  /*
-@@ -2636,7 +2696,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+@@ -2629,7 +2689,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
  		struct per_cpu_pages *pcp;
  		struct list_head *list;
  
@@ -25406,7 +25414,7 @@ index 4a044134ce84..3f58b5571c8c 100644
  		do {
  			pcp = &this_cpu_ptr(zone->pageset)->pcp;
  			list = &pcp->lists[migratetype];
-@@ -2663,7 +2723,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+@@ -2656,7 +2716,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
  		 * allocate greater than order-1 page units with __GFP_NOFAIL.
  		 */
  		WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -25415,7 +25423,7 @@ index 4a044134ce84..3f58b5571c8c 100644
  
  		do {
  			page = NULL;
-@@ -2675,22 +2735,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+@@ -2668,22 +2728,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
  			if (!page)
  				page = __rmqueue(zone, order, migratetype);
  		} while (page && check_new_pages(page, order));
@@ -25444,7 +25452,7 @@ index 4a044134ce84..3f58b5571c8c 100644
  	return NULL;
  }
  
-@@ -6568,7 +6630,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
+@@ -6561,7 +6623,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
  	int cpu = (unsigned long)hcpu;
  
  	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
@@ -25454,7 +25462,7 @@ index 4a044134ce84..3f58b5571c8c 100644
  		drain_pages(cpu);
  
  		/*
-@@ -6594,6 +6658,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
+@@ -6587,6 +6651,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
  void __init page_alloc_init(void)
  {
  	hotcpu_notifier(page_alloc_cpu_notify, 0);
================================================================

---- gitweb:

http://git.pld-linux.org/gitweb.cgi/packages/kernel.git/commitdiff/f9907f71e2570b474fae7ca7ad3406e99c9b7736
