packages (Titanium): kernel-desktop/kernel-desktop.spec, kernel-desktop/ker...

cactus <cactus at pld-linux.org>
Thu Sep 17 13:34:35 CEST 2009


Author: cactus                       Date: Thu Sep 17 11:34:35 2009 GMT
Module: packages                      Tag: Titanium
---- Log message:
- rel 1.1; bfs up to 222

---- Files affected:
packages/kernel-desktop:
   kernel-desktop.spec (1.204.2.66 -> 1.204.2.67) , kernel-desktop-sched-bfs.patch (1.1.2.5 -> 1.1.2.6) 

---- Diffs:

================================================================
Index: packages/kernel-desktop/kernel-desktop.spec
diff -u packages/kernel-desktop/kernel-desktop.spec:1.204.2.66 packages/kernel-desktop/kernel-desktop.spec:1.204.2.67
--- packages/kernel-desktop/kernel-desktop.spec:1.204.2.66	Mon Sep 14 15:07:24 2009
+++ packages/kernel-desktop/kernel-desktop.spec	Thu Sep 17 13:34:29 2009
@@ -46,7 +46,7 @@
 
 %define		_basever		2.6.31
 %define		_postver		%{nil}
-%define		_rel			1
+%define		_rel			1.1
 
 %define		_enable_debug_packages			0
 
@@ -1085,6 +1085,9 @@
 All persons listed below can be reached at <cvs_login>@pld-linux.org
 
 $Log$
+Revision 1.204.2.67  2009/09/17 11:34:29  cactus
+- rel 1.1; bfs up to 222
+
 Revision 1.204.2.66  2009/09/14 13:07:24  shadzik
 - rel 1
 

================================================================
Index: packages/kernel-desktop/kernel-desktop-sched-bfs.patch
diff -u packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.5 packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.6
--- packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.5	Mon Sep 14 22:25:11 2009
+++ packages/kernel-desktop/kernel-desktop-sched-bfs.patch	Thu Sep 17 13:34:29 2009
@@ -1,4 +1,4 @@
-The Brain Fuck Scheduler v0.221 by Con Kolivas.
+The Brain Fuck Scheduler v0.222 by Con Kolivas.
 
 A single shared runqueue O(n) strict fairness earliest deadline first design.
 
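Aside (illustrative, not part of the diff): the one-line design statement above can be restated as a tiny self-contained C sketch. Every name below is invented for the example; the real code later in this patch scans by priority class first and uses wrap-safe deadline comparisons.

#include <stddef.h>

/* A toy task record: one shared run list, one EDF key. */
struct toy_task {
	unsigned long deadline;		/* earliest-deadline-first key */
	struct toy_task *next;		/* next task on the single shared queue */
};

/* O(n) walk of the single shared queue: the earliest deadline wins,
 * and the first-queued task wins ties, giving strict fairness. */
static struct toy_task *toy_pick_next(struct toy_task *head)
{
	struct toy_task *p, *edt = NULL;

	for (p = head; p; p = p->next)
		if (!edt || p->deadline < edt->deadline)
			edt = p;
	return edt;
}
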
@@ -21,8 +21,9 @@
 Now includes accurate sub-tick accounting of tasks so userspace reported
 cpu usage may be very different.
 
+
 ---
- Documentation/sysctl/kernel.txt |   28
+ Documentation/sysctl/kernel.txt |   26
  Makefile                        |    2
  fs/pipe.c                       |    4
  fs/proc/base.c                  |    2
@@ -30,7 +31,6 @@
  include/linux/ioprio.h          |    2
  include/linux/sched.h           |  194
  init/Kconfig                    |   61
- init/main.c                     |    5
  kernel/Kconfig.preempt          |   19
  kernel/Makefile                 |    4
  kernel/delayacct.c              |    2
@@ -39,7 +39,7 @@
  kernel/kthread.c                |    4
  kernel/posix-cpu-timers.c       |   14
  kernel/sched.c                  |10583 ----------------------------------------
- kernel/sched_bfs.c              | 6203 +++++++++++++++++++++++
+ kernel/sched_bfs.c              | 6152 +++++++++++++++++++++++
  kernel/sched_debug.c            |  509 -
  kernel/sched_fair.c             | 1842 ------
  kernel/sched_idletask.c         |  129
@@ -49,13 +49,13 @@
  kernel/trace/trace.c            |    4
  kernel/workqueue.c              |    2
  mm/oom_kill.c                   |    2
- 27 files changed, 6319 insertions(+), 15265 deletions(-)
+ 26 files changed, 6261 insertions(+), 15265 deletions(-)
 
 Index: linux-2.6.31-bfs/Documentation/sysctl/kernel.txt
 ===================================================================
---- linux-2.6.31-bfs.orig/Documentation/sysctl/kernel.txt	2009-09-13 13:45:35.267511949 +1000
-+++ linux-2.6.31-bfs/Documentation/sysctl/kernel.txt	2009-09-13 13:45:45.782386464 +1000
-@@ -27,6 +27,7 @@
+--- linux-2.6.31-bfs.orig/Documentation/sysctl/kernel.txt	2009-09-16 15:18:59.336781941 +1000
++++ linux-2.6.31-bfs/Documentation/sysctl/kernel.txt	2009-09-17 09:35:47.864326713 +1000
+@@ -27,6 +27,7 @@ show up in /proc/sys/kernel:
  - domainname
  - hostname
  - hotplug
@@ -63,7 +63,7 @@
  - java-appletviewer           [ binfmt_java, obsolete ]
  - java-interpreter            [ binfmt_java, obsolete ]
  - kstack_depth_to_print       [ X86 only ]
-@@ -49,6 +50,7 @@
+@@ -49,6 +50,7 @@ show up in /proc/sys/kernel:
  - randomize_va_space
  - real-root-dev               ==> Documentation/initrd.txt
  - reboot-cmd                  [ SPARC only ]
@@ -71,7 +71,7 @@
  - rtsig-max
  - rtsig-nr
  - sem
-@@ -171,6 +173,16 @@
+@@ -171,6 +173,16 @@ Default value is "/sbin/hotplug".
 
  ==============================================================
 
@@ -88,7 +88,7 @@
  l2cr: (PPC only)
 
  This flag controls the L2 cache of G3 processor boards. If
-@@ -333,6 +345,22 @@
+@@ -333,6 +345,20 @@ rebooting. ???
 
  ==============================================================
 
@@ -100,11 +100,9 @@
 +overall. Conversely decreasing it will decrease average and maximum
 +latencies but at the expense of throughput. This value is in
 +milliseconds and the default value chosen depends on the number of
-+cpus available at scheduler initialisation with a minimum of 6. The
-+value can be set to 0 which means no more than one tick (limited
-+by HZ resolution).
++cpus available at scheduler initialisation with a minimum of 6.
 +
-+Valid values are from 0-5000.
++Valid values are from 1-5000.
 +
 +==============================================================
 +
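Aside (illustrative, not part of the diff): per the documentation text above and the sysctl table later in this patch (proc_dointvec_minmax bounded by one and five_thousand), rr_interval should surface as /proc/sys/kernel/rr_interval, an integer number of milliseconds in the range 1-5000. A minimal userspace read, as a sketch with error handling trimmed:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/rr_interval", "r");
	int ms;

	if (f && fscanf(f, "%d", &ms) == 1)
		printf("rr_interval = %d ms\n", ms);	/* docs above: 1-5000 */
	if (f)
		fclose(f);
	return 0;
}
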
@@ -113,9 +111,9 @@
  The file rtsig-max can be used to tune the maximum number
 Index: linux-2.6.31-bfs/fs/pipe.c
 ===================================================================
---- linux-2.6.31-bfs.orig/fs/pipe.c	2009-09-13 13:45:35.238512036 +1000
-+++ linux-2.6.31-bfs/fs/pipe.c	2009-09-13 13:45:45.794386717 +1000
-@@ -78,10 +78,6 @@
+--- linux-2.6.31-bfs.orig/fs/pipe.c	2009-09-16 15:18:59.344779398 +1000
++++ linux-2.6.31-bfs/fs/pipe.c	2009-09-16 15:19:58.187533968 +1000
+@@ -78,10 +78,6 @@ void pipe_wait(struct pipe_inode_info *p
  {
  	DEFINE_WAIT(wait);
 
@@ -128,9 +126,9 @@
  	schedule();
 Index: linux-2.6.31-bfs/include/linux/init_task.h
 ===================================================================
---- linux-2.6.31-bfs.orig/include/linux/init_task.h	2009-09-13 13:45:35.272511904 +1000
-+++ linux-2.6.31-bfs/include/linux/init_task.h	2009-09-13 13:45:45.811386843 +1000
-@@ -116,21 +116,16 @@
+--- linux-2.6.31-bfs.orig/include/linux/init_task.h	2009-09-16 15:18:59.432778948 +1000
++++ linux-2.6.31-bfs/include/linux/init_task.h	2009-09-16 15:19:58.188533048 +1000
+@@ -116,21 +116,16 @@ extern struct cred init_cred;
  	.usage		= ATOMIC_INIT(2),				\
  	.flags		= PF_KTHREAD,					\
  	.lock_depth	= -1,						\
@@ -159,8 +157,8 @@
  	.ptraced	= LIST_HEAD_INIT(tsk.ptraced),			\
 Index: linux-2.6.31-bfs/include/linux/sched.h
 ===================================================================
---- linux-2.6.31-bfs.orig/include/linux/sched.h	2009-09-13 13:45:35.281511942 +1000
-+++ linux-2.6.31-bfs/include/linux/sched.h	2009-09-13 13:46:11.880566952 +1000
+--- linux-2.6.31-bfs.orig/include/linux/sched.h	2009-09-16 15:18:59.442778755 +1000
++++ linux-2.6.31-bfs/include/linux/sched.h	2009-09-16 15:19:58.189534363 +1000
 @@ -36,8 +36,11 @@
  #define SCHED_FIFO		1
  #define SCHED_RR		2
@@ -175,7 +173,7 @@
 
  #ifdef __KERNEL__
 
-@@ -144,13 +147,10 @@
+@@ -144,13 +147,10 @@ extern u64 cpu_nr_migrations(int cpu);
  extern unsigned long get_parent_ip(unsigned long addr);
 
  struct seq_file;
@@ -189,7 +187,7 @@
  #else
  static inline void
  proc_sched_show_task(struct task_struct *p, struct seq_file *m)
-@@ -159,10 +159,6 @@
+@@ -159,10 +159,6 @@ proc_sched_show_task(struct task_struct
  static inline void proc_sched_set_task(struct task_struct *p)
  {
  }
@@ -200,7 +198,7 @@
  #endif
 
  extern unsigned long long time_sync_thresh;
-@@ -254,8 +250,8 @@
+@@ -254,8 +250,8 @@ extern asmlinkage void schedule_tail(str
  extern void init_idle(struct task_struct *idle, int cpu);
  extern void init_idle_bootup_task(struct task_struct *idle);
 
@@ -211,7 +209,7 @@
 
  extern cpumask_var_t nohz_cpu_mask;
  #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
-@@ -1021,148 +1017,6 @@
+@@ -1021,148 +1017,6 @@ struct uts_namespace;
  struct rq;
  struct sched_domain;
 
@@ -360,7 +358,7 @@
  struct task_struct {
  	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
  	void *stack;
-@@ -1172,17 +1026,18 @@
+@@ -1172,17 +1026,18 @@ struct task_struct {
 
  	int lock_depth;		/* BKL lock depth */
 
@@ -387,7 +385,7 @@
 
  #ifdef CONFIG_PREEMPT_NOTIFIERS
  	/* list of struct preempt_notifier: */
-@@ -1205,6 +1060,9 @@
+@@ -1205,6 +1060,9 @@ struct task_struct {
 
  	unsigned int policy;
  	cpumask_t cpus_allowed;
@@ -397,7 +395,7 @@
 
  #ifdef CONFIG_PREEMPT_RCU
  	int rcu_read_lock_nesting;
-@@ -1273,6 +1131,7 @@
+@@ -1273,6 +1131,7 @@ struct task_struct {
  	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
  	cputime_t utime, stime, utimescaled, stimescaled;
@@ -405,7 +403,7 @@
  	cputime_t gtime;
  	cputime_t prev_utime, prev_stime;
  	unsigned long nvcsw, nivcsw; /* context switch counts */
-@@ -1497,11 +1356,14 @@
+@@ -1497,11 +1356,14 @@ struct task_struct {
   * priority to a value higher than any user task. Note:
   * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
   */
@@ -423,7 +421,7 @@
  #define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
 
  static inline int rt_prio(int prio)
-@@ -1785,11 +1647,7 @@
+@@ -1785,11 +1647,7 @@ task_sched_runtime(struct task_struct *t
  extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
  /* sched_exec is called by processes performing an exec */
@@ -437,9 +435,9 @@
  extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 Index: linux-2.6.31-bfs/kernel/sched.c
 ===================================================================
---- linux-2.6.31-bfs.orig/kernel/sysctl.c	2009-09-13 13:45:35.325511987 +1000
-+++ linux-2.6.31-bfs/kernel/sysctl.c	2009-09-13 13:45:45.824386781 +1000
-@@ -86,6 +86,8 @@
+--- linux-2.6.31-bfs.orig/kernel/sysctl.c	2009-09-16 15:18:59.418785545 +1000
++++ linux-2.6.31-bfs/kernel/sysctl.c	2009-09-17 09:35:47.873127695 +1000
+@@ -86,6 +86,8 @@ extern int percpu_pagelist_fraction;
  extern int compat_log;
  extern int latencytop_enabled;
  extern int sysctl_nr_open_min, sysctl_nr_open_max;
@@ -448,7 +446,7 @@
  #ifndef CONFIG_MMU
  extern int sysctl_nr_trim_pages;
  #endif
-@@ -100,10 +102,11 @@
+@@ -100,10 +102,11 @@ static int neg_one = -1;
  #endif
 
  static int zero;
@@ -462,7 +460,7 @@
 
  /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
  static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
-@@ -238,134 +241,7 @@
+@@ -238,134 +241,7 @@ static struct ctl_table root_table[] = {
  	{ .ctl_name = 0 }
  };
 
@@ -597,7 +595,7 @@
  #ifdef CONFIG_PROVE_LOCKING
  	{
  		.ctl_name	= CTL_UNNUMBERED,
-@@ -798,6 +674,28 @@
+@@ -798,6 +674,28 @@ static struct ctl_table kern_table[] = {
  		.proc_handler	= &proc_dointvec,
  	},
  #endif
@@ -609,7 +607,7 @@
 +		.mode		= 0644,
 +		.proc_handler	= &proc_dointvec_minmax,
 +		.strategy	= &sysctl_intvec,
-+		.extra1		= &zero,
++		.extra1		= &one,
 +		.extra2		= &five_thousand,
 +	},
 +	{
@@ -628,9 +626,9 @@
  		.ctl_name	= KERN_SPIN_RETRY,
 Index: linux-2.6.31-bfs/kernel/workqueue.c
 ===================================================================
---- linux-2.6.31-bfs.orig/kernel/workqueue.c	2009-09-13 13:45:35.345512018 +1000
-+++ linux-2.6.31-bfs/kernel/workqueue.c	2009-09-13 13:45:45.836549400 +1000
-@@ -317,8 +317,6 @@
+--- linux-2.6.31-bfs.orig/kernel/workqueue.c	2009-09-16 15:18:59.422785635 +1000
++++ linux-2.6.31-bfs/kernel/workqueue.c	2009-09-16 15:19:58.199528584 +1000
+@@ -317,8 +317,6 @@ static int worker_thread(void *__cwq)
  	if (cwq->wq->freezeable)
  		set_freezable();
 
@@ -642,8 +640,8 @@
 Index: linux-2.6.31-bfs/kernel/sched_fair.c
 ===================================================================
 --- /dev/null	1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.31-bfs/kernel/sched_bfs.c	2009-09-14 16:41:30.090860676 +1000
-@@ -0,0 +1,6203 @@
++++ linux-2.6.31-bfs/kernel/sched_bfs.c	2009-09-17 09:35:47.847128014 +1000
+@@ -0,0 +1,6152 @@
 +/*
 + *  kernel/sched_bfs.c, was sched.c
 + *
@@ -812,7 +810,6 @@
 +#ifdef CONFIG_SMP
 +	cpumask_t cpu_idle_map;
 +#endif
-+	void (*wunt)(struct task_struct *, struct rq *, unsigned long);
 +};
 +
 +static struct global_rq grq;
@@ -828,7 +825,6 @@
 +#endif
 +#endif
 +
-+	struct task_struct *preempt_next;
 +	struct task_struct *curr, *idle;
 +	struct mm_struct *prev_mm;
 +
@@ -1135,11 +1131,6 @@
 +	return (!list_empty(&p->run_list));
 +}
 +
-+static inline int queued_or_running(struct task_struct *p)
-+{
-+	return (task_queued(p) || task_running(p));
-+}
-+
 +/*
 + * Removing from the global runqueue. Enter with grq locked.
 + */
@@ -1470,11 +1461,11 @@
 +		 *
 +		 * NOTE! Since we don't hold any locks, it's not
 +		 * even sure that "rq" stays as the right runqueue!
-+		 * But we don't care, since "task_running()" will
++		 * But we don't care, since this will
 +		 * return false if the runqueue has changed and p
 +		 * is actually now running somewhere else!
 +		 */
-+		while (task_running(p)) {
++		while (task_running(p) && p == rq->curr) {
 +			if (match_state && unlikely(p->state != match_state))
 +				return 0;
 +			cpu_relax();
@@ -1585,14 +1576,16 @@
 +/*
 + * Wake up *any* suitable cpu to schedule this task.
 + */
-+static void try_preempt(struct task_struct *p, struct rq *this_rq)
++static void try_preempt(struct task_struct *p)
 +{
++	struct rq *highest_prio_rq, *this_rq;
 +	unsigned long latest_deadline, cpu;
-+	struct rq *highest_prio_rq;
 +	int highest_prio;
 +	cpumask_t tmp;
 +
-+	/* Use this_rq as baseline and fall back on */
++	/* Try the task's previous rq first and as a fallback */
++	this_rq = task_rq(p);
++
 +	if (cpu_isset(this_rq->cpu, p->cpus_allowed)) {
 +		highest_prio_rq = this_rq;
 +		/* If this_rq is idle, use that. */
@@ -1631,7 +1624,6 @@
 +		return;
 +found_rq:
 +	resched_task(highest_prio_rq->curr);
-+	highest_prio_rq->preempt_next = p;
 +	return;
 +}
 +
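Aside (illustrative, not part of the diff): the rewrite above changes try_preempt() to take only the task, recovering a baseline runqueue itself via task_rq(p) instead of having every caller pass one in. The scan it performs has roughly this shape, restated with toy types (a plain bitmask stands in for cpus_allowed, and real BFS also breaks priority ties by latest deadline):

/* Toy per-cpu runqueue: just enough state for the illustration. */
struct toy_rq {
	int cpu;
	int curr_prio;	/* prio of the running task; higher value = less important */
};

/* Find the allowed cpu running the least important task; the caller
 * then marks that cpu's current task for rescheduling. */
static struct toy_rq *toy_preempt_target(struct toy_rq *rqs, int nr_cpus,
					 unsigned long allowed_mask)
{
	struct toy_rq *best = 0;
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (!(allowed_mask & (1UL << cpu)))
			continue;			/* affinity excludes this cpu */
		if (!best || rqs[cpu].curr_prio > best->curr_prio)
			best = &rqs[cpu];
	}
	return best;
}
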
@@ -1694,7 +1686,7 @@
 +	if (!(old_state & state))
 +		goto out_unlock;
 +
-+	if (queued_or_running(p))
++	if (task_queued(p) || task_running(p))
 +		goto out_running;
 +
 +	activate_task(p, rq);
@@ -1705,7 +1697,7 @@
 +	 * instead waiting for current to deschedule.
 +	 */
 +	if (!sync || (sync && !no_idle_cpus()))
-+		try_preempt(p, rq);
++		try_preempt(p);
 +	success = 1;
 +
 +out_running:
@@ -1812,14 +1804,15 @@
 + * that must be done for every newly created context, then puts the task
 + * on the runqueue and wakes it.
 + */
-+static void
-+normal_wunt(struct task_struct *p, struct rq *rq, unsigned long clone_flags)
++void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 +{
 +	struct task_struct *parent = p->parent;
++	unsigned long flags;
++	struct rq *rq;
 +
 +	BUG_ON(p->state != TASK_RUNNING);
-+	set_task_cpu(p, task_cpu(parent));
 +
++	rq = time_task_grq_lock(p, &flags);
 +	activate_task(p, rq);
 +	trace_sched_wakeup_new(rq, p, 1);
 +	if (!(clone_flags & CLONE_VM) && rq->curr == parent &&
@@ -1830,43 +1823,9 @@
 +		 * usually avoids a lot of COW overhead.
 +		 */
 +			set_tsk_need_resched(parent);
-+			rq->preempt_next = p;
 +	} else
-+		try_preempt(p, rq);
-+}
-+
-+extern int fragile_boot;
-+
-+/* Fragile version to not wake to other cpus during boot */
-+static void
-+fb_wunt(struct task_struct *p, struct rq *rq, unsigned long clone_flags)
-+{
-+	struct task_struct *parent = p->parent;
-+
-+	BUG_ON(p->state != TASK_RUNNING);
-+	set_task_cpu(p, task_cpu(parent));
-+
-+	activate_task(p, rq);
-+	trace_sched_wakeup_new(rq, p, 1);
-+	/* Child always runs first */
-+	set_tsk_need_resched(parent);
-+	rq->preempt_next = p;
-+	/*
-+	 * fragile_boot is set initially and unset only once just before
-+	 * init so we change to normal wunt from here onwards, the ->wunt
-+	 * pointer is protected by grq lock.
-+	 */
-+	if (!fragile_boot)
-+		grq.wunt = normal_wunt;
-+}
-+
-+void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
-+{
-+	unsigned long flags;
-+
-+	struct rq *rq = time_task_grq_lock(p, &flags);
-+	grq.wunt(p, rq, clone_flags);
-+	grq_unlock_irqrestore(&flags);
++		try_preempt(p);
++	task_grq_unlock(&flags);
 +}
 +
 +/*
@@ -2842,72 +2801,67 @@
 +
 +/*
 + * O(n) lookup of all tasks in the global runqueue. The real brainfuck
-+ * of lock contention and O(n).
++ * of lock contention and O(n). It's not really O(n), since only queued,
++ * not running, tasks are scanned; it reaches O(n) over queued tasks only
++ * in the worst case, as the right task may be found before scanning all
++ * of them.
++ * Tasks are selected in this order:
++ * Real time tasks are selected purely by their static priority and in the
++ * order they were queued, so the lowest value idx, and the first queued task
++ * of that priority value is chosen.
++ * If no real time tasks are found, the SCHED_ISO priority is checked, and
++ * all SCHED_ISO tasks have the same priority value, so they're selected by
++ * the earliest deadline value.
++ * If no SCHED_ISO tasks are found, SCHED_NORMAL tasks are selected by the
++ * earliest deadline.
++ * Finally if no SCHED_NORMAL tasks are found, SCHED_IDLEPRIO tasks are
++ * selected by the earliest deadline.
 + */
 +static inline struct
 +task_struct *earliest_deadline_task(struct rq *rq, struct task_struct *idle)
 +{
-+	unsigned long long_deadline, shortest_deadline;
-+	struct task_struct *edt, *p;
++	unsigned long dl, earliest_deadline = 0; /* Initialise to silence compiler */
++	struct task_struct *p, *edt;
 +	unsigned int cpu = rq->cpu;
 +	struct list_head *queue;
 +	int idx = 0;
 +
-+	if (rq->preempt_next) {
-+		if (likely(task_queued(rq->preempt_next) &&
-+			cpu_isset(cpu, rq->preempt_next->cpus_allowed))) {
-+				edt = rq->preempt_next;
-+				goto out_take;
-+		}
-+	}
++	edt = idle;
 +retry:
 +	idx = find_next_bit(grq.prio_bitmap, PRIO_LIMIT, idx);
-+	queue = &grq.queue[idx];
-+	if (idx < MAX_RT_PRIO) {
-+		/* We found rt tasks */
-+		list_for_each_entry(p, queue, run_list) {
-+			if (cpu_isset(cpu, p->cpus_allowed)) {
-+				edt = p;
-+				goto out_take;
-+			}
-+		}
-+		/* More rt tasks, we couldn't take the lower prio ones */
-+		++idx;
-+		goto retry;
-+	}
-+
-+	/* No rt tasks, find earliest deadline task */
-+	edt = idle;
-+	if (unlikely(idx >= PRIO_LIMIT)) {
-+		/* All rt tasks but none suitable for this cpu */
++	if (idx >= PRIO_LIMIT)
 +		goto out;
-+	}
-+
-+	long_deadline = shortest_deadline = longest_deadline() * 2 + 1;
++	queue = &grq.queue[idx];
 +	list_for_each_entry(p, queue, run_list) {
-+		unsigned long deadline_diff;
 +		/* Make sure cpu affinity is ok */
 +		if (!cpu_isset(cpu, p->cpus_allowed))
 +			continue;
++		if (idx < MAX_RT_PRIO) {
++			/* We found an rt task */
++			edt = p;
++			goto out_take;
++		}
 +
-+		deadline_diff = p->deadline - jiffies;
++		/*
++		 * No rt task, select the earliest deadline task now.
++		 * On the 1st run the 2nd condition is never used, so
++		 * there is no need to initialise earliest_deadline
++		 * before. Normalise all old deadlines to now.
++		 */
++		if (time_before(p->deadline, jiffies))
++			dl = jiffies;
++		else
++			dl = p->deadline;
 +
-+		/* Normalise all old deadlines and cope with jiffy wrap. */
-+		if (deadline_diff > long_deadline)
-+			deadline_diff = 0;
-+
-+		/* Select the earliest deadline task now */
-+		if (edt == idle || deadline_diff < shortest_deadline) {
-+			shortest_deadline = deadline_diff;
++		if (edt == idle ||
++		    time_before(dl, earliest_deadline)) {
++			earliest_deadline = dl;
 +			edt = p;
 +		}
 +	}
 +	if (edt == idle) {
-+		if (idx < IDLE_PRIO) {
-+			/* Haven't checked for SCHED_IDLEPRIO tasks yet */
-+			idx++;
++		if (++idx < PRIO_LIMIT)
 +			goto retry;
-+		}
 +		goto out;
 +	}
 +out_take:
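Aside (illustrative, not part of the diff): the deadline tests above lean on the kernel's time_before() so they stay correct across jiffies wraparound. The underlying idiom is signed evaluation of an unsigned difference; restated standalone (not the kernel header itself):

/* Wrap-safe "a is earlier than b" for a free-running unsigned counter.
 * The subtraction wraps modulo 2^BITS_PER_LONG, so as long as the two
 * values are within half the counter range of each other, the sign of
 * the difference gives their true order. */
static inline int toy_time_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}
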
@@ -3049,8 +3003,6 @@
 +	else
 +		clear_cpuidle_map(cpu);
 +
-+	rq->preempt_next = NULL;
-+
 +	prefetch(next);
 +	prefetch_stack(next);
 +
@@ -3570,20 +3522,13 @@
 +	if (queued)
 +		dequeue_task(p);
 +	p->prio = prio;
-+	if (task_running(p))
++	if (task_running(p) && prio > oldprio)
 +		resched_task(p);
 +	if (queued) {
 +		enqueue_task(p);
-+		try_preempt(p, rq);
<<Diff was trimmed, longer than 597 lines>>

---- CVS-web:
    http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop.spec?r1=1.204.2.66&r2=1.204.2.67&f=u
    http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop-sched-bfs.patch?r1=1.1.2.5&r2=1.1.2.6&f=u


