packages (Titanium): kernel-desktop/kernel-desktop-sched-bfs.patch - BFS patch up to v315 for 2.6.33.
lmasko
lmasko at pld-linux.org
Tue Mar 16 13:06:39 CET 2010
Author: lmasko Date: Tue Mar 16 12:06:39 2010 GMT
Module: packages Tag: Titanium
---- Log message:
- BFS patch up to v315 for 2.6.33.
---- Files affected:
packages/kernel-desktop:
kernel-desktop-sched-bfs.patch (1.1.2.15 -> 1.1.2.16)
---- Diffs:
================================================================
Index: packages/kernel-desktop/kernel-desktop-sched-bfs.patch
diff -u packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.15 packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.16
--- packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.15 Mon Dec 28 18:53:29 2009
+++ packages/kernel-desktop/kernel-desktop-sched-bfs.patch Tue Mar 16 13:06:32 2010
@@ -1,4 +1,4 @@
-The Brain Fuck Scheduler v0.313 by Con Kolivas.
+The Brain Fuck Scheduler v0.315 by Con Kolivas.
A single shared runqueue O(n) strict fairness earliest deadline first design.
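In other words, every runnable task on the system lives on one shared queue, and pick-next is a linear scan for the earliest virtual deadline. A toy userspace model of that selection rule follows; the names, fields, and numbers are invented for illustration and are not taken from the patch:

    #include <stdio.h>

    /* Toy model of BFS-style earliest-deadline-first selection:
     * one shared list, O(n) scan, lowest virtual deadline wins. */
    struct toy_task {
        const char *name;
        unsigned long long deadline;   /* virtual deadline, in ns */
        struct toy_task *next;
    };

    static struct toy_task *pick_next(struct toy_task *queue)
    {
        struct toy_task *best = queue;
        for (struct toy_task *p = queue; p; p = p->next)
            if (p->deadline < best->deadline)
                best = p;              /* earliest deadline so far */
        return best;
    }

    int main(void)
    {
        struct toy_task c = { "make",  900, NULL };
        struct toy_task b = { "xorg",  300, &c  };
        struct toy_task a = { "shell", 600, &b  };
        printf("next: %s\n", pick_next(&a)->name);  /* -> xorg */
        return 0;
    }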
@@ -10,7 +10,7 @@
Features SCHED_IDLEPRIO and SCHED_ISO scheduling policies as well.
You do NOT need to use these policies for good performance, they are purely
-optional.
+optional for even better performance in extreme conditions.
To run something idleprio, use schedtool like so:
@@ -21,7 +21,7 @@
schedtool -I -e amarok
Now includes accurate sub-tick accounting of tasks so userspace reported
-cpu usage may be very different.
+cpu usage may be very different if you have very short lived tasks.
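For reference, schedtool is only a thin wrapper around sched_setscheduler(2), so the same effect can be had with a direct call. A minimal sketch, assuming the BFS policy numbers SCHED_ISO=4 and SCHED_IDLEPRIO=5 (the defines themselves sit in the trimmed part of the sched.h hunk below); on a kernel without BFS the call simply fails with EINVAL:

    #include <sched.h>
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    /* Policy numbers added by the BFS patch (absent from glibc headers). */
    #define SCHED_ISO       4   /* unprivileged soft-realtime */
    #define SCHED_IDLEPRIO  5   /* only runs when a CPU is otherwise idle */

    int main(void)
    {
        struct sched_param sp = { .sched_priority = 0 };

        /* Equivalent of "schedtool -I -e <cmd>", applied to ourselves. */
        if (sched_setscheduler(0, SCHED_ISO, &sp) == -1)
            fprintf(stderr, "SCHED_ISO failed: %s (not a BFS kernel?)\n",
                    strerror(errno));
        return 0;
    }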
---
Documentation/scheduler/sched-BFS.txt | 356 +
@@ -31,24 +31,25 @@
include/linux/init_task.h | 65
include/linux/ioprio.h | 2
include/linux/sched.h | 107
- init/Kconfig | 20
+ init/Kconfig | 17
init/main.c | 2
kernel/delayacct.c | 2
kernel/exit.c | 2
- kernel/fork.c | 2
+ kernel/kthread.c | 2
kernel/posix-cpu-timers.c | 14
kernel/sched.c | 4
- kernel/sched_bfs.c | 6653 ++++++++++++++++++++++++++++++
- kernel/sysctl.c | 35
+ kernel/sched_bfs.c | 6724 ++++++++++++++++++++++++++++++
+ kernel/slow-work.c | 1
+ kernel/sysctl.c | 31
lib/Kconfig.debug | 2
mm/oom_kill.c | 2
- 18 files changed, 7272 insertions(+), 29 deletions(-)
+ 19 files changed, 7337 insertions(+), 29 deletions(-)
-Index: linux-2.6.32-bfs/Documentation/sysctl/kernel.txt
+Index: linux-2.6.33-ck1/Documentation/sysctl/kernel.txt
===================================================================
---- linux-2.6.32-bfs.orig/Documentation/sysctl/kernel.txt 2009-12-03 21:39:54.000000000 +1100
-+++ linux-2.6.32-bfs/Documentation/sysctl/kernel.txt 2009-12-19 00:21:06.935377895 +1100
-@@ -29,6 +29,7 @@ show up in /proc/sys/kernel:
+--- linux-2.6.33-ck1.orig/Documentation/sysctl/kernel.txt 2010-02-25 21:51:46.369907919 +1100
++++ linux-2.6.33-ck1/Documentation/sysctl/kernel.txt 2010-02-25 21:58:38.561793820 +1100
+@@ -31,6 +31,7 @@ show up in /proc/sys/kernel:
- domainname
- hostname
- hotplug
@@ -56,7 +57,7 @@
- java-appletviewer [ binfmt_java, obsolete ]
- java-interpreter [ binfmt_java, obsolete ]
- kstack_depth_to_print [ X86 only ]
-@@ -51,6 +52,7 @@ show up in /proc/sys/kernel:
+@@ -53,6 +54,7 @@ show up in /proc/sys/kernel:
- randomize_va_space
- real-root-dev ==> Documentation/initrd.txt
- reboot-cmd [ SPARC only ]
@@ -64,7 +65,7 @@
- rtsig-max
- rtsig-nr
- sem
-@@ -209,6 +211,16 @@ Default value is "/sbin/hotplug".
+@@ -240,6 +242,16 @@ Default value is "/sbin/hotplug".
==============================================================
@@ -81,7 +82,7 @@
l2cr: (PPC only)
This flag controls the L2 cache of G3 processor boards. If
-@@ -383,6 +395,20 @@ rebooting. ???
+@@ -414,6 +426,20 @@ rebooting. ???
==============================================================
@@ -102,11 +103,11 @@
rtsig-max & rtsig-nr:
The file rtsig-max can be used to tune the maximum number
-Index: linux-2.6.32-bfs/include/linux/init_task.h
+Index: linux-2.6.33-ck1/include/linux/init_task.h
===================================================================
---- linux-2.6.32-bfs.orig/include/linux/init_task.h 2009-12-03 21:40:09.000000000 +1100
-+++ linux-2.6.32-bfs/include/linux/init_task.h 2009-12-19 00:21:06.936211740 +1100
-@@ -119,6 +119,69 @@ extern struct cred init_cred;
+--- linux-2.6.33-ck1.orig/include/linux/init_task.h 2010-02-25 21:51:52.563668896 +1100
++++ linux-2.6.33-ck1/include/linux/init_task.h 2010-02-25 21:58:38.561793820 +1100
+@@ -115,6 +115,69 @@ extern struct cred init_cred;
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
@@ -156,7 +157,7 @@
+ .journal_info = NULL, \
+ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ .fs_excl = ATOMIC_INIT(0), \
-+ .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
++ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
@@ -176,7 +177,7 @@
#define INIT_TASK(tsk) \
{ \
.state = 0, \
-@@ -185,7 +248,7 @@ extern struct cred init_cred;
+@@ -181,7 +244,7 @@ extern struct cred init_cred;
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
}
@@ -185,10 +186,10 @@
#define INIT_CPU_TIMERS(cpu_timers) \
{ \
-Index: linux-2.6.32-bfs/include/linux/sched.h
+Index: linux-2.6.33-ck1/include/linux/sched.h
===================================================================
---- linux-2.6.32-bfs.orig/include/linux/sched.h 2009-12-03 21:40:09.000000000 +1100
-+++ linux-2.6.32-bfs/include/linux/sched.h 2009-12-28 03:03:44.025251129 +1100
+--- linux-2.6.33-ck1.orig/include/linux/sched.h 2010-02-25 21:51:52.643543559 +1100
++++ linux-2.6.33-ck1/include/linux/sched.h 2010-02-25 22:00:21.406620423 +1100
@@ -36,8 +36,15 @@
#define SCHED_FIFO 1
#define SCHED_RR 2
@@ -206,7 +207,7 @@
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000
-@@ -261,9 +268,6 @@ extern asmlinkage void schedule_tail(str
+@@ -264,9 +271,6 @@ extern asmlinkage void schedule_tail(str
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
@@ -216,7 +217,7 @@
extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
-@@ -1221,17 +1225,31 @@ struct task_struct {
+@@ -1238,17 +1242,31 @@ struct task_struct {
int lock_depth; /* BKL lock depth */
@@ -248,7 +249,7 @@
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
-@@ -1253,6 +1271,9 @@ struct task_struct {
+@@ -1270,6 +1288,9 @@ struct task_struct {
unsigned int policy;
cpumask_t cpus_allowed;
@@ -258,7 +259,7 @@
#ifdef CONFIG_TREE_PREEMPT_RCU
int rcu_read_lock_nesting;
-@@ -1330,6 +1351,9 @@ struct task_struct {
+@@ -1347,6 +1368,9 @@ struct task_struct {
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
cputime_t utime, stime, utimescaled, stimescaled;
@@ -266,10 +267,10 @@
+ unsigned long utime_pc, stime_pc;
+#endif
cputime_t gtime;
+ #ifndef CONFIG_VIRT_CPU_ACCOUNTING
cputime_t prev_utime, prev_stime;
- unsigned long nvcsw, nivcsw; /* context switch counts */
-@@ -1541,6 +1565,64 @@ struct task_struct {
- unsigned long stack_start;
+@@ -1568,6 +1592,64 @@ struct task_struct {
+ #endif
};
+#ifdef CONFIG_SCHED_BFS
@@ -292,7 +293,7 @@
+
+static inline void print_scheduler_version(void)
+{
-+ printk(KERN_INFO"BFS CPU scheduler v0.313 by Con Kolivas.\n");
++ printk(KERN_INFO"BFS CPU scheduler v0.315 by Con Kolivas.\n");
+}
+
+static inline int iso_task(struct task_struct *p)
@@ -331,9 +332,9 @@
+#endif
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
- #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-@@ -1559,9 +1641,19 @@ struct task_struct {
+@@ -1586,9 +1668,19 @@ struct task_struct {
#define MAX_USER_RT_PRIO 100
#define MAX_RT_PRIO MAX_USER_RT_PRIO
@@ -354,7 +355,7 @@
static inline int rt_prio(int prio)
{
-@@ -1870,7 +1962,7 @@ task_sched_runtime(struct task_struct *t
+@@ -1897,7 +1989,7 @@ task_sched_runtime(struct task_struct *t
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
@@ -363,7 +364,7 @@
extern void sched_exec(void);
#else
#define sched_exec() {}
-@@ -2025,6 +2117,9 @@ extern void wake_up_new_task(struct task
+@@ -2060,6 +2152,9 @@ extern void wake_up_new_task(struct task
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(struct task_struct *p, int clone_flags);
@@ -373,11 +374,11 @@
extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
-Index: linux-2.6.32-bfs/kernel/sysctl.c
+Index: linux-2.6.33-ck1/kernel/sysctl.c
===================================================================
---- linux-2.6.32-bfs.orig/kernel/sysctl.c 2009-12-03 21:40:10.000000000 +1100
-+++ linux-2.6.32-bfs/kernel/sysctl.c 2009-12-19 00:21:07.673250666 +1100
-@@ -105,7 +105,12 @@ static int zero;
+--- linux-2.6.33-ck1.orig/kernel/sysctl.c 2010-02-25 21:51:52.883543363 +1100
++++ linux-2.6.33-ck1/kernel/sysctl.c 2010-02-25 21:58:38.564793459 +1100
+@@ -104,7 +104,12 @@ static int zero;
static int __maybe_unused one = 1;
static int __maybe_unused two = 2;
static unsigned long one_ul = 1;
@@ -391,8 +392,8 @@
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
-@@ -243,7 +248,7 @@ static struct ctl_table root_table[] = {
- { .ctl_name = 0 }
+@@ -239,7 +244,7 @@ static struct ctl_table root_table[] = {
+ { }
};
-#ifdef CONFIG_SCHED_DEBUG
@@ -400,58 +401,54 @@
static int min_sched_granularity_ns = 100000; /* 100 usecs */
static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_wakeup_granularity_ns; /* 0 usecs */
-@@ -251,6 +256,7 @@ static int max_wakeup_granularity_ns = N
+@@ -251,6 +256,7 @@ static int max_sched_shares_ratelimit =
#endif
static struct ctl_table kern_table[] = {
+#ifndef CONFIG_SCHED_BFS
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_child_runs_first",
-@@ -379,6 +385,7 @@ static struct ctl_table kern_table[] = {
+ .data = &sysctl_sched_child_runs_first,
+@@ -364,6 +370,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
+#endif /* !CONFIG_SCHED_BFS */
#ifdef CONFIG_PROVE_LOCKING
{
- .ctl_name = CTL_UNNUMBERED,
-@@ -830,6 +837,30 @@ static struct ctl_table kern_table[] = {
- .proc_handler = &proc_dointvec,
+ .procname = "prove_locking",
+@@ -761,6 +768,26 @@ static struct ctl_table kern_table[] = {
+ .proc_handler = proc_dointvec,
},
#endif
+#ifdef CONFIG_SCHED_BFS
+ {
-+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "rr_interval",
+ .data = &rr_interval,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
-+ .strategy = &sysctl_intvec,
+ .extra1 = &one,
+ .extra2 = &five_thousand,
+ },
+ {
-+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "iso_cpu",
+ .data = &sched_iso_cpu,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
-+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one_hundred,
+ },
+#endif
#if defined(CONFIG_S390) && defined(CONFIG_SMP)
{
- .ctl_name = KERN_SPIN_RETRY,
-Index: linux-2.6.32-bfs/kernel/sched_bfs.c
+ .procname = "spin_retry",
+Index: linux-2.6.33-ck1/kernel/sched_bfs.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.32-bfs/kernel/sched_bfs.c 2009-12-28 03:03:44.024125606 +1100
-@@ -0,0 +1,6653 @@
++++ linux-2.6.33-ck1/kernel/sched_bfs.c 2010-02-25 21:58:53.033543899 +1100
+@@ -0,0 +1,6724 @@
+/*
+ * kernel/sched_bfs.c, was sched.c
+ *
@@ -598,7 +595,7 @@
+ * by grq.lock.
+ */
+struct global_rq {
-+ spinlock_t lock;
++ raw_spinlock_t lock;
+ unsigned long nr_running;
+ unsigned long nr_uninterruptible;
+ unsigned long long nr_switches;
@@ -787,19 +784,19 @@
+static inline void grq_lock(void)
+ __acquires(grq.lock)
+{
-+ spin_lock(&grq.lock);
++ raw_spin_lock(&grq.lock);
+}
+
+static inline void grq_unlock(void)
+ __releases(grq.lock)
+{
-+ spin_unlock(&grq.lock);
++ raw_spin_unlock(&grq.lock);
+}
+
+static inline void grq_lock_irq(void)
+ __acquires(grq.lock)
+{
-+ spin_lock_irq(&grq.lock);
++ raw_spin_lock_irq(&grq.lock);
+}
+
+static inline void time_lock_grq(struct rq *rq)
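The wholesale spin_* to raw_spin_* renames in these wrappers track the 2.6.33 spinlock split: locks that must stay truly spinning (the scheduler's own locks among them) are now declared raw_spinlock_t and taken with the raw_spin_* API, while plain spinlock_t is free to become a sleeping lock on preempt-rt. The general pattern, sketched outside this patch with a made-up lock name:

    #include <linux/spinlock.h>

    /* Locks that must never sleep (scheduler core, low-level arch
     * code) use the raw type introduced in 2.6.33; plain spinlock_t
     * may be converted to a sleeping lock by preempt-rt. */
    static DEFINE_RAW_SPINLOCK(example_lock);

    static void example_critical_section(void)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&example_lock, flags);
        /* ... touch state shared with interrupt context ... */
        raw_spin_unlock_irqrestore(&example_lock, flags);
    }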
@@ -812,19 +809,19 @@
+static inline void grq_unlock_irq(void)
+ __releases(grq.lock)
+{
-+ spin_unlock_irq(&grq.lock);
++ raw_spin_unlock_irq(&grq.lock);
+}
+
+static inline void grq_lock_irqsave(unsigned long *flags)
+ __acquires(grq.lock)
+{
-+ spin_lock_irqsave(&grq.lock, *flags);
++ raw_spin_lock_irqsave(&grq.lock, *flags);
+}
+
+static inline void grq_unlock_irqrestore(unsigned long *flags)
+ __releases(grq.lock)
+{
-+ spin_unlock_irqrestore(&grq.lock, *flags);
++ raw_spin_unlock_irqrestore(&grq.lock, *flags);
+}
+
+static inline struct rq
@@ -879,14 +876,14 @@
+ */
+inline int grunqueue_is_locked(void)
+{
-+ return spin_is_locked(&grq.lock);
++ return raw_spin_is_locked(&grq.lock);
+}
+
+inline void grq_unlock_wait(void)
+ __releases(grq.lock)
+{
+ smp_mb(); /* spin-unlock-wait is not a full memory barrier */
-+ spin_unlock_wait(&grq.lock);
++ raw_spin_unlock_wait(&grq.lock);
+}
+
+static inline void time_grq_lock(struct rq *rq, unsigned long *flags)
@@ -1314,10 +1311,17 @@
+#ifdef CONFIG_SMP
+void set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
-+ int old_cpu = task_cpu(p);
++#ifdef CONFIG_SCHED_DEBUG
++ /*
++ * We should never call set_task_cpu() on a blocked task,
++ * ttwu() will sort out the placement.
++ */
++ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
++ !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
++#endif
+
+ trace_sched_migrate_task(p, cpu);
-+ if (old_cpu != cpu)
++ if (task_cpu(p) != cpu)
+ perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
+
+ /*
@@ -1372,7 +1376,7 @@
+{
+ int cpu;
+
-+ assert_spin_locked(&grq.lock);
++ assert_raw_spin_locked(&grq.lock);
+
+ if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+ return;
@@ -1597,36 +1601,6 @@
+EXPORT_SYMBOL_GPL(kick_process);
+#endif
+
-+/**
-+ * kthread_bind - bind a just-created kthread to a cpu.
-+ * @p: thread created by kthread_create().
-+ * @cpu: cpu (might not be online, must be possible) for @k to run on.
-+ *
-+ * Description: This function is equivalent to set_cpus_allowed(),
-+ * except that @cpu doesn't need to be online, and the thread must be
-+ * stopped (i.e., just returned from kthread_create()).
-+ *
-+ * Function lives here instead of kthread.c because it messes with
-+ * scheduler internals which require locking.
-+ */
-+void kthread_bind(struct task_struct *p, unsigned int cpu)
-+ {
-+ unsigned long flags;
-+
-+ /* Must have done schedule() in kthread() before we set_task_cpu */
-+ if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
-+ WARN_ON(1);
-+ return;
-+ }
-+
-+ grq_lock_irqsave(&flags);
-+ set_task_cpu(p, cpu);
-+ p->cpus_allowed = cpumask_of_cpu(cpu);
-+ p->flags |= PF_THREAD_BOUND;
-+ grq_unlock_irqrestore(&flags);
-+}
-+EXPORT_SYMBOL(kthread_bind);
-+
+#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT)
+#define task_idle(p) ((p)->prio == PRIO_LIMIT)
+
@@ -1810,12 +1784,11 @@
+ INIT_HLIST_HEAD(&p->preempt_notifiers);
+#endif
+ /*
-+ * We mark the process as running here, but have not actually
-+ * inserted it onto the runqueue yet. This guarantees that
++ * We mark the process as waking here. This guarantees that
+ * nobody will actually run it, and a signal or other external
+ * event cannot wake it up and insert it on the runqueue either.
+ */
-+ p->state = TASK_RUNNING;
++ p->state = TASK_WAKING;
+ set_task_cpu(p, cpu);
+
+ /* Should be reset in fork.c but done here for ease of bfs patching */
@@ -1897,8 +1870,9 @@
+ struct rq *rq;
+
+ rq = task_grq_lock(p, &flags);
++ BUG_ON(p->state != TASK_WAKING);
++ p->state = TASK_RUNNING;
+ parent = p->parent;
-+ BUG_ON(p->state != TASK_RUNNING);
+ /* Unnecessary but small chance that the parent changed cpus */
+ set_task_cpu(p, task_cpu(parent));
+ activate_task(p, rq);
@@ -2122,14 +2096,14 @@
+ */
+ arch_start_context_switch(prev);
+
-+ if (unlikely(!mm)) {
++ if (likely(!mm)) {
+ next->active_mm = oldmm;
+ atomic_inc(&oldmm->mm_count);
+ enter_lazy_tlb(oldmm, next);
+ } else
+ switch_mm(oldmm, mm, next);
+
-+ if (unlikely(!prev->mm)) {
++ if (likely(!prev->mm)) {
+ prev->active_mm = NULL;
+ rq->prev_mm = oldmm;
+ }
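Note that the unlikely() to likely() flips above only change branch-prediction hints, never behaviour: the author evidently expects tasks without an mm (kernel threads) to be the common case at context-switch time here. A userspace illustration of the hint macros, with hypothetical example values:

    #include <stdio.h>

    /* Userspace equivalents of the kernel's likely()/unlikely() hints:
     * they steer branch prediction and code layout only, which is why
     * flipping unlikely(!mm) to likely(!mm) is semantically safe. */
    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    int main(void)
    {
        void *mm = NULL;  /* kernel threads have no mm */

        if (likely(!mm))
            puts("fast path: lazy TLB, borrow the previous mm");
        else
            puts("slow path: real address-space switch");
        return 0;
    }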
@@ -2530,8 +2504,13 @@
+ p->gtime = cputime_add(p->gtime, cputime);
+
+ /* Add guest time to cpustat. */
-+ cpustat->user = cputime64_add(cpustat->user, tmp);
-+ cpustat->guest = cputime64_add(cpustat->guest, tmp);
++ if (TASK_NICE(p) > 0) {
++ cpustat->nice = cputime64_add(cpustat->nice, tmp);
++ cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
++ } else {
++ cpustat->user = cputime64_add(cpustat->user, tmp);
++ cpustat->guest = cputime64_add(cpustat->guest, tmp);
++ }
+}
+
+/*
@@ -3068,8 +3047,12 @@
+ } else
+ grq_unlock_irq();
+
-+ if (unlikely(reacquire_kernel_lock(current) < 0))
++ if (unlikely(reacquire_kernel_lock(current) < 0)) {
++ prev = rq->curr;
++ switch_count = &prev->nivcsw;
+ goto need_resched_nonpreemptible;
++ }
++
+ preempt_enable_no_resched();
+ if (need_resched())
+ goto need_resched;
@@ -3495,14 +3478,15 @@
+ */
+bool try_wait_for_completion(struct completion *x)
+{
++ unsigned long flags;
+ int ret = 1;
+
-+ spin_lock_irq(&x->wait.lock);
++ spin_lock_irqsave(&x->wait.lock, flags);
+ if (!x->done)
+ ret = 0;
+ else
+ x->done--;
-+ spin_unlock_irq(&x->wait.lock);
++ spin_unlock_irqrestore(&x->wait.lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(try_wait_for_completion);
@@ -3517,12 +3501,13 @@
+ */
+bool completion_done(struct completion *x)
+{
++ unsigned long flags;
+ int ret = 1;
+
-+ spin_lock_irq(&x->wait.lock);
++ spin_lock_irqsave(&x->wait.lock, flags);
+ if (!x->done)
+ ret = 0;
-+ spin_unlock_irq(&x->wait.lock);
++ spin_unlock_irqrestore(&x->wait.lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(completion_done);
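Both completion helpers switch from the _irq to the _irqsave lock variants so they can be called with interrupts already disabled: spin_unlock_irq() would re-enable interrupts unconditionally on exit, clobbering the caller's IRQ state. The generic pattern, sketched with made-up names:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(state_lock);
    static int state_ready;

    /* Safe from any context: restores the caller's IRQ state instead
     * of blindly re-enabling interrupts like spin_unlock_irq() would. */
    static bool peek_state_ready(void)
    {
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&state_lock, flags);
        ret = state_ready != 0;
        spin_unlock_irqrestore(&state_lock, flags);
        return ret;
    }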
@@ -3935,7 +3920,7 @@
+ * make sure no PI-waiters arrive (or leave) while we are
+ * changing the priority of the task:
+ */
-+ spin_lock_irqsave(&p->pi_lock, flags);
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ /*
+ * To be able to change p->policy safely, the apropriate
+ * runqueue lock must be held.
@@ -3943,9 +3928,9 @@
+ rq = __task_grq_lock(p);
+ /* recheck policy now with rq lock held */
+ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
-+ __task_grq_unlock();
-+ spin_unlock_irqrestore(&p->pi_lock, flags);
+ policy = oldpolicy = -1;
++ __task_grq_unlock();
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ goto recheck;
+ }
+ update_rq_clock(rq);
@@ -3960,7 +3945,7 @@
+ try_preempt(p, rq);
+ }
+ __task_grq_unlock();
-+ spin_unlock_irqrestore(&p->pi_lock, flags);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
<<Diff was trimmed, longer than 597 lines>>
---- CVS-web:
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop-sched-bfs.patch?r1=1.1.2.15&r2=1.1.2.16&f=u