packages (Titanium): kernel-desktop/kernel-desktop.spec, kernel-desktop/ker...
From: lmasko <lmasko at pld-linux.org>
Date: Sun Aug 8 11:53:23 CEST 2010
Author: lmasko Date: Sun Aug 8 09:53:23 2010 GMT
Module: packages Tag: Titanium
---- Log message:
- Updated BFS patch, now applies and compiles.
---- Files affected:
packages/kernel-desktop:
kernel-desktop.spec (1.204.2.117 -> 1.204.2.118) , kernel-desktop-sched-bfs.patch (1.1.2.18 -> 1.1.2.19)
---- Diffs:
================================================================
Index: packages/kernel-desktop/kernel-desktop.spec
diff -u packages/kernel-desktop/kernel-desktop.spec:1.204.2.117 packages/kernel-desktop/kernel-desktop.spec:1.204.2.118
--- packages/kernel-desktop/kernel-desktop.spec:1.204.2.117 Mon Aug 2 11:34:09 2010
+++ packages/kernel-desktop/kernel-desktop.spec Sun Aug 8 11:53:17 2010
@@ -47,7 +47,7 @@
%define _basever 2.6.35
%define _postver %{nil}
-%define _rel 0.31415
+%define _rel 0.314159
%define _enable_debug_packages 0
@@ -985,6 +985,9 @@
All persons listed below can be reached at <cvs_login>@pld-linux.org
$Log$
+Revision 1.204.2.118 2010/08/08 09:53:17 lmasko
+- Updated BFS patch, now applies and compiles.
+
Revision 1.204.2.117 2010/08/02 09:34:09 shadzik
- Linux 2.6.35
- rel π/10
================================================================
Index: packages/kernel-desktop/kernel-desktop-sched-bfs.patch
diff -u packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.18 packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.19
--- packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.18 Sat May 22 11:56:53 2010
+++ packages/kernel-desktop/kernel-desktop-sched-bfs.patch Sun Aug 8 11:53:17 2010
@@ -1,4 +1,4 @@
-The Brain Fuck Scheduler v0.318 by Con Kolivas.
+The Brain Fuck Scheduler v0.323 by Con Kolivas.
A single shared runqueue O(n) strict fairness earliest deadline first design.
@@ -30,7 +30,7 @@
fs/proc/base.c | 2
include/linux/init_task.h | 65
include/linux/ioprio.h | 2
- include/linux/sched.h | 107
+ include/linux/sched.h | 106
init/Kconfig | 17
init/main.c | 1
kernel/delayacct.c | 2
@@ -38,17 +38,17 @@
kernel/kthread.c | 2
kernel/posix-cpu-timers.c | 14
kernel/sched.c | 4
- kernel/sched_bfs.c | 6898 ++++++++++++++++++++++++++++++
+ kernel/sched_bfs.c | 6832 ++++++++++++++++++++++++++++++
kernel/slow-work.c | 1
kernel/sysctl.c | 31
lib/Kconfig.debug | 2
mm/oom_kill.c | 2
- 19 files changed, 7510 insertions(+), 29 deletions(-)
+ 19 files changed, 7444 insertions(+), 28 deletions(-)
-Index: linux-2.6.34-ck1/Documentation/sysctl/kernel.txt
+Index: linux-2.6.35-ck1/Documentation/sysctl/kernel.txt
===================================================================
---- linux-2.6.34-ck1.orig/Documentation/sysctl/kernel.txt 2010-05-18 12:24:34.172444877 +1000
-+++ linux-2.6.34-ck1/Documentation/sysctl/kernel.txt 2010-05-18 12:26:15.769444964 +1000
+--- linux-2.6.35-ck1.orig/Documentation/sysctl/kernel.txt 2010-08-06 19:38:28.838115005 +1000
++++ linux-2.6.35-ck1/Documentation/sysctl/kernel.txt 2010-08-06 19:38:30.313113290 +1000
@@ -31,6 +31,7 @@ show up in /proc/sys/kernel:
- domainname
- hostname
@@ -103,11 +103,11 @@
rtsig-max & rtsig-nr:
The file rtsig-max can be used to tune the maximum number
-Index: linux-2.6.34-ck1/include/linux/init_task.h
+Index: linux-2.6.35-ck1/include/linux/init_task.h
===================================================================
---- linux-2.6.34-ck1.orig/include/linux/init_task.h 2010-05-18 12:24:34.178444440 +1000
-+++ linux-2.6.34-ck1/include/linux/init_task.h 2010-05-18 12:26:15.769444964 +1000
-@@ -107,6 +107,69 @@ extern struct cred init_cred;
+--- linux-2.6.35-ck1.orig/include/linux/init_task.h 2010-08-06 19:38:28.869114968 +1000
++++ linux-2.6.35-ck1/include/linux/init_task.h 2010-08-06 19:38:30.313113290 +1000
+@@ -106,6 +106,69 @@ extern struct cred init_cred;
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
@@ -186,10 +186,10 @@
#define INIT_CPU_TIMERS(cpu_timers) \
{ \
-Index: linux-2.6.34-ck1/include/linux/sched.h
+Index: linux-2.6.35-ck1/include/linux/sched.h
===================================================================
---- linux-2.6.34-ck1.orig/include/linux/sched.h 2010-05-18 12:24:34.191445024 +1000
-+++ linux-2.6.34-ck1/include/linux/sched.h 2010-05-18 12:26:15.769444964 +1000
+--- linux-2.6.35-ck1.orig/include/linux/sched.h 2010-08-06 19:38:28.859114980 +1000
++++ linux-2.6.35-ck1/include/linux/sched.h 2010-08-06 19:38:43.767097655 +1000
@@ -36,8 +36,15 @@
#define SCHED_FIFO 1
#define SCHED_RR 2
@@ -207,17 +207,16 @@
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000
-@@ -268,9 +275,6 @@ extern asmlinkage void schedule_tail(str
+@@ -268,8 +275,6 @@ extern asmlinkage void schedule_tail(str
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
-extern int runqueue_is_locked(int cpu);
--extern void task_rq_unlock_wait(struct task_struct *p);
-
extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
-@@ -1176,17 +1180,31 @@ struct task_struct {
+@@ -1179,17 +1184,31 @@ struct task_struct {
int lock_depth; /* BKL lock depth */
@@ -249,17 +248,7 @@
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
-@@ -1208,6 +1226,9 @@ struct task_struct {
-
- unsigned int policy;
- cpumask_t cpus_allowed;
-+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_SCHED_BFS)
-+ cpumask_t unplugged_mask;
-+#endif
-
- #ifdef CONFIG_TREE_PREEMPT_RCU
- int rcu_read_lock_nesting;
-@@ -1287,6 +1308,9 @@ struct task_struct {
+@@ -1284,6 +1303,9 @@ struct task_struct {
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
cputime_t utime, stime, utimescaled, stimescaled;
@@ -269,7 +258,7 @@
cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
cputime_t prev_utime, prev_stime;
-@@ -1507,6 +1531,64 @@ struct task_struct {
+@@ -1506,6 +1528,67 @@ struct task_struct {
#endif
};
@@ -278,7 +267,6 @@
+extern void grq_unlock_wait(void);
+#define tsk_seruntime(t) ((t)->sched_time)
+#define tsk_rttimeout(t) ((t)->rt_timeout)
-+#define task_rq_unlock_wait(tsk) grq_unlock_wait()
+
+static inline void set_oom_timeslice(struct task_struct *p)
+{
@@ -293,16 +281,16 @@
+
+static inline void print_scheduler_version(void)
+{
-+ printk(KERN_INFO"BFS CPU scheduler v0.318 by Con Kolivas.\n");
++ printk(KERN_INFO"BFS CPU scheduler v0.323 by Con Kolivas.\n");
+}
+
+static inline int iso_task(struct task_struct *p)
+{
+ return (p->policy == SCHED_ISO);
+}
-+#else
++extern void remove_cpu(unsigned long cpu);
++#else /* CFS */
+extern int runqueue_is_locked(int cpu);
-+extern void task_rq_unlock_wait(struct task_struct *p);
+#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
+#define tsk_rttimeout(t) ((t)->rt.timeout)
+
@@ -329,12 +317,16 @@
+{
+ return 0;
+}
-+#endif
++
++static inline void remove_cpu(unsigned long cpu)
++{
++}
++#endif /* CONFIG_SCHED_BFS */
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-@@ -1525,9 +1607,19 @@ struct task_struct {
+@@ -1524,9 +1607,19 @@ struct task_struct {
#define MAX_USER_RT_PRIO 100
#define MAX_RT_PRIO MAX_USER_RT_PRIO
@@ -355,7 +347,7 @@
static inline int rt_prio(int prio)
{
-@@ -1836,7 +1928,7 @@ task_sched_runtime(struct task_struct *t
+@@ -1835,7 +1928,7 @@ task_sched_runtime(struct task_struct *t
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
@@ -364,7 +356,7 @@
extern void sched_exec(void);
#else
#define sched_exec() {}
-@@ -1999,6 +2091,9 @@ extern void wake_up_new_task(struct task
+@@ -1999,6 +2092,9 @@ extern void wake_up_new_task(struct task
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(struct task_struct *p, int clone_flags);
@@ -374,11 +366,11 @@
extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
-Index: linux-2.6.34-ck1/kernel/sysctl.c
+Index: linux-2.6.35-ck1/kernel/sysctl.c
===================================================================
---- linux-2.6.34-ck1.orig/kernel/sysctl.c 2010-05-18 12:24:34.224444706 +1000
-+++ linux-2.6.34-ck1/kernel/sysctl.c 2010-05-18 12:26:15.770444796 +1000
-@@ -113,7 +113,12 @@ static int zero;
+--- linux-2.6.35-ck1.orig/kernel/sysctl.c 2010-08-06 19:38:28.922114906 +1000
++++ linux-2.6.35-ck1/kernel/sysctl.c 2010-08-06 19:38:30.315113288 +1000
+@@ -115,7 +115,12 @@ static int zero;
static int __maybe_unused one = 1;
static int __maybe_unused two = 2;
static unsigned long one_ul = 1;
@@ -392,7 +384,7 @@
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
-@@ -229,7 +234,7 @@ static struct ctl_table root_table[] = {
+@@ -252,7 +257,7 @@ static struct ctl_table root_table[] = {
{ }
};
@@ -401,7 +393,7 @@
static int min_sched_granularity_ns = 100000; /* 100 usecs */
static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_wakeup_granularity_ns; /* 0 usecs */
-@@ -241,6 +246,7 @@ static int max_sched_shares_ratelimit =
+@@ -269,6 +274,7 @@ static int max_extfrag_threshold = 1000;
#endif
static struct ctl_table kern_table[] = {
@@ -409,7 +401,7 @@
{
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
-@@ -354,6 +360,7 @@ static struct ctl_table kern_table[] = {
+@@ -382,6 +388,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
@@ -417,7 +409,7 @@
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
-@@ -751,6 +758,26 @@ static struct ctl_table kern_table[] = {
+@@ -779,6 +786,26 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
@@ -444,11 +436,11 @@
#if defined(CONFIG_S390) && defined(CONFIG_SMP)
{
.procname = "spin_retry",
-Index: linux-2.6.34-ck1/kernel/sched_bfs.c
+Index: linux-2.6.35-ck1/kernel/sched_bfs.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.34-ck1/kernel/sched_bfs.c 2010-05-18 12:26:15.774197412 +1000
-@@ -0,0 +1,6898 @@
++++ linux-2.6.35-ck1/kernel/sched_bfs.c 2010-08-06 19:38:30.320113282 +1000
+@@ -0,0 +1,6832 @@
+/*
+ * kernel/sched_bfs.c, was sched.c
+ *
@@ -508,7 +500,6 @@
+#include <linux/cpuset.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
-+#include <linux/kthread.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
@@ -620,9 +611,11 @@
+struct rq {
+#ifdef CONFIG_SMP
+#ifdef CONFIG_NO_HZ
++ u64 nohz_stamp;
+ unsigned char in_nohz_recently;
+#endif
+#endif
++ unsigned int skip_clock_update;
+
+ struct task_struct *curr, *idle;
+ struct mm_struct *prev_mm;
@@ -633,6 +626,7 @@
+ int rq_time_slice;
+ u64 rq_last_ran;
+ int rq_prio;
++ int rq_running; /* There is a task running */
+
+ /* Accurate timekeeping data */
+ u64 timekeep_clock;
@@ -784,7 +778,8 @@
+ */
+inline void update_rq_clock(struct rq *rq)
+{
-+ rq->clock = sched_clock_cpu(cpu_of(rq));
++ if (!rq->skip_clock_update)
++ rq->clock = sched_clock_cpu(cpu_of(rq));
+}
+
+static inline int task_running(struct task_struct *p)
@@ -1321,15 +1316,6 @@
+#ifdef CONFIG_SMP
+void set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
-+#ifdef CONFIG_SCHED_DEBUG
-+ /*
-+ * We should never call set_task_cpu() on a blocked task,
-+ * ttwu() will sort out the placement.
-+ */
-+ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
-+ !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
-+#endif
-+
+ trace_sched_migrate_task(p, cpu);
+ if (task_cpu(p) != cpu)
+ perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
@@ -1422,58 +1408,11 @@
+
+#ifdef CONFIG_SMP
+struct migration_req {
-+ struct list_head list;
-+
+ struct task_struct *task;
+ int dest_cpu;
-+
-+ struct completion done;
+};
+
+/*
-+ * wait_task_context_switch - wait for a thread to complete at least one
-+ * context switch.
-+ *
-+ * @p must not be current.
-+ */
-+void wait_task_context_switch(struct task_struct *p)
-+{
-+ unsigned long nvcsw, nivcsw, flags;
-+ int running;
-+ struct rq *rq;
-+
-+ nvcsw = p->nvcsw;
-+ nivcsw = p->nivcsw;
-+ for (;;) {
-+ /*
-+ * The runqueue is assigned before the actual context
-+ * switch. We need to take the runqueue lock.
-+ *
-+ * We could check initially without the lock but it is
-+ * very likely that we need to take the lock in every
-+ * iteration.
-+ */
-+ rq = task_grq_lock(p, &flags);
-+ running = task_running(p);
-+ task_grq_unlock(&flags);
-+
-+ if (likely(!running))
-+ break;
-+ /*
-+ * The switch count is incremented before the actual
-+ * context switch. We thus wait for two switches to be
-+ * sure at least one completed.
-+ */
-+ if ((p->nvcsw - nvcsw) > 1)
-+ break;
-+ if ((p->nivcsw - nivcsw) > 1)
-+ break;
-+
-+ cpu_relax();
-+ }
-+}
-+
-+/*
+ * wait_task_inactive - wait for a thread to unschedule.
+ *
+ * If @match_state is nonzero, it's the @p->state value just checked and
@@ -1535,7 +1474,7 @@
+ * just go back and repeat.
+ */
+ rq = task_grq_lock(p, &flags);
-+ trace_sched_wait_task(rq, p);
++ trace_sched_wait_task(p);
+ running = task_running(p);
+ on_rq = task_queued(p);
+ ncsw = 0;
@@ -1614,6 +1553,24 @@
+#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT)
+#define task_idle(p) ((p)->prio == PRIO_LIMIT)
+
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Check to see if there is a task that is affined only to offline CPUs but
++ * still wants runtime. This happens to kernel threads during suspend/halt and
++ * disabling of CPUs.
++ */
++static inline int online_cpus(struct task_struct *p)
++{
++ return (likely(cpus_intersects(cpu_online_map, p->cpus_allowed)));
++}
++#else /* CONFIG_HOTPLUG_CPU */
++/* All available CPUs are always online without hotplug. */
++static inline int online_cpus(struct task_struct *p)
++{
++ return 1;
++}
++#endif
++
+/*
+ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
+ * basis of earlier deadlines. SCHED_BATCH, ISO and IDLEPRIO don't preempt
@@ -1635,7 +1592,11 @@
+ return;
+ }
+
-+ cpus_and(tmp, cpu_online_map, p->cpus_allowed);
++ if (online_cpus(p))
++ cpus_and(tmp, cpu_online_map, p->cpus_allowed);
++ else
++ (cpumask_copy(&tmp, &cpu_online_map));
++
+ latest_deadline = 0;
+ highest_prio = -1;
+
@@ -1663,11 +1624,11 @@
+
+ if (p->prio > highest_prio || (p->prio == highest_prio &&
+ p->policy == SCHED_NORMAL && !time_before(p->deadline, latest_deadline)))
-+ return;
++ return;
+
+ /* p gets to preempt highest_prio_rq->curr */
+ resched_task(highest_prio_rq->curr);
-+ return;
++ highest_prio_rq->skip_clock_update = 1;
+}
+#else /* CONFIG_SMP */
+static void try_preempt(struct task_struct *p, struct rq *this_rq)
@@ -1676,7 +1637,6 @@
+ (p->prio == uprq->rq_prio && p->policy == SCHED_NORMAL &&
+ time_before(p->deadline, uprq->rq_deadline)))
+ resched_task(uprq->curr);
-+ return;
+}
+#endif /* CONFIG_SMP */
+
@@ -1754,7 +1714,7 @@
+ success = 1;
+
+out_running:
-+ trace_sched_wakeup(rq, p, success);
++ trace_sched_wakeup(p, success);
+ p->state = TASK_RUNNING;
+out_unlock:
+ task_grq_unlock(&flags);
@@ -1798,11 +1758,11 @@
+ INIT_HLIST_HEAD(&p->preempt_notifiers);
+#endif
+ /*
-+ * We mark the process as waking here. This guarantees that
++ * We mark the process as running here. This guarantees that
+ * nobody will actually run it, and a signal or other external
+ * event cannot wake it up and insert it on the runqueue either.
+ */
-+ p->state = TASK_WAKING;
++ p->state = TASK_RUNNING;
+ set_task_cpu(p, cpu);
+
+ /* Should be reset in fork.c but done here for ease of bfs patching */
@@ -1883,15 +1843,13 @@
+ unsigned long flags;
+ struct rq *rq;
+
-+ get_cpu();
+ rq = task_grq_lock(p, &flags); ;
-+ BUG_ON(p->state != TASK_WAKING);
+ p->state = TASK_RUNNING;
+ parent = p->parent;
-+ /* Unnecessary but small chance that the parent changed cpus */
++ /* Unnecessary but small chance that the parent changed CPU */
+ set_task_cpu(p, task_cpu(parent));
+ activate_task(p, rq);
-+ trace_sched_wakeup_new(rq, p, 1);
++ trace_sched_wakeup_new(p, 1);
+ if (!(clone_flags & CLONE_VM) && rq->curr == parent &&
+ !suitable_idle_cpus(p)) {
+ /*
@@ -1903,7 +1861,6 @@
+ } else
+ try_preempt(p, rq);
+ task_grq_unlock(&flags);
-+ put_cpu();
+}
+
+/*
@@ -2073,7 +2030,7 @@
+ /*
+ * Remove function-return probe instances associated with this
+ * task and put them back on the free list.
-+ */
++ */
+ kprobe_flush_task(prev);
+ put_task_struct(prev);
+ }
@@ -2108,7 +2065,7 @@
+ struct mm_struct *mm, *oldmm;
+
+ prepare_task_switch(rq, prev, next);
-+ trace_sched_switch(rq, prev, next);
++ trace_sched_switch(prev, next);
+ mm = next->mm;
+ oldmm = prev->active_mm;
+ /*
@@ -2198,9 +2155,9 @@
+ return sum;
+}
+
-+unsigned long nr_iowait_cpu(void)
++unsigned long nr_iowait_cpu(int cpu)
+{
-+ struct rq *this = this_rq();
++ struct rq *this = cpu_rq(cpu);
+ return atomic_read(&this->nr_iowait);
+}
+
@@ -2209,10 +2166,12 @@
+ return nr_running() + nr_uninterruptible();
+}
+
-+/* Fudge this on BFS since load is equal everywhere */
++/* Beyond a task running on this CPU, load is equal everywhere on BFS */
+unsigned long this_cpu_load(void)
+{
-+ return nr_active() / num_online_cpus();
++ return this_rq()->rq_running +
++ (queued_notrunning() + nr_uninterruptible()) /
++ (1 + num_online_cpus());
+}
+
+/* Variables and functions for calc_load */
@@ -2626,15 +2585,16 @@
+/*
+ * Test if SCHED_ISO tasks have run longer than their alloted period as RT
+ * tasks and set the refractory flag if necessary. There is 10% hysteresis
-+ * for unsetting the flag.
++ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a
++ * slow division.
+ */
+static unsigned int test_ret_isorefractory(struct rq *rq)
+{
+ if (likely(!grq.iso_refractory)) {
-+ if (grq.iso_ticks / ISO_PERIOD > sched_iso_cpu)
++ if (grq.iso_ticks > ISO_PERIOD * sched_iso_cpu)
+ set_iso_refractory();
+ } else {
-+ if (grq.iso_ticks / ISO_PERIOD < (sched_iso_cpu * 90 / 100))
++ if (grq.iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128))
+ clear_iso_refractory();
+ }
+ return grq.iso_refractory;
@@ -2653,8 +2613,8 @@
+ if (grq.iso_ticks) {
+ grq_lock();
+ grq.iso_ticks -= grq.iso_ticks / ISO_PERIOD + 1;
-+ if (unlikely(grq.iso_refractory && grq.iso_ticks /
-+ ISO_PERIOD < (sched_iso_cpu * 90 / 100)))
++ if (unlikely(grq.iso_refractory && grq.iso_ticks <
++ ISO_PERIOD * (sched_iso_cpu * 115 / 128)))
+ clear_iso_refractory();
+ grq_unlock();
+ }
@@ -2871,7 +2831,7 @@
+ queue = grq.queue + idx;
+ list_for_each_entry(p, queue, run_list) {
+ /* Make sure cpu affinity is ok */
-+ if (!cpu_isset(cpu, p->cpus_allowed))
++ if (online_cpus(p) && !cpu_isset(cpu, p->cpus_allowed))
+ continue;
+ if (idx < MAX_RT_PRIO) {
+ /* We found an rt task */
@@ -2969,6 +2929,10 @@
+ rq->rq_last_ran = p->last_ran;
+ rq->rq_policy = p->policy;
+ rq->rq_prio = p->prio;
<<Diff was trimmed, longer than 597 lines>>
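The patch header above describes BFS as "a single shared runqueue O(n) strict fairness earliest deadline first design". As a rough illustration of that selection rule only (this is not the actual sched_bfs.c code, which also handles CPU affinity, the SCHED_ISO/IDLEPRIO classes and jiffies wraparound), a minimal user-space sketch could look like the following; struct demo_task, pick_next and the sample values are invented for the example:

/*
 * Hypothetical, simplified illustration of earliest-deadline-first
 * selection over one shared queue, scanned in a single O(n) pass.
 * Not the kernel implementation from sched_bfs.c.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_task {
	const char *name;
	int prio;               /* lower value = higher priority */
	unsigned long deadline; /* "virtual deadline", arbitrary time units */
};

/* One linear scan over all runnable tasks: O(n). */
static struct demo_task *pick_next(struct demo_task *queue, size_t n)
{
	struct demo_task *best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		struct demo_task *p = &queue[i];

		if (!best ||
		    p->prio < best->prio ||            /* higher priority wins */
		    (p->prio == best->prio &&
		     p->deadline < best->deadline))    /* then earliest deadline wins */
			best = p;
	}
	return best;
}

int main(void)
{
	struct demo_task queue[] = {
		{ "editor",   120, 400 },
		{ "compiler", 120, 250 }, /* same priority, earlier deadline: runs first */
		{ "batchjob", 139, 100 }, /* lower priority, deadline is irrelevant */
	};
	struct demo_task *next = pick_next(queue, sizeof(queue) / sizeof(queue[0]));

	printf("next task: %s\n", next->name); /* prints "compiler" */
	return 0;
}

In the real patch the scan walks the grq.queue buckets, skips tasks whose CPU affinity does not match (see the earliest-deadline loop near the end of the quoted diff) and compares deadlines with time_before() so they survive wraparound; the sketch only shows that selection is a single linear pass over one shared queue rather than a per-CPU structure.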
---- CVS-web:
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop.spec?r1=1.204.2.117&r2=1.204.2.118&f=u
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop-sched-bfs.patch?r1=1.1.2.18&r2=1.1.2.19&f=u