packages (Titanium): kernel-desktop/kernel-desktop.spec, kernel-desktop/ker...
cactus
cactus at pld-linux.org
Tue Sep 22 02:44:27 CEST 2009
Author: cactus Date: Tue Sep 22 00:44:27 2009 GMT
Module: packages Tag: Titanium
---- Log message:
- rel 1.2; bfs up to 230
---- Files affected:
packages/kernel-desktop:
kernel-desktop.spec (1.204.2.67 -> 1.204.2.68) , kernel-desktop-sched-bfs.patch (1.1.2.6 -> 1.1.2.7)
---- Diffs:
================================================================
Index: packages/kernel-desktop/kernel-desktop.spec
diff -u packages/kernel-desktop/kernel-desktop.spec:1.204.2.67 packages/kernel-desktop/kernel-desktop.spec:1.204.2.68
--- packages/kernel-desktop/kernel-desktop.spec:1.204.2.67 Thu Sep 17 13:34:29 2009
+++ packages/kernel-desktop/kernel-desktop.spec Tue Sep 22 02:44:22 2009
@@ -46,7 +46,7 @@
%define _basever 2.6.31
%define _postver %{nil}
-%define _rel 1.1
+%define _rel 1.2
%define _enable_debug_packages 0
@@ -1085,6 +1085,9 @@
All persons listed below can be reached at <cvs_login>@pld-linux.org
$Log$
+Revision 1.204.2.68 2009/09/22 00:44:22 cactus
+- rel 1.2; bfs up to 230
+
Revision 1.204.2.67 2009/09/17 11:34:29 cactus
- rel 1.1; bfs up to 222
================================================================
Index: packages/kernel-desktop/kernel-desktop-sched-bfs.patch
diff -u packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.6 packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.7
--- packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.6 Thu Sep 17 13:34:29 2009
+++ packages/kernel-desktop/kernel-desktop-sched-bfs.patch Tue Sep 22 02:44:22 2009
@@ -1,4 +1,4 @@
-The Brain Fuck Scheduler v0.222 by Con Kolivas.
+The Brain Fuck Scheduler v0.230 by Con Kolivas.
A single shared runqueue O(n) strict fairness earliest deadline first design.
@@ -21,7 +21,6 @@
Now includes accurate sub-tick accounting of tasks so userspace reported
cpu usage may be very different.
-
---
Documentation/sysctl/kernel.txt | 26
Makefile | 2
@@ -39,7 +38,23 @@
kernel/kthread.c | 4
kernel/posix-cpu-timers.c | 14
kernel/sched.c |10583 ----------------------------------------
- kernel/sched_bfs.c | 6152 +++++++++++++++++++++++
+ Documentation/sysctl/kernel.txt | 26
+ Makefile | 2
+ fs/pipe.c | 4
+ fs/proc/base.c | 2
+ include/linux/init_task.h | 15
+ include/linux/ioprio.h | 2
+ include/linux/sched.h | 194
+ init/Kconfig | 61
+ kernel/Kconfig.preempt | 19
+ kernel/Makefile | 4
+ kernel/delayacct.c | 2
+ kernel/exit.c | 6
+ kernel/fork.c | 2
+ kernel/kthread.c | 4
+ kernel/posix-cpu-timers.c | 14
+ kernel/sched.c |10583 ----------------------------------------
+ kernel/sched_bfs.c | 6104 +++++++++++++++++++++++
kernel/sched_debug.c | 509 -
kernel/sched_fair.c | 1842 ------
kernel/sched_idletask.c | 129
@@ -49,12 +64,12 @@
kernel/trace/trace.c | 4
kernel/workqueue.c | 2
mm/oom_kill.c | 2
- 26 files changed, 6261 insertions(+), 15265 deletions(-)
+ 26 files changed, 6213 insertions(+), 15265 deletions(-)
Index: linux-2.6.31-bfs/Documentation/sysctl/kernel.txt
===================================================================
---- linux-2.6.31-bfs.orig/Documentation/sysctl/kernel.txt 2009-09-16 15:18:59.336781941 +1000
-+++ linux-2.6.31-bfs/Documentation/sysctl/kernel.txt 2009-09-17 09:35:47.864326713 +1000
+--- linux-2.6.31-bfs.orig/Documentation/sysctl/kernel.txt 2009-09-10 11:43:10.000000000 +1000
++++ linux-2.6.31-bfs/Documentation/sysctl/kernel.txt 2009-09-19 16:44:41.750240511 +1000
@@ -27,6 +27,7 @@ show up in /proc/sys/kernel:
- domainname
- hostname
@@ -111,8 +126,8 @@
The file rtsig-max can be used to tune the maximum number
Index: linux-2.6.31-bfs/fs/pipe.c
===================================================================
---- linux-2.6.31-bfs.orig/fs/pipe.c 2009-09-16 15:18:59.344779398 +1000
-+++ linux-2.6.31-bfs/fs/pipe.c 2009-09-16 15:19:58.187533968 +1000
+--- linux-2.6.31-bfs.orig/fs/pipe.c 2009-09-10 11:45:24.000000000 +1000
++++ linux-2.6.31-bfs/fs/pipe.c 2009-09-19 16:44:41.751239255 +1000
@@ -78,10 +78,6 @@ void pipe_wait(struct pipe_inode_info *p
{
DEFINE_WAIT(wait);
@@ -126,8 +141,8 @@
schedule();
Index: linux-2.6.31-bfs/include/linux/init_task.h
===================================================================
---- linux-2.6.31-bfs.orig/include/linux/init_task.h 2009-09-16 15:18:59.432778948 +1000
-+++ linux-2.6.31-bfs/include/linux/init_task.h 2009-09-16 15:19:58.188533048 +1000
+--- linux-2.6.31-bfs.orig/include/linux/init_task.h 2009-09-10 11:45:32.000000000 +1000
++++ linux-2.6.31-bfs/include/linux/init_task.h 2009-09-19 16:44:41.784239187 +1000
@@ -116,21 +116,16 @@ extern struct cred init_cred;
.usage = ATOMIC_INIT(2), \
.flags = PF_KTHREAD, \
@@ -157,8 +172,8 @@
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
Index: linux-2.6.31-bfs/include/linux/sched.h
===================================================================
---- linux-2.6.31-bfs.orig/include/linux/sched.h 2009-09-16 15:18:59.442778755 +1000
-+++ linux-2.6.31-bfs/include/linux/sched.h 2009-09-16 15:19:58.189534363 +1000
+--- linux-2.6.31-bfs.orig/include/linux/sched.h 2009-09-10 11:45:35.000000000 +1000
++++ linux-2.6.31-bfs/include/linux/sched.h 2009-09-19 16:44:41.787239190 +1000
@@ -36,8 +36,11 @@
#define SCHED_FIFO 1
#define SCHED_RR 2
@@ -435,8 +450,8 @@
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
Index: linux-2.6.31-bfs/kernel/sched.c
===================================================================
---- linux-2.6.31-bfs.orig/kernel/sysctl.c 2009-09-16 15:18:59.418785545 +1000
-+++ linux-2.6.31-bfs/kernel/sysctl.c 2009-09-17 09:35:47.873127695 +1000
+--- linux-2.6.31-bfs.orig/kernel/sysctl.c 2009-09-10 11:45:40.000000000 +1000
++++ linux-2.6.31-bfs/kernel/sysctl.c 2009-09-19 16:44:41.797239567 +1000
@@ -86,6 +86,8 @@ extern int percpu_pagelist_fraction;
extern int compat_log;
extern int latencytop_enabled;
@@ -626,8 +641,8 @@
.ctl_name = KERN_SPIN_RETRY,
Index: linux-2.6.31-bfs/kernel/workqueue.c
===================================================================
---- linux-2.6.31-bfs.orig/kernel/workqueue.c 2009-09-16 15:18:59.422785635 +1000
-+++ linux-2.6.31-bfs/kernel/workqueue.c 2009-09-16 15:19:58.199528584 +1000
+--- linux-2.6.31-bfs.orig/kernel/workqueue.c 2009-09-10 11:45:41.000000000 +1000
++++ linux-2.6.31-bfs/kernel/workqueue.c 2009-09-19 16:44:41.809251974 +1000
@@ -317,8 +317,6 @@ static int worker_thread(void *__cwq)
if (cwq->wq->freezeable)
set_freezable();
@@ -640,8 +655,8 @@
Index: linux-2.6.31-bfs/kernel/sched_fair.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.31-bfs/kernel/sched_bfs.c 2009-09-17 09:35:47.847128014 +1000
-@@ -0,0 +1,6152 @@
++++ linux-2.6.31-bfs/kernel/sched_bfs.c 2009-09-19 16:46:17.843365208 +1000
+@@ -0,0 +1,6104 @@
+/*
+ * kernel/sched_bfs.c, was sched.c
+ *
@@ -808,6 +823,7 @@
+ unsigned long iso_ticks;
+ unsigned short iso_refractory;
+#ifdef CONFIG_SMP
++ unsigned long qnr; /* queued not running */
+ cpumask_t cpu_idle_map;
+#endif
+};
@@ -827,6 +843,7 @@
+
+ struct task_struct *curr, *idle;
+ struct mm_struct *prev_mm;
++ struct list_head queue; /* Place to store currently running task */
+
+ /* Stored data about rq->curr to work outside grq lock */
+ unsigned long rq_deadline;
@@ -836,12 +853,8 @@
+
+ /* Accurate timekeeping data */
+ u64 timekeep_clock;
-+ unsigned long user_ns, nice_ns, irq_ns, softirq_ns, system_ns,
-+ iowait_ns, idle_ns;
+ unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc,
+ iowait_pc, idle_pc;
-+ unsigned long total_ns, last_total_ns;
-+
+ atomic_t nr_iowait;
+
+ int cpu; /* cpu of this runqueue */
@@ -987,8 +1000,8 @@
+static inline void time_lock_grq(struct rq *rq)
+ __acquires(grq.lock)
+{
-+ update_rq_clock(rq);
+ grq_lock();
++ update_rq_clock(rq);
+}
+
+static inline void grq_unlock_irq(void)
@@ -1000,15 +1013,14 @@
+static inline void grq_lock_irqsave(unsigned long *flags)
+ __acquires(grq.lock)
+{
-+ local_irq_save(*flags);
-+ grq_lock();
++ smp_mb();
++ spin_lock_irqsave(&grq.lock, *flags);
+}
+
+static inline void grq_unlock_irqrestore(unsigned long *flags)
+ __releases(grq.lock)
+{
-+ spin_unlock(&grq.lock);
-+ local_irq_restore(*flags);
++ spin_unlock_irqrestore(&grq.lock, *flags);
+}
+
+static inline struct rq
@@ -1023,12 +1035,8 @@
+*time_task_grq_lock(struct task_struct *p, unsigned long *flags)
+ __acquires(grq.lock)
+{
-+ struct rq *rq;
-+
-+ rq = task_rq(p);
-+ local_irq_save(*flags);
++ struct rq *rq = task_grq_lock(p, flags);
+ update_rq_clock(rq);
-+ grq_lock();
+ return rq;
+}
+
@@ -1060,8 +1068,8 @@
+static inline void time_grq_lock(struct rq *rq, unsigned long *flags)
+ __acquires(grq.lock)
+{
-+ local_irq_save(*flags);
-+ time_lock_grq(rq);
++ spin_lock_irqsave(&grq.lock, *flags);
++ update_rq_clock(rq);
+}
+
+static inline struct rq *__task_grq_lock(struct task_struct *p)
@@ -1121,9 +1129,10 @@
+/*
+ * A task that is queued will be on the grq run list.
+ * A task that is not running or queued will not be on the grq run list.
-+ * A task that is currently running will have ->oncpu set.
-+ * The only time a task will be both queued and running by these definitions
-+ * is during schedule, and all under grq_lock so it should never be seen.
++ * A task that is currently running will have ->oncpu set and be queued
++ * temporarily in its own rq queue.
++ * A task that is running and no longer queued will be seen only on
++ * context switch exit.
+ */
+
+static inline int task_queued(struct task_struct *p)
@@ -1131,6 +1140,11 @@
+ return (!list_empty(&p->run_list));
+}
+
++static inline int task_queued_only(struct task_struct *p)
++{
++ return (!list_empty(&p->run_list) && !task_running(p));
++}
++
+/*
+ * Removing from the global runqueue. Enter with grq locked.
+ */
@@ -1189,7 +1203,7 @@
+ sched_info_queued(p);
+}
+
-+static inline int prio_ratio(struct task_struct *p)
++static inline int pratio(struct task_struct *p)
+{
+ return prio_ratios[TASK_USER_PRIO(p)];
+}
@@ -1201,9 +1215,39 @@
+ */
+static inline int task_timeslice(struct task_struct *p)
+{
-+ return (rr_interval * prio_ratio(p) / 100);
++ return (rr_interval * pratio(p) / 100);
+}
+
++#ifdef CONFIG_SMP
++static inline void inc_qnr(void)
++{
++ grq.qnr++;
++}
++
++static inline void dec_qnr(void)
++{
++ grq.qnr--;
++}
++
++static inline int queued_notrunning(void)
++{
++ return grq.qnr;
++}
++#else
++static inline void inc_qnr(void)
++{
++}
++
++static inline void dec_qnr(void)
++{
++}
++
++static inline int queued_notrunning(void)
++{
++ return grq.nr_running;
++}
++#endif
++
+/*
+ * activate_idle_task - move idle task to the _front_ of runqueue.
+ */
@@ -1211,6 +1255,7 @@
+{
+ enqueue_task_head(p);
+ grq.nr_running++;
++ inc_qnr();
+}
+
+static inline int normal_prio(struct task_struct *p)
@@ -1268,12 +1313,12 @@
+ grq.nr_uninterruptible--;
+ enqueue_task(p);
+ grq.nr_running++;
++ inc_qnr();
+}
+
+/*
+ * deactivate_task - If it's running, it's not on the grq and we can just
-+ * decrement the nr_running. Otherwise we have to dequeue it. Enter with grq
-+ * locked.
++ * decrement the nr_running.
+ */
+static inline void deactivate_task(struct task_struct *p)
+{
@@ -1304,6 +1349,8 @@
+{
+ set_task_cpu(p, rq->cpu);
+ dequeue_task(p);
++ list_add(&p->run_list, &rq->queue);
++ dec_qnr();
+}
+
+/*
@@ -1312,10 +1359,13 @@
+ */
+static inline void return_task(struct task_struct *p, int deactivate)
+{
++ list_del_init(&p->run_list);
+ if (deactivate)
+ deactivate_task(p);
-+ else
++ else {
++ inc_qnr();
+ enqueue_task(p);
++ }
+}
+
+/*
@@ -1562,12 +1612,11 @@
+ */
+static inline int task_preempts_curr(struct task_struct *p, struct rq *rq)
+{
-+ struct task_struct *curr = rq->curr;
+ int preempts = 0;
+
-+ if (p->prio < curr->prio)
++ if (p->prio < rq->rq_prio)
+ preempts = 1;
-+ else if (p->policy == SCHED_NORMAL && (p->prio == curr->prio &&
++ else if (p->policy == SCHED_NORMAL && (p->prio == rq->rq_prio &&
+ time_before(p->deadline, rq->rq_deadline)))
+ preempts = 1;
+ return preempts;
@@ -1649,14 +1698,14 @@
+}
+
+#ifdef CONFIG_SMP
-+static int no_idle_cpus(void)
++static int suitable_idle_cpus(struct task_struct *p)
+{
-+ return (cpus_empty(grq.cpu_idle_map));
++ return (cpus_intersects(p->cpus_allowed, grq.cpu_idle_map));
+}
+#else
-+static int no_idle_cpus(void)
++static int suitable_idle_cpus(struct task_struct *p)
+{
-+ return 1;
++ return 0;
+}
+#endif
+
@@ -1686,7 +1735,12 @@
+ if (!(old_state & state))
+ goto out_unlock;
+
-+ if (task_queued(p) || task_running(p))
++ /*
++ * Note this catches tasks that are running and queued, but returns
++ * false during the context switch when they're running and no
++ * longer queued.
++ */
++ if (task_queued(p))
+ goto out_running;
+
+ activate_task(p, rq);
@@ -1696,7 +1750,7 @@
+ * don't trigger a preemption if there are no idle cpus,
+ * instead waiting for current to deschedule.
+ */
-+ if (!sync || (sync && !no_idle_cpus()))
++ if (!sync || (sync && suitable_idle_cpus(p)))
+ try_preempt(p);
+ success = 1;
+
@@ -1737,6 +1791,7 @@
+void sched_fork(struct task_struct *p, int clone_flags)
+{
+ int cpu = get_cpu();
++ struct rq *rq;
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+ INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -1775,23 +1830,21 @@
+ /*
+ * Share the timeslice between parent and child, thus the
+ * total amount of pending timeslices in the system doesn't change,
-+ * resulting in more scheduling fairness.
++ * resulting in more scheduling fairness. If it's negative, it won't
++ * matter since that's the same as being 0. current's time_slice is
++ * actually in rq_time_slice when it's running.
+ */
+ local_irq_disable();
-+ if (current->time_slice > 0) {
-+ current->time_slice /= 2;
-+ if (current->time_slice)
-+ p->time_slice = current->time_slice;
-+ else
-+ p->time_slice = 1;
++ rq = task_rq(current);
++ if (likely(rq->rq_time_slice > 0)) {
++ rq->rq_time_slice /= 2;
+ /*
+ * The remainder of the first timeslice might be recovered by
+ * the parent if the child exits early enough.
+ */
+ p->first_time_slice = 1;
-+ } else
-+ p->time_slice = 0;
-+
++ }
++ p->time_slice = rq->rq_time_slice;
+ local_irq_enable();
+out:
+ put_cpu();
@@ -1806,23 +1859,24 @@
+ */
+void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
+{
-+ struct task_struct *parent = p->parent;
++ struct task_struct *parent;
+ unsigned long flags;
+ struct rq *rq;
+
++ rq = time_task_grq_lock(p, &flags);
++ parent = p->parent;
+ BUG_ON(p->state != TASK_RUNNING);
-+
-+ rq = time_task_grq_lock(p, &flags);
++ set_task_cpu(p, task_cpu(parent));
+ activate_task(p, rq);
+ trace_sched_wakeup_new(rq, p, 1);
+ if (!(clone_flags & CLONE_VM) && rq->curr == parent &&
-+ no_idle_cpus()) {
++ !suitable_idle_cpus(p)) {
+ /*
+ * The VM isn't cloned, so we're in a good position to
+ * do child-runs-first in anticipation of an exec. This
+ * usually avoids a lot of COW overhead.
+ */
-+ set_tsk_need_resched(parent);
++ resched_task(parent);
+ } else
+ try_preempt(p);
+ task_grq_unlock(&flags);
@@ -2158,133 +2212,104 @@
+
+EXPORT_PER_CPU_SYMBOL(kstat);
+
-+/* Add the nanoseconds for each component of load */
-+static void ns_idle_time(struct rq *rq, long ns)
++/*
++ * On each tick, see what percentage of that tick was attributed to each
++ * component and add the percentage to the _pc values. Once a _pc value has
++ * accumulated one tick's worth, account for that. This means the total
++ * percentage of load components will always be 100 per tick.
++ */
++static void pc_idle_time(struct rq *rq, unsigned long pc)
+{
-+ if (atomic_read(&rq->nr_iowait) > 0)
-+ rq->iowait_ns += ns;
-+ else
-+ rq->idle_ns += ns;
++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++ cputime64_t tmp = cputime_to_cputime64(jiffies_to_cputime(1));
++
++ if (atomic_read(&rq->nr_iowait) > 0) {
++ rq->iowait_pc += pc;
++ if (rq->iowait_pc >= 100) {
++ rq->iowait_pc %= 100;
++ cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
++ }
++ } else {
++ rq->idle_pc += pc;
++ if (rq->idle_pc >= 100) {
++ rq->idle_pc %= 100;
++ cpustat->idle = cputime64_add(cpustat->idle, tmp);
++ }
++ }
+}
+
-+/*
-+ * Add accounting for each task as well, but we won't know the length of the
-+ * current tick in advance, so store the length of the old one and use that.
-+ */
+static void
-+ns_system_time(struct rq *rq, struct task_struct *p, int hardirq_offset,
-+ long ns)
++pc_system_time(struct rq *rq, struct task_struct *p, int hardirq_offset,
++ unsigned long pc, unsigned long ns)
+{
-+ p->stime_pc += ns * 100 / rq->last_total_ns;
-+ if (p->stime_pc >= 100) {
-+ cputime_t one_jiffy = jiffies_to_cputime(1);
-+ cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++ cputime_t one_jiffy = jiffies_to_cputime(1);
++ cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
++ cputime64_t tmp = cputime_to_cputime64(one_jiffy);
+
++ p->stime_pc += pc;
++ if (p->stime_pc >= 100) {
+ p->stime_pc -= 100;
+ p->stime = cputime_add(p->stime, one_jiffy);
+ p->stimescaled = cputime_add(p->stimescaled, one_jiffy_scaled);
+ account_group_system_time(p, one_jiffy);
++ acct_update_integrals(p);
+ }
++ p->sched_time += ns;
++
+ if (hardirq_count() - hardirq_offset)
-+ rq->irq_ns += ns;
-+ else if (softirq_count())
-+ rq->softirq_ns += ns;
-+ else
-+ rq->system_ns += ns;
++ rq->irq_pc += pc;
++ else if (softirq_count()) {
++ rq->softirq_pc += pc;
++ if (rq->softirq_pc >= 100) {
++ rq->softirq_pc %= 100;
++ cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
++ }
++ } else {
++ rq->system_pc += pc;
++ if (rq->system_pc >= 100) {
++ rq->system_pc %= 100;
++ cpustat->system = cputime64_add(cpustat->system, tmp);
++ }
++ }
+}
+
-+static void ns_user_time(struct rq *rq, struct task_struct *p, long ns)
++static void pc_user_time(struct rq *rq, struct task_struct *p,
++ unsigned long pc, unsigned long ns)
+{
-+ p->utime_pc += ns * 100 / rq->last_total_ns;
-+ if (p->utime_pc >= 100) {
-+ cputime_t one_jiffy = jiffies_to_cputime(1);
-+ cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
++ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++ cputime_t one_jiffy = jiffies_to_cputime(1);
++ cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
++ cputime64_t tmp = cputime_to_cputime64(one_jiffy);
+
++ p->utime_pc += pc;
++ if (p->utime_pc >= 100) {
+ p->utime_pc -= 100;
+ p->utime = cputime_add(p->utime, one_jiffy);
+ p->utimescaled = cputime_add(p->utimescaled, one_jiffy_scaled);
+ account_group_user_time(p, one_jiffy);
++ acct_update_integrals(p);
+ }
-+ if (TASK_NICE(p) > 0 || idleprio_task(p))
-+ rq->nice_ns += ns;
-+ else
-+ rq->user_ns += ns;
-+}
++ p->sched_time += ns;
<<Diff was trimmed, longer than 597 lines>>
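
The headline change between bfs 222 and 230 in the hunks above is the new
"queued not running" (qnr) counter: on SMP, a task that is picked to run is
parked on a small per-runqueue list instead of staying on the global run
list, and grq.qnr counts only tasks that are queued but not running, so an
idle CPU can test for pending work without touching the queue itself. A
minimal, self-contained sketch of that bookkeeping follows; it is an
illustration, not the kernel code, and struct global_rq here is a
simplified stand-in for the real grq:

#include <stdio.h>

struct global_rq {
	unsigned long nr_running;   /* all runnable tasks */
	unsigned long qnr;          /* queued but not currently running */
};

static struct global_rq grq;

/* a wakeup queues the task: runnable and not yet running */
static void activate_task(void)    { grq.nr_running++; grq.qnr++; }

/* a CPU takes the task to run it: still runnable, no longer "queued only" */
static void take_task(void)        { grq.qnr--; }

/* preempted but still runnable: it goes back on the queue */
static void return_task(void)      { grq.qnr++; }

/* what an idle CPU checks before bothering to reschedule */
static int queued_notrunning(void) { return grq.qnr != 0; }

int main(void)
{
	activate_task();                              /* wakeup */
	printf("pending: %d\n", queued_notrunning()); /* 1: work available */
	take_task();                                  /* a CPU picks it up */
	printf("pending: %d\n", queued_notrunning()); /* 0: nothing queued */
	return_task();                                /* preempted, requeued */
	printf("pending: %d\n", queued_notrunning()); /* 1 again */
	return 0;
}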
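
Also visible above, task_preempts_curr() now tests against the cached
rq->rq_prio and rq->rq_deadline snapshot rather than dereferencing
rq->curr, whose fields are only stable under the grq lock. The test itself
is the usual earliest-virtual-deadline-first rule. A simplified sketch,
with stand-in types, a plain "<" where the kernel uses the wrap-safe
time_before(), and the SCHED_NORMAL policy check omitted:

#include <stdbool.h>
#include <stdio.h>

struct rq_snap { int rq_prio; unsigned long rq_deadline; };
struct task    { int prio; unsigned long deadline; };

static bool task_preempts_curr(const struct task *p, const struct rq_snap *rq)
{
	if (p->prio < rq->rq_prio)  /* lower value means better priority */
		return true;
	/* equal priority: the earlier virtual deadline wins (EDF) */
	return p->prio == rq->rq_prio && p->deadline < rq->rq_deadline;
}

int main(void)
{
	struct rq_snap cur = { .rq_prio = 120, .rq_deadline = 500 };
	struct task p      = { .prio = 120, .deadline = 300 };

	printf("preempts: %d\n", task_preempts_curr(&p, &cur)); /* 1 */
	return 0;
}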
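
The sched_fork() hunk moves the timeslice split from current->time_slice
to the runqueue's cached rq_time_slice, since a running task's remaining
slice lives in the runqueue snapshot while it is on a CPU. The fairness
idea is unchanged: parent and child split the remaining slice, so fork
does not mint new CPU time. A toy illustration under those assumptions:

#include <stdio.h>

/* split the remaining timeslice between parent and child */
static long fork_split(long *parent_slice)
{
	if (*parent_slice > 0)
		*parent_slice /= 2;  /* parent keeps half... */
	return *parent_slice;        /* ...and the child gets the same */
}

int main(void)
{
	long parent = 6;             /* ms of slice remaining */
	long child = fork_split(&parent);

	printf("parent=%ld child=%ld\n", parent, child); /* 3 and 3 */
	return 0;
}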
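
Finally, the accounting rework replaces the nanosecond accumulators
(user_ns, idle_ns and friends) with per-tick percentage counters (user_pc,
idle_pc, ...): each tick, the percentage of that tick spent in a state is
added to the matching _pc counter, and whenever a counter reaches 100 one
whole tick is charged and the remainder carried forward, so the charged
load components always sum to 100 per tick. A hypothetical,
self-contained sketch of just the carry logic, with the kstat/cputime
plumbing omitted:

#include <stdio.h>

static unsigned long idle_pc;    /* carried percentage, 0..99 */
static unsigned long idle_ticks; /* whole ticks charged as idle */

/* pc = percentage of the current tick spent idle (0..100) */
static void pc_idle_time(unsigned long pc)
{
	idle_pc += pc;
	if (idle_pc >= 100) {
		idle_pc %= 100;  /* keep the remainder for the next tick */
		idle_ticks++;    /* charge one full idle tick */
	}
}

int main(void)
{
	/* ticks that were 40%, 40% and 30% idle add up to one whole
	 * idle tick, with 10% carried over */
	pc_idle_time(40);
	pc_idle_time(40);
	pc_idle_time(30);
	printf("idle ticks=%lu carry=%lu%%\n", idle_ticks, idle_pc);
	return 0;
}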
---- CVS-web:
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop.spec?r1=1.204.2.67&r2=1.204.2.68&f=u
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop-sched-bfs.patch?r1=1.1.2.6&r2=1.1.2.7&f=u