packages (Titanium): kernel-desktop/kernel-desktop-sched-bfs.patch - up to 209
cactus
cactus at pld-linux.org
Mon Sep 7 20:20:45 CEST 2009
Author: cactus Date: Mon Sep 7 18:20:45 2009 GMT
Module: packages Tag: Titanium
---- Log message:
- up to 209
---- Files affected:
packages/kernel-desktop:
kernel-desktop-sched-bfs.patch (1.1.2.3 -> 1.1.2.4)
---- Diffs:
================================================================
Index: packages/kernel-desktop/kernel-desktop-sched-bfs.patch
diff -u packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.3 packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.4
--- packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.3 Sat Sep 5 22:28:53 2009
+++ packages/kernel-desktop/kernel-desktop-sched-bfs.patch Mon Sep 7 20:20:39 2009
@@ -1,4 +1,4 @@
-The Brain Fuck Scheduler v0.208 by Con Kolivas.
+The Brain Fuck Scheduler v0.209 by Con Kolivas.
A single shared runqueue O(n) strict fairness earliest deadline first design.
@@ -20,30 +20,30 @@
schedtool -I -e amarok
---
- Documentation/sysctl/kernel.txt | 25
- fs/pipe.c | 4
- fs/proc/base.c | 2
- include/linux/init_task.h | 15
- include/linux/ioprio.h | 2
- include/linux/sched.h | 193
- init/Kconfig | 61
- kernel/Makefile | 4
- kernel/delayacct.c | 2
- kernel/exit.c | 6
- kernel/fork.c | 2
- kernel/kthread.c | 4
- kernel/posix-cpu-timers.c | 12
+ Documentation/sysctl/kernel.txt | 25
+ fs/pipe.c | 4
+ fs/proc/base.c | 2
+ include/linux/init_task.h | 15
+ include/linux/ioprio.h | 2
+ include/linux/sched.h | 193
+ init/Kconfig | 61
+ kernel/Makefile | 4
+ kernel/delayacct.c | 2
+ kernel/exit.c | 6
+ kernel/fork.c | 2
+ kernel/kthread.c | 4
+ kernel/posix-cpu-timers.c | 12
kernel/sched.c |10241 ----------------------------------------
- kernel/sched_bfs.c | 5818 ++++++++++++++++++++++
+ kernel/sched_bfs.c | 5793 ++++++++++++++++++++++
kernel/sched_debug.c | 509 -
kernel/sched_fair.c | 1835 -------
- kernel/sched_idletask.c | 128
+ kernel/sched_idletask.c | 128
kernel/sched_rt.c | 1771 ------
- kernel/sysctl.c | 145
- kernel/trace/trace.c | 4
- kernel/workqueue.c | 2
- mm/oom_kill.c | 2
- 23 files changed, 5921 insertions(+), 14866 deletions(-)
+ kernel/sysctl.c | 145
+ kernel/trace/trace.c | 4
+ kernel/workqueue.c | 2
+ mm/oom_kill.c | 2
+ 23 files changed, 5896 insertions(+), 14866 deletions(-)
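The patch header above sums up the design in one line: a single shared runqueue, O(n) lookup, earliest virtual deadline wins. A minimal userspace sketch of that selection rule follows; the structure and field names are illustrative only and are not taken from sched_bfs.c:

/* Toy model of an earliest-deadline-first pick over one shared queue.
 * Illustrative only -- names and layout do not mirror sched_bfs.c. */
#include <stdio.h>
#include <stddef.h>

struct toy_task {
	const char *comm;       /* task name */
	unsigned long deadline; /* virtual deadline, in jiffies */
};

/* O(n) scan of the shared queue: the runnable task with the
 * smallest (earliest) deadline is chosen to run next. */
static struct toy_task *edf_pick(struct toy_task *q, size_t n)
{
	struct toy_task *best = NULL;
	for (size_t i = 0; i < n; i++) {
		if (!best || q[i].deadline < best->deadline)
			best = &q[i];
	}
	return best;
}

int main(void)
{
	struct toy_task queue[] = {
		{ "amarok",  1200 },
		{ "kworker", 1100 },
		{ "make",    1300 },
	};
	struct toy_task *next = edf_pick(queue, 3);

	printf("next: %s (deadline %lu)\n", next->comm, next->deadline);
	return 0;
}

The real scheduler uses wraparound-safe jiffy comparisons and priority ratios when computing deadlines; the sketch only shows the basic "earliest deadline first over one queue" idea.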
Index: linux-2.6.30-bfs/Documentation/sysctl/kernel.txt
===================================================================
@@ -66,9 +66,9 @@
- rtsig-nr
- sem
@@ -170,6 +172,16 @@
-
+
==============================================================
-
+
+iso_cpu:
+
+This sets the percentage cpu that the unprivileged SCHED_ISO tasks can
@@ -80,12 +80,12 @@
+==============================================================
+
l2cr: (PPC only)
-
+
This flag controls the L2 cache of G3 processor boards. If
@@ -322,6 +334,19 @@
-
+
==============================================================
-
+
+rr_interval:
+
+This is the smallest duration that any cpu process scheduling unit
@@ -100,7 +100,7 @@
+==============================================================
+
rtsig-max & rtsig-nr:
-
+
The file rtsig-max can be used to tune the maximum number
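The two tunables documented in the hunks above (iso_cpu and rr_interval) appear under /proc/sys/kernel/ like the rest of this file. A minimal sketch of setting them from userspace, assuming a BFS-patched kernel and root privileges; the values 70 and 6 are only example numbers:

/* Write example values to the BFS sysctls documented above.
 * Needs root and a BFS kernel; 70 and 6 are illustrative values. */
#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* cap unprivileged SCHED_ISO tasks at 70% cpu (example value) */
	write_sysctl("/proc/sys/kernel/iso_cpu", "70");
	/* set the base scheduling interval to 6 ms (example value) */
	write_sysctl("/proc/sys/kernel/rr_interval", "6");
	return 0;
}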
Index: linux-2.6.30-bfs/fs/pipe.c
===================================================================
@@ -109,7 +109,7 @@
@@ -78,10 +78,6 @@
{
DEFINE_WAIT(wait);
-
+
- /*
- * Pipes are system-local resources, so sleeping on them
- * is considered a noninteractive wait:
@@ -120,7 +120,7 @@
Index: linux-2.6.30-bfs/include/linux/init_task.h
===================================================================
--- linux-2.6.30-bfs.orig/include/linux/init_task.h 2009-09-03 19:50:51.802053428 +1000
-+++ linux-2.6.30-bfs/include/linux/init_task.h 2009-09-03 19:51:10.224053762 +1000
++++ linux-2.6.30-bfs/include/linux/init_task.h 2009-09-06 18:56:46.759601885 +1000
@@ -119,21 +119,16 @@
.usage = ATOMIC_INIT(2), \
.flags = PF_KTHREAD, \
@@ -163,12 +163,12 @@
+
+#define SCHED_MAX (SCHED_IDLEPRIO)
+#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX)
-
+
#ifdef __KERNEL__
-
+
@@ -141,13 +144,10 @@
extern unsigned long get_parent_ip(unsigned long addr);
-
+
struct seq_file;
-struct cfs_rq;
struct task_group;
@@ -189,17 +189,17 @@
-{
-}
#endif
-
+
extern unsigned long long time_sync_thresh;
@@ -251,8 +247,8 @@
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
-
+
-extern int runqueue_is_locked(void);
-extern void task_rq_unlock_wait(struct task_struct *p);
+extern int grunqueue_is_locked(void);
+extern void grq_unlock_wait(void);
-
+
extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
@@ -969,151 +965,9 @@
@@ -209,7 +209,7 @@
-
struct rq;
struct sched_domain;
-
+
-struct sched_class {
- const struct sched_class *next;
-
@@ -355,9 +355,9 @@
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
@@ -1123,17 +977,18 @@
-
+
int lock_depth; /* BKL lock depth */
-
+
-#ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
int oncpu;
@@ -378,17 +378,17 @@
+
+ int rt_nr_cpus_allowed;
+ unsigned long rt_timeout;
-
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
@@ -1156,6 +1011,9 @@
-
+
unsigned int policy;
cpumask_t cpus_allowed;
+#ifdef CONFIG_HOTPLUG_CPU
+ cpumask_t unplugged_mask;
+#endif
-
+
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -1446,11 +1304,14 @@
@@ -407,18 +407,18 @@
+#define IDLE_PRIO (MAX_RT_PRIO + 2)
+#define PRIO_LIMIT ((IDLE_PRIO) + 1)
#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
-
+
static inline int rt_prio(int prio)
@@ -1733,11 +1594,7 @@
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
-
+
/* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
-extern void sched_exec(void);
-#else
#define sched_exec() {}
-#endif
-
+
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
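The new IDLE_PRIO and PRIO_LIMIT macros in the sched.h hunks above sit just past the realtime range; the later hunks use PRIO_LIMIT as the idle task's "never really queued" priority. A small sketch of the resulting numbers, assuming the mainline value MAX_RT_PRIO == 100 (an assumption, not shown in this diff):

/* Numeric layout implied by the macros above, assuming the
 * mainline value MAX_RT_PRIO == 100 (not part of this hunk). */
#include <stdio.h>

#define MAX_RT_PRIO   100               /* assumption: mainline default */
#define IDLE_PRIO     (MAX_RT_PRIO + 2) /* from the hunk above */
#define PRIO_LIMIT    ((IDLE_PRIO) + 1)
#define DEFAULT_PRIO  (MAX_RT_PRIO + 20)

int main(void)
{
	/* RT priorities occupy 0..99; IDLE_PRIO sits just past them
	 * and PRIO_LIMIT bounds the whole priority range. */
	printf("IDLE_PRIO=%d PRIO_LIMIT=%d DEFAULT_PRIO=%d\n",
	       IDLE_PRIO, PRIO_LIMIT, DEFAULT_PRIO);
	return 0;
}

With that assumption this prints IDLE_PRIO=102 PRIO_LIMIT=103 DEFAULT_PRIO=120, matching the init_idle change later in the diff where the idle task's prio is set to PRIO_LIMIT.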
Index: linux-2.6.30-bfs/kernel/sched.c
@@ -436,7 +436,7 @@
#endif
@@ -97,10 +99,11 @@
#endif
-
+
static int zero;
-static int __maybe_unused one = 1;
static int __maybe_unused two = 2;
@@ -445,13 +445,13 @@
+static int __read_mostly one = 1;
+static int __read_mostly one_hundred = 100;
+static int __read_mostly five_thousand = 5000;
-
+
/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
@@ -234,123 +237,7 @@
{ .ctl_name = 0 }
};
-
+
-#ifdef CONFIG_SCHED_DEBUG
-static int min_sched_granularity_ns = 100000; /* 100 usecs */
-static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
@@ -608,7 +608,7 @@
@@ -320,8 +320,6 @@
if (cwq->wq->freezeable)
set_freezable();
-
+
- set_user_nice(current, -5);
-
for (;;) {
@@ -617,8 +617,8 @@
Index: linux-2.6.30-bfs/kernel/sched_fair.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.30-bfs/kernel/sched_bfs.c 2009-09-05 14:01:50.817362817 +1000
-@@ -0,0 +1,5818 @@
++++ linux-2.6.30-bfs/kernel/sched_bfs.c 2009-09-06 18:56:58.389602008 +1000
+@@ -0,0 +1,5793 @@
+/*
+ * kernel/sched_bfs.c, was sched.c
+ *
@@ -803,8 +803,6 @@
+#endif
+#endif
+
-+ /* Cached timestamp set by update_cpu_clock() */
-+ unsigned long long most_recent_timestamp;
+ struct task_struct *preempt_next;
+ struct task_struct *curr, *idle;
+ struct mm_struct *prev_mm;
@@ -924,14 +922,9 @@
+# define finish_arch_switch(prev) do { } while (0)
+#endif
+
-+/*
-+ * This will cost if schedstats is enabled since it's done under lock.
-+ */
+static inline void update_rq_clock(struct rq *rq)
+{
-+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+ rq->clock = sched_clock_cpu(cpu_of(rq));
-+#endif
+}
+
+static inline int task_running(struct task_struct *p)
@@ -1226,11 +1219,12 @@
+}
+
+/*
-+ * activate_task - move a task to the runqueue. Enter with grq locked.
++ * activate_task - move a task to the runqueue. Enter with grq locked. The rq
++ * doesn't really matter but gives us the local clock.
+ */
-+static void activate_task(struct task_struct *p)
++static void activate_task(struct task_struct *p, struct rq *rq)
+{
-+ unsigned long long now = sched_clock();
++ u64 now = rq->clock;
+
+ /*
+ * Sleep time is in units of nanosecs, so shift by 20 to get a
@@ -1603,7 +1597,7 @@
+ if (queued_or_running(p))
+ goto out_running;
+
-+ activate_task(p);
++ activate_task(p, rq);
+ try_preempt(p, rq);
+ success = 1;
+
@@ -1686,7 +1680,6 @@
+ } else
+ p->time_slice = 0;
+
-+ p->timestamp = sched_clock();
+ local_irq_enable();
+out:
+ put_cpu();
@@ -1721,7 +1714,7 @@
+ BUG_ON(p->state != TASK_RUNNING);
+ set_task_cpu(p, task_cpu(parent));
+
-+ activate_task(p);
++ activate_task(p, rq);
+ trace_sched_wakeup_new(rq, p, 1);
+ if (!(clone_flags & CLONE_VM) && rq->curr == parent &&
+ no_idle_cpus()) {
@@ -1978,11 +1971,7 @@
+
+unsigned long nr_uninterruptible(void)
+{
-+ unsigned long nu = grq.nr_uninterruptible;
-+
-+ if (unlikely (nu < 0))
-+ nu = 0;
-+ return nu;
++ return grq.nr_uninterruptible;
+}
+
+unsigned long long nr_context_switches(void)
@@ -2019,10 +2008,9 @@
+ * to just returning jiffies, and for hardware that can't do tsc.
+ */
+static void
-+update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now,
-+ int tick)
++update_cpu_clock(struct task_struct *p, struct rq *rq, int tick)
+{
-+ long time_diff = now - p->last_ran;
++ long time_diff = rq->clock - p->last_ran;
+
+ if (tick) {
+ /*
@@ -2047,7 +2035,7 @@
+ if (p != rq->idle && p->policy != SCHED_FIFO)
+ p->time_slice -= time_diff / 1000;
+ p->sched_time += time_diff;
-+ p->last_ran = rq->most_recent_timestamp = now;
++ p->last_ran = rq->clock;
+}
+
+/*
@@ -2062,7 +2050,7 @@
+
+ if (p == rq->curr) {
+ update_rq_clock(rq);
-+ ns = sched_clock() - p->last_ran;
++ ns = rq->clock - p->last_ran;
+ if ((s64)ns < 0)
+ ns = 0;
+ }
@@ -2102,22 +2090,6 @@
+}
+
+/*
-+ * Return current->sched_time plus any more ns on the sched_clock
-+ * that have not yet been banked.
-+ */
-+unsigned long long current_sched_time(const struct task_struct *p)
-+{
-+ unsigned long long ns;
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ ns = p->sched_time + sched_clock() - p->last_ran;
-+ local_irq_restore(flags);
-+
-+ return ns;
-+}
-+
-+/*
+ * Return sum_exec_runtime for the thread group.
+ * In case the task is currently running, return the sum plus current's
+ * pending runtime that have not been accounted yet.
@@ -2375,14 +2347,14 @@
+ */
+void scheduler_tick(void)
+{
-+ unsigned long long now = sched_clock();
+ int cpu = smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+ struct task_struct *p;
+
++ sched_clock_tick();
+ time_lock_rq(rq);
+ p = rq->curr;
-+ update_cpu_clock(p, rq, now, 1);
++ update_cpu_clock(p, rq, 1);
+ if (!rq_idle(rq))
+ task_running_tick(rq, p);
+ else
@@ -2634,9 +2606,9 @@
+{
+ struct task_struct *prev, *next, *idle;
+ int deactivate = 0, cpu;
-+ unsigned long long now;
+ long *switch_count;
+ struct rq *rq;
++ u64 now;
+
+ cpu = smp_processor_id();
+ rq = this_rq();
@@ -2658,10 +2630,10 @@
+ dump_stack();
+ }
+
-+ now = sched_clock();
-+
+ grq_lock_irq();
+ update_rq_clock(rq);
++ now = rq->clock;
++
+ clear_tsk_need_resched(prev);
+
+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
@@ -2695,7 +2667,7 @@
+ prefetch(next);
+ prefetch_stack(next);
+
-+ update_cpu_clock(prev, rq, now, 0);
++ update_cpu_clock(prev, rq, 0);
+ prev->timestamp = prev->last_ran = now;
+ rq->queued_prio = next->prio;
+
@@ -3347,10 +3319,10 @@
+ if (prio <= 0)
+ goto out;
+
-+ delta = (p->deadline - jiffies) * 200 / prio_ratios[39];
-+ if (delta > 80 || delta < 0)
-+ delta = 0;
-+ prio += delta;
++ /* 225 is a fudge to end up giving +80 for lowest possible prio */
++ delta = (p->deadline - jiffies) * 225 / prio_ratios[39];
++ if (delta > 0 && delta <= 80)
++ prio += delta;
+out:
+ return prio;
+}
@@ -3867,8 +3839,7 @@
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * This function yields the current CPU to other tasks. It does this by
-+ * refilling the timeslice, offsetting the deadline by the remaining
-+ * timeslice and scheduling away.
++ * refilling the timeslice, resetting the deadline and scheduling away.
+ */
+SYSCALL_DEFINE0(sched_yield)
+{
@@ -3877,6 +3848,7 @@
+ grq_lock_irq();
+ p = current;
+ schedstat_inc(this_rq(), yld_count);
++ update_rq_clock(task_rq(p));
+ time_slice_expired(p);
+ requeue_task(p);
+
@@ -4172,10 +4144,12 @@
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
-+ idle->timestamp = idle->last_ran = sched_clock();
++ time_grq_lock(rq, &flags);
++ idle->timestamp = idle->last_ran = rq->clock;
+ idle->state = TASK_RUNNING;
++ /* Setting prio to illegal value shouldn't matter when never queued */
++ idle->prio = PRIO_LIMIT;
+ idle->cpus_allowed = cpumask_of_cpu(cpu);
-+ grq_lock_irqsave(&flags);
+ set_task_cpu(idle, cpu);
+ rq->curr = rq->idle = idle;
+ idle->oncpu = 1;
@@ -4396,11 +4370,10 @@
+ * Strictly not necessary since rest of the CPUs are stopped by now
+ * and interrupts disabled on the current cpu.
+ */
-+ grq_lock_irqsave(&flags);
++ time_grq_lock(rq, &flags);
+
+ __setscheduler(idle, SCHED_FIFO, MAX_RT_PRIO - 1);
+
-+ update_rq_clock(rq);
+ activate_idle_task(idle);
+ rq->preempt_next = idle;
+ resched_task(rq->curr);
@@ -4720,6 +4693,8 @@
+ deactivate_task(rq->idle);
+ rq->idle->static_prio = MAX_PRIO;
+ __setscheduler(rq->idle, SCHED_NORMAL, 0);
++ rq->idle->prio = PRIO_LIMIT;
++ update_rq_clock(rq);
+ grq_unlock_irq();
+ cpuset_unlock();
+ break;
@@ -6311,6 +6286,7 @@
+
+ spin_lock_irqsave(&p->pi_lock, flags);
+ rq = __task_grq_lock(p);
++ update_rq_clock(rq);
+
+ queued = task_queued(p);
+ if (queued)
@@ -6329,7 +6305,6 @@
+
+ read_unlock_irq(&tasklist_lock);
+}
-+
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+#ifdef CONFIG_IA64
@@ -6444,7 +6419,7 @@
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
-
+
-obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
+obj-y = sched_bfs.o fork.o exec_domain.o panic.o printk.o \
cpu.o exit.o itimer.o time.o softirq.o resource.o \
@@ -6457,7 +6432,7 @@
-CFLAGS_sched.o := $(PROFILING) -fno-omit-frame-pointer
+CFLAGS_sched_bfs.o := $(PROFILING) -fno-omit-frame-pointer
endif
-
+
$(obj)/configs.o: $(obj)/config_data.h
Index: linux-2.6.30-bfs/kernel/kthread.c
===================================================================
@@ -6466,10 +6441,10 @@
@@ -15,7 +15,7 @@
#include <linux/mutex.h>
#include <trace/sched.h>
-
+
-#define KTHREAD_NICE_LEVEL (-5)
+#define KTHREAD_NICE_LEVEL (0)
-
+
static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
@@ -180,7 +180,7 @@
@@ -6491,7 +6466,7 @@
times->stime = cputime_add(times->stime, t->stime);
- times->sum_exec_runtime += t->se.sum_exec_runtime;
+ times->sum_exec_runtime += t->sched_time;
-
+
t = next_thread(t);
} while (t != tsk);
@@ -516,7 +516,7 @@
@@ -6500,7 +6475,7 @@
cleanup_timers(tsk->cpu_timers,
- tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
+ tsk->utime, tsk->stime, tsk->sched_time);
-
+
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
@@ -1016,7 +1016,7 @@
@@ -6514,7 +6489,7 @@
}
@@ -1032,7 +1032,7 @@
unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
-
+
if (hard != RLIM_INFINITY &&
- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+ tsk->rt_timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
@@ -6537,7 +6512,7 @@
- .sum_exec_runtime = tsk->se.sum_exec_runtime
+ .sum_exec_runtime = tsk->sched_time
};
-
+
if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
<<Diff was trimmed, longer than 597 lines>>
---- CVS-web:
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop-sched-bfs.patch?r1=1.1.2.3&r2=1.1.2.4&f=u