packages (Titanium): kernel-desktop/kernel-desktop-sched-bfs.patch, kernel-...
cactus
cactus at pld-linux.org
Sat Sep 5 01:24:20 CEST 2009
Author: cactus Date: Fri Sep 4 23:24:20 2009 GMT
Module: packages Tag: Titanium
---- Log message:
- rel 4; bfs - 206
---- Files affected:
packages/kernel-desktop:
kernel-desktop-sched-bfs.patch (1.1.2.1 -> 1.1.2.2) , kernel-desktop.spec (1.204.2.54 -> 1.204.2.55)
---- Diffs:
================================================================
Index: packages/kernel-desktop/kernel-desktop-sched-bfs.patch
diff -u packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.1 packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.2
--- packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.1 Fri Sep 4 00:49:55 2009
+++ packages/kernel-desktop/kernel-desktop-sched-bfs.patch Sat Sep 5 01:24:14 2009
@@ -1,4 +1,4 @@
-The Brain Fuck Scheduler v0.204 by Con Kolivas.
+The Brain Fuck Scheduler v0.206 by Con Kolivas.
A single shared runqueue O(n) strict fairness earliest deadline first design.
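That one-line description is the whole design: every runnable task sits on a single shared runqueue, and task selection is an O(n) scan for the earliest virtual deadline. Below is a minimal userspace sketch of that selection rule only, assuming nothing from the patch itself -- all names here (struct demo_task, pick_earliest_deadline) are invented for illustration, and real BFS tracks per-task deadlines in its own units:

/*
 * Illustrative sketch, not BFS source: one shared list of runnable
 * tasks, scanned linearly for the smallest virtual deadline.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_task {
	const char *name;
	unsigned long long deadline;	/* virtual deadline, lower = sooner */
	struct demo_task *next;		/* link on the single shared runqueue */
};

/* O(n) earliest-deadline-first pick over the shared runqueue. */
static struct demo_task *pick_earliest_deadline(struct demo_task *head)
{
	struct demo_task *best = head;
	struct demo_task *p;

	for (p = head; p; p = p->next)
		if (p->deadline < best->deadline)
			best = p;
	return best;
}

int main(void)
{
	struct demo_task c = { "c", 300, NULL };
	struct demo_task b = { "b", 100, &c };
	struct demo_task a = { "a", 200, &b };

	printf("next: %s\n", pick_earliest_deadline(&a)->name);	/* "b" */
	return 0;
}

The trade-off implied by the one-liner: no per-CPU runqueues or balancing machinery to maintain, at the cost of a linear scan of one shared queue, which stays cheap for desktop-scale task counts.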
@@ -34,7 +34,7 @@
kernel/kthread.c | 4
kernel/posix-cpu-timers.c | 12
kernel/sched.c |10241 ----------------------------------------
- kernel/sched_bfs.c | 5837 ++++++++++++++++++++++
+ kernel/sched_bfs.c | 5817 ++++++++++++++++++++++
kernel/sched_debug.c | 509 -
kernel/sched_fair.c | 1835 -------
kernel/sched_idletask.c | 128
@@ -43,7 +43,7 @@
kernel/trace/trace.c | 4
kernel/workqueue.c | 2
mm/oom_kill.c | 2
- 23 files changed, 5940 insertions(+), 14866 deletions(-)
+ 23 files changed, 5920 insertions(+), 14866 deletions(-)
Index: linux-2.6.30-bfs/Documentation/sysctl/kernel.txt
===================================================================
@@ -423,7 +423,6 @@
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
Index: linux-2.6.30-bfs/kernel/sched.c
===================================================================
-===================================================================
--- linux-2.6.30-bfs.orig/kernel/sysctl.c 2009-09-03 19:50:51.867053380 +1000
+++ linux-2.6.30-bfs/kernel/sysctl.c 2009-09-03 19:51:10.311054330 +1000
@@ -83,6 +83,8 @@
@@ -556,2044 +555,70 @@
- },
- {
- .ctl_name = CTL_UNNUMBERED,
-- .procname = "sched_rt_runtime_us",
-- .data = &sysctl_sched_rt_runtime,
-- .maxlen = sizeof(int),
-- .mode = 0644,
-- .proc_handler = &sched_rt_handler,
-- },
-- {
-- .ctl_name = CTL_UNNUMBERED,
-- .procname = "sched_compat_yield",
-- .data = &sysctl_sched_compat_yield,
-- .maxlen = sizeof(unsigned int),
-- .mode = 0644,
-- .proc_handler = &proc_dointvec,
-- },
- #ifdef CONFIG_PROVE_LOCKING
- {
- .ctl_name = CTL_UNNUMBERED,
-@@ -756,6 +643,28 @@
- .proc_handler = &proc_dointvec,
- },
- #endif
-+ {
-+ .ctl_name = CTL_UNNUMBERED,
-+ .procname = "rr_interval",
-+ .data = &rr_interval,
-+ .maxlen = sizeof (int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .strategy = &sysctl_intvec,
-+ .extra1 = &one,
-+ .extra2 = &five_thousand,
-+ },
-+ {
-+ .ctl_name = CTL_UNNUMBERED,
-+ .procname = "iso_cpu",
-+ .data = &sched_iso_cpu,
-+ .maxlen = sizeof (int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .strategy = &sysctl_intvec,
-+ .extra1 = &zero,
-+ .extra2 = &one_hundred,
-+ },
- #if defined(CONFIG_S390) && defined(CONFIG_SMP)
- {
- .ctl_name = KERN_SPIN_RETRY,
-Index: linux-2.6.30-bfs/kernel/workqueue.c
-===================================================================
---- linux-2.6.30-bfs.orig/kernel/workqueue.c 2009-09-03 19:50:51.895053538 +1000
-+++ linux-2.6.30-bfs/kernel/workqueue.c 2009-09-03 19:51:10.366061008 +1000
-@@ -320,8 +320,6 @@
- if (cwq->wq->freezeable)
- set_freezable();
-
-- set_user_nice(current, -5);
--
- for (;;) {
- prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
- if (!freezing(current) &&
-Index: linux-2.6.30-bfs/kernel/sched_fair.c
-===================================================================
---- linux-2.6.30-bfs.orig/kernel/sched_fair.c 2009-09-03 19:50:51.837054274 +1000
-+++ /dev/null 1970-01-01 00:00:00.000000000 +0000
-@@ -1,1835 +0,0 @@
--/*
-- * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
-- *
-- * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo at redhat.com>
-- *
-- * Interactivity improvements by Mike Galbraith
-- * (C) 2007 Mike Galbraith <efault at gmx.de>
-- *
-- * Various enhancements by Dmitry Adamushko.
-- * (C) 2007 Dmitry Adamushko <dmitry.adamushko at gmail.com>
-- *
-- * Group scheduling enhancements by Srivatsa Vaddagiri
-- * Copyright IBM Corporation, 2007
-- * Author: Srivatsa Vaddagiri <vatsa at linux.vnet.ibm.com>
-- *
-- * Scaled math optimizations by Thomas Gleixner
-- * Copyright (C) 2007, Thomas Gleixner <tglx at linutronix.de>
-- *
-- * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
-- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr at redhat.com>
-- */
--
--#include <linux/latencytop.h>
--
--/*
-- * Targeted preemption latency for CPU-bound tasks:
-- * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
-- *
-- * NOTE: this latency value is not the same as the concept of
-- * 'timeslice length' - timeslices in CFS are of variable length
-- * and have no persistent notion like in traditional, time-slice
-- * based scheduling concepts.
-- *
-- * (to see the precise effective timeslice length of your workload,
-- * run vmstat and monitor the context-switches (cs) field)
-- */
--unsigned int sysctl_sched_latency = 20000000ULL;
--
--/*
-- * Minimal preemption granularity for CPU-bound tasks:
-- * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
-- */
--unsigned int sysctl_sched_min_granularity = 4000000ULL;
--
--/*
-- * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
-- */
--static unsigned int sched_nr_latency = 5;
--
--/*
-- * After fork, child runs first. (default) If set to 0 then
-- * parent will (try to) run first.
-- */
--const_debug unsigned int sysctl_sched_child_runs_first = 1;
--
--/*
-- * sys_sched_yield() compat mode
-- *
-- * This option switches the agressive yield implementation of the
-- * old scheduler back on.
-- */
--unsigned int __read_mostly sysctl_sched_compat_yield;
--
--/*
-- * SCHED_OTHER wake-up granularity.
-- * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
-- *
-- * This option delays the preemption effects of decoupled workloads
-- * and reduces their over-scheduling. Synchronous workloads will still
-- * have immediate wakeup/sleep latencies.
-- */
--unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
--
--const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
--
--static const struct sched_class fair_sched_class;
--
--/**************************************************************
-- * CFS operations on generic schedulable entities:
-- */
--
--static inline struct task_struct *task_of(struct sched_entity *se)
--{
-- return container_of(se, struct task_struct, se);
--}
--
--#ifdef CONFIG_FAIR_GROUP_SCHED
--
--/* cpu runqueue to which this cfs_rq is attached */
--static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
--{
-- return cfs_rq->rq;
--}
--
--/* An entity is a task if it doesn't "own" a runqueue */
--#define entity_is_task(se) (!se->my_q)
--
--/* Walk up scheduling entities hierarchy */
--#define for_each_sched_entity(se) \
-- for (; se; se = se->parent)
--
--static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
--{
-- return p->se.cfs_rq;
--}
--
--/* runqueue on which this entity is (to be) queued */
--static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
--{
-- return se->cfs_rq;
--}
--
--/* runqueue "owned" by this group */
--static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
--{
-- return grp->my_q;
--}
--
--/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
-- * another cpu ('this_cpu')
-- */
--static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
--{
-- return cfs_rq->tg->cfs_rq[this_cpu];
--}
--
--/* Iterate thr' all leaf cfs_rq's on a runqueue */
--#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-- list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
--
--/* Do the two (enqueued) entities belong to the same group ? */
--static inline int
--is_same_group(struct sched_entity *se, struct sched_entity *pse)
--{
-- if (se->cfs_rq == pse->cfs_rq)
-- return 1;
--
-- return 0;
--}
--
--static inline struct sched_entity *parent_entity(struct sched_entity *se)
--{
-- return se->parent;
--}
--
--/* return depth at which a sched entity is present in the hierarchy */
--static inline int depth_se(struct sched_entity *se)
--{
-- int depth = 0;
--
-- for_each_sched_entity(se)
-- depth++;
--
-- return depth;
--}
--
--static void
--find_matching_se(struct sched_entity **se, struct sched_entity **pse)
--{
-- int se_depth, pse_depth;
--
-- /*
-- * preemption test can be made between sibling entities who are in the
-- * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
-- * both tasks until we find their ancestors who are siblings of common
-- * parent.
-- */
--
-- /* First walk up until both entities are at same depth */
-- se_depth = depth_se(*se);
-- pse_depth = depth_se(*pse);
--
-- while (se_depth > pse_depth) {
-- se_depth--;
-- *se = parent_entity(*se);
-- }
--
-- while (pse_depth > se_depth) {
-- pse_depth--;
-- *pse = parent_entity(*pse);
-- }
--
-- while (!is_same_group(*se, *pse)) {
-- *se = parent_entity(*se);
-- *pse = parent_entity(*pse);
-- }
--}
--
--#else /* CONFIG_FAIR_GROUP_SCHED */
--
--static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
--{
-- return container_of(cfs_rq, struct rq, cfs);
--}
--
--#define entity_is_task(se) 1
--
--#define for_each_sched_entity(se) \
-- for (; se; se = NULL)
--
--static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
--{
-- return &task_rq(p)->cfs;
--}
--
--static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
--{
-- struct task_struct *p = task_of(se);
-- struct rq *rq = task_rq(p);
--
-- return &rq->cfs;
--}
--
--/* runqueue "owned" by this group */
--static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
--{
-- return NULL;
--}
--
--static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
--{
-- return &cpu_rq(this_cpu)->cfs;
--}
--
--#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-- for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
--
--static inline int
--is_same_group(struct sched_entity *se, struct sched_entity *pse)
--{
-- return 1;
--}
--
--static inline struct sched_entity *parent_entity(struct sched_entity *se)
--{
-- return NULL;
--}
--
--static inline void
--find_matching_se(struct sched_entity **se, struct sched_entity **pse)
--{
--}
--
--#endif /* CONFIG_FAIR_GROUP_SCHED */
--
--
--/**************************************************************
-- * Scheduling class tree data structure manipulation methods:
-- */
--
--static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
--{
-- s64 delta = (s64)(vruntime - min_vruntime);
-- if (delta > 0)
-- min_vruntime = vruntime;
--
-- return min_vruntime;
--}
--
--static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
--{
-- s64 delta = (s64)(vruntime - min_vruntime);
-- if (delta < 0)
-- min_vruntime = vruntime;
--
-- return min_vruntime;
--}
--
--static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
--{
-- return se->vruntime - cfs_rq->min_vruntime;
--}
--
--static void update_min_vruntime(struct cfs_rq *cfs_rq)
--{
-- u64 vruntime = cfs_rq->min_vruntime;
--
-- if (cfs_rq->curr)
-- vruntime = cfs_rq->curr->vruntime;
--
-- if (cfs_rq->rb_leftmost) {
-- struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
-- struct sched_entity,
-- run_node);
--
-- if (!cfs_rq->curr)
-- vruntime = se->vruntime;
-- else
-- vruntime = min_vruntime(vruntime, se->vruntime);
-- }
--
-- cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
--}
--
--/*
-- * Enqueue an entity into the rb-tree:
-- */
--static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
--{
-- struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
-- struct rb_node *parent = NULL;
-- struct sched_entity *entry;
-- s64 key = entity_key(cfs_rq, se);
-- int leftmost = 1;
--
-- /*
-- * Find the right place in the rbtree:
-- */
-- while (*link) {
-- parent = *link;
-- entry = rb_entry(parent, struct sched_entity, run_node);
-- /*
-- * We dont care about collisions. Nodes with
-- * the same key stay together.
-- */
-- if (key < entity_key(cfs_rq, entry)) {
-- link = &parent->rb_left;
-- } else {
-- link = &parent->rb_right;
-- leftmost = 0;
-- }
-- }
--
-- /*
-- * Maintain a cache of leftmost tree entries (it is frequently
-- * used):
-- */
-- if (leftmost)
-- cfs_rq->rb_leftmost = &se->run_node;
--
-- rb_link_node(&se->run_node, parent, link);
-- rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
--}
--
--static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
--{
-- if (cfs_rq->rb_leftmost == &se->run_node) {
-- struct rb_node *next_node;
--
-- next_node = rb_next(&se->run_node);
-- cfs_rq->rb_leftmost = next_node;
-- }
--
-- rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
--}
--
--static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
--{
-- struct rb_node *left = cfs_rq->rb_leftmost;
--
-- if (!left)
-- return NULL;
--
-- return rb_entry(left, struct sched_entity, run_node);
--}
--
--static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
--{
-- struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
--
-- if (!last)
-- return NULL;
--
-- return rb_entry(last, struct sched_entity, run_node);
--}
--
--/**************************************************************
-- * Scheduling class statistics methods:
-- */
--
--#ifdef CONFIG_SCHED_DEBUG
--int sched_nr_latency_handler(struct ctl_table *table, int write,
-- struct file *filp, void __user *buffer, size_t *lenp,
-- loff_t *ppos)
--{
-- int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
--
-- if (ret || !write)
-- return ret;
--
-- sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
-- sysctl_sched_min_granularity);
--
-- return 0;
--}
--#endif
--
--/*
-- * delta /= w
-- */
--static inline unsigned long
--calc_delta_fair(unsigned long delta, struct sched_entity *se)
--{
-- if (unlikely(se->load.weight != NICE_0_LOAD))
-- delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
--
-- return delta;
--}
--
--/*
-- * The idea is to set a period in which each task runs once.
-- *
-- * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
-- * this period because otherwise the slices get too small.
-- *
-- * p = (nr <= nl) ? l : l*nr/nl
-- */
--static u64 __sched_period(unsigned long nr_running)
--{
-- u64 period = sysctl_sched_latency;
-- unsigned long nr_latency = sched_nr_latency;
--
-- if (unlikely(nr_running > nr_latency)) {
-- period = sysctl_sched_min_granularity;
-- period *= nr_running;
-- }
--
-- return period;
--}
--
--/*
-- * We calculate the wall-time slice from the period by taking a part
-- * proportional to the weight.
-- *
-- * s = p*P[w/rw]
-- */
--static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
--{
-- u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
--
-- for_each_sched_entity(se) {
-- struct load_weight *load;
--
-- cfs_rq = cfs_rq_of(se);
-- load = &cfs_rq->load;
--
-- if (unlikely(!se->on_rq)) {
-- struct load_weight lw = cfs_rq->load;
--
-- update_load_add(&lw, se->load.weight);
-- load = &lw;
-- }
-- slice = calc_delta_mine(slice, se->load.weight, load);
-- }
-- return slice;
--}
--
--/*
-- * We calculate the vruntime slice of a to be inserted task
-- *
-- * vs = s/w
-- */
--static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
--{
-- return calc_delta_fair(sched_slice(cfs_rq, se), se);
--}
--
--/*
-- * Update the current task's runtime statistics. Skip current tasks that
-- * are not in our scheduling class.
-- */
--static inline void
--__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
-- unsigned long delta_exec)
--{
-- unsigned long delta_exec_weighted;
--
-- schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
--
-- curr->sum_exec_runtime += delta_exec;
-- schedstat_add(cfs_rq, exec_clock, delta_exec);
-- delta_exec_weighted = calc_delta_fair(delta_exec, curr);
-- curr->vruntime += delta_exec_weighted;
-- update_min_vruntime(cfs_rq);
--}
--
--static void update_curr(struct cfs_rq *cfs_rq)
--{
-- struct sched_entity *curr = cfs_rq->curr;
-- u64 now = rq_of(cfs_rq)->clock;
-- unsigned long delta_exec;
--
-- if (unlikely(!curr))
-- return;
--
-- /*
-- * Get the amount of time the current task was running
-- * since the last time we changed load (this cannot
-- * overflow on 32 bits):
-- */
-- delta_exec = (unsigned long)(now - curr->exec_start);
-- if (!delta_exec)
<<Diff was trimmed, longer than 597 lines>>
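A note on the sysctl hunk above: the patch wires two BFS tunables into kernel/sysctl.c, rr_interval (clamped by proc_dointvec_minmax to 1..5000 through .extra1/.extra2) and iso_cpu (0..100). With .procname set as shown they surface as /proc/sys/kernel/rr_interval and /proc/sys/kernel/iso_cpu on a kernel carrying this patch. A small userspace sketch, assuming a BFS-patched kernel (BFS documents rr_interval in milliseconds):

/* Reads the rr_interval tunable exposed by the sysctl hunk above. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/rr_interval", "r");
	int rr;

	if (!f) {
		perror("/proc/sys/kernel/rr_interval");
		return 1;
	}
	if (fscanf(f, "%d", &rr) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("rr_interval = %d ms\n", rr);
	return 0;
}

Writes go through the same handler, so an in-range value written as root is accepted while anything outside the .extra1/.extra2 bounds is rejected by the min/max strategy.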
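One idiom worth noting in the CFS code the patch deletes (the max_vruntime()/min_vruntime() pair shown above): u64 virtual runtimes are compared through a signed 64-bit delta, which keeps the ordering correct even after the unsigned counters wrap around. A standalone restatement, illustrative only and not patch code:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the deleted kernel helper: wraparound-safe u64 max. */
static uint64_t max_vruntime(uint64_t min_vruntime, uint64_t vruntime)
{
	int64_t delta = (int64_t)(vruntime - min_vruntime);

	if (delta > 0)	/* vruntime is "ahead", even across wraparound */
		min_vruntime = vruntime;
	return min_vruntime;
}

int main(void)
{
	/* near-wraparound: 2^64 - 10 vs 5; 5 is "later" by 15 units */
	uint64_t a = UINT64_MAX - 9, b = 5;

	printf("%llu\n", (unsigned long long)max_vruntime(a, b));	/* 5 */
	return 0;
}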
---- CVS-web:
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop-sched-bfs.patch?r1=1.1.2.1&r2=1.1.2.2&f=u
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop.spec?r1=1.204.2.54&r2=1.204.2.55&f=u