SOURCES: kernel-desktop-ck.patch - up to patch-2.6.20-ck1

sparky <sparky at pld-linux.org>
Wed Mar 28 00:46:56 CEST 2007


Author: sparky                       Date: Tue Mar 27 22:46:56 2007 GMT
Module: SOURCES                       Tag: HEAD
---- Log message:
- up to patch-2.6.20-ck1

---- Files affected:
SOURCES:
   kernel-desktop-ck.patch (1.6 -> 1.7) 

---- Diffs:

================================================================
Index: SOURCES/kernel-desktop-ck.patch
diff -u SOURCES/kernel-desktop-ck.patch:1.6 SOURCES/kernel-desktop-ck.patch:1.7
--- SOURCES/kernel-desktop-ck.patch:1.6	Tue Nov 21 17:59:49 2006
+++ SOURCES/kernel-desktop-ck.patch	Wed Mar 28 00:46:51 2007
@@ -1,9 +1,9 @@
-Index: linux-2.6.18-ck1/fs/proc/array.c
+Index: linux-2.6.20-ck1/fs/proc/array.c
 ===================================================================
---- linux-2.6.18-ck1.orig/fs/proc/array.c	2006-09-21 19:54:55.000000000 +1000
-+++ linux-2.6.18-ck1/fs/proc/array.c	2006-09-21 20:00:33.000000000 +1000
+--- linux-2.6.20-ck1.orig/fs/proc/array.c	2007-02-05 22:52:03.000000000 +1100
++++ linux-2.6.20-ck1/fs/proc/array.c	2007-02-16 19:01:30.000000000 +1100
 @@ -165,7 +165,7 @@ static inline char * task_state(struct t
- 	read_lock(&tasklist_lock);
+ 	rcu_read_lock();
  	buffer += sprintf(buffer,
  		"State:\t%s\n"
 -		"SleepAVG:\t%lu%%\n"
@@ -17,14 +17,14 @@
  		get_task_state(p),
 -		(p->sleep_avg/1024)*100/(1020000000/1024),
 +		p->bonus,
- 	       	p->tgid,
- 		p->pid, pid_alive(p) ? p->group_leader->real_parent->tgid : 0,
- 		pid_alive(p) && p->ptrace ? p->parent->pid : 0,
-Index: linux-2.6.18-ck1/kernel/exit.c
+ 	       	p->tgid, p->pid,
+ 	       	pid_alive(p) ? rcu_dereference(p->real_parent)->tgid : 0,
+ 		pid_alive(p) && p->ptrace ? rcu_dereference(p->parent)->pid : 0,
+Index: linux-2.6.20-ck1/kernel/exit.c
 ===================================================================
---- linux-2.6.18-ck1.orig/kernel/exit.c	2006-09-21 19:55:00.000000000 +1000
-+++ linux-2.6.18-ck1/kernel/exit.c	2006-09-21 20:00:33.000000000 +1000
-@@ -166,7 +166,6 @@ repeat:
+--- linux-2.6.20-ck1.orig/kernel/exit.c	2007-02-05 22:52:04.000000000 +1100
++++ linux-2.6.20-ck1/kernel/exit.c	2007-02-16 19:01:30.000000000 +1100
+@@ -170,7 +170,6 @@ repeat:
  		zap_leader = (leader->exit_signal == -1);
  	}
  
@@ -32,11 +32,11 @@
  	write_unlock_irq(&tasklist_lock);
  	proc_flush_task(p);
  	release_thread(p);
-Index: linux-2.6.18-ck1/include/linux/sched.h
+Index: linux-2.6.20-ck1/include/linux/sched.h
 ===================================================================
---- linux-2.6.18-ck1.orig/include/linux/sched.h	2006-09-21 19:54:58.000000000 +1000
-+++ linux-2.6.18-ck1/include/linux/sched.h	2006-09-21 20:05:32.000000000 +1000
-@@ -32,9 +32,16 @@
+--- linux-2.6.20-ck1.orig/include/linux/sched.h	2007-02-05 22:52:04.000000000 +1100
++++ linux-2.6.20-ck1/include/linux/sched.h	2007-02-16 19:01:33.000000000 +1100
+@@ -34,9 +34,14 @@
  #define SCHED_FIFO		1
  #define SCHED_RR		2
  #define SCHED_BATCH		3
@@ -47,13 +47,11 @@
  
 +#define SCHED_MAX		SCHED_IDLEPRIO
 +#define SCHED_RANGE(policy)	((policy) <= SCHED_MAX)
-+#define SCHED_RT(policy)	((policy) == SCHED_FIFO || \
-+					(policy) == SCHED_RR)
 +
  struct sched_param {
  	int sched_priority;
  };
-@@ -204,6 +211,7 @@ extern void show_stack(struct task_struc
+@@ -216,6 +221,7 @@ extern void show_stack(struct task_struc
  
  void io_schedule(void);
  long io_schedule_timeout(long timeout);
@@ -61,7 +59,7 @@
  
  extern void cpu_init (void);
  extern void trap_init(void);
-@@ -498,14 +506,18 @@ struct signal_struct {
+@@ -522,14 +528,20 @@ struct signal_struct {
  
  #define MAX_USER_RT_PRIO	100
  #define MAX_RT_PRIO		MAX_USER_RT_PRIO
@@ -72,18 +70,20 @@
 +#define MIN_USER_PRIO		(MAX_PRIO - 2)
 +#define IDLEPRIO_PRIO		(MAX_PRIO - 1)
  
- #define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
+-#define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
++#define rt_prio(prio)		unlikely((prio) < ISO_PRIO)
  #define rt_task(p)		rt_prio((p)->prio)
  #define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
--#define has_rt_policy(p) \
--	unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)
-+#define has_rt_policy(p) 	unlikely(SCHED_RT((p)->policy))
+-#define is_rt_policy(p)		((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
++#define is_rt_policy(policy)	((policy) == SCHED_FIFO || \
++					(policy) == SCHED_RR)
+ #define has_rt_policy(p)	unlikely(is_rt_policy((p)->policy))
 +#define iso_task(p)		(unlikely((p)->policy == SCHED_ISO))
 +#define idleprio_task(p)	(unlikely((p)->policy == SCHED_IDLEPRIO))
  
  /*
   * Some day this will be a full-fledged user tracking system..
-@@ -707,6 +719,22 @@ extern unsigned int max_cache_size;
+@@ -741,6 +753,22 @@ extern unsigned int max_cache_size;
  
  #endif	/* CONFIG_SMP */
  
@@ -105,10 +105,10 @@
 +}
  
  struct io_context;			/* See blkdev.h */
- void exit_io_context(void);
-@@ -755,15 +783,6 @@ struct audit_context;		/* See audit.c */
- struct mempolicy;
+ struct cpuset;
+@@ -789,15 +817,6 @@ struct mempolicy;
  struct pipe_inode_info;
+ struct uts_namespace;
  
 -enum sleep_type {
 -	SLEEP_NORMAL,
@@ -122,18 +122,18 @@
  struct task_struct {
  	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
  	struct thread_info *thread_info;
-@@ -781,19 +800,18 @@ struct task_struct {
+@@ -815,20 +834,19 @@ struct task_struct {
  	int load_weight;	/* for niceness load balancing purposes */
  	int prio, static_prio, normal_prio;
  	struct list_head run_list;
 -	struct prio_array *array;
  
  	unsigned short ioprio;
+ #ifdef CONFIG_BLK_DEV_IO_TRACE
  	unsigned int btrace_seq;
- 
+ #endif
 -	unsigned long sleep_avg;
--	unsigned long long timestamp, last_ran;
-+	unsigned long long timestamp;
+ 	unsigned long long timestamp, last_ran;
 +	unsigned long runtime, totalrun, ns_debit, systime;
 +	unsigned int bonus;
 +	unsigned int slice, time_slice;
@@ -146,7 +146,7 @@
  
  #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
  	struct sched_info sched_info;
-@@ -942,6 +960,7 @@ struct task_struct {
+@@ -993,6 +1011,7 @@ struct task_struct {
  	struct held_lock held_locks[MAX_LOCK_DEPTH];
  	unsigned int lockdep_recursion;
  #endif
@@ -154,7 +154,7 @@
  
  /* journalling filesystem info */
  	void *journal_info;
-@@ -1054,8 +1073,11 @@ static inline void put_task_struct(struc
+@@ -1155,8 +1174,11 @@ static inline void put_task_struct(struc
  #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
  #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
  #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
@@ -166,7 +166,7 @@
  
  /*
   * Only the _current_ task can read/write to tsk->flags, but other
-@@ -1191,7 +1213,6 @@ extern void FASTCALL(wake_up_new_task(st
+@@ -1291,7 +1313,6 @@ extern void FASTCALL(wake_up_new_task(st
   static inline void kick_process(struct task_struct *tsk) { }
  #endif
  extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
@@ -174,18 +174,18 @@
  
  extern int in_group_p(gid_t);
  extern int in_egroup_p(gid_t);
-Index: linux-2.6.18-ck1/kernel/sched.c
+Index: linux-2.6.20-ck1/kernel/sched.c
 ===================================================================
---- linux-2.6.18-ck1.orig/kernel/sched.c	2006-09-21 19:55:00.000000000 +1000
-+++ linux-2.6.18-ck1/kernel/sched.c	2006-09-21 20:05:30.000000000 +1000
+--- linux-2.6.20-ck1.orig/kernel/sched.c	2007-02-05 22:52:04.000000000 +1100
++++ linux-2.6.20-ck1/kernel/sched.c	2007-02-16 19:01:31.000000000 +1100
 @@ -16,6 +16,10 @@
   *		by Davide Libenzi, preemptible kernel bits by Robert Love.
   *  2003-09-03	Interactivity tuning by Con Kolivas.
   *  2004-04-02	Scheduler domains code by Nick Piggin
-+ *  2006-09-21	Staircase scheduling policy by Con Kolivas with help
++ *  2007-02-14	Staircase scheduling policy by Con Kolivas with help
 + *		from William Lee Irwin III, Zwane Mwaikambo, Peter Williams
 + *		and Andreas Mohr.
-+ *		Staircase v16.2
++ *		Staircase v17
   */
  
  #include <linux/mm.h>
@@ -215,7 +215,7 @@
   * Convert user-nice values [ -20 ... 0 ... 19 ]
   * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
   * and back.
-@@ -77,124 +100,26 @@
+@@ -77,123 +100,20 @@
  /*
   * Some helpers for converting nanosecond timing to jiffy resolution
   */
@@ -305,20 +305,6 @@
  #define TASK_PREEMPTS_CURR(p, rq) \
  	((p)->prio < (rq)->curr->prio)
  
- /*
-- * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
-- * to time slice values: [800ms ... 100ms ... 5ms]
-- *
-- * The higher a thread's priority, the bigger timeslices
-- * it gets during one round of execution. But even the lowest
-- * priority thread gets MIN_TIMESLICE worth of execution time.
-+ * This is the time all tasks within the same priority round robin.
-+ * Set to a minimum of 6ms. It is 10 times longer in compute mode.
-  */
-+#define _RR_INTERVAL		((6 * HZ / 1001) + 1)
-+#define RR_INTERVAL		(_RR_INTERVAL * (1 + 9 * sched_compute))
-+#define DEF_TIMESLICE		(RR_INTERVAL * 19)
- 
 -#define SCALE_PRIO(x, prio) \
 -	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
 -
@@ -330,34 +316,49 @@
 -		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
 -}
 -
+-/*
+- * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
+- * to time slice values: [800ms ... 100ms ... 5ms]
+- *
+- * The higher a thread's priority, the bigger timeslices
+- * it gets during one round of execution. But even the lowest
+- * priority thread gets MIN_TIMESLICE worth of execution time.
+- */
+-
 -static inline unsigned int task_timeslice(struct task_struct *p)
 -{
 -	return static_prio_timeslice(p->static_prio);
 -}
- 
+-
  /*
-  * These are the runqueue data structures:
+- * These are the runqueue data structures:
++ * This is the time all tasks within the same priority round robin.
++ * Set to a minimum of 6ms. It is 10 times longer in compute mode.
   */
- 
+-
 -struct prio_array {
 -	unsigned int nr_active;
 -	DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
 -	struct list_head queue[MAX_PRIO];
 -};
--
++#define _RR_INTERVAL		((6 * HZ / 1001) + 1)
++#define RR_INTERVAL		(_RR_INTERVAL * (1 + 9 * sched_compute))
++#define DEF_TIMESLICE		(RR_INTERVAL * 19)
+ 
  /*
   * This is the main, per-CPU runqueue data structure.
-  *
-@@ -224,12 +149,14 @@ struct rq {
+@@ -224,14 +144,16 @@ struct rq {
  	 */
  	unsigned long nr_uninterruptible;
  
 -	unsigned long expired_timestamp;
- 	unsigned long long timestamp_last_tick;
+ 	/* Cached timestamp set by update_cpu_clock() */
+ 	unsigned long long most_recent_timestamp;
 +	unsigned short cache_ticks, preempted;
 +	unsigned long iso_ticks;
 +	unsigned short iso_refractory;
  	struct task_struct *curr, *idle;
+ 	unsigned long next_balance;
  	struct mm_struct *prev_mm;
 -	struct prio_array *active, *expired, arrays[2];
 -	int best_expired_prio;
@@ -366,7 +367,7 @@
  	atomic_t nr_iowait;
  
  #ifdef CONFIG_SMP
-@@ -553,13 +480,7 @@ static inline struct rq *this_rq_lock(vo
+@@ -568,13 +490,7 @@ static inline struct rq *this_rq_lock(vo
  
  #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
  /*
@@ -381,7 +382,7 @@
   *
   * This function is only called from sched_info_arrive(), rather than
   * dequeue_task(). Even though a task may be queued and dequeued multiple
-@@ -592,13 +513,11 @@ static void sched_info_arrive(struct tas
+@@ -607,13 +523,11 @@ static void sched_info_arrive(struct tas
  }
  
  /*
@@ -400,7 +401,7 @@
   * set_user_nice(), and even load_balance() as it moves tasks from runqueue
   * to runqueue.
   *
-@@ -657,73 +576,81 @@ sched_info_switch(struct task_struct *pr
+@@ -672,73 +586,81 @@ sched_info_switch(struct task_struct *pr
  #define sched_info_switch(t, next)	do { } while (0)
  #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
  
@@ -529,7 +530,7 @@
  /*
   * To aid in avoiding the subversion of "niceness" due to uneven distribution
   * of tasks with abnormal "nice" values across CPUs the contribution that
-@@ -741,10 +668,9 @@ static inline int __normal_prio(struct t
+@@ -756,10 +678,9 @@ static inline int __normal_prio(struct t
  #define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
  #define LOAD_WEIGHT(lp) \
  	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
@@ -543,7 +544,7 @@
  
  static void set_load_weight(struct task_struct *p)
  {
-@@ -760,8 +686,14 @@ static void set_load_weight(struct task_
+@@ -775,8 +696,14 @@ static void set_load_weight(struct task_
  		else
  #endif
  			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
@@ -559,7 +560,7 @@
  }
  
  static inline void
-@@ -789,146 +721,263 @@ static inline void dec_nr_running(struct
+@@ -804,149 +731,262 @@ static inline void dec_nr_running(struct
  }
  
  /*
@@ -777,10 +778,10 @@
 -			 */
 -			p->sleep_type = SLEEP_NONINTERACTIVE;
 -		} else {
-+static inline int idleprio_suitable(const struct task_struct *p)
++static inline int idleprio_suitable(struct task_struct *p)
 +{
-+	return (!p->mutexes_held &&
-+		!(p->flags & (PF_FREEZE | PF_NONSLEEP | PF_EXITING)));
++	return (!p->mutexes_held && !freezing(p) &&
++		!(p->flags & (PF_NONSLEEP | PF_EXITING)));
 +}
 +
 +static inline int idleprio(const struct task_struct *p)
@@ -905,11 +906,13 @@
   *
 - * Update all the scheduling statistics stuff. (sleep average
 - * calculation, priority modifiers, etc.)
-+ * Update all the scheduling statistics stuff. (priority modifiers, etc.)
   */
  static void activate_task(struct task_struct *p, struct rq *rq, int local)
  {
 -	unsigned long long now;
+-
+-	if (rt_task(p))
+-		goto out;
 +	unsigned long long now = sched_clock();
 +	unsigned long rr = rr_interval(p);
  
@@ -917,13 +920,11 @@
  #ifdef CONFIG_SMP
  	if (!local) {
  		/* Compensate for drifting sched_clock */
-@@ -937,34 +986,15 @@ static void activate_task(struct task_st
- 			+ rq->timestamp_last_tick;
+@@ -967,32 +1007,15 @@ static void activate_task(struct task_st
+ 				     (now - p->timestamp) >> 20);
  	}
- #endif
--
--	if (!rt_task(p))
--		p->prio = recalc_task_prio(p, now);
+ 
+-	p->prio = recalc_task_prio(p, now);
 -
 -	/*
 -	 * This checks to make sure it's not an uninterruptible task
@@ -955,11 +956,11 @@
 +		p->flags &= ~(PF_FORKED | PF_NONSLEEP);
  	}
  	p->timestamp = now;
--
+-out:
  	__activate_task(p, rq);
  }
  
-@@ -974,8 +1004,7 @@ static void activate_task(struct task_st
+@@ -1002,8 +1025,7 @@ out:
  static void deactivate_task(struct task_struct *p, struct rq *rq)
  {
  	dec_nr_running(p, rq);
@@ -969,7 +970,7 @@
  }
  
  /*
-@@ -1057,7 +1086,7 @@ migrate_task(struct task_struct *p, int 
+@@ -1085,7 +1107,7 @@ migrate_task(struct task_struct *p, int 
  	 * If the task is not on a runqueue (and not running), then
  	 * it is sufficient to simply update the task's cpu field.
  	 */
@@ -978,7 +979,7 @@
  		set_task_cpu(p, dest_cpu);
  		return 0;
  	}
-@@ -1088,7 +1117,7 @@ void wait_task_inactive(struct task_stru
+@@ -1116,7 +1138,7 @@ void wait_task_inactive(struct task_stru
  repeat:
  	rq = task_rq_lock(p, &flags);
  	/* Must be off runqueue entirely, not preempted. */
@@ -987,7 +988,7 @@
  		/* If it's preempted, we yield.  It could be a while. */
  		preempted = !task_running(rq, p);
  		task_rq_unlock(rq, &flags);
-@@ -1345,6 +1374,24 @@ static inline int wake_idle(int cpu, str
+@@ -1381,6 +1403,24 @@ static inline int wake_idle(int cpu, str
  }
  #endif
  
@@ -1012,7 +1013,7 @@
  /***
   * try_to_wake_up - wake up a thread
   * @p: the to-be-woken-up thread
-@@ -1376,7 +1423,7 @@ static int try_to_wake_up(struct task_st
+@@ -1412,7 +1452,7 @@ static int try_to_wake_up(struct task_st
  	if (!(old_state & state))
  		goto out;
  
@@ -1021,7 +1022,7 @@
  		goto out_running;
  
  	cpu = task_cpu(p);
-@@ -1467,7 +1514,7 @@ out_set_cpu:
+@@ -1505,7 +1545,7 @@ out_set_cpu:
  		old_state = p->state;
  		if (!(old_state & state))
  			goto out;
@@ -1030,7 +1031,7 @@
  			goto out_running;
  
  		this_cpu = smp_processor_id();
-@@ -1476,25 +1523,9 @@ out_set_cpu:
+@@ -1514,25 +1554,9 @@ out_set_cpu:
  
  out_activate:
  #endif /* CONFIG_SMP */
@@ -1057,7 +1058,7 @@
  	/*
  	 * Sync wakeups (i.e. those types of wakeups where the waker
  	 * has indicated that it will leave the CPU in short order)
-@@ -1503,15 +1534,16 @@ out_activate:
+@@ -1541,15 +1565,16 @@ out_activate:
  	 * the waker guarantees that the freshly woken up task is going
  	 * to be considered on this CPU.)
  	 */
@@ -1073,12 +1074,12 @@
  out_running:
  	p->state = TASK_RUNNING;
  out:
-+	if (idleprio_task(p) && (p->flags & PF_FREEZE) && idleprio(p))
++	if (idleprio_task(p) && freezing(p) && idleprio(p))
 +		requeue_task(p, rq, effective_prio(p));
  	task_rq_unlock(rq, &flags);
  
  	return success;
-@@ -1556,7 +1588,6 @@ void fastcall sched_fork(struct task_str
+@@ -1595,7 +1620,6 @@ void fastcall sched_fork(struct task_str
  	p->prio = current->normal_prio;
  
  	INIT_LIST_HEAD(&p->run_list);
@@ -1086,7 +1087,7 @@
  #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
  	if (unlikely(sched_info_on()))
  		memset(&p->sched_info, 0, sizeof(p->sched_info));
-@@ -1568,30 +1599,6 @@ void fastcall sched_fork(struct task_str
+@@ -1607,30 +1631,6 @@ void fastcall sched_fork(struct task_str
  	/* Want to start with kernel preemption disabled. */
  	task_thread_info(p)->preempt_count = 1;
  #endif
@@ -1111,23 +1112,13 @@
 -		 * runqueue lock is not a problem.
 -		 */
 -		current->time_slice = 1;
--		scheduler_tick();
+-		task_running_tick(cpu_rq(cpu), current);
 -	}
 -	local_irq_enable();
  	put_cpu();
  }
  
-@@ -1604,47 +1611,29 @@ void fastcall sched_fork(struct task_str
-  */
- void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
- {
--	struct rq *rq, *this_rq;
- 	unsigned long flags;
- 	int this_cpu, cpu;
-+	struct rq *rq, *this_rq;
- 
- 	rq = task_rq_lock(p, &flags);
- 	BUG_ON(p->state != TASK_RUNNING);
+@@ -1652,38 +1652,20 @@ void fastcall wake_up_new_task(struct ta
  	this_cpu = smp_processor_id();
  	cpu = task_cpu(p);
  
@@ -1171,10 +1162,10 @@
  		/*
  		 * We skip the following code due to cpu == this_cpu
  	 	 *
-@@ -1661,53 +1650,19 @@ void fastcall wake_up_new_task(struct ta
+@@ -1700,53 +1682,19 @@ void fastcall wake_up_new_task(struct ta
  		 */
- 		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
- 					+ rq->timestamp_last_tick;
+ 		p->timestamp = (p->timestamp - this_rq->most_recent_timestamp)
+ 					+ rq->most_recent_timestamp;
 -		__activate_task(p, rq);
 -		if (TASK_PREEMPTS_CURR(p, rq))
 -			resched_task(rq->curr);
@@ -1228,16 +1219,7 @@
  /**
   * prepare_task_switch - prepare to switch tasks
   * @rq: the runqueue preparing to switch
-@@ -1910,7 +1865,7 @@ unsigned long nr_active(void)
- static inline int
- task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
- {
--	return (long long)(now - p->last_ran) < (long long)sd->cache_hot_time;
-+	return (long long)(now - p->timestamp) < (long long)sd->cache_hot_time;
- }
- 
- /*
-@@ -2023,23 +1978,21 @@ void sched_exec(void)
+@@ -2068,23 +2016,21 @@ void sched_exec(void)
   * pull_task - move a task from a remote runqueue to the local runqueue.
   * Both runqueues must be locked.
   */
@@ -1254,8 +1236,8 @@
  	inc_nr_running(p, this_rq);
 -	enqueue_task(p, this_array);
 +	enqueue_task(p, this_rq);
- 	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
- 				+ this_rq->timestamp_last_tick;
+ 	p->timestamp = (p->timestamp - src_rq->most_recent_timestamp)
+ 				+ this_rq->most_recent_timestamp;
  	/*
  	 * Note that idle threads have a prio of MAX_PRIO, for this test
  	 * to be always true for them.
@@ -1266,7 +1248,7 @@
  }
  
  /*
-@@ -2077,8 +2030,6 @@ int can_migrate_task(struct task_struct 
+@@ -2127,8 +2073,6 @@ int can_migrate_task(struct task_struct 
  	return 1;
  }
  
@@ -1275,7 +1257,7 @@
  /*
   * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
   * load from busiest to this_rq, as part of a balancing operation within
-@@ -2093,7 +2044,6 @@ static int move_tasks(struct rq *this_rq
+@@ -2143,7 +2087,6 @@ static int move_tasks(struct rq *this_rq
  {
  	int idx, pulled = 0, pinned = 0, this_best_prio, best_prio,
  	    best_prio_seen, skip_for_load;
@@ -1283,7 +1265,7 @@
  	struct list_head *head, *curr;
  	struct task_struct *tmp;
  	long rem_load_move;
-@@ -2103,8 +2053,8 @@ static int move_tasks(struct rq *this_rq
+@@ -2153,8 +2096,8 @@ static int move_tasks(struct rq *this_rq
  
  	rem_load_move = max_load_move;
  	pinned = 1;
@@ -1294,7 +1276,7 @@
  	/*
  	 * Enable handling of the case where there is more than one task
  	 * with the best priority.   If the current running task is one
-@@ -2114,38 +2064,17 @@ static int move_tasks(struct rq *this_rq
+@@ -2164,38 +2107,17 @@ static int move_tasks(struct rq *this_rq
  	 */
  	best_prio_seen = best_prio == busiest->curr->prio;
  
@@ -1337,16 +1319,16 @@
  	curr = head->prev;
  skip_queue:
  	tmp = list_entry(curr, struct task_struct, run_list);
-@@ -2175,7 +2104,7 @@ skip_queue:
- 		schedstat_inc(sd, lb_hot_gained[idle]);
- #endif
+@@ -2220,7 +2142,7 @@ skip_queue:
<<Diff was trimmed, longer than 597 lines>>

---- CVS-web:
    http://cvs.pld-linux.org/SOURCES/kernel-desktop-ck.patch?r1=1.6&r2=1.7&f=u
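---- Note:
The bumped patch carries Con Kolivas' staircase scheduler for 2.6.20, including the
SCHED_ISO and SCHED_IDLEPRIO policies visible in the include/linux/sched.h hunks above.
As a rough illustration only, the sketch below shows how a userspace program on a
ck-patched kernel could request SCHED_ISO via the standard sched_setscheduler() call.
The numeric policy values (4 and 5) are taken from the full ck patch, not from the
trimmed hunks shown here, so treat them as assumptions; they do not exist in mainline
2.6.20 and the call will fail with EINVAL on an unpatched kernel.

/* Minimal sketch: request the ck SCHED_ISO policy for the current task.
 * SCHED_ISO/SCHED_IDLEPRIO values (4/5) are assumed from the ck patchset;
 * they are not defined by mainline 2.6.20 headers. */
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

#ifndef SCHED_ISO
#define SCHED_ISO      4   /* assumed value from the ck sched.h change */
#endif
#ifndef SCHED_IDLEPRIO
#define SCHED_IDLEPRIO 5   /* assumed value from the ck sched.h change */
#endif

int main(void)
{
	struct sched_param sp;

	memset(&sp, 0, sizeof(sp));
	sp.sched_priority = 0;  /* non-RT policies take priority 0 */

	if (sched_setscheduler(0, SCHED_ISO, &sp) == -1) {
		fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
		return 1;
	}
	puts("running under SCHED_ISO");
	return 0;
}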


