SOURCES: kernel-desktop-ck.patch - raw uncompressed: http://wanink...

glen glen at pld-linux.org
Tue Apr 8 22:48:29 CEST 2008


Author: glen                         Date: Tue Apr  8 20:48:29 2008 GMT
Module: SOURCES                       Tag: HEAD
---- Log message:
- raw uncompressed: http://waninkoko.info/ckpatches/2.6.24/2.6.24-ck1/patch-2.6.24-cks1.bz2

---- Files affected:
SOURCES:
   kernel-desktop-ck.patch (1.13 -> 1.14) 

---- Diffs:

================================================================
Index: SOURCES/kernel-desktop-ck.patch
diff -u SOURCES/kernel-desktop-ck.patch:1.13 SOURCES/kernel-desktop-ck.patch:1.14
--- SOURCES/kernel-desktop-ck.patch:1.13	Tue Aug 21 09:27:52 2007
+++ SOURCES/kernel-desktop-ck.patch	Tue Apr  8 22:48:23 2008
@@ -1,130 +1,8 @@
-diff -urN linux-2.6.22.suspend2/arch/i386/defconfig linux-2.6.22.ck/arch/i386/defconfig
---- linux-2.6.22.suspend2/arch/i386/defconfig	2007-07-09 01:32:17.000000000 +0200
-+++ linux-2.6.22.ck/arch/i386/defconfig	2007-08-02 13:23:45.865825786 +0200
-@@ -226,10 +226,10 @@
- # CONFIG_IRQBALANCE is not set
- CONFIG_SECCOMP=y
- # CONFIG_HZ_100 is not set
--CONFIG_HZ_250=y
-+# CONFIG_HZ_250 is not set
- # CONFIG_HZ_300 is not set
--# CONFIG_HZ_1000 is not set
--CONFIG_HZ=250
-+CONFIG_HZ_1000=y
-+CONFIG_HZ=1000
- # CONFIG_KEXEC is not set
- # CONFIG_CRASH_DUMP is not set
- CONFIG_PHYSICAL_START=0x100000
-diff -urN linux-2.6.22.suspend2/arch/i386/Kconfig linux-2.6.22.ck/arch/i386/Kconfig
---- linux-2.6.22.suspend2/arch/i386/Kconfig	2007-07-09 01:32:17.000000000 +0200
-+++ linux-2.6.22.ck/arch/i386/Kconfig	2007-08-02 13:23:45.861825804 +0200
-@@ -550,7 +550,7 @@
- 
- choice
- 	depends on EXPERIMENTAL
--	prompt "Memory split" if EMBEDDED
-+	prompt "Memory split"
- 	default VMSPLIT_3G
- 	help
- 	  Select the desired split between kernel and user memory.
-@@ -569,17 +569,17 @@
- 	  option alone!
- 
- 	config VMSPLIT_3G
--		bool "3G/1G user/kernel split"
-+		bool "Default 896MB lowmem (3G/1G user/kernel split)"
- 	config VMSPLIT_3G_OPT
- 		depends on !HIGHMEM
--		bool "3G/1G user/kernel split (for full 1G low memory)"
-+		bool "1GB lowmem (3G/1G user/kernel split)"
- 	config VMSPLIT_2G
--		bool "2G/2G user/kernel split"
-+		bool "2GB lowmem (2G/2G user/kernel split)"
- 	config VMSPLIT_2G_OPT
- 		depends on !HIGHMEM
--		bool "2G/2G user/kernel split (for full 2G low memory)"
-+		bool "2GB lowmem (2G/2G user/kernel split)"
- 	config VMSPLIT_1G
--		bool "1G/3G user/kernel split"
-+		bool "3GB lowmem (1G/3G user/kernel split)"
- endchoice
- 
- config PAGE_OFFSET
-diff -urN linux-2.6.22.suspend2/arch/i386/kernel/cpu/proc.c linux-2.6.22.ck/arch/i386/kernel/cpu/proc.c
---- linux-2.6.22.suspend2/arch/i386/kernel/cpu/proc.c	2007-07-09 01:32:17.000000000 +0200
-+++ linux-2.6.22.ck/arch/i386/kernel/cpu/proc.c	2007-08-02 13:23:45.917825552 +0200
-@@ -157,7 +157,7 @@
- 
- 	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
- 		     c->loops_per_jiffy/(500000/HZ),
--		     (c->loops_per_jiffy/(5000/HZ)) % 100);
-+		     (c->loops_per_jiffy * 10/(50000/HZ)) % 100);
- 	seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size);
- 
- 	return 0;
-diff -urN linux-2.6.22.suspend2/arch/i386/kernel/smpboot.c linux-2.6.22.ck/arch/i386/kernel/smpboot.c
---- linux-2.6.22.suspend2/arch/i386/kernel/smpboot.c	2007-07-09 01:32:17.000000000 +0200
-+++ linux-2.6.22.ck/arch/i386/kernel/smpboot.c	2007-08-02 13:23:45.945825426 +0200
-@@ -1094,7 +1094,7 @@
- 		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
- 		cpucount+1,
- 		bogosum/(500000/HZ),
--		(bogosum/(5000/HZ))%100);
-+		(bogosum * 10/(50000/HZ))%100);
- 	
- 	Dprintk("Before bogocount - setting activated=1.\n");
- 
-diff -urN linux-2.6.22.suspend2/arch/x86_64/defconfig linux-2.6.22.ck/arch/x86_64/defconfig
---- linux-2.6.22.suspend2/arch/x86_64/defconfig	2007-07-09 01:32:17.000000000 +0200
-+++ linux-2.6.22.ck/arch/x86_64/defconfig	2007-08-02 13:23:45.885825696 +0200
-@@ -185,10 +185,10 @@
- CONFIG_SECCOMP=y
- # CONFIG_CC_STACKPROTECTOR is not set
- # CONFIG_HZ_100 is not set
--CONFIG_HZ_250=y
-+# CONFIG_HZ_250 is not set
- # CONFIG_HZ_300 is not set
--# CONFIG_HZ_1000 is not set
--CONFIG_HZ=250
-+CONFIG_HZ_1000=y
-+CONFIG_HZ=1000
- CONFIG_K8_NB=y
- CONFIG_GENERIC_HARDIRQS=y
- CONFIG_GENERIC_IRQ_PROBE=y
-diff -urN linux-2.6.22.suspend2/arch/x86_64/kernel/setup.c linux-2.6.22.ck/arch/x86_64/kernel/setup.c
---- linux-2.6.22.suspend2/arch/x86_64/kernel/setup.c	2007-07-09 01:32:17.000000000 +0200
-+++ linux-2.6.22.ck/arch/x86_64/kernel/setup.c	2007-08-02 13:23:45.957825372 +0200
-@@ -1047,7 +1047,7 @@
- 		
- 	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
- 		   c->loops_per_jiffy/(500000/HZ),
--		   (c->loops_per_jiffy/(5000/HZ)) % 100);
-+		   (c->loops_per_jiffy * 10/(50000/HZ)) % 100);
- 
- 	if (c->x86_tlbsize > 0) 
- 		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
-diff -urN linux-2.6.22.suspend2/block/cfq-iosched.c linux-2.6.22.ck/block/cfq-iosched.c
---- linux-2.6.22.suspend2/block/cfq-iosched.c	2007-07-09 01:32:17.000000000 +0200
-+++ linux-2.6.22.ck/block/cfq-iosched.c	2007-08-02 13:23:45.737826362 +0200
-@@ -1276,10 +1276,12 @@
- 			printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
- 		case IOPRIO_CLASS_NONE:
- 			/*
--			 * no prio set, place us in the middle of the BE classes
-+			 * Select class and ioprio according to policy and nice
- 			 */
-+			cfqq->ioprio_class = task_policy_ioprio_class(tsk);
- 			cfqq->ioprio = task_nice_ioprio(tsk);
--			cfqq->ioprio_class = IOPRIO_CLASS_BE;
-+			if (cfqq->ioprio_class == IOPRIO_CLASS_IDLE)
-+				cfq_clear_cfqq_idle_window(cfqq);
- 			break;
- 		case IOPRIO_CLASS_RT:
- 			cfqq->ioprio = task_ioprio(tsk);
-diff -urN linux-2.6.22.suspend2/Documentation/filesystems/proc.txt linux-2.6.22.ck/Documentation/filesystems/proc.txt
---- linux-2.6.22.suspend2/Documentation/filesystems/proc.txt	2007-07-09 01:32:17.000000000 +0200
-+++ linux-2.6.22.ck/Documentation/filesystems/proc.txt	2007-08-02 13:23:45.817826002 +0200
-@@ -1333,6 +1333,14 @@
+diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
+index dec9945..ed4b2fa 100644
+--- a/Documentation/filesystems/proc.txt
++++ b/Documentation/filesystems/proc.txt
+@@ -1436,6 +1436,14 @@ To free pagecache, dentries and inodes:
  As this is a non-destructive operation and dirty objects are not freeable, the
  user should run `sync' first.
  
@@ -139,339 +17,11 @@
  
  2.5 /proc/sys/dev - Device specific parameters
  ----------------------------------------------
-diff -urN linux-2.6.22.suspend2/Documentation/sched-design.txt linux-2.6.22.ck/Documentation/sched-design.txt
---- linux-2.6.22.suspend2/Documentation/sched-design.txt	2007-07-09 01:32:17.000000000 +0200
-+++ linux-2.6.22.ck/Documentation/sched-design.txt	2007-08-02 13:23:45.633826830 +0200
-@@ -1,11 +1,14 @@
--		   Goals, Design and Implementation of the
--		      new ultra-scalable O(1) scheduler
-+ Goals, Design and Implementation of the ultra-scalable O(1) scheduler by
-+ Ingo Molnar and the Staircase Deadline cpu scheduler policy designed by
-+ Con Kolivas.
- 
- 
--  This is an edited version of an email Ingo Molnar sent to
--  lkml on 4 Jan 2002.  It describes the goals, design, and
--  implementation of Ingo's new ultra-scalable O(1) scheduler.
--  Last Updated: 18 April 2002.
-+  This was originally an edited version of an email Ingo Molnar sent to
-+  lkml on 4 Jan 2002.  It describes the goals, design, and implementation
-+  of Ingo's ultra-scalable O(1) scheduler. It now contains a description
-+  of the Staircase Deadline priority scheduler that was built on this
-+  design.
-+  Last Updated: Fri, 4 May 2007
- 
- 
- Goal
-@@ -163,3 +166,222 @@
- code is smaller than the old one.
- 
- 	Ingo
-+
-+
-+Staircase Deadline cpu scheduler policy
-+================================================
-+
-+Design summary
-+==============
-+
-+A novel design which incorporates a foreground-background descending priority
-+system (the staircase) via a bandwidth allocation matrix according to nice
-+level.
-+
-+
-+Features
-+========
-+
-+A starvation free, strict fairness O(1) scalable design with interactivity
-+as good as the above restrictions can provide. There is no interactivity
-+estimator, no sleep/run measurements and only simple fixed accounting.
-+The design has strict enough a design and accounting that task behaviour
-+can be modelled and maximum scheduling latencies can be predicted by
-+the virtual deadline mechanism that manages runqueues. The prime concern
-+in this design is to maintain fairness at all costs determined by nice level,
-+yet to maintain as good interactivity as can be allowed within the
-+constraints of strict fairness.
-+
-+
-+Design description
-+==================
-+
-+SD works off the principle of providing each task a quota of runtime that it is
-+allowed to run at a number of priority levels determined by its static priority
-+(ie. its nice level). If the task uses up its quota it has its priority
-+decremented to the next level determined by a priority matrix. Once every
-+runtime quota has been consumed of every priority level, a task is queued on the
-+"expired" array. When no other tasks exist with quota, the expired array is
-+activated and fresh quotas are handed out. This is all done in O(1).
-+
-+Design details
-+==============
-+
-+Each task keeps a record of its own entitlement of cpu time. Most of the rest of
-+these details apply to non-realtime tasks as rt task management is straight
-+forward.
-+
-+Each runqueue keeps a record of what major epoch it is up to in the
-+rq->prio_rotation field which is incremented on each major epoch. It also
-+keeps a record of the current prio_level for each static priority task.
-+
-+Each task keeps a record of what major runqueue epoch it was last running
-+on in p->rotation. It also keeps a record of what priority levels it has
-+already been allocated quota from during this epoch in a bitmap p->bitmap.
-+
-+The only tunable that determines all other details is the RR_INTERVAL. This
-+is set to 8ms, and is scaled gently upwards with more cpus. This value is
-+tunable via a /proc interface.
-+
-+All tasks are initially given a quota based on RR_INTERVAL. This is equal to
-+RR_INTERVAL between nice values of -6 and 0, half that size above nice 0, and
-+progressively larger for nice values from -1 to -20. This is assigned to
-+p->quota and only changes with changes in nice level.
-+
-+As a task is first queued, it checks in recalc_task_prio to see if it has run at
-+this runqueue's current priority rotation. If it has not, it will have its
-+p->prio level set according to the first slot in a "priority matrix" and will be
-+given a p->time_slice equal to the p->quota, and has its allocation bitmap bit
-+set in p->bitmap for this prio level. It is then queued on the current active
-+priority array.
-+
-+If a task has already been running during this major epoch, and it has
-+p->time_slice left and the rq->prio_quota for the task's p->prio still
-+has quota, it will be placed back on the active array, but no more quota
-+will be added.
-+
-+If a task has been running during this major epoch, but does not have
-+p->time_slice left, it will find the next lowest priority in its bitmap that it
-+has not been allocated quota from. It then gets the a full quota in
-+p->time_slice. It is then queued on the current active priority array at the
-+newly determined lower priority.
-+
-+If a task has been running during this major epoch, and does not have
-+any entitlement left in p->bitmap and no time_slice left, it will have its
-+bitmap cleared, and be queued at its best prio again, but on the expired
-+priority array.
-+
-+When a task is queued, it has its relevant bit set in the array->prio_bitmap.
-+
-+p->time_slice is stored in nanoseconds and is updated via update_cpu_clock on
-+schedule() and scheduler_tick. If p->time_slice is below zero then the
-+recalc_task_prio is readjusted and the task rescheduled.
-+
-+
-+Priority Matrix
-+===============
-+
-+In order to minimise the latencies between tasks of different nice levels
-+running concurrently, the dynamic priority slots where different nice levels
-+are queued are dithered instead of being sequential. What this means is that
-+there are 40 priority slots where a task may run during one major rotation,
-+and the allocation of slots is dependant on nice level. In the
-+following table, a zero represents a slot where the task may run.
-+
-+PRIORITY:0..................20.................39
-+nice -20 0000000000000000000000000000000000000000
-+nice -10 1000100010001000100010001000100010010000
-+nice   0 1010101010101010101010101010101010101010
-+nice   5 1011010110110101101101011011010110110110
-+nice  10 1110111011101110111011101110111011101110
-+nice  15 1111111011111110111111101111111011111110
-+nice  19 1111111111111111111111111111111111111110
-+
-+As can be seen, a nice -20 task runs in every priority slot whereas a nice 19
-+task only runs one slot per major rotation. This dithered table allows for the
-+smallest possible maximum latencies between tasks of varying nice levels, thus
-+allowing vastly different nice levels to be used.
-+
-+SCHED_BATCH tasks are managed slightly differently, receiving only the top
-+slots from its priority bitmap giving it equal cpu as SCHED_NORMAL, but
-+slightly higher latencies.
-+
-+
-+Modelling deadline behaviour
-+============================
-+
-+As the accounting in this design is hard and not modified by sleep average
-+calculations or interactivity modifiers, it is possible to accurately
-+predict the maximum latency that a task may experience under different
-+conditions. This is a virtual deadline mechanism enforced by mandatory
-+timeslice expiration and not outside bandwidth measurement.
-+
-+The maximum duration a task can run during one major epoch is determined by its
-+nice value. Nice 0 tasks can run at 19 different priority levels for RR_INTERVAL
-+duration during each epoch. Nice 10 tasks can run at 9 priority levels for each
-+epoch, and so on. The table in the priority matrix above demonstrates how this
-+is enforced.
-+
-+Therefore the maximum duration a runqueue epoch can take is determined by
-+the number of tasks running, and their nice level. After that, the maximum
-+duration it can take before a task can wait before it get scheduled is
-+determined by the position of its first slot on the matrix.
-+
-+In the following examples, these are _worst case scenarios_ and would rarely
-+occur, but can be modelled nonetheless to determine the maximum possible
-+latency.
-+
-+So for example, if two nice 0 tasks are running, and one has just expired as
-+another is activated for the first time receiving a full quota for this
-+runqueue rotation, the first task will wait:
-+
-+nr_tasks * max_duration + nice_difference * rr_interval
-+1 * 19 * RR_INTERVAL + 0 = 152ms
-+
-+In the presence of a nice 10 task, a nice 0 task would wait a maximum of
-+1 * 10 * RR_INTERVAL + 0 = 80ms
-+
-+In the presence of a nice 0 task, a nice 10 task would wait a maximum of
-+1 * 19 * RR_INTERVAL + 1 * RR_INTERVAL = 160ms
-+
-+More useful than these values, though, are the average latencies which are
-+a matter of determining the average distance between priority slots of
-+different nice values and multiplying them by the tasks' quota. For example
-+in the presence of a nice -10 task, a nice 0 task will wait either one or
-+two slots. Given that nice -10 tasks have a quota 2.5 times the RR_INTERVAL,
-+this means the latencies will alternate between 2.5 and 5 RR_INTERVALs or
-+20 and 40ms respectively (on uniprocessor at 1000HZ).
-+
-+
-+Achieving interactivity
-+=======================
-+
-+A requirement of this scheduler design was to achieve good interactivity
-+despite being a completely fair deadline based design. The disadvantage of
-+designs that try to achieve interactivity is that they usually do so at
-+the expense of maintaining fairness. As cpu speeds increase, the requirement
-+for some sort of metered unfairness towards interactive tasks becomes a less
-+desirable phenomenon, but low latency and fairness remains mandatory to
-+good interactive performance.
-+
-+This design relies on the fact that interactive tasks, by their nature,
-+sleep often. Most fair scheduling designs end up penalising such tasks
-+indirectly giving them less than their fair possible share because of the
-+sleep, and have to use a mechanism of bonusing their priority to offset
-+this based on the duration they sleep. This becomes increasingly inaccurate
-+as the number of running tasks rises and more tasks spend time waiting on
-+runqueues rather than sleeping, and it is impossible to tell whether the
-+task that's waiting on a runqueue only intends to run for a short period and
-+then sleep again after that runqueue wait. Furthermore, all such designs rely
-+on a period of time to pass to accumulate some form of statistic on the task
-+before deciding on how much to give them preference. The shorter this period,
-+the more rapidly bursts of cpu ruin the interactive tasks behaviour. The
-+longer this period, the longer it takes for interactive tasks to get low
-+scheduling latencies and fair cpu.
-+
-+This design does not measure sleep time at all. Interactive tasks that sleep
-+often will wake up having consumed very little if any of their quota for
-+the current major priority rotation. The longer they have slept, the less
-+likely they are to even be on the current major priority rotation. Once
-+woken up, though, they get to use up their full quota for that epoch,
-+whether part of a quota remains or a full quota. Overall, however, they
-+can still only run as much cpu time for that epoch as any other task of the
-+same nice level. This means that two tasks behaving completely differently
-+from fully cpu bound to waking/sleeping extremely frequently will still
-+get the same quota of cpu, but the latter will be using its quota for that
-+epoch in bursts rather than continuously. This guarantees that interactive
-+tasks get the same amount of cpu as cpu bound ones.
-+
-+The other requirement of interactive tasks is also to obtain low latencies
-+for when they are scheduled. Unlike fully cpu bound tasks and the maximum
-+latencies possible described in the modelling deadline behaviour section
-+above, tasks that sleep will wake up with quota available usually at the
-+current runqueue's priority_level or better. This means that the most latency
-+they are likely to see is one RR_INTERVAL, and often they will preempt the
-+current task if it is not of a sleeping nature. This then guarantees very
-+low latency for interactive tasks, and the lowest latencies for the least
-+cpu bound tasks.
-+
-+
-+Fri, 4 May 2007
-+Con Kolivas <kernel at kolivas.org>
-diff -urN linux-2.6.22.suspend2/Documentation/sysctl/kernel.txt linux-2.6.22.ck/Documentation/sysctl/kernel.txt
---- linux-2.6.22.suspend2/Documentation/sysctl/kernel.txt	2007-07-09 01:32:17.000000000 +0200
-+++ linux-2.6.22.ck/Documentation/sysctl/kernel.txt	2007-08-02 13:23:45.653826740 +0200
-@@ -25,6 +25,9 @@
- - domainname
- - hostname
- - hotplug
-+- interactive
-+- iso_cpu
-+- iso_period
- - java-appletviewer           [ binfmt_java, obsolete ]
- - java-interpreter            [ binfmt_java, obsolete ]
- - kstack_depth_to_print       [ X86 only ]
-@@ -43,6 +46,7 @@
- - printk
- - real-root-dev               ==> Documentation/initrd.txt
- - reboot-cmd                  [ SPARC only ]
-+- rr_interval
- - rtsig-max
- - rtsig-nr
- - sem
-@@ -164,6 +168,40 @@
- 
- ==============================================================
- 
-+interactive:
-+
-+The staircase-deadline cpu scheduler can be set in either purely
-+forward-looking mode for absolutely rigid fairness and cpu distribution
-+according to nice level, or it can allow a small per-process history
-+to smooth out cpu usage perturbations common in interactive tasks by
-+enabling this sysctl. While small fairness issues can arise with this
-+enabled, overall fairness is usually still strongly maintained and
-+starvation is never possible. Enabling this can significantly smooth
-+out 3d graphics and games.
-+
-+Default value is 1 (enabled).
-+
-+==============================================================
-+
-+iso_cpu:
-+
-+This sets the percentage cpu that the unprivileged SCHED_ISO tasks can
-+run effectively at realtime priority, averaged over a rolling iso_period
-+seconds.
-+
-+Set to 80 (percent) by default.
-+
-+==============================================================
-+
-+iso_period:
-+
-+This sets the number of seconds over which SCHED_ISO cpu usage is averaged
-+to see if it exceeds its allocated cpu bandwidth.
-+
-+Set to 5 (seconds) by default.
-+
-+==============================================================
-+
- l2cr: (PPC only)
- 
- This flag controls the L2 cache of G3 processor boards. If
-@@ -288,6 +326,19 @@
- 
- ==============================================================
- 
-+rr_interval:
-+
-+This is the smallest duration that any cpu process scheduling unit
-+will run for. Increasing this value can increase throughput of cpu
-+bound tasks substantially but at the expense of increased latencies
-+overall. This value is in milliseconds and the default value chosen
-+depends on the number of cpus available at scheduler initialisation
-+with a minimum of 8.
-+
-+Valid values are from 1-5000.
-+
-+==============================================================
-+
- rtsig-max & rtsig-nr:
- 
- The file rtsig-max can be used to tune the maximum number
-diff -urN linux-2.6.22.suspend2/Documentation/sysctl/vm.txt linux-2.6.22.ck/Documentation/sysctl/vm.txt
---- linux-2.6.22.suspend2/Documentation/sysctl/vm.txt	2007-07-09 01:32:17.000000000 +0200
-+++ linux-2.6.22.ck/Documentation/sysctl/vm.txt	2007-08-02 13:23:45.741826344 +0200
-@@ -22,6 +22,8 @@
+diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
+index 6f31f0a..e226cfe 100644
+--- a/Documentation/sysctl/vm.txt
++++ b/Documentation/sysctl/vm.txt
+@@ -22,6 +22,8 @@ Currently, these files are in /proc/sys/vm:
  - dirty_background_ratio
  - dirty_expire_centisecs
  - dirty_writeback_centisecs
@@ -480,10 +30,10 @@
  - max_map_count
  - min_free_kbytes
  - laptop_mode
-@@ -31,12 +33,15 @@
- - min_unmapped_ratio
- - min_slab_ratio
- - panic_on_oom
+@@ -36,13 +38,16 @@ Currently, these files are in /proc/sys/vm:
+ - numa_zonelist_order
+ - nr_hugepages
+ - nr_overcommit_hugepages
 +- swap_prefetch
 +- swap_prefetch_delay
 +- swap_prefetch_sleep
@@ -492,12 +42,13 @@
  
  dirty_ratio, dirty_background_ratio, dirty_expire_centisecs,
  dirty_writeback_centisecs, vfs_cache_pressure, laptop_mode,
--block_dump, swap_token_timeout, drop-caches:
-+block_dump, swap_token_timeout, drop-caches, tail_largefiles:
+ block_dump, swap_token_timeout, drop-caches,
+-hugepages_treat_as_movable:
++hugepages_treat_as_movable, tail_largefiles:
  
  See Documentation/filesystems/proc.txt
  
-@@ -86,6 +91,27 @@
+@@ -92,6 +97,27 @@ for swap because we only cluster swap data in 32-page groups.
  
  ==============================================================
  
@@ -525,10 +76,10 @@
  max_map_count:
  
  This file contains the maximum number of memory map areas a process
-@@ -216,3 +242,37 @@
- The default value is 0.
- 1 and 2 are for failover of clustering. Please select either
- according to your policy of failover.
+@@ -324,3 +350,37 @@ Change the maximum size of the hugepage pool. The maximum is
+ nr_hugepages + nr_overcommit_hugepages.
+ 
+ See Documentation/vm/hugetlbpage.txt
 +
 +==============================================================
 +
@@ -563,75 +114,182 @@
 +further.
 +
 +The default value is 5.
-diff -urN linux-2.6.22.suspend2/fs/buffer.c linux-2.6.22.ck/fs/buffer.c
---- linux-2.6.22.suspend2/fs/buffer.c	2007-07-09 01:32:17.000000000 +0200
-+++ linux-2.6.22.ck/fs/buffer.c	2007-08-02 13:23:45.793826110 +0200
-@@ -356,7 +356,7 @@
+diff --git a/Makefile b/Makefile
+index 189d8ef..7a93c73 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,8 +1,8 @@
+ VERSION = 2
+ PATCHLEVEL = 6
+ SUBLEVEL = 24
+-EXTRAVERSION =
+-NAME = Arr Matey! A Hairy Bilge Rat!
++EXTRAVERSION = -cks1
++NAME = Boing boing
+ 
+ # *DOCUMENTATION*
+ # To see a list of typical targets execute "make help"
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 80b7ba4..16406b0 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -755,7 +755,7 @@ endchoice
+ 
+ choice
+ 	depends on EXPERIMENTAL
+-	prompt "Memory split" if EMBEDDED
++	prompt "Memory split"
+ 	default VMSPLIT_3G
+ 	depends on X86_32
+ 	help
+@@ -775,17 +775,17 @@ choice
+ 	  option alone!
+ 
+ 	config VMSPLIT_3G
+-		bool "3G/1G user/kernel split"
++		bool "Default 896MB lowmem (3G/1G user/kernel split)"
+ 	config VMSPLIT_3G_OPT
+ 		depends on !X86_PAE
+-		bool "3G/1G user/kernel split (for full 1G low memory)"
++		bool "1GB lowmem (3G/1G user/kernel split)"
+ 	config VMSPLIT_2G
+-		bool "2G/2G user/kernel split"
++		bool "2GB lowmem (2G/2G user/kernel split)"
+ 	config VMSPLIT_2G_OPT
+ 		depends on !X86_PAE
+-		bool "2G/2G user/kernel split (for full 2G low memory)"
++		bool "2GB lowmem (2G/2G user/kernel split)"
+ 	config VMSPLIT_1G
+-		bool "1G/3G user/kernel split"
++		bool "3GB lowmem (1G/3G user/kernel split)"
+ endchoice
+ 
+ config PAGE_OFFSET
+diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
+index 54ee176..a82b82b 100644
+--- a/arch/x86/configs/i386_defconfig
++++ b/arch/x86/configs/i386_defconfig
+@@ -221,10 +221,10 @@ CONFIG_MTRR=y
<<Diff was trimmed, longer than 597 lines>>

---- CVS-web:
    http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/SOURCES/kernel-desktop-ck.patch?r1=1.13&r2=1.14&f=u



More information about the pld-cvs-commit mailing list