packages (GRSECURITY_RAW): kernel/kernel-grsec_full.patch http://www.grsecu...

arekm arekm at pld-linux.org
Tue Aug 11 10:52:22 CEST 2009


Author: arekm                        Date: Tue Aug 11 08:52:22 2009 GMT
Module: packages                      Tag: GRSECURITY_RAW
---- Log message:
http://www.grsecurity.net/~spender/grsecurity-2.1.14-2.6.30.4-200908090749.patch
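
Most of the uaccess.h churn in this update replaces the open-coded %ds
reload/restore sequences of the user-access helpers with _ASM_LOAD_USER_DS()
and _ASM_LOAD_KERNEL_DS macros, which expand to the segment loads on
CONFIG_X86_32 and to nothing on 64-bit. A minimal stand-alone sketch of that
segment-override access pattern follows; it is illustrative only (not code
from the patch), assumes a 32-bit x86 build with gcc inline asm, and
hard-codes the __USER_DS selector value instead of pulling in the kernel
headers. Exception-table fixups are omitted.

/* sketch_uderef.c -- illustrative only, not from kernel-grsec_full.patch */
#define __USER_DS 0x7b	/* x86_32 user data selector (GDT entry 15, RPL 3); assumed */

static inline unsigned long sketch_get_user_long(const unsigned long *uaddr)
{
	unsigned long val;

	asm volatile("movw %w2,%%ds\n\t"	/* point %ds at the user segment   */
		     "movl %%ds:%1,%0\n\t"	/* load through the user segment   */
		     "pushl %%ss\n\t"		/* kernel %ss selects kernel data, */
		     "popl %%ds"		/* so restore kernel %ds from it   */
		     : "=r" (val)
		     : "m" (*uaddr), "r" (__USER_DS));
	return val;
}

In the real patch this logic lives inside the __get_user_asm()/__put_user_asm()
macro families shown in the hunks below, together with .fixup sections and
_ASM_EXTABLE entries so faulting user accesses return -EFAULT instead of
oopsing.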

---- Files affected:
packages/kernel:
   kernel-grsec_full.patch (1.3.2.4 -> 1.3.2.5) 

---- Diffs:

================================================================
Index: packages/kernel/kernel-grsec_full.patch
diff -u packages/kernel/kernel-grsec_full.patch:1.3.2.4 packages/kernel/kernel-grsec_full.patch:1.3.2.5
--- packages/kernel/kernel-grsec_full.patch:1.3.2.4	Sun Aug  2 12:38:03 2009
+++ packages/kernel/kernel-grsec_full.patch	Tue Aug 11 10:52:15 2009
@@ -7086,7 +7086,7 @@
  #endif /* _ASM_X86_UACCESS_64_H */
 diff -urNp linux-2.6.30.4/arch/x86/include/asm/uaccess.h linux-2.6.30.4/arch/x86/include/asm/uaccess.h
 --- linux-2.6.30.4/arch/x86/include/asm/uaccess.h	2009-07-30 20:32:40.364705510 -0400
-+++ linux-2.6.30.4/arch/x86/include/asm/uaccess.h	2009-07-30 20:32:47.926577259 -0400
++++ linux-2.6.30.4/arch/x86/include/asm/uaccess.h	2009-08-09 07:48:47.926451868 -0400
 @@ -8,8 +8,10 @@
  #include <linux/thread_info.h>
  #include <linux/prefetch.h>
@@ -7111,22 +7111,32 @@
  
  #define segment_eq(a, b)	((a).seg == (b).seg)
  
-@@ -187,9 +194,12 @@ extern int __get_user_bad(void);
+@@ -183,13 +190,21 @@ extern int __get_user_bad(void);
+ 	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
+ 		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+ 
+-
++#ifdef CONFIG_X86_32
++#define _ASM_LOAD_USER_DS(ds) "movw %w" #ds ",%%ds\n"
++#define _ASM_LOAD_KERNEL_DS "pushl %%ss; popl %%ds\n"
++#else
++#define _ASM_LOAD_USER_DS(ds)
++#define _ASM_LOAD_KERNEL_DS
++#endif
  
  #ifdef CONFIG_X86_32
  #define __put_user_asm_u64(x, addr, err, errret)			\
 -	asm volatile("1:	movl %%eax,0(%2)\n"			\
 -		     "2:	movl %%edx,4(%2)\n"			\
-+	asm volatile("		movw %w5,%%ds\n"			\
++	asm volatile(_ASM_LOAD_USER_DS(5)				\
 +		     "1:	movl %%eax,%%ds:0(%2)\n"		\
 +		     "2:	movl %%edx,%%ds:4(%2)\n"		\
  		     "3:\n"						\
-+		     "		pushl %%ss\n"				\
-+		     "		popl %%ds\n"				\
++		     _ASM_LOAD_KERNEL_DS				\
  		     ".section .fixup,\"ax\"\n"				\
  		     "4:	movl %3,%0\n"				\
  		     "	jmp 3b\n"					\
-@@ -197,7 +207,8 @@ extern int __get_user_bad(void);
+@@ -197,15 +212,18 @@ extern int __get_user_bad(void);
  		     _ASM_EXTABLE(1b, 4b)				\
  		     _ASM_EXTABLE(2b, 4b)				\
  		     : "=r" (err)					\
@@ -7135,110 +7145,89 @@
 +		       "r"(__USER_DS))
  
  #define __put_user_asm_ex_u64(x, addr)					\
- 	asm volatile("1:	movl %%eax,0(%1)\n"			\
-@@ -373,6 +384,22 @@ do {									\
- 	}								\
+-	asm volatile("1:	movl %%eax,0(%1)\n"			\
+-		     "2:	movl %%edx,4(%1)\n"			\
++	asm volatile(_ASM_LOAD_USER_DS(2)				\
++		     "1:	movl %%eax,%%ds:0(%1)\n"		\
++		     "2:	movl %%edx,%%ds:4(%1)\n"		\
+ 		     "3:\n"						\
++		     _ASM_LOAD_KERNEL_DS				\
+ 		     _ASM_EXTABLE(1b, 2b - 1b)				\
+ 		     _ASM_EXTABLE(2b, 3b - 2b)				\
+-		     : : "A" (x), "r" (addr))
++		     : : "A" (x), "r" (addr), "r"(__USER_DS))
+ 
+ #define __put_user_x8(x, ptr, __ret_pu)				\
+ 	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
+@@ -374,16 +392,18 @@ do {									\
  } while (0)
  
-+#ifdef CONFIG_X86_32
-+#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
-+	asm volatile("		movw %w5,%%ds\n"			\
-+		     "1:	mov"itype" %%ds:%2,%"rtype"1\n"		\
-+		     "2:\n"						\
-+		     "		pushl %%ss\n"				\
-+		     "		popl %%ds\n"				\
-+		     ".section .fixup,\"ax\"\n"				\
-+		     "3:	movl %3,%0\n"				\
-+		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
-+		     "	jmp 2b\n"					\
-+		     ".previous\n"					\
-+		     _ASM_EXTABLE(1b, 3b)				\
-+		     : "=r" (err), ltype (x)				\
-+		     : "m" (__m(addr)), "i" (errret), "0" (err), "r"(__USER_DS))
-+#else
  #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
- 	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
+-	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
++	asm volatile(_ASM_LOAD_USER_DS(5)				\
++		     "1:	mov"itype" %%ds:%2,%"rtype"1\n"		\
  		     "2:\n"						\
-@@ -384,6 +411,7 @@ do {									\
++		     _ASM_LOAD_KERNEL_DS				\
+ 		     ".section .fixup,\"ax\"\n"				\
+ 		     "3:	mov %3,%0\n"				\
+ 		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
+ 		     "	jmp 2b\n"					\
+ 		     ".previous\n"					\
  		     _ASM_EXTABLE(1b, 3b)				\
- 		     : "=r" (err), ltype(x)				\
- 		     : "m" (__m(addr)), "i" (errret), "0" (err))
-+#endif
+-		     : "=r" (err), ltype(x)				\
+-		     : "m" (__m(addr)), "i" (errret), "0" (err))
++		     : "=r" (err), ltype (x)				\
++		     : "m" (__m(addr)), "i" (errret), "0" (err), "r"(__USER_DS))
  
  #define __get_user_size_ex(x, ptr, size)				\
  do {									\
-@@ -406,11 +434,22 @@ do {									\
- 	}								\
+@@ -407,10 +427,12 @@ do {									\
  } while (0)
  
-+#ifdef CONFIG_X86_32
-+#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
-+	asm volatile("		movw %w2,%%ds\n"			\
-+		     "1:	mov"itype" %%ds:%1,%"rtype"0\n"		\
-+		     "2:\n"						\
-+		     "		pushl %%ss\n"				\
-+		     "		popl %%ds\n"				\
-+		     _ASM_EXTABLE(1b, 2b - 1b)				\
-+		     : ltype(x) : "m" (__m(addr)), "r"(__USER_DS))
-+#else
  #define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
- 	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
+-	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
++	asm volatile(_ASM_LOAD_USER_DS(2)				\
++		     "1:	mov"itype" %%ds:%1,%"rtype"0\n"		\
  		     "2:\n"						\
++		     _ASM_LOAD_KERNEL_DS				\
  		     _ASM_EXTABLE(1b, 2b - 1b)				\
- 		     : ltype(x) : "m" (__m(addr)))
-+#endif
+-		     : ltype(x) : "m" (__m(addr)))
++		     : ltype(x) : "m" (__m(addr)), "r"(__USER_DS))
  
  #define __put_user_nocheck(x, ptr, size)			\
  ({								\
-@@ -437,6 +476,22 @@ struct __large_struct { unsigned long bu
-  * we do not write to any memory gcc knows about, so there are no
+@@ -438,21 +460,26 @@ struct __large_struct { unsigned long bu
   * aliasing issues.
   */
-+#ifdef CONFIG_X86_32
-+#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
-+	asm volatile("		movw %w5,%%ds\n"			\
-+		     "1:	mov"itype" %"rtype"1,%%ds:%2\n"		\
-+		     "2:\n"						\
-+		     "		pushl %%ss\n"				\
-+		     "		popl %%ds\n"				\
-+		     ".section .fixup,\"ax\"\n"				\
-+		     "3:	movl %3,%0\n"				\
-+		     "	jmp 2b\n"					\
-+		     ".previous\n"					\
-+		     _ASM_EXTABLE(1b, 3b)				\
-+		     : "=r"(err)					\
-+		     : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err),\
-+		       "r"(__USER_DS))
-+#else
  #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
- 	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
+-	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
++	asm volatile(_ASM_LOAD_USER_DS(5)				\
++		     "1:	mov"itype" %"rtype"1,%%ds:%2\n"		\
  		     "2:\n"						\
-@@ -447,12 +502,24 @@ struct __large_struct { unsigned long bu
++		     _ASM_LOAD_KERNEL_DS				\
+ 		     ".section .fixup,\"ax\"\n"				\
+ 		     "3:	mov %3,%0\n"				\
+ 		     "	jmp 2b\n"					\
+ 		     ".previous\n"					\
  		     _ASM_EXTABLE(1b, 3b)				\
  		     : "=r"(err)					\
- 		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
-+#endif
+-		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
++		     : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err),\
++		       "r"(__USER_DS))
  
-+#ifdef CONFIG_X86_32
-+#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
-+	asm volatile("		movw %w2,%%ds\n"			\
-+		     "1:	mov"itype" %"rtype"0,%%ds:%1\n"		\
-+		     "2:\n"						\
-+		     "		pushl %%ss\n"				\
-+		     "		popl %%ds\n"				\
-+		     _ASM_EXTABLE(1b, 2b - 1b)				\
-+		     : : ltype(x), "m" (__m(addr)), "r"(__USER_DS))
-+#else
  #define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
- 	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
+-	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
++	asm volatile(_ASM_LOAD_USER_DS(2)				\
++		     "1:	mov"itype" %"rtype"0,%%ds:%1\n"		\
  		     "2:\n"						\
++		     _ASM_LOAD_KERNEL_DS				\
  		     _ASM_EXTABLE(1b, 2b - 1b)				\
- 		     : : ltype(x), "m" (__m(addr)))
-+#endif
+-		     : : ltype(x), "m" (__m(addr)))
++		     : : ltype(x), "m" (__m(addr)), "r"(__USER_DS))
  
  /*
   * uaccess_try and catch
-@@ -567,6 +634,7 @@ extern struct movsl_mask {
+@@ -567,6 +594,7 @@ extern struct movsl_mask {
  
  #define ARCH_HAS_NOCACHE_UACCESS 1
  
@@ -7292,7 +7281,7 @@
  #endif /* _ASM_X86_VSYSCALL_H */
 diff -urNp linux-2.6.30.4/arch/x86/Kconfig linux-2.6.30.4/arch/x86/Kconfig
 --- linux-2.6.30.4/arch/x86/Kconfig	2009-07-24 17:47:51.000000000 -0400
-+++ linux-2.6.30.4/arch/x86/Kconfig	2009-07-30 12:32:41.330879042 -0400
++++ linux-2.6.30.4/arch/x86/Kconfig	2009-08-04 17:52:34.387861424 -0400
 @@ -345,6 +345,7 @@ config X86_VSMP
  	select PARAVIRT
  	depends on X86_64 && PCI
@@ -7351,17 +7340,28 @@
  	---help---
  	  This enables the kernel to use EFI runtime services that are
  	  available (such as the EFI variable services).
-@@ -1467,8 +1472,7 @@ config KEXEC_JUMP
+@@ -1466,9 +1471,7 @@ config KEXEC_JUMP
+ 
  config PHYSICAL_START
  	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
- 	default "0x1000000" if X86_NUMAQ
+-	default "0x1000000" if X86_NUMAQ
 -	default "0x200000" if X86_64
 -	default "0x100000"
-+	default "0x200000"
++	default "0x1000000"
  	---help---
  	  This gives the physical address where the kernel is loaded.
  
-@@ -1560,9 +1564,10 @@ config HOTPLUG_CPU
+@@ -1527,8 +1530,7 @@ config RELOCATABLE
+ config PHYSICAL_ALIGN
+ 	hex
+ 	prompt "Alignment value to which kernel should be aligned" if X86_32
+-	default "0x100000" if X86_32
+-	default "0x200000" if X86_64
++	default "0x200000"
+ 	range 0x2000 0x400000
+ 	---help---
+ 	  This value puts the alignment restrictions on physical address
+@@ -1560,9 +1562,10 @@ config HOTPLUG_CPU
  	  Say N if you want to disable CPU hotplug.
  
  config COMPAT_VDSO
@@ -8641,7 +8641,7 @@
  	/* Reserve INITRD */
 diff -urNp linux-2.6.30.4/arch/x86/kernel/head_32.S linux-2.6.30.4/arch/x86/kernel/head_32.S
 --- linux-2.6.30.4/arch/x86/kernel/head_32.S	2009-07-24 17:47:51.000000000 -0400
-+++ linux-2.6.30.4/arch/x86/kernel/head_32.S	2009-07-30 19:56:23.400350396 -0400
++++ linux-2.6.30.4/arch/x86/kernel/head_32.S	2009-08-05 19:08:00.458589400 -0400
 @@ -20,6 +20,7 @@
  #include <asm/setup.h>
  #include <asm/processor-flags.h>
@@ -8692,10 +8692,11 @@
  ENTRY(startup_32)
  	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
  		us to not reload segments */
-@@ -98,6 +110,56 @@ ENTRY(startup_32)
+@@ -98,6 +110,58 @@ ENTRY(startup_32)
  	movl %eax,%gs
  2:
  
++#ifdef CONFIG_SMP
 +	movl $pa(cpu_gdt_table),%edi
 +	movl $__per_cpu_load,%eax
 +	movw %ax,__KERNEL_PERCPU + 2(%edi)
@@ -8705,6 +8706,7 @@
 +	movl $__per_cpu_end - 1,%eax
 +	subl $__per_cpu_load,%eax
 +	movw %ax,__KERNEL_PERCPU + 0(%edi)
++#endif
 +
 +#ifdef CONFIG_PAX_MEMORY_UDEREF
 +	/* check for VMware */
@@ -8749,7 +8751,7 @@
  /*
   * Clear BSS first so that there are no surprises...
   */
-@@ -141,9 +203,7 @@ ENTRY(startup_32)
+@@ -141,9 +205,7 @@ ENTRY(startup_32)
  	cmpl $num_subarch_entries, %eax
  	jae bad_subarch
  
@@ -8760,7 +8762,7 @@
  
  bad_subarch:
  WEAK(lguest_entry)
-@@ -155,9 +215,9 @@ WEAK(xen_entry)
+@@ -155,9 +217,9 @@ WEAK(xen_entry)
  	__INITDATA
  
  subarch_entries:
@@ -8773,7 +8775,7 @@
  num_subarch_entries = (. - subarch_entries) / 4
  .previous
  #endif /* CONFIG_PARAVIRT */
-@@ -218,8 +278,14 @@ default_entry:
+@@ -218,8 +280,14 @@ default_entry:
  	movl %eax, pa(max_pfn_mapped)
  
  	/* Do early initialization of the fixmap area */
@@ -8790,7 +8792,7 @@
  #else	/* Not PAE */
  
  page_pde_offset = (__PAGE_OFFSET >> 20);
-@@ -249,8 +315,14 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+@@ -249,8 +317,14 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
  	movl %eax, pa(max_pfn_mapped)
  
  	/* Do early initialization of the fixmap area */
@@ -8807,7 +8809,7 @@
  #endif
  	jmp 3f
  /*
-@@ -314,13 +386,16 @@ ENTRY(startup_32_smp)
+@@ -314,13 +388,16 @@ ENTRY(startup_32_smp)
  	jnc 6f
  
  	/* Setup EFER (Extended Feature Enable Register) */
@@ -8825,7 +8827,7 @@
  6:
  
  /*
-@@ -346,9 +421,7 @@ ENTRY(startup_32_smp)
+@@ -346,9 +423,7 @@ ENTRY(startup_32_smp)
  
  #ifdef CONFIG_SMP
  	cmpb $0, ready
@@ -8836,7 +8838,7 @@
  #endif /* CONFIG_SMP */
  
  /*
-@@ -426,7 +499,7 @@ is386:	movl $2,%ecx		# set MP
+@@ -426,7 +501,7 @@ is386:	movl $2,%ecx		# set MP
  1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
  	movl %eax,%ss			# after changing gdt.
  
@@ -8845,18 +8847,20 @@
  	movl %eax,%ds
  	movl %eax,%es
  
-@@ -440,8 +513,9 @@ is386:	movl $2,%ecx		# set MP
+@@ -440,8 +515,11 @@ is386:	movl $2,%ecx		# set MP
  	 */
  	cmpb $0,ready
  	jne 1f
 -	movl $per_cpu__gdt_page,%eax
 +	movl $cpu_gdt_table,%eax
  	movl $per_cpu__stack_canary,%ecx
++#ifdef CONFIG_SMP
 +	addl $__per_cpu_load,%ecx
++#endif
  	subl $20, %ecx
  	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
  	shrl $16, %ecx
-@@ -460,10 +534,6 @@ is386:	movl $2,%ecx		# set MP
+@@ -460,10 +538,6 @@ is386:	movl $2,%ecx		# set MP
  #ifdef CONFIG_SMP
  	movb ready, %cl
  	movb $1, ready
@@ -8867,7 +8871,7 @@
  #endif /* CONFIG_SMP */
  	jmp *(initial_code)
  
-@@ -549,22 +619,22 @@ early_page_fault:
+@@ -549,22 +623,22 @@ early_page_fault:
  	jmp early_fault
  
  early_fault:
@@ -8895,7 +8899,7 @@
  hlt_loop:
  	hlt
  	jmp hlt_loop
-@@ -572,8 +642,11 @@ hlt_loop:
+@@ -572,8 +646,11 @@ hlt_loop:
  /* This is the default interrupt "handler" :-) */
  	ALIGN
  ignore_int:
@@ -8908,7 +8912,7 @@
  	pushl %eax
  	pushl %ecx
  	pushl %edx
-@@ -582,9 +655,6 @@ ignore_int:
+@@ -582,9 +659,6 @@ ignore_int:
  	movl $(__KERNEL_DS),%eax
  	movl %eax,%ds
  	movl %eax,%es
@@ -8918,7 +8922,7 @@
  	pushl 16(%esp)
  	pushl 24(%esp)
  	pushl 32(%esp)
-@@ -608,37 +678,49 @@ ignore_int:
+@@ -608,37 +682,49 @@ ignore_int:
  ENTRY(initial_code)
  	.long i386_start_kernel
  
@@ -8949,13 +8953,13 @@
 +	.fill 1024,4,0
 +
 +ENTRY(swapper_pg_fixmap1)
-+	.fill 1024,4,0
+ 	.fill 1024,4,0
 +
 +ENTRY(swapper_pg_fixmap2)
 +	.fill 1024,4,0
 +
 +ENTRY(swapper_pg_fixmap3)
- 	.fill 1024,4,0
++	.fill 1024,4,0
 +
+.section .empty_zero_page,"a",@progbits
  ENTRY(empty_zero_page)
@@ -8981,7 +8985,7 @@
  ENTRY(swapper_pg_dir)
  	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
  # if KPMDS == 3
-@@ -661,11 +743,12 @@ ENTRY(swapper_pg_dir)
+@@ -661,11 +747,12 @@ ENTRY(swapper_pg_dir)
  
  .data
  ENTRY(stack_start)
@@ -8995,7 +8999,7 @@
  early_recursion_flag:
  	.long 0
  
-@@ -701,7 +784,7 @@ fault_msg:
+@@ -701,7 +788,7 @@ fault_msg:
  	.word 0				# 32 bit align gdt_desc.address
  boot_gdt_descr:
  	.word __BOOT_DS+7
@@ -9004,7 +9008,7 @@
  
  	.word 0				# 32-bit align idt_desc.address
  idt_descr:
-@@ -712,7 +795,7 @@ idt_descr:
+@@ -712,7 +799,7 @@ idt_descr:
  	.word 0				# 32 bit align gdt_desc.address
  ENTRY(early_gdt_descr)
  	.word GDT_ENTRIES*8-1
@@ -9013,7 +9017,7 @@
  
  /*
   * The boot_gdt must mirror the equivalent in setup.S and is
-@@ -721,5 +804,59 @@ ENTRY(early_gdt_descr)
+@@ -721,5 +808,59 @@ ENTRY(early_gdt_descr)
  	.align L1_CACHE_BYTES
  ENTRY(boot_gdt)
  	.fill GDT_ENTRY_BOOT_CS,8,0
@@ -10154,7 +10158,7 @@
 -}
 diff -urNp linux-2.6.30.4/arch/x86/kernel/process.c linux-2.6.30.4/arch/x86/kernel/process.c
 --- linux-2.6.30.4/arch/x86/kernel/process.c	2009-07-24 17:47:51.000000000 -0400
-+++ linux-2.6.30.4/arch/x86/kernel/process.c	2009-07-30 09:48:09.950702241 -0400
++++ linux-2.6.30.4/arch/x86/kernel/process.c	2009-08-05 19:08:00.495411211 -0400
 @@ -71,7 +71,7 @@ void exit_thread(void)
  	unsigned long *bp = t->io_bitmap_ptr;
  
@@ -10168,7 +10172,7 @@
  
  	clear_tsk_thread_flag(tsk, TIF_DEBUG);
  
-+#ifndef CONFIG_CC_STACKPROTECTOR
++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR)
 +	loadsegment(gs, 0);
 +#endif
  	tsk->thread.debugreg0 = 0;
@@ -10290,7 +10294,7 @@
  	bss_resource.end = virt_to_phys(&__bss_stop)-1;
 diff -urNp linux-2.6.30.4/arch/x86/kernel/setup_percpu.c linux-2.6.30.4/arch/x86/kernel/setup_percpu.c
 --- linux-2.6.30.4/arch/x86/kernel/setup_percpu.c	2009-07-24 17:47:51.000000000 -0400
-+++ linux-2.6.30.4/arch/x86/kernel/setup_percpu.c	2009-07-30 09:48:09.957530438 -0400
++++ linux-2.6.30.4/arch/x86/kernel/setup_percpu.c	2009-08-05 19:08:00.518752374 -0400
 @@ -25,19 +25,17 @@
  # define DBG(x...)
  #endif
@@ -10314,21 +10318,21 @@
  	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
  };
  EXPORT_SYMBOL(__per_cpu_offset);
-@@ -336,13 +334,16 @@ out_free_ar:
+@@ -336,13 +334,15 @@ out_free_ar:
  static inline void setup_percpu_segment(int cpu)
  {
  #ifdef CONFIG_X86_32
 -	struct desc_struct gdt;
-+	struct desc_struct d, *gdt = get_cpu_gdt_table(cpu);
-+	unsigned long base, limit;
- 
+-
 -	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
 -			0x2 | DESCTYPE_S, 0x8);
 -	gdt.s = 1;
 -	write_gdt_entry(get_cpu_gdt_table(cpu),
 -			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
-+	base = per_cpu_offset(cpu);
-+	limit = PERCPU_ENOUGH_ROOM - 1;
++	struct desc_struct d, *gdt = get_cpu_gdt_table(cpu);
++	unsigned long base = per_cpu_offset(cpu);
++	const unsigned long limit = VMALLOC_END - base - 1;
++
 +	if (limit < 64*1024)
 +		pack_descriptor(&d, base, limit, 0x80 | DESCTYPE_S | 0x3, 0x4);
 +	else
@@ -10337,7 +10341,7 @@
  #endif
  }
  
-@@ -381,6 +382,11 @@ void __init setup_per_cpu_areas(void)
+@@ -381,6 +381,11 @@ void __init setup_per_cpu_areas(void)
  	/* alrighty, percpu areas up and running */
  	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
  	for_each_possible_cpu(cpu) {
@@ -10349,7 +10353,7 @@
  		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
  		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
  		per_cpu(cpu_number, cpu) = cpu;
-@@ -408,6 +414,12 @@ void __init setup_per_cpu_areas(void)
+@@ -408,6 +413,12 @@ void __init setup_per_cpu_areas(void)
  			early_per_cpu_map(x86_cpu_to_node_map, cpu);
  #endif
  #endif
@@ -11878,6 +11882,17 @@
  
  	if (kvm_x86_ops) {
  		printk(KERN_ERR "kvm: already loaded the other module\n");
+diff -urNp linux-2.6.30.4/arch/x86/lguest/Kconfig linux-2.6.30.4/arch/x86/lguest/Kconfig
+--- linux-2.6.30.4/arch/x86/lguest/Kconfig	2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.4/arch/x86/lguest/Kconfig	2009-08-02 09:47:36.165378342 -0400
+@@ -3,6 +3,7 @@ config LGUEST_GUEST
+ 	select PARAVIRT
+ 	depends on X86_32
+ 	depends on !X86_PAE
++	depends on !PAX_KERNEXEC
+ 	select VIRTIO
+ 	select VIRTIO_RING
+ 	select VIRTIO_CONSOLE
 diff -urNp linux-2.6.30.4/arch/x86/lib/checksum_32.S linux-2.6.30.4/arch/x86/lib/checksum_32.S
 --- linux-2.6.30.4/arch/x86/lib/checksum_32.S	2009-07-24 17:47:51.000000000 -0400
 +++ linux-2.6.30.4/arch/x86/lib/checksum_32.S	2009-07-30 09:48:09.967600435 -0400
@@ -13788,7 +13803,7 @@
  		pnp_bios_is_utter_crap = 1;
 diff -urNp linux-2.6.30.4/arch/x86/mm/fault.c linux-2.6.30.4/arch/x86/mm/fault.c
 --- linux-2.6.30.4/arch/x86/mm/fault.c	2009-07-24 17:47:51.000000000 -0400
-+++ linux-2.6.30.4/arch/x86/mm/fault.c	2009-07-30 11:10:48.941676108 -0400
++++ linux-2.6.30.4/arch/x86/mm/fault.c	2009-08-05 19:15:53.629625442 -0400
 @@ -27,6 +27,8 @@
  #include <linux/tty.h>
  #include <linux/smp.h>
@@ -13798,7 +13813,15 @@
  
  #include <asm-generic/sections.h>
  
-@@ -73,7 +75,7 @@ static inline int notify_page_fault(stru
+@@ -37,6 +39,7 @@
+ #include <asm/proto.h>
+ #include <asm/traps.h>
+ #include <asm/desc.h>
++#include <asm/vsyscall.h>
+ 
+ /*
+  * Page fault error code bits:
+@@ -73,7 +76,7 @@ static inline int notify_page_fault(stru
  	int ret = 0;
  
  	/* kprobe_running() needs smp_processor_id() */
@@ -13807,7 +13830,7 @@
  		preempt_disable();
  		if (kprobe_running() && kprobe_fault_handler(regs, 14))
  			ret = 1;
-@@ -193,6 +195,30 @@ force_sig_info_fault(int si_signo, int s
+@@ -193,6 +196,30 @@ force_sig_info_fault(int si_signo, int s
  	force_sig_info(si_signo, &info, tsk);
  }
  
@@ -13838,7 +13861,7 @@
  DEFINE_SPINLOCK(pgd_lock);
  LIST_HEAD(pgd_list);
  
-@@ -571,7 +597,7 @@ static int is_errata93(struct pt_regs *r
+@@ -571,7 +598,7 @@ static int is_errata93(struct pt_regs *r
  static int is_errata100(struct pt_regs *regs, unsigned long address)
  {
<<Diff was trimmed, longer than 597 lines>>

---- CVS-web:
    http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel/kernel-grsec_full.patch?r1=1.3.2.4&r2=1.3.2.5&f=u


