1 diff -urNp linux-3.0.4/arch/alpha/include/asm/elf.h linux-3.0.4/arch/alpha/include/asm/elf.h
2 --- linux-3.0.4/arch/alpha/include/asm/elf.h    2011-07-21 22:17:23.000000000 -0400
3 +++ linux-3.0.4/arch/alpha/include/asm/elf.h    2011-08-23 21:47:55.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5  
6  #define ELF_ET_DYN_BASE                (TASK_UNMAPPED_BASE + 0x1000000)
7  
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE    (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN     (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN    (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15  /* $0 is set by ld.so to a pointer to a function which might be 
16     registered using atexit.  This provides a mean for the dynamic
17     linker to call DT_FINI functions for shared libraries that have
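
The PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN values introduced above are bit counts: later parts of this patch have the ELF loader derive the randomization delta roughly as (random & ((1UL << LEN) - 1)) << PAGE_SHIFT. The standalone sketch below is not part of the patch; it only computes the address-space span those bit counts imply on Alpha, assuming its 8 KiB pages (PAGE_SHIFT = 13). The names used here are made up for the illustration.

/* Illustrative sketch only -- not patch content.  Computes the span of
 * possible randomized bases implied by a PAX_DELTA_*_LEN bit count, under
 * the assumption delta = (random & ((1UL << len) - 1)) << PAGE_SHIFT. */
#include <stdio.h>

#define ALPHA_PAGE_SHIFT 13UL   /* Alpha uses 8 KiB pages */

static unsigned long span(unsigned long len_bits)
{
        return 1UL << (len_bits + ALPHA_PAGE_SHIFT);
}

int main(void)
{
        printf("64-bit mmap  span (28 bits): 0x%lx bytes\n", span(28));
        printf("32-bit mmap  span (14 bits): 0x%lx bytes\n", span(14));
        printf("64-bit stack span (19 bits): 0x%lx bytes\n", span(19));
        return 0;
}
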
18 diff -urNp linux-3.0.4/arch/alpha/include/asm/pgtable.h linux-3.0.4/arch/alpha/include/asm/pgtable.h
19 --- linux-3.0.4/arch/alpha/include/asm/pgtable.h        2011-07-21 22:17:23.000000000 -0400
20 +++ linux-3.0.4/arch/alpha/include/asm/pgtable.h        2011-08-23 21:47:55.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22  #define PAGE_SHARED    __pgprot(_PAGE_VALID | __ACCESS_BITS)
23  #define PAGE_COPY      __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24  #define PAGE_READONLY  __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC    __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC      __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC  __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC    PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC      PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC  PAGE_READONLY
34 +#endif
35 +
36  #define PAGE_KERNEL    __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37  
38  #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-3.0.4/arch/alpha/kernel/module.c linux-3.0.4/arch/alpha/kernel/module.c
40 --- linux-3.0.4/arch/alpha/kernel/module.c      2011-07-21 22:17:23.000000000 -0400
41 +++ linux-3.0.4/arch/alpha/kernel/module.c      2011-08-23 21:47:55.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, 
43  
44         /* The small sections were sorted to the end of the segment.
45            The following should definitely cover them.  */
46 -       gp = (u64)me->module_core + me->core_size - 0x8000;
47 +       gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48         got = sechdrs[me->arch.gotsecindex].sh_addr;
49  
50         for (i = 0; i < n; i++) {
51 diff -urNp linux-3.0.4/arch/alpha/kernel/osf_sys.c linux-3.0.4/arch/alpha/kernel/osf_sys.c
52 --- linux-3.0.4/arch/alpha/kernel/osf_sys.c     2011-07-21 22:17:23.000000000 -0400
53 +++ linux-3.0.4/arch/alpha/kernel/osf_sys.c     2011-08-23 21:47:55.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55                 /* At this point:  (!vma || addr < vma->vm_end). */
56                 if (limit - len < addr)
57                         return -ENOMEM;
58 -               if (!vma || addr + len <= vma->vm_start)
59 +               if (check_heap_stack_gap(vma, addr, len))
60                         return addr;
61                 addr = vma->vm_end;
62                 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64            merely specific addresses, but regions of memory -- perhaps
65            this feature should be incorporated into all ports?  */
66  
67 +#ifdef CONFIG_PAX_RANDMMAP
68 +       if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71         if (addr) {
72                 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73                 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75         }
76  
77         /* Next, try allocating at TASK_UNMAPPED_BASE.  */
78 -       addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 -                                        len, limit);
80 +       addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82         if (addr != (unsigned long) -ENOMEM)
83                 return addr;
84  
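
From here on the patch systematically replaces the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(). That helper is defined in a later part of the full patch; the sketch below is only an approximation of its intent, under the assumption that it additionally enforces a configurable guard gap below downward-growing stack VMAs. The struct, the _sketch suffix, and the gap value are made up for this illustration.

/* Approximation only -- NOT the patch's definition of check_heap_stack_gap().
 * Intent: the candidate range [addr, addr + len) must fit below the next vma
 * and, if that vma is a downward-growing stack, must also leave a guard gap. */
#include <stdbool.h>

#define VM_GROWSDOWN 0x00000100UL

struct vma_sketch {                     /* stand-in for struct vm_area_struct */
        unsigned long vm_start;
        unsigned long vm_flags;
};

static unsigned long heap_stack_gap = 64UL * 8192;   /* assumed: 64 pages */

static bool check_heap_stack_gap_sketch(const struct vma_sketch *vma,
                                        unsigned long addr, unsigned long len)
{
        if (!vma)
                return true;                    /* nothing mapped above the candidate */
        if (addr + len > vma->vm_start)
                return false;                   /* would overlap the next mapping */
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return true;                    /* not a stack: a plain fit suffices */
        return addr + len + heap_stack_gap <= vma->vm_start;
}
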
85 diff -urNp linux-3.0.4/arch/alpha/mm/fault.c linux-3.0.4/arch/alpha/mm/fault.c
86 --- linux-3.0.4/arch/alpha/mm/fault.c   2011-07-21 22:17:23.000000000 -0400
87 +++ linux-3.0.4/arch/alpha/mm/fault.c   2011-08-23 21:47:55.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89         __reload_thread(pcb);
90  }
91  
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + *         2 when patched PLT trampoline was detected
98 + *         3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 +       int err;
105 +
106 +       do { /* PaX: patched PLT emulation #1 */
107 +               unsigned int ldah, ldq, jmp;
108 +
109 +               err = get_user(ldah, (unsigned int *)regs->pc);
110 +               err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 +               err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 +               if (err)
114 +                       break;
115 +
116 +               if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 +                   (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 +                   jmp == 0x6BFB0000U)
119 +               {
120 +                       unsigned long r27, addr;
121 +                       unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 +                       unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 +                       addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 +                       err = get_user(r27, (unsigned long *)addr);
126 +                       if (err)
127 +                               break;
128 +
129 +                       regs->r27 = r27;
130 +                       regs->pc = r27;
131 +                       return 2;
132 +               }
133 +       } while (0);
134 +
135 +       do { /* PaX: patched PLT emulation #2 */
136 +               unsigned int ldah, lda, br;
137 +
138 +               err = get_user(ldah, (unsigned int *)regs->pc);
139 +               err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 +               err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 +               if (err)
143 +                       break;
144 +
145 +               if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 +                   (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 +                   (br & 0xFFE00000U) == 0xC3E00000U)
148 +               {
149 +                       unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 +                       unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 +                       unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 +                       regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 +                       regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 +                       return 2;
156 +               }
157 +       } while (0);
158 +
159 +       do { /* PaX: unpatched PLT emulation */
160 +               unsigned int br;
161 +
162 +               err = get_user(br, (unsigned int *)regs->pc);
163 +
164 +               if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 +                       unsigned int br2, ldq, nop, jmp;
166 +                       unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 +                       addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 +                       err = get_user(br2, (unsigned int *)addr);
170 +                       err |= get_user(ldq, (unsigned int *)(addr+4));
171 +                       err |= get_user(nop, (unsigned int *)(addr+8));
172 +                       err |= get_user(jmp, (unsigned int *)(addr+12));
173 +                       err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 +                       if (err)
176 +                               break;
177 +
178 +                       if (br2 == 0xC3600000U &&
179 +                           ldq == 0xA77B000CU &&
180 +                           nop == 0x47FF041FU &&
181 +                           jmp == 0x6B7B0000U)
182 +                       {
183 +                               regs->r28 = regs->pc+4;
184 +                               regs->r27 = addr+16;
185 +                               regs->pc = resolver;
186 +                               return 3;
187 +                       }
188 +               }
189 +       } while (0);
190 +#endif
191 +
192 +       return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 +       unsigned long i;
198 +
199 +       printk(KERN_ERR "PAX: bytes at PC: ");
200 +       for (i = 0; i < 5; i++) {
201 +               unsigned int c;
202 +               if (get_user(c, (unsigned int *)pc+i))
203 +                       printk(KERN_CONT "???????? ");
204 +               else
205 +                       printk(KERN_CONT "%08x ", c);
206 +       }
207 +       printk("\n");
208 +}
209 +#endif
210  
211  /*
212   * This routine handles page faults.  It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214   good_area:
215         si_code = SEGV_ACCERR;
216         if (cause < 0) {
217 -               if (!(vma->vm_flags & VM_EXEC))
218 +               if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 +                       if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 +                               goto bad_area;
223 +
224 +                       up_read(&mm->mmap_sem);
225 +                       switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 +                       case 2:
229 +                       case 3:
230 +                               return;
231 +#endif
232 +
233 +                       }
234 +                       pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 +                       do_group_exit(SIGKILL);
236 +#else
237                         goto bad_area;
238 +#endif
239 +
240 +               }
241         } else if (!cause) {
242                 /* Allow reads even for write-only mappings */
243                 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-3.0.4/arch/arm/include/asm/elf.h linux-3.0.4/arch/arm/include/asm/elf.h
245 --- linux-3.0.4/arch/arm/include/asm/elf.h      2011-07-21 22:17:23.000000000 -0400
246 +++ linux-3.0.4/arch/arm/include/asm/elf.h      2011-08-23 21:47:55.000000000 -0400
247 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248     the loader.  We need to make sure that it is out of the way of the program
249     that it will "exec", and that there is sufficient room for the brk.  */
250  
251 -#define ELF_ET_DYN_BASE        (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE                (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE    0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN     ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN    ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260  
261  /* When the program starts, a1 contains a pointer to a function to be 
262     registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
263 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264  extern void elf_set_personality(const struct elf32_hdr *);
265  #define SET_PERSONALITY(ex)    elf_set_personality(&(ex))
266  
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271  extern int vectors_user_mapping(void);
272  #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273  #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-3.0.4/arch/arm/include/asm/kmap_types.h linux-3.0.4/arch/arm/include/asm/kmap_types.h
275 --- linux-3.0.4/arch/arm/include/asm/kmap_types.h       2011-07-21 22:17:23.000000000 -0400
276 +++ linux-3.0.4/arch/arm/include/asm/kmap_types.h       2011-08-23 21:47:55.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278         KM_L1_CACHE,
279         KM_L2_CACHE,
280         KM_KDB,
281 +       KM_CLEARPAGE,
282         KM_TYPE_NR
283  };
284  
285 diff -urNp linux-3.0.4/arch/arm/include/asm/uaccess.h linux-3.0.4/arch/arm/include/asm/uaccess.h
286 --- linux-3.0.4/arch/arm/include/asm/uaccess.h  2011-07-21 22:17:23.000000000 -0400
287 +++ linux-3.0.4/arch/arm/include/asm/uaccess.h  2011-08-23 21:47:55.000000000 -0400
288 @@ -22,6 +22,8 @@
289  #define VERIFY_READ 0
290  #define VERIFY_WRITE 1
291  
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294  /*
295   * The exception table consists of pairs of addresses: the first is the
296   * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do {                                                                       \
298  
299  
300  #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 +       if (!__builtin_constant_p(n))
309 +               check_object_size(to, n, false);
310 +       return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 +       if (!__builtin_constant_p(n))
316 +               check_object_size(from, n, true);
317 +       return ___copy_to_user(to, from, n);
318 +}
319 +
320  extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321  extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322  extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324  
325  static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326  {
327 +       if ((long)n < 0)
328 +               return n;
329 +
330         if (access_ok(VERIFY_READ, from, n))
331                 n = __copy_from_user(to, from, n);
332         else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334  
335  static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336  {
337 +       if ((long)n < 0)
338 +               return n;
339 +
340         if (access_ok(VERIFY_WRITE, to, n))
341                 n = __copy_to_user(to, from, n);
342         return n;
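
The (long)n < 0 guards added to copy_from_user()/copy_to_user() above reject any size whose sign bit is set before it reaches access_ok() or the low-level copy routines. A length that went negative through integer arithmetic in a caller becomes an enormous unsigned count once it lands in "unsigned long n"; the standalone program below (illustrative only, not patch content) demonstrates that wrap-around.

/* Illustrative only: how a signed length bug turns into a huge unsigned
 * copy size, which is what the added (long)n < 0 check catches early. */
#include <stdio.h>

int main(void)
{
        int user_len = 16;
        int header_len = 32;
        int payload_len = user_len - header_len;   /* -16: classic length underflow */

        unsigned long n = payload_len;             /* implicit conversion wraps */
        printf("n = %lu (0x%lx)\n", n, n);         /* 2^BITS_PER_LONG - 16 */
        printf("refuse copy? %s\n", (long)n < 0 ? "yes" : "no");
        return 0;
}
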
343 diff -urNp linux-3.0.4/arch/arm/kernel/armksyms.c linux-3.0.4/arch/arm/kernel/armksyms.c
344 --- linux-3.0.4/arch/arm/kernel/armksyms.c      2011-07-21 22:17:23.000000000 -0400
345 +++ linux-3.0.4/arch/arm/kernel/armksyms.c      2011-08-23 21:47:55.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347  #ifdef CONFIG_MMU
348  EXPORT_SYMBOL(copy_page);
349  
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354  EXPORT_SYMBOL(__clear_user);
355  
356  EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-3.0.4/arch/arm/kernel/process.c linux-3.0.4/arch/arm/kernel/process.c
358 --- linux-3.0.4/arch/arm/kernel/process.c       2011-07-21 22:17:23.000000000 -0400
359 +++ linux-3.0.4/arch/arm/kernel/process.c       2011-08-23 21:47:55.000000000 -0400
360 @@ -28,7 +28,6 @@
361  #include <linux/tick.h>
362  #include <linux/utsname.h>
363  #include <linux/uaccess.h>
364 -#include <linux/random.h>
365  #include <linux/hw_breakpoint.h>
366  
367  #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369         return 0;
370  }
371  
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 -       unsigned long range_end = mm->brk + 0x02000000;
375 -       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378  #ifdef CONFIG_MMU
379  /*
380   * The vectors page is always readable from user space for the
381 diff -urNp linux-3.0.4/arch/arm/kernel/traps.c linux-3.0.4/arch/arm/kernel/traps.c
382 --- linux-3.0.4/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383 +++ linux-3.0.4/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384 @@ -257,6 +257,8 @@ static int __die(const char *str, int er
385  
386  static DEFINE_SPINLOCK(die_lock);
387  
388 +extern void gr_handle_kernel_exploit(void);
389 +
390  /*
391   * This function is protected against re-entrancy.
392   */
393 @@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394                 panic("Fatal exception in interrupt");
395         if (panic_on_oops)
396                 panic("Fatal exception");
397 +
398 +       gr_handle_kernel_exploit();
399 +
400         if (ret != NOTIFY_STOP)
401                 do_exit(SIGSEGV);
402  }
403 diff -urNp linux-3.0.4/arch/arm/lib/copy_from_user.S linux-3.0.4/arch/arm/lib/copy_from_user.S
404 --- linux-3.0.4/arch/arm/lib/copy_from_user.S   2011-07-21 22:17:23.000000000 -0400
405 +++ linux-3.0.4/arch/arm/lib/copy_from_user.S   2011-08-23 21:47:55.000000000 -0400
406 @@ -16,7 +16,7 @@
407  /*
408   * Prototype:
409   *
410 - *     size_t __copy_from_user(void *to, const void *from, size_t n)
411 + *     size_t ___copy_from_user(void *to, const void *from, size_t n)
412   *
413   * Purpose:
414   *
415 @@ -84,11 +84,11 @@
416  
417         .text
418  
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421  
422  #include "copy_template.S"
423  
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426  
427         .pushsection .fixup,"ax"
428         .align 0
429 diff -urNp linux-3.0.4/arch/arm/lib/copy_to_user.S linux-3.0.4/arch/arm/lib/copy_to_user.S
430 --- linux-3.0.4/arch/arm/lib/copy_to_user.S     2011-07-21 22:17:23.000000000 -0400
431 +++ linux-3.0.4/arch/arm/lib/copy_to_user.S     2011-08-23 21:47:55.000000000 -0400
432 @@ -16,7 +16,7 @@
433  /*
434   * Prototype:
435   *
436 - *     size_t __copy_to_user(void *to, const void *from, size_t n)
437 + *     size_t ___copy_to_user(void *to, const void *from, size_t n)
438   *
439   * Purpose:
440   *
441 @@ -88,11 +88,11 @@
442         .text
443  
444  ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447  
448  #include "copy_template.S"
449  
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452  ENDPROC(__copy_to_user_std)
453  
454         .pushsection .fixup,"ax"
455 diff -urNp linux-3.0.4/arch/arm/lib/uaccess.S linux-3.0.4/arch/arm/lib/uaccess.S
456 --- linux-3.0.4/arch/arm/lib/uaccess.S  2011-07-21 22:17:23.000000000 -0400
457 +++ linux-3.0.4/arch/arm/lib/uaccess.S  2011-08-23 21:47:55.000000000 -0400
458 @@ -20,7 +20,7 @@
459  
460  #define PAGE_SHIFT 12
461  
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464   * Purpose  : copy a block to user memory from kernel memory
465   * Params   : to   - user memory
466   *          : from - kernel memory
467 @@ -40,7 +40,7 @@ USER(         T(strgtb) r3, [r0], #1)                 @ May f
468                 sub     r2, r2, ip
469                 b       .Lc2u_dest_aligned
470  
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473                 stmfd   sp!, {r2, r4 - r7, lr}
474                 cmp     r2, #4
475                 blt     .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER(             T(strgeb) r3, [r0], #1)                 @ May f
477                 ldrgtb  r3, [r1], #0
478  USER(          T(strgtb) r3, [r0], #1)                 @ May fault
479                 b       .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482  
483                 .pushsection .fixup,"ax"
484                 .align  0
485  9001:          ldmfd   sp!, {r0, r4 - r7, pc}
486                 .popsection
487  
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490   * Purpose  : copy a block from user memory to kernel memory
491   * Params   : to   - kernel memory
492   *          : from - user memory
493 @@ -304,7 +304,7 @@ USER(               T(ldrgtb) r3, [r1], #1)                 @ May f
494                 sub     r2, r2, ip
495                 b       .Lcfu_dest_aligned
496  
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499                 stmfd   sp!, {r0, r2, r4 - r7, lr}
500                 cmp     r2, #4
501                 blt     .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER(               T(ldrgeb) r3, [r1], #1)                 @ May f
503  USER(          T(ldrgtb) r3, [r1], #1)                 @ May fault
504                 strgtb  r3, [r0], #1
505                 b       .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508  
509                 .pushsection .fixup,"ax"
510                 .align  0
511 diff -urNp linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c      2011-07-21 22:17:23.000000000 -0400
513 +++ linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c      2011-08-23 21:47:55.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515  }
516  
517  unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520  {
521         /*
522          * This test is stubbed out of the main function above to keep
523 diff -urNp linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c       2011-07-21 22:17:23.000000000 -0400
525 +++ linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c       2011-08-23 21:48:14.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527         return sprintf(buf, "0x%X\n", mbox_value);
528  }
529  
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532  
533  static int mbox_show(struct seq_file *s, void *data)
534  {
535 diff -urNp linux-3.0.4/arch/arm/mm/fault.c linux-3.0.4/arch/arm/mm/fault.c
536 --- linux-3.0.4/arch/arm/mm/fault.c     2011-07-21 22:17:23.000000000 -0400
537 +++ linux-3.0.4/arch/arm/mm/fault.c     2011-08-23 21:47:55.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539         }
540  #endif
541  
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 +       if (fsr & FSR_LNX_PF) {
544 +               pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 +               do_group_exit(SIGKILL);
546 +       }
547 +#endif
548 +
549         tsk->thread.address = addr;
550         tsk->thread.error_code = fsr;
551         tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553  }
554  #endif                                 /* CONFIG_MMU */
555  
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 +       long i;
560 +
561 +       printk(KERN_ERR "PAX: bytes at PC: ");
562 +       for (i = 0; i < 20; i++) {
563 +               unsigned char c;
564 +               if (get_user(c, (__force unsigned char __user *)pc+i))
565 +                       printk(KERN_CONT "?? ");
566 +               else
567 +                       printk(KERN_CONT "%02x ", c);
568 +       }
569 +       printk("\n");
570 +
571 +       printk(KERN_ERR "PAX: bytes at SP-4: ");
572 +       for (i = -1; i < 20; i++) {
573 +               unsigned long c;
574 +               if (get_user(c, (__force unsigned long __user *)sp+i))
575 +                       printk(KERN_CONT "???????? ");
576 +               else
577 +                       printk(KERN_CONT "%08lx ", c);
578 +       }
579 +       printk("\n");
580 +}
581 +#endif
582 +
583  /*
584   * First Level Translation Fault Handler
585   *
586 diff -urNp linux-3.0.4/arch/arm/mm/mmap.c linux-3.0.4/arch/arm/mm/mmap.c
587 --- linux-3.0.4/arch/arm/mm/mmap.c      2011-07-21 22:17:23.000000000 -0400
588 +++ linux-3.0.4/arch/arm/mm/mmap.c      2011-08-23 21:47:55.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590         if (len > TASK_SIZE)
591                 return -ENOMEM;
592  
593 +#ifdef CONFIG_PAX_RANDMMAP
594 +       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597         if (addr) {
598                 if (do_align)
599                         addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601                         addr = PAGE_ALIGN(addr);
602  
603                 vma = find_vma(mm, addr);
604 -               if (TASK_SIZE - len >= addr &&
605 -                   (!vma || addr + len <= vma->vm_start))
606 +               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607                         return addr;
608         }
609         if (len > mm->cached_hole_size) {
610 -               start_addr = addr = mm->free_area_cache;
611 +               start_addr = addr = mm->free_area_cache;
612         } else {
613 -               start_addr = addr = TASK_UNMAPPED_BASE;
614 -               mm->cached_hole_size = 0;
615 +               start_addr = addr = mm->mmap_base;
616 +               mm->cached_hole_size = 0;
617         }
618         /* 8 bits of randomness in 20 address space bits */
619         if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621                          * Start a new search - just in case we missed
622                          * some holes.
623                          */
624 -                       if (start_addr != TASK_UNMAPPED_BASE) {
625 -                               start_addr = addr = TASK_UNMAPPED_BASE;
626 +                       if (start_addr != mm->mmap_base) {
627 +                               start_addr = addr = mm->mmap_base;
628                                 mm->cached_hole_size = 0;
629                                 goto full_search;
630                         }
631                         return -ENOMEM;
632                 }
633 -               if (!vma || addr + len <= vma->vm_start) {
634 +               if (check_heap_stack_gap(vma, addr, len)) {
635                         /*
636                          * Remember the place where we stopped the search:
637                          */
638 diff -urNp linux-3.0.4/arch/avr32/include/asm/elf.h linux-3.0.4/arch/avr32/include/asm/elf.h
639 --- linux-3.0.4/arch/avr32/include/asm/elf.h    2011-07-21 22:17:23.000000000 -0400
640 +++ linux-3.0.4/arch/avr32/include/asm/elf.h    2011-08-23 21:47:55.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642     the loader.  We need to make sure that it is out of the way of the program
643     that it will "exec", and that there is sufficient room for the brk.  */
644  
645 -#define ELF_ET_DYN_BASE         (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE                (TASK_SIZE / 3 * 2)
647  
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE    0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN     15
652 +#define PAX_DELTA_STACK_LEN    15
653 +#endif
654  
655  /* This yields a mask that user programs can use to figure out what
656     instruction set this CPU supports.  This could be done in user space,
657 diff -urNp linux-3.0.4/arch/avr32/include/asm/kmap_types.h linux-3.0.4/arch/avr32/include/asm/kmap_types.h
658 --- linux-3.0.4/arch/avr32/include/asm/kmap_types.h     2011-07-21 22:17:23.000000000 -0400
659 +++ linux-3.0.4/arch/avr32/include/asm/kmap_types.h     2011-08-23 21:47:55.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661  D(11)  KM_IRQ1,
662  D(12)  KM_SOFTIRQ0,
663  D(13)  KM_SOFTIRQ1,
664 -D(14)  KM_TYPE_NR
665 +D(14)  KM_CLEARPAGE,
666 +D(15)  KM_TYPE_NR
667  };
668  
669  #undef D
670 diff -urNp linux-3.0.4/arch/avr32/mm/fault.c linux-3.0.4/arch/avr32/mm/fault.c
671 --- linux-3.0.4/arch/avr32/mm/fault.c   2011-07-21 22:17:23.000000000 -0400
672 +++ linux-3.0.4/arch/avr32/mm/fault.c   2011-08-23 21:47:55.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674  
675  int exception_trace = 1;
676  
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 +       unsigned long i;
681 +
682 +       printk(KERN_ERR "PAX: bytes at PC: ");
683 +       for (i = 0; i < 20; i++) {
684 +               unsigned char c;
685 +               if (get_user(c, (unsigned char *)pc+i))
686 +                       printk(KERN_CONT "???????? ");
687 +               else
688 +                       printk(KERN_CONT "%02x ", c);
689 +       }
690 +       printk("\n");
691 +}
692 +#endif
693 +
694  /*
695   * This routine handles page faults. It determines the address and the
696   * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698         up_read(&mm->mmap_sem);
699  
700         if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 +               if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 +                       if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 +                               pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 +                               do_group_exit(SIGKILL);
707 +                       }
708 +               }
709 +#endif
710 +
711                 if (exception_trace && printk_ratelimit())
712                         printk("%s%s[%d]: segfault at %08lx pc %08lx "
713                                "sp %08lx ecr %lu\n",
714 diff -urNp linux-3.0.4/arch/frv/include/asm/kmap_types.h linux-3.0.4/arch/frv/include/asm/kmap_types.h
715 --- linux-3.0.4/arch/frv/include/asm/kmap_types.h       2011-07-21 22:17:23.000000000 -0400
716 +++ linux-3.0.4/arch/frv/include/asm/kmap_types.h       2011-08-23 21:47:55.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718         KM_IRQ1,
719         KM_SOFTIRQ0,
720         KM_SOFTIRQ1,
721 +       KM_CLEARPAGE,
722         KM_TYPE_NR
723  };
724  
725 diff -urNp linux-3.0.4/arch/frv/mm/elf-fdpic.c linux-3.0.4/arch/frv/mm/elf-fdpic.c
726 --- linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727 +++ linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729         if (addr) {
730                 addr = PAGE_ALIGN(addr);
731                 vma = find_vma(current->mm, addr);
732 -               if (TASK_SIZE - len >= addr &&
733 -                   (!vma || addr + len <= vma->vm_start))
734 +               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735                         goto success;
736         }
737  
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739                         for (; vma; vma = vma->vm_next) {
740                                 if (addr > limit)
741                                         break;
742 -                               if (addr + len <= vma->vm_start)
743 +                               if (check_heap_stack_gap(vma, addr, len))
744                                         goto success;
745                                 addr = vma->vm_end;
746                         }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748                 for (; vma; vma = vma->vm_next) {
749                         if (addr > limit)
750                                 break;
751 -                       if (addr + len <= vma->vm_start)
752 +                       if (check_heap_stack_gap(vma, addr, len))
753                                 goto success;
754                         addr = vma->vm_end;
755                 }
756 diff -urNp linux-3.0.4/arch/ia64/include/asm/elf.h linux-3.0.4/arch/ia64/include/asm/elf.h
757 --- linux-3.0.4/arch/ia64/include/asm/elf.h     2011-07-21 22:17:23.000000000 -0400
758 +++ linux-3.0.4/arch/ia64/include/asm/elf.h     2011-08-23 21:47:55.000000000 -0400
759 @@ -42,6 +42,13 @@
760   */
761  #define ELF_ET_DYN_BASE                (TASK_UNMAPPED_BASE + 0x800000000UL)
762  
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE    (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN     (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN    (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770  #define PT_IA_64_UNWIND                0x70000001
771  
772  /* IA-64 relocations: */
773 diff -urNp linux-3.0.4/arch/ia64/include/asm/pgtable.h linux-3.0.4/arch/ia64/include/asm/pgtable.h
774 --- linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775 +++ linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776 @@ -12,7 +12,7 @@
777   *     David Mosberger-Tang <davidm@hpl.hp.com>
778   */
779  
780 -
781 +#include <linux/const.h>
782  #include <asm/mman.h>
783  #include <asm/page.h>
784  #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786  #define PAGE_READONLY  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787  #define PAGE_COPY      __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788  #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC    __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC      __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC    PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC  PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC      PAGE_COPY
798 +#endif
799 +
800  #define PAGE_GATE      __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801  #define PAGE_KERNEL    __pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
802  #define PAGE_KERNELRX  __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-3.0.4/arch/ia64/include/asm/spinlock.h linux-3.0.4/arch/ia64/include/asm/spinlock.h
804 --- linux-3.0.4/arch/ia64/include/asm/spinlock.h        2011-07-21 22:17:23.000000000 -0400
805 +++ linux-3.0.4/arch/ia64/include/asm/spinlock.h        2011-08-23 21:47:55.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807         unsigned short  *p = (unsigned short *)&lock->lock + 1, tmp;
808  
809         asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 -       ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 +       ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812  }
813  
814  static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-3.0.4/arch/ia64/include/asm/uaccess.h linux-3.0.4/arch/ia64/include/asm/uaccess.h
816 --- linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817 +++ linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819         const void *__cu_from = (from);                                                 \
820         long __cu_len = (n);                                                            \
821                                                                                         \
822 -       if (__access_ok(__cu_to, __cu_len, get_fs()))                                   \
823 +       if (__cu_len > 0  && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs()))                   \
824                 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);   \
825         __cu_len;                                                                       \
826  })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828         long __cu_len = (n);                                                            \
829                                                                                         \
830         __chk_user_ptr(__cu_from);                                                      \
831 -       if (__access_ok(__cu_from, __cu_len, get_fs()))                                 \
832 +       if (__cu_len > 0 && __cu_len <= INT_MAX  && __access_ok(__cu_from, __cu_len, get_fs()))                 \
833                 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);   \
834         __cu_len;                                                                       \
835  })
836 diff -urNp linux-3.0.4/arch/ia64/kernel/module.c linux-3.0.4/arch/ia64/kernel/module.c
837 --- linux-3.0.4/arch/ia64/kernel/module.c       2011-07-21 22:17:23.000000000 -0400
838 +++ linux-3.0.4/arch/ia64/kernel/module.c       2011-08-23 21:47:55.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840  void
841  module_free (struct module *mod, void *module_region)
842  {
843 -       if (mod && mod->arch.init_unw_table &&
844 -           module_region == mod->module_init) {
845 +       if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846                 unw_remove_unwind_table(mod->arch.init_unw_table);
847                 mod->arch.init_unw_table = NULL;
848         }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850  }
851  
852  static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 +       return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 +       return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865  in_init (const struct module *mod, uint64_t addr)
866  {
867 -       return addr - (uint64_t) mod->module_init < mod->init_size;
868 +       return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 +       return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 +       return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881  }
882  
883  static inline int
884  in_core (const struct module *mod, uint64_t addr)
885  {
886 -       return addr - (uint64_t) mod->module_core < mod->core_size;
887 +       return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888  }
889  
890  static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892                 break;
893  
894               case RV_BDREL:
895 -               val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 +               if (in_init_rx(mod, val))
897 +                       val -= (uint64_t) mod->module_init_rx;
898 +               else if (in_init_rw(mod, val))
899 +                       val -= (uint64_t) mod->module_init_rw;
900 +               else if (in_core_rx(mod, val))
901 +                       val -= (uint64_t) mod->module_core_rx;
902 +               else if (in_core_rw(mod, val))
903 +                       val -= (uint64_t) mod->module_core_rw;
904                 break;
905  
906               case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908                  *     addresses have been selected...
909                  */
910                 uint64_t gp;
911 -               if (mod->core_size > MAX_LTOFF)
912 +               if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913                         /*
914                          * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915                          * at the end of the module.
916                          */
917 -                       gp = mod->core_size - MAX_LTOFF / 2;
918 +                       gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919                 else
920 -                       gp = mod->core_size / 2;
921 -               gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 +                       gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 +               gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924                 mod->arch.gp = gp;
925                 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926         }
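
The in_init_rx()/in_init_rw()/in_core_rx()/in_core_rw() helpers added above all use the single-comparison range test "addr - (uint64_t)base < size". For unsigned arithmetic this is equivalent to "base <= addr && addr < base + size", and it stays correct even when base + size would wrap past the top of the address space. The small standalone check below (illustrative only, names made up) exercises that idiom.

/* Illustrative only: the unsigned single-comparison range test used by the
 * new in_init_*() / in_core_*() helpers. */
#include <assert.h>
#include <stdint.h>

static int in_region(uint64_t addr, uint64_t base, uint64_t size)
{
        return addr - base < size;
}

int main(void)
{
        assert( in_region(0x1008, 0x1000, 0x100));              /* inside            */
        assert(!in_region(0x0fff, 0x1000, 0x100));              /* just below base   */
        assert(!in_region(0x1100, 0x1000, 0x100));              /* one past the end  */
        /* region at the very top of the address space: base + size would wrap,
         * yet the subtraction form still classifies addresses correctly */
        assert( in_region(UINT64_MAX, UINT64_MAX - 0x10, 0x20));
        return 0;
}
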
927 diff -urNp linux-3.0.4/arch/ia64/kernel/sys_ia64.c linux-3.0.4/arch/ia64/kernel/sys_ia64.c
928 --- linux-3.0.4/arch/ia64/kernel/sys_ia64.c     2011-07-21 22:17:23.000000000 -0400
929 +++ linux-3.0.4/arch/ia64/kernel/sys_ia64.c     2011-08-23 21:47:55.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931         if (REGION_NUMBER(addr) == RGN_HPAGE)
932                 addr = 0;
933  #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 +       if (mm->pax_flags & MF_PAX_RANDMMAP)
937 +               addr = mm->free_area_cache;
938 +       else
939 +#endif
940 +
941         if (!addr)
942                 addr = mm->free_area_cache;
943  
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945         for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946                 /* At this point:  (!vma || addr < vma->vm_end). */
947                 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 -                       if (start_addr != TASK_UNMAPPED_BASE) {
949 +                       if (start_addr != mm->mmap_base) {
950                                 /* Start a new search --- just in case we missed some holes.  */
951 -                               addr = TASK_UNMAPPED_BASE;
952 +                               addr = mm->mmap_base;
953                                 goto full_search;
954                         }
955                         return -ENOMEM;
956                 }
957 -               if (!vma || addr + len <= vma->vm_start) {
958 +               if (check_heap_stack_gap(vma, addr, len)) {
959                         /* Remember the address where we stopped this search:  */
960                         mm->free_area_cache = addr + len;
961                         return addr;
962 diff -urNp linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S  2011-07-21 22:17:23.000000000 -0400
964 +++ linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S  2011-08-23 21:47:55.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966         /* Per-cpu data: */
967         . = ALIGN(PERCPU_PAGE_SIZE);
968         PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 -       __phys_per_cpu_start = __per_cpu_load;
970 +       __phys_per_cpu_start = per_cpu_load;
971         /*
972          * ensure percpu data fits
973          * into percpu page size
974 diff -urNp linux-3.0.4/arch/ia64/mm/fault.c linux-3.0.4/arch/ia64/mm/fault.c
975 --- linux-3.0.4/arch/ia64/mm/fault.c    2011-07-21 22:17:23.000000000 -0400
976 +++ linux-3.0.4/arch/ia64/mm/fault.c    2011-08-23 21:47:55.000000000 -0400
977 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned 
978         return pte_present(pte);
979  }
980  
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 +       unsigned long i;
985 +
986 +       printk(KERN_ERR "PAX: bytes at PC: ");
987 +       for (i = 0; i < 8; i++) {
988 +               unsigned int c;
989 +               if (get_user(c, (unsigned int *)pc+i))
990 +                       printk(KERN_CONT "???????? ");
991 +               else
992 +                       printk(KERN_CONT "%08x ", c);
993 +       }
994 +       printk("\n");
995 +}
996 +#endif
997 +
998  void __kprobes
999  ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000  {
1001 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002         mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003                 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004  
1005 -       if ((vma->vm_flags & mask) != mask)
1006 +       if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 +               if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 +                       if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 +                               goto bad_area;
1012 +
1013 +                       up_read(&mm->mmap_sem);
1014 +                       pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 +                       do_group_exit(SIGKILL);
1016 +               }
1017 +#endif
1018 +
1019                 goto bad_area;
1020  
1021 +       }
1022 +
1023         /*
1024          * If for any reason at all we couldn't handle the fault, make
1025          * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-3.0.4/arch/ia64/mm/hugetlbpage.c linux-3.0.4/arch/ia64/mm/hugetlbpage.c
1027 --- linux-3.0.4/arch/ia64/mm/hugetlbpage.c      2011-07-21 22:17:23.000000000 -0400
1028 +++ linux-3.0.4/arch/ia64/mm/hugetlbpage.c      2011-08-23 21:47:55.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030                 /* At this point:  (!vmm || addr < vmm->vm_end). */
1031                 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032                         return -ENOMEM;
1033 -               if (!vmm || (addr + len) <= vmm->vm_start)
1034 +               if (check_heap_stack_gap(vmm, addr, len))
1035                         return addr;
1036                 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037         }
1038 diff -urNp linux-3.0.4/arch/ia64/mm/init.c linux-3.0.4/arch/ia64/mm/init.c
1039 --- linux-3.0.4/arch/ia64/mm/init.c     2011-07-21 22:17:23.000000000 -0400
1040 +++ linux-3.0.4/arch/ia64/mm/init.c     2011-08-23 21:47:55.000000000 -0400
1041 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042                 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043                 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044                 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 +               if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 +                       vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 +                       if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 +                               vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 +               }
1056 +#endif
1057 +
1058                 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059                 down_write(&current->mm->mmap_sem);
1060                 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-3.0.4/arch/m32r/lib/usercopy.c linux-3.0.4/arch/m32r/lib/usercopy.c
1062 --- linux-3.0.4/arch/m32r/lib/usercopy.c        2011-07-21 22:17:23.000000000 -0400
1063 +++ linux-3.0.4/arch/m32r/lib/usercopy.c        2011-08-23 21:47:55.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065  unsigned long
1066  __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067  {
1068 +       if ((long)n < 0)
1069 +               return n;
1070 +
1071         prefetch(from);
1072         if (access_ok(VERIFY_WRITE, to, n))
1073                 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, 
1075  unsigned long
1076  __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077  {
1078 +       if ((long)n < 0)
1079 +               return n;
1080 +
1081         prefetchw(to);
1082         if (access_ok(VERIFY_READ, from, n))
1083                 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-3.0.4/arch/mips/include/asm/elf.h linux-3.0.4/arch/mips/include/asm/elf.h
1085 --- linux-3.0.4/arch/mips/include/asm/elf.h     2011-07-21 22:17:23.000000000 -0400
1086 +++ linux-3.0.4/arch/mips/include/asm/elf.h     2011-08-23 21:47:55.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088  #define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
1089  #endif
1090  
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE    (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN     (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN    (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098  #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099  struct linux_binprm;
1100  extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101                                        int uses_interp);
1102  
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107  #endif /* _ASM_ELF_H */
1108 diff -urNp linux-3.0.4/arch/mips/include/asm/page.h linux-3.0.4/arch/mips/include/asm/page.h
1109 --- linux-3.0.4/arch/mips/include/asm/page.h    2011-07-21 22:17:23.000000000 -0400
1110 +++ linux-3.0.4/arch/mips/include/asm/page.h    2011-08-23 21:47:55.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112    #ifdef CONFIG_CPU_MIPS32
1113      typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114      #define pte_val(x)    ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 -    #define __pte(x)      ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 +    #define __pte(x)      ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117    #else
1118       typedef struct { unsigned long long pte; } pte_t;
1119       #define pte_val(x)        ((x).pte)
1120 diff -urNp linux-3.0.4/arch/mips/include/asm/system.h linux-3.0.4/arch/mips/include/asm/system.h
1121 --- linux-3.0.4/arch/mips/include/asm/system.h  2011-07-21 22:17:23.000000000 -0400
1122 +++ linux-3.0.4/arch/mips/include/asm/system.h  2011-08-23 21:47:55.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124   */
1125  #define __ARCH_WANT_UNLOCKED_CTXSW
1126  
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129  
1130  #endif /* _ASM_SYSTEM_H */
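
Replacing the extern arch_align_stack() with a plain mask means the MIPS stack pointer is now only rounded down to a 16-byte boundary; the per-exec sub-page randomization it used to apply is deleted from arch/mips/kernel/process.c further down, with stack randomization covered instead by the PAX_DELTA_STACK_LEN logic this patch adds. A one-line demonstration of the mask (illustrative only, name made up):

/* Illustrative only: (x) & ~0xfUL rounds an address down to 16 bytes. */
#include <assert.h>

#define align_stack_sketch(x) ((x) & ~0xfUL)

int main(void)
{
        assert(align_stack_sketch(0x7fff1234UL) == 0x7fff1230UL);
        assert(align_stack_sketch(0x7fff1230UL) == 0x7fff1230UL);   /* already aligned */
        return 0;
}
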
1131 diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c        2011-07-21 22:17:23.000000000 -0400
1133 +++ linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c        2011-08-23 21:47:55.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135  #undef ELF_ET_DYN_BASE
1136  #define ELF_ET_DYN_BASE         (TASK32_SIZE / 3 * 2)
1137  
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE    (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN     (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN    (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145  #include <asm/processor.h>
1146  #include <linux/module.h>
1147  #include <linux/elfcore.h>
1148 diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c        2011-07-21 22:17:23.000000000 -0400
1150 +++ linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c        2011-08-23 21:47:55.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152  #undef ELF_ET_DYN_BASE
1153  #define ELF_ET_DYN_BASE         (TASK32_SIZE / 3 * 2)
1154  
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE    (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN     (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN    (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162  #include <asm/processor.h>
1163  
1164  /*
1165 diff -urNp linux-3.0.4/arch/mips/kernel/process.c linux-3.0.4/arch/mips/kernel/process.c
1166 --- linux-3.0.4/arch/mips/kernel/process.c      2011-07-21 22:17:23.000000000 -0400
1167 +++ linux-3.0.4/arch/mips/kernel/process.c      2011-08-23 21:47:55.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169  out:
1170         return pc;
1171  }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 -       if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 -               sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 -       return sp & ALMASK;
1183 -}
1184 diff -urNp linux-3.0.4/arch/mips/mm/fault.c linux-3.0.4/arch/mips/mm/fault.c
1185 --- linux-3.0.4/arch/mips/mm/fault.c    2011-07-21 22:17:23.000000000 -0400
1186 +++ linux-3.0.4/arch/mips/mm/fault.c    2011-08-23 21:47:55.000000000 -0400
1187 @@ -28,6 +28,23 @@
1188  #include <asm/highmem.h>               /* For VMALLOC_END */
1189  #include <linux/kdebug.h>
1190  
1191 +#ifdef CONFIG_PAX_PAGEEXEC
1192 +void pax_report_insns(void *pc, void *sp)
1193 +{
1194 +       unsigned long i;
1195 +
1196 +       printk(KERN_ERR "PAX: bytes at PC: ");
1197 +       for (i = 0; i < 5; i++) {
1198 +               unsigned int c;
1199 +               if (get_user(c, (unsigned int *)pc+i))
1200 +                       printk(KERN_CONT "???????? ");
1201 +               else
1202 +                       printk(KERN_CONT "%08x ", c);
1203 +       }
1204 +       printk("\n");
1205 +}
1206 +#endif
1207 +
1208  /*
1209   * This routine handles page faults.  It determines the address,
1210   * and the problem, and then passes it off to one of the appropriate
1211 diff -urNp linux-3.0.4/arch/mips/mm/mmap.c linux-3.0.4/arch/mips/mm/mmap.c
1212 --- linux-3.0.4/arch/mips/mm/mmap.c     2011-07-21 22:17:23.000000000 -0400
1213 +++ linux-3.0.4/arch/mips/mm/mmap.c     2011-08-23 21:47:55.000000000 -0400
1214 @@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215         do_color_align = 0;
1216         if (filp || (flags & MAP_SHARED))
1217                 do_color_align = 1;
1218 +
1219 +#ifdef CONFIG_PAX_RANDMMAP
1220 +       if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221 +#endif
1222 +
1223         if (addr) {
1224                 if (do_color_align)
1225                         addr = COLOUR_ALIGN(addr, pgoff);
1226                 else
1227                         addr = PAGE_ALIGN(addr);
1228                 vmm = find_vma(current->mm, addr);
1229 -               if (TASK_SIZE - len >= addr &&
1230 -                   (!vmm || addr + len <= vmm->vm_start))
1231 +               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232                         return addr;
1233         }
1234         addr = current->mm->mmap_base;
1235 @@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236                 /* At this point:  (!vmm || addr < vmm->vm_end). */
1237                 if (TASK_SIZE - len < addr)
1238                         return -ENOMEM;
1239 -               if (!vmm || addr + len <= vmm->vm_start)
1240 +               if (check_heap_stack_gap(vmm, addr, len))
1241                         return addr;
1242                 addr = vmm->vm_end;
1243                 if (do_color_align)
1244 @@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245         mm->get_unmapped_area = arch_get_unmapped_area;
1246         mm->unmap_area = arch_unmap_area;
1247  }
1248 -
1249 -static inline unsigned long brk_rnd(void)
1250 -{
1251 -       unsigned long rnd = get_random_int();
1252 -
1253 -       rnd = rnd << PAGE_SHIFT;
1254 -       /* 8MB for 32bit, 256MB for 64bit */
1255 -       if (TASK_IS_32BIT_ADDR)
1256 -               rnd = rnd & 0x7ffffful;
1257 -       else
1258 -               rnd = rnd & 0xffffffful;
1259 -
1260 -       return rnd;
1261 -}
1262 -
1263 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1264 -{
1265 -       unsigned long base = mm->brk;
1266 -       unsigned long ret;
1267 -
1268 -       ret = PAGE_ALIGN(base + brk_rnd());
1269 -
1270 -       if (ret < mm->brk)
1271 -               return mm->brk;
1272 -
1273 -       return ret;
1274 -}
1275 diff -urNp linux-3.0.4/arch/parisc/include/asm/elf.h linux-3.0.4/arch/parisc/include/asm/elf.h
1276 --- linux-3.0.4/arch/parisc/include/asm/elf.h   2011-07-21 22:17:23.000000000 -0400
1277 +++ linux-3.0.4/arch/parisc/include/asm/elf.h   2011-08-23 21:47:55.000000000 -0400
1278 @@ -342,6 +342,13 @@ struct pt_regs;    /* forward declaration..
1279  
1280  #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE + 0x01000000)
1281  
1282 +#ifdef CONFIG_PAX_ASLR
1283 +#define PAX_ELF_ET_DYN_BASE    0x10000UL
1284 +
1285 +#define PAX_DELTA_MMAP_LEN     16
1286 +#define PAX_DELTA_STACK_LEN    16
1287 +#endif
1288 +
1289  /* This yields a mask that user programs can use to figure out what
1290     instruction set this CPU supports.  This could be done in user space,
1291     but it's not easy, and we've already done it here.  */
1292 diff -urNp linux-3.0.4/arch/parisc/include/asm/pgtable.h linux-3.0.4/arch/parisc/include/asm/pgtable.h
1293 --- linux-3.0.4/arch/parisc/include/asm/pgtable.h       2011-07-21 22:17:23.000000000 -0400
1294 +++ linux-3.0.4/arch/parisc/include/asm/pgtable.h       2011-08-23 21:47:55.000000000 -0400
1295 @@ -210,6 +210,17 @@ struct vm_area_struct;
1296  #define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297  #define PAGE_COPY       PAGE_EXECREAD
1298  #define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299 +
1300 +#ifdef CONFIG_PAX_PAGEEXEC
1301 +# define PAGE_SHARED_NOEXEC    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302 +# define PAGE_COPY_NOEXEC      __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303 +# define PAGE_READONLY_NOEXEC  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304 +#else
1305 +# define PAGE_SHARED_NOEXEC    PAGE_SHARED
1306 +# define PAGE_COPY_NOEXEC      PAGE_COPY
1307 +# define PAGE_READONLY_NOEXEC  PAGE_READONLY
1308 +#endif
1309 +
1310  #define PAGE_KERNEL    __pgprot(_PAGE_KERNEL)
1311  #define PAGE_KERNEL_EXEC       __pgprot(_PAGE_KERNEL_EXEC)
1312  #define PAGE_KERNEL_RWX        __pgprot(_PAGE_KERNEL_RWX)
1313 diff -urNp linux-3.0.4/arch/parisc/kernel/module.c linux-3.0.4/arch/parisc/kernel/module.c
1314 --- linux-3.0.4/arch/parisc/kernel/module.c     2011-07-21 22:17:23.000000000 -0400
1315 +++ linux-3.0.4/arch/parisc/kernel/module.c     2011-08-23 21:47:55.000000000 -0400
1316 @@ -98,16 +98,38 @@
1317  
1318  /* three functions to determine where in the module core
1319   * or init pieces the location is */
1320 +static inline int in_init_rx(struct module *me, void *loc)
1321 +{
1322 +       return (loc >= me->module_init_rx &&
1323 +               loc < (me->module_init_rx + me->init_size_rx));
1324 +}
1325 +
1326 +static inline int in_init_rw(struct module *me, void *loc)
1327 +{
1328 +       return (loc >= me->module_init_rw &&
1329 +               loc < (me->module_init_rw + me->init_size_rw));
1330 +}
1331 +
1332  static inline int in_init(struct module *me, void *loc)
1333  {
1334 -       return (loc >= me->module_init &&
1335 -               loc <= (me->module_init + me->init_size));
1336 +       return in_init_rx(me, loc) || in_init_rw(me, loc);
1337 +}
1338 +
1339 +static inline int in_core_rx(struct module *me, void *loc)
1340 +{
1341 +       return (loc >= me->module_core_rx &&
1342 +               loc < (me->module_core_rx + me->core_size_rx));
1343 +}
1344 +
1345 +static inline int in_core_rw(struct module *me, void *loc)
1346 +{
1347 +       return (loc >= me->module_core_rw &&
1348 +               loc < (me->module_core_rw + me->core_size_rw));
1349  }
1350  
1351  static inline int in_core(struct module *me, void *loc)
1352  {
1353 -       return (loc >= me->module_core &&
1354 -               loc <= (me->module_core + me->core_size));
1355 +       return in_core_rx(me, loc) || in_core_rw(me, loc);
1356  }
1357  
1358  static inline int in_local(struct module *me, void *loc)
1359 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360         }
1361  
1362         /* align things a bit */
1363 -       me->core_size = ALIGN(me->core_size, 16);
1364 -       me->arch.got_offset = me->core_size;
1365 -       me->core_size += gots * sizeof(struct got_entry);
1366 -
1367 -       me->core_size = ALIGN(me->core_size, 16);
1368 -       me->arch.fdesc_offset = me->core_size;
1369 -       me->core_size += fdescs * sizeof(Elf_Fdesc);
1370 +       me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371 +       me->arch.got_offset = me->core_size_rw;
1372 +       me->core_size_rw += gots * sizeof(struct got_entry);
1373 +
1374 +       me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375 +       me->arch.fdesc_offset = me->core_size_rw;
1376 +       me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377  
1378         me->arch.got_max = gots;
1379         me->arch.fdesc_max = fdescs;
1380 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module 
1381  
1382         BUG_ON(value == 0);
1383  
1384 -       got = me->module_core + me->arch.got_offset;
1385 +       got = me->module_core_rw + me->arch.got_offset;
1386         for (i = 0; got[i].addr; i++)
1387                 if (got[i].addr == value)
1388                         goto out;
1389 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module 
1390  #ifdef CONFIG_64BIT
1391  static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392  {
1393 -       Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394 +       Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395  
1396         if (!value) {
1397                 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module 
1399  
1400         /* Create new one */
1401         fdesc->addr = value;
1402 -       fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403 +       fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404         return (Elf_Addr)fdesc;
1405  }
1406  #endif /* CONFIG_64BIT */
1407 @@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408  
1409         table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410         end = table + sechdrs[me->arch.unwind_section].sh_size;
1411 -       gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412 +       gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413  
1414         DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415                me->arch.unwind_section, table, end, gp);
1416 diff -urNp linux-3.0.4/arch/parisc/kernel/sys_parisc.c linux-3.0.4/arch/parisc/kernel/sys_parisc.c
1417 --- linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418 +++ linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420                 /* At this point:  (!vma || addr < vma->vm_end). */
1421                 if (TASK_SIZE - len < addr)
1422                         return -ENOMEM;
1423 -               if (!vma || addr + len <= vma->vm_start)
1424 +               if (check_heap_stack_gap(vma, addr, len))
1425                         return addr;
1426                 addr = vma->vm_end;
1427         }
1428 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429                 /* At this point:  (!vma || addr < vma->vm_end). */
1430                 if (TASK_SIZE - len < addr)
1431                         return -ENOMEM;
1432 -               if (!vma || addr + len <= vma->vm_start)
1433 +               if (check_heap_stack_gap(vma, addr, len))
1434                         return addr;
1435                 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436                 if (addr < vma->vm_end) /* handle wraparound */
1437 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438         if (flags & MAP_FIXED)
1439                 return addr;
1440         if (!addr)
1441 -               addr = TASK_UNMAPPED_BASE;
1442 +               addr = current->mm->mmap_base;
1443  
1444         if (filp) {
1445                 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446 diff -urNp linux-3.0.4/arch/parisc/kernel/traps.c linux-3.0.4/arch/parisc/kernel/traps.c
1447 --- linux-3.0.4/arch/parisc/kernel/traps.c      2011-07-21 22:17:23.000000000 -0400
1448 +++ linux-3.0.4/arch/parisc/kernel/traps.c      2011-08-23 21:47:55.000000000 -0400
1449 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450  
1451                         down_read(&current->mm->mmap_sem);
1452                         vma = find_vma(current->mm,regs->iaoq[0]);
1453 -                       if (vma && (regs->iaoq[0] >= vma->vm_start)
1454 -                               && (vma->vm_flags & VM_EXEC)) {
1455 -
1456 +                       if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457                                 fault_address = regs->iaoq[0];
1458                                 fault_space = regs->iasq[0];
1459  
1460 diff -urNp linux-3.0.4/arch/parisc/mm/fault.c linux-3.0.4/arch/parisc/mm/fault.c
1461 --- linux-3.0.4/arch/parisc/mm/fault.c  2011-07-21 22:17:23.000000000 -0400
1462 +++ linux-3.0.4/arch/parisc/mm/fault.c  2011-08-23 21:47:55.000000000 -0400
1463 @@ -15,6 +15,7 @@
1464  #include <linux/sched.h>
1465  #include <linux/interrupt.h>
1466  #include <linux/module.h>
1467 +#include <linux/unistd.h>
1468  
1469  #include <asm/uaccess.h>
1470  #include <asm/traps.h>
1471 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472  static unsigned long
1473  parisc_acctyp(unsigned long code, unsigned int inst)
1474  {
1475 -       if (code == 6 || code == 16)
1476 +       if (code == 6 || code == 7 || code == 16)
1477             return VM_EXEC;
1478  
1479         switch (inst & 0xf0000000) {
1480 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481                         }
1482  #endif
1483  
1484 +#ifdef CONFIG_PAX_PAGEEXEC
1485 +/*
1486 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487 + *
1488 + * returns 1 when task should be killed
1489 + *         2 when rt_sigreturn trampoline was detected
1490 + *         3 when unpatched PLT trampoline was detected
1491 + */
1492 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1493 +{
1494 +
1495 +#ifdef CONFIG_PAX_EMUPLT
1496 +       int err;
1497 +
1498 +       do { /* PaX: unpatched PLT emulation */
1499 +               unsigned int bl, depwi;
1500 +
1501 +               err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502 +               err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503 +
1504 +               if (err)
1505 +                       break;
1506 +
1507 +               if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508 +                       unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509 +
1510 +                       err = get_user(ldw, (unsigned int *)addr);
1511 +                       err |= get_user(bv, (unsigned int *)(addr+4));
1512 +                       err |= get_user(ldw2, (unsigned int *)(addr+8));
1513 +
1514 +                       if (err)
1515 +                               break;
1516 +
1517 +                       if (ldw == 0x0E801096U &&
1518 +                           bv == 0xEAC0C000U &&
1519 +                           ldw2 == 0x0E881095U)
1520 +                       {
1521 +                               unsigned int resolver, map;
1522 +
1523 +                               err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524 +                               err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525 +                               if (err)
1526 +                                       break;
1527 +
1528 +                               regs->gr[20] = instruction_pointer(regs)+8;
1529 +                               regs->gr[21] = map;
1530 +                               regs->gr[22] = resolver;
1531 +                               regs->iaoq[0] = resolver | 3UL;
1532 +                               regs->iaoq[1] = regs->iaoq[0] + 4;
1533 +                               return 3;
1534 +                       }
1535 +               }
1536 +       } while (0);
1537 +#endif
1538 +
1539 +#ifdef CONFIG_PAX_EMUTRAMP
1540 +
1541 +#ifndef CONFIG_PAX_EMUSIGRT
1542 +       if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543 +               return 1;
1544 +#endif
1545 +
1546 +       do { /* PaX: rt_sigreturn emulation */
1547 +               unsigned int ldi1, ldi2, bel, nop;
1548 +
1549 +               err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550 +               err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551 +               err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552 +               err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553 +
1554 +               if (err)
1555 +                       break;
1556 +
1557 +               if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558 +                   ldi2 == 0x3414015AU &&
1559 +                   bel == 0xE4008200U &&
1560 +                   nop == 0x08000240U)
1561 +               {
1562 +                       regs->gr[25] = (ldi1 & 2) >> 1;
1563 +                       regs->gr[20] = __NR_rt_sigreturn;
1564 +                       regs->gr[31] = regs->iaoq[1] + 16;
1565 +                       regs->sr[0] = regs->iasq[1];
1566 +                       regs->iaoq[0] = 0x100UL;
1567 +                       regs->iaoq[1] = regs->iaoq[0] + 4;
1568 +                       regs->iasq[0] = regs->sr[2];
1569 +                       regs->iasq[1] = regs->sr[2];
1570 +                       return 2;
1571 +               }
1572 +       } while (0);
1573 +#endif
1574 +
1575 +       return 1;
1576 +}
1577 +
1578 +void pax_report_insns(void *pc, void *sp)
1579 +{
1580 +       unsigned long i;
1581 +
1582 +       printk(KERN_ERR "PAX: bytes at PC: ");
1583 +       for (i = 0; i < 5; i++) {
1584 +               unsigned int c;
1585 +               if (get_user(c, (unsigned int *)pc+i))
1586 +                       printk(KERN_CONT "???????? ");
1587 +               else
1588 +                       printk(KERN_CONT "%08x ", c);
1589 +       }
1590 +       printk("\n");
1591 +}
1592 +#endif
1593 +
1594  int fixup_exception(struct pt_regs *regs)
1595  {
1596         const struct exception_table_entry *fix;
1597 @@ -192,8 +303,33 @@ good_area:
1598  
1599         acc_type = parisc_acctyp(code,regs->iir);
1600  
1601 -       if ((vma->vm_flags & acc_type) != acc_type)
1602 +       if ((vma->vm_flags & acc_type) != acc_type) {
1603 +
1604 +#ifdef CONFIG_PAX_PAGEEXEC
1605 +               if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606 +                   (address & ~3UL) == instruction_pointer(regs))
1607 +               {
1608 +                       up_read(&mm->mmap_sem);
1609 +                       switch (pax_handle_fetch_fault(regs)) {
1610 +
1611 +#ifdef CONFIG_PAX_EMUPLT
1612 +                       case 3:
1613 +                               return;
1614 +#endif
1615 +
1616 +#ifdef CONFIG_PAX_EMUTRAMP
1617 +                       case 2:
1618 +                               return;
1619 +#endif
1620 +
1621 +                       }
1622 +                       pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623 +                       do_group_exit(SIGKILL);
1624 +               }
1625 +#endif
1626 +
1627                 goto bad_area;
1628 +       }
1629  
1630         /*
1631          * If for any reason at all we couldn't handle the fault, make
1632 diff -urNp linux-3.0.4/arch/powerpc/include/asm/elf.h linux-3.0.4/arch/powerpc/include/asm/elf.h
1633 --- linux-3.0.4/arch/powerpc/include/asm/elf.h  2011-07-21 22:17:23.000000000 -0400
1634 +++ linux-3.0.4/arch/powerpc/include/asm/elf.h  2011-08-23 21:47:55.000000000 -0400
1635 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636     the loader.  We need to make sure that it is out of the way of the program
1637     that it will "exec", and that there is sufficient room for the brk.  */
1638  
1639 -extern unsigned long randomize_et_dyn(unsigned long base);
1640 -#define ELF_ET_DYN_BASE                (randomize_et_dyn(0x20000000))
1641 +#define ELF_ET_DYN_BASE                (0x20000000)
1642 +
1643 +#ifdef CONFIG_PAX_ASLR
1644 +#define PAX_ELF_ET_DYN_BASE    (0x10000000UL)
1645 +
1646 +#ifdef __powerpc64__
1647 +#define PAX_DELTA_MMAP_LEN     (is_32bit_task() ? 16 : 28)
1648 +#define PAX_DELTA_STACK_LEN    (is_32bit_task() ? 16 : 28)
1649 +#else
1650 +#define PAX_DELTA_MMAP_LEN     15
1651 +#define PAX_DELTA_STACK_LEN    15
1652 +#endif
1653 +#endif
1654  
1655  /*
1656   * Our registers are always unsigned longs, whether we're a 32 bit
1657 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658         (0x7ff >> (PAGE_SHIFT - 12)) : \
1659         (0x3ffff >> (PAGE_SHIFT - 12)))
1660  
1661 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662 -#define arch_randomize_brk arch_randomize_brk
1663 -
1664  #endif /* __KERNEL__ */
1665  
1666  /*
1667 diff -urNp linux-3.0.4/arch/powerpc/include/asm/kmap_types.h linux-3.0.4/arch/powerpc/include/asm/kmap_types.h
1668 --- linux-3.0.4/arch/powerpc/include/asm/kmap_types.h   2011-07-21 22:17:23.000000000 -0400
1669 +++ linux-3.0.4/arch/powerpc/include/asm/kmap_types.h   2011-08-23 21:47:55.000000000 -0400
1670 @@ -27,6 +27,7 @@ enum km_type {
1671         KM_PPC_SYNC_PAGE,
1672         KM_PPC_SYNC_ICACHE,
1673         KM_KDB,
1674 +       KM_CLEARPAGE,
1675         KM_TYPE_NR
1676  };
1677  
1678 diff -urNp linux-3.0.4/arch/powerpc/include/asm/mman.h linux-3.0.4/arch/powerpc/include/asm/mman.h
1679 --- linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680 +++ linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682  }
1683  #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684  
1685 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687  {
1688         return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689  }
1690 diff -urNp linux-3.0.4/arch/powerpc/include/asm/page_64.h linux-3.0.4/arch/powerpc/include/asm/page_64.h
1691 --- linux-3.0.4/arch/powerpc/include/asm/page_64.h      2011-07-21 22:17:23.000000000 -0400
1692 +++ linux-3.0.4/arch/powerpc/include/asm/page_64.h      2011-08-23 21:47:55.000000000 -0400
1693 @@ -155,15 +155,18 @@ do {                                              \
1694   * stack by default, so in the absence of a PT_GNU_STACK program header
1695   * we turn execute permission off.
1696   */
1697 -#define VM_STACK_DEFAULT_FLAGS32       (VM_READ | VM_WRITE | VM_EXEC | \
1698 -                                        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699 +#define VM_STACK_DEFAULT_FLAGS32 \
1700 +       (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701 +        VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702  
1703  #define VM_STACK_DEFAULT_FLAGS64       (VM_READ | VM_WRITE | \
1704                                          VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705  
1706 +#ifndef CONFIG_PAX_PAGEEXEC
1707  #define VM_STACK_DEFAULT_FLAGS \
1708         (is_32bit_task() ? \
1709          VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710 +#endif
1711  
1712  #include <asm-generic/getorder.h>
1713  
1714 diff -urNp linux-3.0.4/arch/powerpc/include/asm/page.h linux-3.0.4/arch/powerpc/include/asm/page.h
1715 --- linux-3.0.4/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716 +++ linux-3.0.4/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718   * and needs to be executable.  This means the whole heap ends
1719   * up being executable.
1720   */
1721 -#define VM_DATA_DEFAULT_FLAGS32        (VM_READ | VM_WRITE | VM_EXEC | \
1722 -                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723 +#define VM_DATA_DEFAULT_FLAGS32 \
1724 +       (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725 +        VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726  
1727  #define VM_DATA_DEFAULT_FLAGS64        (VM_READ | VM_WRITE | \
1728                                  VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730  #define is_kernel_addr(x)      ((x) >= PAGE_OFFSET)
1731  #endif
1732  
1733 +#define ktla_ktva(addr)                (addr)
1734 +#define ktva_ktla(addr)                (addr)
1735 +
1736  #ifndef __ASSEMBLY__
1737  
1738  #undef STRICT_MM_TYPECHECKS
1739 diff -urNp linux-3.0.4/arch/powerpc/include/asm/pgtable.h linux-3.0.4/arch/powerpc/include/asm/pgtable.h
1740 --- linux-3.0.4/arch/powerpc/include/asm/pgtable.h      2011-07-21 22:17:23.000000000 -0400
1741 +++ linux-3.0.4/arch/powerpc/include/asm/pgtable.h      2011-08-23 21:47:55.000000000 -0400
1742 @@ -2,6 +2,7 @@
1743  #define _ASM_POWERPC_PGTABLE_H
1744  #ifdef __KERNEL__
1745  
1746 +#include <linux/const.h>
1747  #ifndef __ASSEMBLY__
1748  #include <asm/processor.h>             /* For TASK_SIZE */
1749  #include <asm/mmu.h>
1750 diff -urNp linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h
1751 --- linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h   2011-07-21 22:17:23.000000000 -0400
1752 +++ linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h   2011-08-23 21:47:55.000000000 -0400
1753 @@ -21,6 +21,7 @@
1754  #define _PAGE_FILE     0x004   /* when !present: nonlinear file mapping */
1755  #define _PAGE_USER     0x004   /* usermode access allowed */
1756  #define _PAGE_GUARDED  0x008   /* G: prohibit speculative access */
1757 +#define _PAGE_EXEC     _PAGE_GUARDED
1758  #define _PAGE_COHERENT 0x010   /* M: enforce memory coherence (SMP systems) */
1759  #define _PAGE_NO_CACHE 0x020   /* I: cache inhibit */
1760  #define _PAGE_WRITETHRU        0x040   /* W: cache write-through */
1761 diff -urNp linux-3.0.4/arch/powerpc/include/asm/reg.h linux-3.0.4/arch/powerpc/include/asm/reg.h
1762 --- linux-3.0.4/arch/powerpc/include/asm/reg.h  2011-07-21 22:17:23.000000000 -0400
1763 +++ linux-3.0.4/arch/powerpc/include/asm/reg.h  2011-08-23 21:47:55.000000000 -0400
1764 @@ -209,6 +209,7 @@
1765  #define SPRN_DBCR      0x136   /* e300 Data Breakpoint Control Reg */
1766  #define SPRN_DSISR     0x012   /* Data Storage Interrupt Status Register */
1767  #define   DSISR_NOHPTE         0x40000000      /* no translation found */
1768 +#define   DSISR_GUARDED                0x10000000      /* fetch from guarded storage */
1769  #define   DSISR_PROTFAULT      0x08000000      /* protection fault */
1770  #define   DSISR_ISSTORE                0x02000000      /* access was a store */
1771  #define   DSISR_DABRMATCH      0x00400000      /* hit data breakpoint */
1772 diff -urNp linux-3.0.4/arch/powerpc/include/asm/system.h linux-3.0.4/arch/powerpc/include/asm/system.h
1773 --- linux-3.0.4/arch/powerpc/include/asm/system.h       2011-07-21 22:17:23.000000000 -0400
1774 +++ linux-3.0.4/arch/powerpc/include/asm/system.h       2011-08-23 21:47:55.000000000 -0400
1775 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776  #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777  #endif
1778  
1779 -extern unsigned long arch_align_stack(unsigned long sp);
1780 +#define arch_align_stack(x) ((x) & ~0xfUL)
1781  
1782  /* Used in very early kernel initialization. */
1783  extern unsigned long reloc_offset(void);
1784 diff -urNp linux-3.0.4/arch/powerpc/include/asm/uaccess.h linux-3.0.4/arch/powerpc/include/asm/uaccess.h
1785 --- linux-3.0.4/arch/powerpc/include/asm/uaccess.h      2011-07-21 22:17:23.000000000 -0400
1786 +++ linux-3.0.4/arch/powerpc/include/asm/uaccess.h      2011-08-23 21:47:55.000000000 -0400
1787 @@ -13,6 +13,8 @@
1788  #define VERIFY_READ    0
1789  #define VERIFY_WRITE   1
1790  
1791 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792 +
1793  /*
1794   * The fs value determines whether argument validity checking should be
1795   * performed or not.  If get_fs() == USER_DS, checking is performed, with
1796 @@ -327,52 +329,6 @@ do {                                                               \
1797  extern unsigned long __copy_tofrom_user(void __user *to,
1798                 const void __user *from, unsigned long size);
1799  
1800 -#ifndef __powerpc64__
1801 -
1802 -static inline unsigned long copy_from_user(void *to,
1803 -               const void __user *from, unsigned long n)
1804 -{
1805 -       unsigned long over;
1806 -
1807 -       if (access_ok(VERIFY_READ, from, n))
1808 -               return __copy_tofrom_user((__force void __user *)to, from, n);
1809 -       if ((unsigned long)from < TASK_SIZE) {
1810 -               over = (unsigned long)from + n - TASK_SIZE;
1811 -               return __copy_tofrom_user((__force void __user *)to, from,
1812 -                               n - over) + over;
1813 -       }
1814 -       return n;
1815 -}
1816 -
1817 -static inline unsigned long copy_to_user(void __user *to,
1818 -               const void *from, unsigned long n)
1819 -{
1820 -       unsigned long over;
1821 -
1822 -       if (access_ok(VERIFY_WRITE, to, n))
1823 -               return __copy_tofrom_user(to, (__force void __user *)from, n);
1824 -       if ((unsigned long)to < TASK_SIZE) {
1825 -               over = (unsigned long)to + n - TASK_SIZE;
1826 -               return __copy_tofrom_user(to, (__force void __user *)from,
1827 -                               n - over) + over;
1828 -       }
1829 -       return n;
1830 -}
1831 -
1832 -#else /* __powerpc64__ */
1833 -
1834 -#define __copy_in_user(to, from, size) \
1835 -       __copy_tofrom_user((to), (from), (size))
1836 -
1837 -extern unsigned long copy_from_user(void *to, const void __user *from,
1838 -                                   unsigned long n);
1839 -extern unsigned long copy_to_user(void __user *to, const void *from,
1840 -                                 unsigned long n);
1841 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842 -                                 unsigned long n);
1843 -
1844 -#endif /* __powerpc64__ */
1845 -
1846  static inline unsigned long __copy_from_user_inatomic(void *to,
1847                 const void __user *from, unsigned long n)
1848  {
1849 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850                 if (ret == 0)
1851                         return 0;
1852         }
1853 +
1854 +       if (!__builtin_constant_p(n))
1855 +               check_object_size(to, n, false);
1856 +
1857         return __copy_tofrom_user((__force void __user *)to, from, n);
1858  }
1859  
1860 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861                 if (ret == 0)
1862                         return 0;
1863         }
1864 +
1865 +       if (!__builtin_constant_p(n))
1866 +               check_object_size(from, n, true);
1867 +
1868         return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869  }
1870  
1871 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872         return __copy_to_user_inatomic(to, from, size);
1873  }
1874  
1875 +#ifndef __powerpc64__
1876 +
1877 +static inline unsigned long __must_check copy_from_user(void *to,
1878 +               const void __user *from, unsigned long n)
1879 +{
1880 +       unsigned long over;
1881 +
1882 +       if ((long)n < 0)
1883 +               return n;
1884 +
1885 +       if (access_ok(VERIFY_READ, from, n)) {
1886 +               if (!__builtin_constant_p(n))
1887 +                       check_object_size(to, n, false);
1888 +               return __copy_tofrom_user((__force void __user *)to, from, n);
1889 +       }
1890 +       if ((unsigned long)from < TASK_SIZE) {
1891 +               over = (unsigned long)from + n - TASK_SIZE;
1892 +               if (!__builtin_constant_p(n - over))
1893 +                       check_object_size(to, n - over, false);
1894 +               return __copy_tofrom_user((__force void __user *)to, from,
1895 +                               n - over) + over;
1896 +       }
1897 +       return n;
1898 +}
1899 +
1900 +static inline unsigned long __must_check copy_to_user(void __user *to,
1901 +               const void *from, unsigned long n)
1902 +{
1903 +       unsigned long over;
1904 +
1905 +       if ((long)n < 0)
1906 +               return n;
1907 +
1908 +       if (access_ok(VERIFY_WRITE, to, n)) {
1909 +               if (!__builtin_constant_p(n))
1910 +                       check_object_size(from, n, true);
1911 +               return __copy_tofrom_user(to, (__force void __user *)from, n);
1912 +       }
1913 +       if ((unsigned long)to < TASK_SIZE) {
1914 +               over = (unsigned long)to + n - TASK_SIZE;
1915 +               if (!__builtin_constant_p(n))
1916 +                       check_object_size(from, n - over, true);
1917 +               return __copy_tofrom_user(to, (__force void __user *)from,
1918 +                               n - over) + over;
1919 +       }
1920 +       return n;
1921 +}
1922 +
1923 +#else /* __powerpc64__ */
1924 +
1925 +#define __copy_in_user(to, from, size) \
1926 +       __copy_tofrom_user((to), (from), (size))
1927 +
1928 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929 +{
1930 +       if ((long)n < 0 || n > INT_MAX)
1931 +               return n;
1932 +
1933 +       if (!__builtin_constant_p(n))
1934 +               check_object_size(to, n, false);
1935 +
1936 +       if (likely(access_ok(VERIFY_READ, from, n)))
1937 +               n = __copy_from_user(to, from, n);
1938 +       else
1939 +               memset(to, 0, n);
1940 +       return n;
1941 +}
1942 +
1943 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944 +{
1945 +       if ((long)n < 0 || n > INT_MAX)
1946 +               return n;
1947 +
1948 +       if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949 +               if (!__builtin_constant_p(n))
1950 +                       check_object_size(from, n, true);
1951 +               n = __copy_to_user(to, from, n);
1952 +       }
1953 +       return n;
1954 +}
1955 +
1956 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957 +                                 unsigned long n);
1958 +
1959 +#endif /* __powerpc64__ */
1960 +
1961  extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962  
1963  static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964 diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S
1965 --- linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S    2011-07-21 22:17:23.000000000 -0400
1966 +++ linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S    2011-08-23 21:47:55.000000000 -0400
1967 @@ -567,6 +567,7 @@ storage_fault_common:
1968         std     r14,_DAR(r1)
1969         std     r15,_DSISR(r1)
1970         addi    r3,r1,STACK_FRAME_OVERHEAD
1971 +       bl      .save_nvgprs
1972         mr      r4,r14
1973         mr      r5,r15
1974         ld      r14,PACA_EXGEN+EX_R14(r13)
1975 @@ -576,8 +577,7 @@ storage_fault_common:
1976         cmpdi   r3,0
1977         bne-    1f
1978         b       .ret_from_except_lite
1979 -1:     bl      .save_nvgprs
1980 -       mr      r5,r3
1981 +1:     mr      r5,r3
1982         addi    r3,r1,STACK_FRAME_OVERHEAD
1983         ld      r4,_DAR(r1)
1984         bl      .bad_page_fault
1985 diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S
1986 --- linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S    2011-07-21 22:17:23.000000000 -0400
1987 +++ linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S    2011-08-23 21:47:55.000000000 -0400
1988 @@ -956,10 +956,10 @@ handle_page_fault:
1989  11:    ld      r4,_DAR(r1)
1990         ld      r5,_DSISR(r1)
1991         addi    r3,r1,STACK_FRAME_OVERHEAD
1992 +       bl      .save_nvgprs
1993         bl      .do_page_fault
1994         cmpdi   r3,0
1995         beq+    13f
1996 -       bl      .save_nvgprs
1997         mr      r5,r3
1998         addi    r3,r1,STACK_FRAME_OVERHEAD
1999         lwz     r4,_DAR(r1)
2000 diff -urNp linux-3.0.4/arch/powerpc/kernel/module_32.c linux-3.0.4/arch/powerpc/kernel/module_32.c
2001 --- linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002 +++ linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004                         me->arch.core_plt_section = i;
2005         }
2006         if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007 -               printk("Module doesn't contain .plt or .init.plt sections.\n");
2008 +               printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009                 return -ENOEXEC;
2010         }
2011  
2012 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013  
2014         DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015         /* Init, or core PLT? */
2016 -       if (location >= mod->module_core
2017 -           && location < mod->module_core + mod->core_size)
2018 +       if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019 +           (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020                 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021 -       else
2022 +       else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023 +                (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024                 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025 +       else {
2026 +               printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027 +               return ~0UL;
2028 +       }
2029  
2030         /* Find this entry, or if that fails, the next avail. entry */
2031         while (entry->jump[0]) {
2032 diff -urNp linux-3.0.4/arch/powerpc/kernel/module.c linux-3.0.4/arch/powerpc/kernel/module.c
2033 --- linux-3.0.4/arch/powerpc/kernel/module.c    2011-07-21 22:17:23.000000000 -0400
2034 +++ linux-3.0.4/arch/powerpc/kernel/module.c    2011-08-23 21:47:55.000000000 -0400
2035 @@ -31,11 +31,24 @@
2036  
2037  LIST_HEAD(module_bug_list);
2038  
2039 +#ifdef CONFIG_PAX_KERNEXEC
2040  void *module_alloc(unsigned long size)
2041  {
2042         if (size == 0)
2043                 return NULL;
2044  
2045 +       return vmalloc(size);
2046 +}
2047 +
2048 +void *module_alloc_exec(unsigned long size)
2049 +#else
2050 +void *module_alloc(unsigned long size)
2051 +#endif
2052 +
2053 +{
2054 +       if (size == 0)
2055 +               return NULL;
2056 +
2057         return vmalloc_exec(size);
2058  }
2059  
2060 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061         vfree(module_region);
2062  }
2063  
2064 +#ifdef CONFIG_PAX_KERNEXEC
2065 +void module_free_exec(struct module *mod, void *module_region)
2066 +{
2067 +       module_free(mod, module_region);
2068 +}
2069 +#endif
2070 +
2071  static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072                                     const Elf_Shdr *sechdrs,
2073                                     const char *name)
2074 diff -urNp linux-3.0.4/arch/powerpc/kernel/process.c linux-3.0.4/arch/powerpc/kernel/process.c
2075 --- linux-3.0.4/arch/powerpc/kernel/process.c   2011-07-21 22:17:23.000000000 -0400
2076 +++ linux-3.0.4/arch/powerpc/kernel/process.c   2011-08-23 21:48:14.000000000 -0400
2077 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078          * Lookup NIP late so we have the best change of getting the
2079          * above info out without failing
2080          */
2081 -       printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082 -       printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083 +       printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084 +       printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085  #endif
2086         show_stack(current, (unsigned long *) regs->gpr[1]);
2087         if (!user_mode(regs))
2088 @@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089                 newsp = stack[0];
2090                 ip = stack[STACK_FRAME_LR_SAVE];
2091                 if (!firstframe || ip != lr) {
2092 -                       printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093 +                       printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095                         if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096 -                               printk(" (%pS)",
2097 +                               printk(" (%pA)",
2098                                        (void *)current->ret_stack[curr_frame].ret);
2099                                 curr_frame--;
2100                         }
2101 @@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102                         struct pt_regs *regs = (struct pt_regs *)
2103                                 (sp + STACK_FRAME_OVERHEAD);
2104                         lr = regs->link;
2105 -                       printk("--- Exception: %lx at %pS\n    LR = %pS\n",
2106 +                       printk("--- Exception: %lx at %pA\n    LR = %pA\n",
2107                                regs->trap, (void *)regs->nip, (void *)lr);
2108                         firstframe = 1;
2109                 }
2110 @@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111  }
2112  
2113  #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114 -
2115 -unsigned long arch_align_stack(unsigned long sp)
2116 -{
2117 -       if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118 -               sp -= get_random_int() & ~PAGE_MASK;
2119 -       return sp & ~0xf;
2120 -}
2121 -
2122 -static inline unsigned long brk_rnd(void)
2123 -{
2124 -        unsigned long rnd = 0;
2125 -
2126 -       /* 8MB for 32bit, 1GB for 64bit */
2127 -       if (is_32bit_task())
2128 -               rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129 -       else
2130 -               rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131 -
2132 -       return rnd << PAGE_SHIFT;
2133 -}
2134 -
2135 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2136 -{
2137 -       unsigned long base = mm->brk;
2138 -       unsigned long ret;
2139 -
2140 -#ifdef CONFIG_PPC_STD_MMU_64
2141 -       /*
2142 -        * If we are using 1TB segments and we are allowed to randomise
2143 -        * the heap, we can put it above 1TB so it is backed by a 1TB
2144 -        * segment. Otherwise the heap will be in the bottom 1TB
2145 -        * which always uses 256MB segments and this may result in a
2146 -        * performance penalty.
2147 -        */
2148 -       if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149 -               base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150 -#endif
2151 -
2152 -       ret = PAGE_ALIGN(base + brk_rnd());
2153 -
2154 -       if (ret < mm->brk)
2155 -               return mm->brk;
2156 -
2157 -       return ret;
2158 -}
2159 -
2160 -unsigned long randomize_et_dyn(unsigned long base)
2161 -{
2162 -       unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163 -
2164 -       if (ret < base)
2165 -               return base;
2166 -
2167 -       return ret;
2168 -}
2169 diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_32.c linux-3.0.4/arch/powerpc/kernel/signal_32.c
2170 --- linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171 +++ linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173         /* Save user registers on the stack */
2174         frame = &rt_sf->uc.uc_mcontext;
2175         addr = frame;
2176 -       if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177 +       if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178                 if (save_user_regs(regs, frame, 0, 1))
2179                         goto badframe;
2180                 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181 diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_64.c linux-3.0.4/arch/powerpc/kernel/signal_64.c
2182 --- linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183 +++ linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185         current->thread.fpscr.val = 0;
2186  
2187         /* Set up to return from userspace. */
2188 -       if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189 +       if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190                 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191         } else {
2192                 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193 diff -urNp linux-3.0.4/arch/powerpc/kernel/traps.c linux-3.0.4/arch/powerpc/kernel/traps.c
2194 --- linux-3.0.4/arch/powerpc/kernel/traps.c     2011-07-21 22:17:23.000000000 -0400
2195 +++ linux-3.0.4/arch/powerpc/kernel/traps.c     2011-08-23 21:48:14.000000000 -0400
2196 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197  static inline void pmac_backlight_unblank(void) { }
2198  #endif
2199  
2200 +extern void gr_handle_kernel_exploit(void);
2201 +
2202  int die(const char *str, struct pt_regs *regs, long err)
2203  {
2204         static struct {
2205 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs 
2206         if (panic_on_oops)
2207                 panic("Fatal exception");
2208  
2209 +       gr_handle_kernel_exploit();
2210 +
2211         oops_exit();
2212         do_exit(err);
2213  
2214 diff -urNp linux-3.0.4/arch/powerpc/kernel/vdso.c linux-3.0.4/arch/powerpc/kernel/vdso.c
2215 --- linux-3.0.4/arch/powerpc/kernel/vdso.c      2011-07-21 22:17:23.000000000 -0400
2216 +++ linux-3.0.4/arch/powerpc/kernel/vdso.c      2011-08-23 21:47:55.000000000 -0400
2217 @@ -36,6 +36,7 @@
2218  #include <asm/firmware.h>
2219  #include <asm/vdso.h>
2220  #include <asm/vdso_datapage.h>
2221 +#include <asm/mman.h>
2222  
2223  #include "setup.h"
2224  
2225 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226         vdso_base = VDSO32_MBASE;
2227  #endif
2228  
2229 -       current->mm->context.vdso_base = 0;
2230 +       current->mm->context.vdso_base = ~0UL;
2231  
2232         /* vDSO has a problem and was disabled, just don't "enable" it for the
2233          * process
2234 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235         vdso_base = get_unmapped_area(NULL, vdso_base,
2236                                       (vdso_pages << PAGE_SHIFT) +
2237                                       ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238 -                                     0, 0);
2239 +                                     0, MAP_PRIVATE | MAP_EXECUTABLE);
2240         if (IS_ERR_VALUE(vdso_base)) {
2241                 rc = vdso_base;
2242                 goto fail_mmapsem;
2243 diff -urNp linux-3.0.4/arch/powerpc/lib/usercopy_64.c linux-3.0.4/arch/powerpc/lib/usercopy_64.c
2244 --- linux-3.0.4/arch/powerpc/lib/usercopy_64.c  2011-07-21 22:17:23.000000000 -0400
2245 +++ linux-3.0.4/arch/powerpc/lib/usercopy_64.c  2011-08-23 21:47:55.000000000 -0400
2246 @@ -9,22 +9,6 @@
2247  #include <linux/module.h>
2248  #include <asm/uaccess.h>
2249  
2250 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251 -{
2252 -       if (likely(access_ok(VERIFY_READ, from, n)))
2253 -               n = __copy_from_user(to, from, n);
2254 -       else
2255 -               memset(to, 0, n);
2256 -       return n;
2257 -}
2258 -
2259 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260 -{
2261 -       if (likely(access_ok(VERIFY_WRITE, to, n)))
2262 -               n = __copy_to_user(to, from, n);
2263 -       return n;
2264 -}
2265 -
2266  unsigned long copy_in_user(void __user *to, const void __user *from,
2267                            unsigned long n)
2268  {
2269 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270         return n;
2271  }
2272  
2273 -EXPORT_SYMBOL(copy_from_user);
2274 -EXPORT_SYMBOL(copy_to_user);
2275  EXPORT_SYMBOL(copy_in_user);
2276  
2277 diff -urNp linux-3.0.4/arch/powerpc/mm/fault.c linux-3.0.4/arch/powerpc/mm/fault.c
2278 --- linux-3.0.4/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279 +++ linux-3.0.4/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280 @@ -32,6 +32,10 @@
2281  #include <linux/perf_event.h>
2282  #include <linux/magic.h>
2283  #include <linux/ratelimit.h>
2284 +#include <linux/slab.h>
2285 +#include <linux/pagemap.h>
2286 +#include <linux/compiler.h>
2287 +#include <linux/unistd.h>
2288  
2289  #include <asm/firmware.h>
2290  #include <asm/page.h>
2291 @@ -43,6 +47,7 @@
2292  #include <asm/tlbflush.h>
2293  #include <asm/siginfo.h>
2294  #include <mm/mmu_decl.h>
2295 +#include <asm/ptrace.h>
2296  
2297  #ifdef CONFIG_KPROBES
2298  static inline int notify_page_fault(struct pt_regs *regs)
2299 @@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300  }
2301  #endif
2302  
2303 +#ifdef CONFIG_PAX_PAGEEXEC
2304 +/*
2305 + * PaX: decide what to do with offenders (regs->nip = fault address)
2306 + *
2307 + * returns 1 when task should be killed
2308 + */
2309 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2310 +{
2311 +       return 1;
2312 +}
2313 +
2314 +void pax_report_insns(void *pc, void *sp)
2315 +{
2316 +       unsigned long i;
2317 +
2318 +       printk(KERN_ERR "PAX: bytes at PC: ");
2319 +       for (i = 0; i < 5; i++) {
2320 +               unsigned int c;
2321 +               if (get_user(c, (unsigned int __user *)pc+i))
2322 +                       printk(KERN_CONT "???????? ");
2323 +               else
2324 +                       printk(KERN_CONT "%08x ", c);
2325 +       }
2326 +       printk("\n");
2327 +}
2328 +#endif
2329 +
2330  /*
2331   * Check whether the instruction at regs->nip is a store using
2332   * an update addressing form which will update r1.
2333 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334          * indicate errors in DSISR but can validly be set in SRR1.
2335          */
2336         if (trap == 0x400)
2337 -               error_code &= 0x48200000;
2338 +               error_code &= 0x58200000;
2339         else
2340                 is_write = error_code & DSISR_ISSTORE;
2341  #else
2342 @@ -259,7 +291,7 @@ good_area:
2343           * "undefined".  Of those that can be set, this is the only
2344           * one which seems bad.
2345           */
2346 -       if (error_code & 0x10000000)
2347 +       if (error_code & DSISR_GUARDED)
2348                  /* Guarded storage error. */
2349                 goto bad_area;
2350  #endif /* CONFIG_8xx */
2351 @@ -274,7 +306,7 @@ good_area:
2352                  * processors use the same I/D cache coherency mechanism
2353                  * as embedded.
2354                  */
2355 -               if (error_code & DSISR_PROTFAULT)
2356 +               if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357                         goto bad_area;
2358  #endif /* CONFIG_PPC_STD_MMU */
2359  
2360 @@ -343,6 +375,23 @@ bad_area:
2361  bad_area_nosemaphore:
2362         /* User mode accesses cause a SIGSEGV */
2363         if (user_mode(regs)) {
2364 +
2365 +#ifdef CONFIG_PAX_PAGEEXEC
2366 +               if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367 +#ifdef CONFIG_PPC_STD_MMU
2368 +                       if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369 +#else
2370 +                       if (is_exec && regs->nip == address) {
2371 +#endif
2372 +                               switch (pax_handle_fetch_fault(regs)) {
2373 +                               }
2374 +
2375 +                               pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376 +                               do_group_exit(SIGKILL);
2377 +                       }
2378 +               }
2379 +#endif
2380 +
2381                 _exception(SIGSEGV, regs, code, address);
2382                 return 0;
2383         }
2384 diff -urNp linux-3.0.4/arch/powerpc/mm/mmap_64.c linux-3.0.4/arch/powerpc/mm/mmap_64.c
2385 --- linux-3.0.4/arch/powerpc/mm/mmap_64.c       2011-07-21 22:17:23.000000000 -0400
2386 +++ linux-3.0.4/arch/powerpc/mm/mmap_64.c       2011-08-23 21:47:55.000000000 -0400
2387 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388          */
2389         if (mmap_is_legacy()) {
2390                 mm->mmap_base = TASK_UNMAPPED_BASE;
2391 +
2392 +#ifdef CONFIG_PAX_RANDMMAP
2393 +               if (mm->pax_flags & MF_PAX_RANDMMAP)
2394 +                       mm->mmap_base += mm->delta_mmap;
2395 +#endif
2396 +
2397                 mm->get_unmapped_area = arch_get_unmapped_area;
2398                 mm->unmap_area = arch_unmap_area;
2399         } else {
2400                 mm->mmap_base = mmap_base();
2401 +
2402 +#ifdef CONFIG_PAX_RANDMMAP
2403 +               if (mm->pax_flags & MF_PAX_RANDMMAP)
2404 +                       mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405 +#endif
2406 +
2407                 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408                 mm->unmap_area = arch_unmap_area_topdown;
2409         }
2410 diff -urNp linux-3.0.4/arch/powerpc/mm/slice.c linux-3.0.4/arch/powerpc/mm/slice.c
2411 --- linux-3.0.4/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412 +++ linux-3.0.4/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414         if ((mm->task_size - len) < addr)
2415                 return 0;
2416         vma = find_vma(mm, addr);
2417 -       return (!vma || (addr + len) <= vma->vm_start);
2418 +       return check_heap_stack_gap(vma, addr, len);
2419  }
2420  
2421  static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422 @@ -256,7 +256,7 @@ full_search:
2423                                 addr = _ALIGN_UP(addr + 1,  1ul << SLICE_HIGH_SHIFT);
2424                         continue;
2425                 }
2426 -               if (!vma || addr + len <= vma->vm_start) {
2427 +               if (check_heap_stack_gap(vma, addr, len)) {
2428                         /*
2429                          * Remember the place where we stopped the search:
2430                          */
2431 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432                 }
2433         }
2434  
2435 -       addr = mm->mmap_base;
2436 -       while (addr > len) {
2437 +       if (mm->mmap_base < len)
2438 +               addr = -ENOMEM;
2439 +       else
2440 +               addr = mm->mmap_base - len;
2441 +
2442 +       while (!IS_ERR_VALUE(addr)) {
2443                 /* Go down by chunk size */
2444 -               addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445 +               addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446  
2447                 /* Check for hit with different page size */
2448                 mask = slice_range_to_mask(addr, len);
2449 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450                  * return with success:
2451                  */
2452                 vma = find_vma(mm, addr);
2453 -               if (!vma || (addr + len) <= vma->vm_start) {
2454 +               if (check_heap_stack_gap(vma, addr, len)) {
2455                         /* remember the address as a hint for next time */
2456                         if (use_cache)
2457                                 mm->free_area_cache = addr;
2458 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459                         mm->cached_hole_size = vma->vm_start - addr;
2460  
2461                 /* try just below the current vma->vm_start */
2462 -               addr = vma->vm_start;
2463 +               addr = skip_heap_stack_gap(vma, len);
2464         }
2465  
2466         /*
2467 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468         if (fixed && addr > (mm->task_size - len))
2469                 return -EINVAL;
2470  
2471 +#ifdef CONFIG_PAX_RANDMMAP
2472 +       if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473 +               addr = 0;
2474 +#endif
2475 +
2476         /* If hint, make sure it matches our alignment restrictions */
2477         if (!fixed && addr) {
2478                 addr = _ALIGN_UP(addr, 1ul << pshift);
2479 diff -urNp linux-3.0.4/arch/s390/include/asm/elf.h linux-3.0.4/arch/s390/include/asm/elf.h
2480 --- linux-3.0.4/arch/s390/include/asm/elf.h     2011-07-21 22:17:23.000000000 -0400
2481 +++ linux-3.0.4/arch/s390/include/asm/elf.h     2011-08-23 21:47:55.000000000 -0400
2482 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483     the loader.  We need to make sure that it is out of the way of the program
2484     that it will "exec", and that there is sufficient room for the brk.  */
2485  
2486 -extern unsigned long randomize_et_dyn(unsigned long base);
2487 -#define ELF_ET_DYN_BASE                (randomize_et_dyn(STACK_TOP / 3 * 2))
2488 +#define ELF_ET_DYN_BASE                (STACK_TOP / 3 * 2)
2489 +
2490 +#ifdef CONFIG_PAX_ASLR
2491 +#define PAX_ELF_ET_DYN_BASE    (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492 +
2493 +#define PAX_DELTA_MMAP_LEN     (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494 +#define PAX_DELTA_STACK_LEN    (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495 +#endif
2496  
2497  /* This yields a mask that user programs can use to figure out what
2498     instruction set this CPU supports. */
2499 @@ -210,7 +216,4 @@ struct linux_binprm;
2500  #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501  int arch_setup_additional_pages(struct linux_binprm *, int);
2502  
2503 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504 -#define arch_randomize_brk arch_randomize_brk
2505 -
2506  #endif
2507 diff -urNp linux-3.0.4/arch/s390/include/asm/system.h linux-3.0.4/arch/s390/include/asm/system.h
2508 --- linux-3.0.4/arch/s390/include/asm/system.h  2011-07-21 22:17:23.000000000 -0400
2509 +++ linux-3.0.4/arch/s390/include/asm/system.h  2011-08-23 21:47:55.000000000 -0400
2510 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511  extern void (*_machine_halt)(void);
2512  extern void (*_machine_power_off)(void);
2513  
2514 -extern unsigned long arch_align_stack(unsigned long sp);
2515 +#define arch_align_stack(x) ((x) & ~0xfUL)
2516  
2517  static inline int tprot(unsigned long addr)
2518  {
2519 diff -urNp linux-3.0.4/arch/s390/include/asm/uaccess.h linux-3.0.4/arch/s390/include/asm/uaccess.h
2520 --- linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521 +++ linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523  copy_to_user(void __user *to, const void *from, unsigned long n)
2524  {
2525         might_fault();
2526 +
2527 +       if ((long)n < 0)
2528 +               return n;
2529 +
2530         if (access_ok(VERIFY_WRITE, to, n))
2531                 n = __copy_to_user(to, from, n);
2532         return n;
2533 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534  static inline unsigned long __must_check
2535  __copy_from_user(void *to, const void __user *from, unsigned long n)
2536  {
2537 +       if ((long)n < 0)
2538 +               return n;
2539 +
2540         if (__builtin_constant_p(n) && (n <= 256))
2541                 return uaccess.copy_from_user_small(n, from, to);
2542         else
2543 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544         unsigned int sz = __compiletime_object_size(to);
2545  
2546         might_fault();
2547 +
2548 +       if ((long)n < 0)
2549 +               return n;
2550 +
2551         if (unlikely(sz != -1 && sz < n)) {
2552                 copy_from_user_overflow();
2553                 return n;
2554 diff -urNp linux-3.0.4/arch/s390/kernel/module.c linux-3.0.4/arch/s390/kernel/module.c
2555 --- linux-3.0.4/arch/s390/kernel/module.c       2011-07-21 22:17:23.000000000 -0400
2556 +++ linux-3.0.4/arch/s390/kernel/module.c       2011-08-23 21:47:55.000000000 -0400
2557 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558  
2559         /* Increase core size by size of got & plt and set start
2560            offsets for got and plt. */
2561 -       me->core_size = ALIGN(me->core_size, 4);
2562 -       me->arch.got_offset = me->core_size;
2563 -       me->core_size += me->arch.got_size;
2564 -       me->arch.plt_offset = me->core_size;
2565 -       me->core_size += me->arch.plt_size;
2566 +       me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567 +       me->arch.got_offset = me->core_size_rw;
2568 +       me->core_size_rw += me->arch.got_size;
2569 +       me->arch.plt_offset = me->core_size_rx;
2570 +       me->core_size_rx += me->arch.plt_size;
2571         return 0;
2572  }
2573  
2574 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575                 if (info->got_initialized == 0) {
2576                         Elf_Addr *gotent;
2577  
2578 -                       gotent = me->module_core + me->arch.got_offset +
2579 +                       gotent = me->module_core_rw + me->arch.got_offset +
2580                                 info->got_offset;
2581                         *gotent = val;
2582                         info->got_initialized = 1;
2583 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584                 else if (r_type == R_390_GOTENT ||
2585                          r_type == R_390_GOTPLTENT)
2586                         *(unsigned int *) loc =
2587 -                               (val + (Elf_Addr) me->module_core - loc) >> 1;
2588 +                               (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589                 else if (r_type == R_390_GOT64 ||
2590                          r_type == R_390_GOTPLT64)
2591                         *(unsigned long *) loc = val;
2592 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593         case R_390_PLTOFF64:    /* 16 bit offset from GOT to PLT. */
2594                 if (info->plt_initialized == 0) {
2595                         unsigned int *ip;
2596 -                       ip = me->module_core + me->arch.plt_offset +
2597 +                       ip = me->module_core_rx + me->arch.plt_offset +
2598                                 info->plt_offset;
2599  #ifndef CONFIG_64BIT
2600                         ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602                                val - loc + 0xffffUL < 0x1ffffeUL) ||
2603                               (r_type == R_390_PLT32DBL &&
2604                                val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605 -                               val = (Elf_Addr) me->module_core +
2606 +                               val = (Elf_Addr) me->module_core_rx +
2607                                         me->arch.plt_offset +
2608                                         info->plt_offset;
2609                         val += rela->r_addend - loc;
2610 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611         case R_390_GOTOFF32:    /* 32 bit offset to GOT.  */
2612         case R_390_GOTOFF64:    /* 64 bit offset to GOT. */
2613                 val = val + rela->r_addend -
2614 -                       ((Elf_Addr) me->module_core + me->arch.got_offset);
2615 +                       ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616                 if (r_type == R_390_GOTOFF16)
2617                         *(unsigned short *) loc = val;
2618                 else if (r_type == R_390_GOTOFF32)
2619 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620                 break;
2621         case R_390_GOTPC:       /* 32 bit PC relative offset to GOT. */
2622         case R_390_GOTPCDBL:    /* 32 bit PC rel. off. to GOT shifted by 1. */
2623 -               val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624 +               val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625                         rela->r_addend - loc;
2626                 if (r_type == R_390_GOTPC)
2627                         *(unsigned int *) loc = val;
2628 diff -urNp linux-3.0.4/arch/s390/kernel/process.c linux-3.0.4/arch/s390/kernel/process.c
2629 --- linux-3.0.4/arch/s390/kernel/process.c      2011-07-21 22:17:23.000000000 -0400
2630 +++ linux-3.0.4/arch/s390/kernel/process.c      2011-08-23 21:47:55.000000000 -0400
2631 @@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632         }
2633         return 0;
2634  }
2635 -
2636 -unsigned long arch_align_stack(unsigned long sp)
2637 -{
2638 -       if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639 -               sp -= get_random_int() & ~PAGE_MASK;
2640 -       return sp & ~0xf;
2641 -}
2642 -
2643 -static inline unsigned long brk_rnd(void)
2644 -{
2645 -       /* 8MB for 32bit, 1GB for 64bit */
2646 -       if (is_32bit_task())
2647 -               return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648 -       else
2649 -               return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650 -}
2651 -
2652 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2653 -{
2654 -       unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655 -
2656 -       if (ret < mm->brk)
2657 -               return mm->brk;
2658 -       return ret;
2659 -}
2660 -
2661 -unsigned long randomize_et_dyn(unsigned long base)
2662 -{
2663 -       unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664 -
2665 -       if (!(current->flags & PF_RANDOMIZE))
2666 -               return base;
2667 -       if (ret < base)
2668 -               return base;
2669 -       return ret;
2670 -}
2671 diff -urNp linux-3.0.4/arch/s390/kernel/setup.c linux-3.0.4/arch/s390/kernel/setup.c
2672 --- linux-3.0.4/arch/s390/kernel/setup.c        2011-07-21 22:17:23.000000000 -0400
2673 +++ linux-3.0.4/arch/s390/kernel/setup.c        2011-08-23 21:47:55.000000000 -0400
2674 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675  }
2676  early_param("mem", early_parse_mem);
2677  
2678 -unsigned int user_mode = HOME_SPACE_MODE;
2679 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2680  EXPORT_SYMBOL_GPL(user_mode);
2681  
2682  static int set_amode_and_uaccess(unsigned long user_amode,
2683 diff -urNp linux-3.0.4/arch/s390/mm/mmap.c linux-3.0.4/arch/s390/mm/mmap.c
2684 --- linux-3.0.4/arch/s390/mm/mmap.c     2011-07-21 22:17:23.000000000 -0400
2685 +++ linux-3.0.4/arch/s390/mm/mmap.c     2011-08-23 21:47:55.000000000 -0400
2686 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687          */
2688         if (mmap_is_legacy()) {
2689                 mm->mmap_base = TASK_UNMAPPED_BASE;
2690 +
2691 +#ifdef CONFIG_PAX_RANDMMAP
2692 +               if (mm->pax_flags & MF_PAX_RANDMMAP)
2693 +                       mm->mmap_base += mm->delta_mmap;
2694 +#endif
2695 +
2696                 mm->get_unmapped_area = arch_get_unmapped_area;
2697                 mm->unmap_area = arch_unmap_area;
2698         } else {
2699                 mm->mmap_base = mmap_base();
2700 +
2701 +#ifdef CONFIG_PAX_RANDMMAP
2702 +               if (mm->pax_flags & MF_PAX_RANDMMAP)
2703 +                       mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704 +#endif
2705 +
2706                 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707                 mm->unmap_area = arch_unmap_area_topdown;
2708         }
2709 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710          */
2711         if (mmap_is_legacy()) {
2712                 mm->mmap_base = TASK_UNMAPPED_BASE;
2713 +
2714 +#ifdef CONFIG_PAX_RANDMMAP
2715 +               if (mm->pax_flags & MF_PAX_RANDMMAP)
2716 +                       mm->mmap_base += mm->delta_mmap;
2717 +#endif
2718 +
2719                 mm->get_unmapped_area = s390_get_unmapped_area;
2720                 mm->unmap_area = arch_unmap_area;
2721         } else {
2722                 mm->mmap_base = mmap_base();
2723 +
2724 +#ifdef CONFIG_PAX_RANDMMAP
2725 +               if (mm->pax_flags & MF_PAX_RANDMMAP)
2726 +                       mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727 +#endif
2728 +
2729                 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730                 mm->unmap_area = arch_unmap_area_topdown;
2731         }
2732 diff -urNp linux-3.0.4/arch/score/include/asm/system.h linux-3.0.4/arch/score/include/asm/system.h
2733 --- linux-3.0.4/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734 +++ linux-3.0.4/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735 @@ -17,7 +17,7 @@ do {                                                          \
2736  #define finish_arch_switch(prev)       do {} while (0)
2737  
2738  typedef void (*vi_handler_t)(void);
2739 -extern unsigned long arch_align_stack(unsigned long sp);
2740 +#define arch_align_stack(x) (x)
2741  
2742  #define mb()           barrier()
2743  #define rmb()          barrier()
2744 diff -urNp linux-3.0.4/arch/score/kernel/process.c linux-3.0.4/arch/score/kernel/process.c
2745 --- linux-3.0.4/arch/score/kernel/process.c     2011-07-21 22:17:23.000000000 -0400
2746 +++ linux-3.0.4/arch/score/kernel/process.c     2011-08-23 21:47:55.000000000 -0400
2747 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748  
2749         return task_pt_regs(task)->cp0_epc;
2750  }
2751 -
2752 -unsigned long arch_align_stack(unsigned long sp)
2753 -{
2754 -       return sp;
2755 -}
2756 diff -urNp linux-3.0.4/arch/sh/mm/mmap.c linux-3.0.4/arch/sh/mm/mmap.c
2757 --- linux-3.0.4/arch/sh/mm/mmap.c       2011-07-21 22:17:23.000000000 -0400
2758 +++ linux-3.0.4/arch/sh/mm/mmap.c       2011-08-23 21:47:55.000000000 -0400
2759 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760                         addr = PAGE_ALIGN(addr);
2761  
2762                 vma = find_vma(mm, addr);
2763 -               if (TASK_SIZE - len >= addr &&
2764 -                   (!vma || addr + len <= vma->vm_start))
2765 +               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766                         return addr;
2767         }
2768  
2769 @@ -106,7 +105,7 @@ full_search:
2770                         }
2771                         return -ENOMEM;
2772                 }
2773 -               if (likely(!vma || addr + len <= vma->vm_start)) {
2774 +               if (likely(check_heap_stack_gap(vma, addr, len))) {
2775                         /*
2776                          * Remember the place where we stopped the search:
2777                          */
2778 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779                         addr = PAGE_ALIGN(addr);
2780  
2781                 vma = find_vma(mm, addr);
2782 -               if (TASK_SIZE - len >= addr &&
2783 -                   (!vma || addr + len <= vma->vm_start))
2784 +               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785                         return addr;
2786         }
2787  
2788 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789         /* make sure it can fit in the remaining address space */
2790         if (likely(addr > len)) {
2791                 vma = find_vma(mm, addr-len);
2792 -               if (!vma || addr <= vma->vm_start) {
2793 +               if (check_heap_stack_gap(vma, addr - len, len)) {
2794                         /* remember the address as a hint for next time */
2795                         return (mm->free_area_cache = addr-len);
2796                 }
2797 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798         if (unlikely(mm->mmap_base < len))
2799                 goto bottomup;
2800  
2801 -       addr = mm->mmap_base-len;
2802 -       if (do_colour_align)
2803 -               addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804 +       addr = mm->mmap_base - len;
2805  
2806         do {
2807 +               if (do_colour_align)
2808 +                       addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809                 /*
2810                  * Lookup failure means no vma is above this address,
2811                  * else if new region fits below vma->vm_start,
2812                  * return with success:
2813                  */
2814                 vma = find_vma(mm, addr);
2815 -               if (likely(!vma || addr+len <= vma->vm_start)) {
2816 +               if (likely(check_heap_stack_gap(vma, addr, len))) {
2817                         /* remember the address as a hint for next time */
2818                         return (mm->free_area_cache = addr);
2819                 }
2820 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821                         mm->cached_hole_size = vma->vm_start - addr;
2822  
2823                 /* try just below the current vma->vm_start */
2824 -               addr = vma->vm_start-len;
2825 -               if (do_colour_align)
2826 -                       addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827 -       } while (likely(len < vma->vm_start));
2828 +               addr = skip_heap_stack_gap(vma, len);
2829 +       } while (!IS_ERR_VALUE(addr));
2830  
2831  bottomup:
2832         /*
2833 diff -urNp linux-3.0.4/arch/sparc/include/asm/atomic_64.h linux-3.0.4/arch/sparc/include/asm/atomic_64.h
2834 --- linux-3.0.4/arch/sparc/include/asm/atomic_64.h      2011-07-21 22:17:23.000000000 -0400
2835 +++ linux-3.0.4/arch/sparc/include/asm/atomic_64.h      2011-08-23 21:48:14.000000000 -0400
2836 @@ -14,18 +14,40 @@
2837  #define ATOMIC64_INIT(i)       { (i) }
2838  
2839  #define atomic_read(v)         (*(volatile int *)&(v)->counter)
2840 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841 +{
2842 +       return v->counter;
2843 +}
2844  #define atomic64_read(v)       (*(volatile long *)&(v)->counter)
2845 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846 +{
2847 +       return v->counter;
2848 +}
2849  
2850  #define atomic_set(v, i)       (((v)->counter) = i)
2851 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852 +{
2853 +       v->counter = i;
2854 +}
2855  #define atomic64_set(v, i)     (((v)->counter) = i)
2856 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857 +{
2858 +       v->counter = i;
2859 +}
2860  
2861  extern void atomic_add(int, atomic_t *);
2862 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863  extern void atomic64_add(long, atomic64_t *);
2864 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865  extern void atomic_sub(int, atomic_t *);
2866 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867  extern void atomic64_sub(long, atomic64_t *);
2868 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869  
2870  extern int atomic_add_ret(int, atomic_t *);
2871 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872  extern long atomic64_add_ret(long, atomic64_t *);
2873 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874  extern int atomic_sub_ret(int, atomic_t *);
2875  extern long atomic64_sub_ret(long, atomic64_t *);
2876  
2877 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878  #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879  
2880  #define atomic_inc_return(v) atomic_add_ret(1, v)
2881 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882 +{
2883 +       return atomic_add_ret_unchecked(1, v);
2884 +}
2885  #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887 +{
2888 +       return atomic64_add_ret_unchecked(1, v);
2889 +}
2890  
2891  #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892  #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893  
2894  #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896 +{
2897 +       return atomic_add_ret_unchecked(i, v);
2898 +}
2899  #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901 +{
2902 +       return atomic64_add_ret_unchecked(i, v);
2903 +}
2904  
2905  /*
2906   * atomic_inc_and_test - increment and test
2907 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908   * other cases.
2909   */
2910  #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912 +{
2913 +       return atomic_inc_return_unchecked(v) == 0;
2914 +}
2915  #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916  
2917  #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919  #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920  
2921  #define atomic_inc(v) atomic_add(1, v)
2922 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923 +{
2924 +       atomic_add_unchecked(1, v);
2925 +}
2926  #define atomic64_inc(v) atomic64_add(1, v)
2927 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928 +{
2929 +       atomic64_add_unchecked(1, v);
2930 +}
2931  
2932  #define atomic_dec(v) atomic_sub(1, v)
2933 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934 +{
2935 +       atomic_sub_unchecked(1, v);
2936 +}
2937  #define atomic64_dec(v) atomic64_sub(1, v)
2938 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939 +{
2940 +       atomic64_sub_unchecked(1, v);
2941 +}
2942  
2943  #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944  #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945  
2946  #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948 +{
2949 +       return cmpxchg(&v->counter, old, new);
2950 +}
2951  #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953 +{
2954 +       return xchg(&v->counter, new);
2955 +}
2956  
2957  static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958  {
2959 -       int c, old;
2960 +       int c, old, new;
2961         c = atomic_read(v);
2962         for (;;) {
2963 -               if (unlikely(c == (u)))
2964 +               if (unlikely(c == u))
2965                         break;
2966 -               old = atomic_cmpxchg((v), c, c + (a));
2967 +
2968 +               asm volatile("addcc %2, %0, %0\n"
2969 +
2970 +#ifdef CONFIG_PAX_REFCOUNT
2971 +                            "tvs %%icc, 6\n"
2972 +#endif
2973 +
2974 +                            : "=r" (new)
2975 +                            : "0" (c), "ir" (a)
2976 +                            : "cc");
2977 +
2978 +               old = atomic_cmpxchg(v, c, new);
2979                 if (likely(old == c))
2980                         break;
2981                 c = old;
2982         }
2983 -       return c != (u);
2984 +       return c != u;
2985  }
2986  
2987  #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989  #define atomic64_cmpxchg(v, o, n) \
2990         ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991  #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993 +{
2994 +       return xchg(&v->counter, new);
2995 +}
2996  
2997  static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998  {
2999 -       long c, old;
3000 +       long c, old, new;
3001         c = atomic64_read(v);
3002         for (;;) {
3003 -               if (unlikely(c == (u)))
3004 +               if (unlikely(c == u))
3005                         break;
3006 -               old = atomic64_cmpxchg((v), c, c + (a));
3007 +
3008 +               asm volatile("addcc %2, %0, %0\n"
3009 +
3010 +#ifdef CONFIG_PAX_REFCOUNT
3011 +                            "tvs %%xcc, 6\n"
3012 +#endif
3013 +
3014 +                            : "=r" (new)
3015 +                            : "0" (c), "ir" (a)
3016 +                            : "cc");
3017 +
3018 +               old = atomic64_cmpxchg(v, c, new);
3019                 if (likely(old == c))
3020                         break;
3021                 c = old;
3022         }
3023 -       return c != (u);
3024 +       return c != u;
3025  }
3026  
3027  #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028 diff -urNp linux-3.0.4/arch/sparc/include/asm/cache.h linux-3.0.4/arch/sparc/include/asm/cache.h
3029 --- linux-3.0.4/arch/sparc/include/asm/cache.h  2011-07-21 22:17:23.000000000 -0400
3030 +++ linux-3.0.4/arch/sparc/include/asm/cache.h  2011-08-23 21:47:55.000000000 -0400
3031 @@ -10,7 +10,7 @@
3032  #define ARCH_SLAB_MINALIGN     __alignof__(unsigned long long)
3033  
3034  #define L1_CACHE_SHIFT 5
3035 -#define L1_CACHE_BYTES 32
3036 +#define L1_CACHE_BYTES 32UL
3037  
3038  #ifdef CONFIG_SPARC32
3039  #define SMP_CACHE_BYTES_SHIFT 5
3040 diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_32.h linux-3.0.4/arch/sparc/include/asm/elf_32.h
3041 --- linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042 +++ linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043 @@ -114,6 +114,13 @@ typedef struct {
3044  
3045  #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE)
3046  
3047 +#ifdef CONFIG_PAX_ASLR
3048 +#define PAX_ELF_ET_DYN_BASE    0x10000UL
3049 +
3050 +#define PAX_DELTA_MMAP_LEN     16
3051 +#define PAX_DELTA_STACK_LEN    16
3052 +#endif
3053 +
3054  /* This yields a mask that user programs can use to figure out what
3055     instruction set this cpu supports.  This can NOT be done in userspace
3056     on Sparc.  */
3057 diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_64.h linux-3.0.4/arch/sparc/include/asm/elf_64.h
3058 --- linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:44:40.000000000 -0400
3059 +++ linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060 @@ -180,6 +180,13 @@ typedef struct {
3061  #define ELF_ET_DYN_BASE                0x0000010000000000UL
3062  #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063  
3064 +#ifdef CONFIG_PAX_ASLR
3065 +#define PAX_ELF_ET_DYN_BASE    (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066 +
3067 +#define PAX_DELTA_MMAP_LEN     (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068 +#define PAX_DELTA_STACK_LEN    (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069 +#endif
3070 +
3071  extern unsigned long sparc64_elf_hwcap;
3072  #define ELF_HWCAP      sparc64_elf_hwcap
3073  
3074 diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtable_32.h linux-3.0.4/arch/sparc/include/asm/pgtable_32.h
3075 --- linux-3.0.4/arch/sparc/include/asm/pgtable_32.h     2011-07-21 22:17:23.000000000 -0400
3076 +++ linux-3.0.4/arch/sparc/include/asm/pgtable_32.h     2011-08-23 21:47:55.000000000 -0400
3077 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078  BTFIXUPDEF_INT(page_none)
3079  BTFIXUPDEF_INT(page_copy)
3080  BTFIXUPDEF_INT(page_readonly)
3081 +
3082 +#ifdef CONFIG_PAX_PAGEEXEC
3083 +BTFIXUPDEF_INT(page_shared_noexec)
3084 +BTFIXUPDEF_INT(page_copy_noexec)
3085 +BTFIXUPDEF_INT(page_readonly_noexec)
3086 +#endif
3087 +
3088  BTFIXUPDEF_INT(page_kernel)
3089  
3090  #define PMD_SHIFT              SUN4C_PMD_SHIFT
3091 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092  #define PAGE_COPY      __pgprot(BTFIXUP_INT(page_copy))
3093  #define PAGE_READONLY  __pgprot(BTFIXUP_INT(page_readonly))
3094  
3095 +#ifdef CONFIG_PAX_PAGEEXEC
3096 +extern pgprot_t PAGE_SHARED_NOEXEC;
3097 +# define PAGE_COPY_NOEXEC      __pgprot(BTFIXUP_INT(page_copy_noexec))
3098 +# define PAGE_READONLY_NOEXEC  __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099 +#else
3100 +# define PAGE_SHARED_NOEXEC    PAGE_SHARED
3101 +# define PAGE_COPY_NOEXEC      PAGE_COPY
3102 +# define PAGE_READONLY_NOEXEC  PAGE_READONLY
3103 +#endif
3104 +
3105  extern unsigned long page_kernel;
3106  
3107  #ifdef MODULE
3108 diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h
3109 --- linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h       2011-07-21 22:17:23.000000000 -0400
3110 +++ linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h       2011-08-23 21:47:55.000000000 -0400
3111 @@ -115,6 +115,13 @@
3112                                     SRMMU_EXEC | SRMMU_REF)
3113  #define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114                                     SRMMU_EXEC | SRMMU_REF)
3115 +
3116 +#ifdef CONFIG_PAX_PAGEEXEC
3117 +#define SRMMU_PAGE_SHARED_NOEXEC       __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119 +#define SRMMU_PAGE_RDONLY_NOEXEC       __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120 +#endif
3121 +
3122  #define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123                                     SRMMU_DIRTY | SRMMU_REF)
3124  
3125 diff -urNp linux-3.0.4/arch/sparc/include/asm/spinlock_64.h linux-3.0.4/arch/sparc/include/asm/spinlock_64.h
3126 --- linux-3.0.4/arch/sparc/include/asm/spinlock_64.h    2011-07-21 22:17:23.000000000 -0400
3127 +++ linux-3.0.4/arch/sparc/include/asm/spinlock_64.h    2011-08-23 21:47:55.000000000 -0400
3128 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129  
3130  /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131  
3132 -static void inline arch_read_lock(arch_rwlock_t *lock)
3133 +static inline void arch_read_lock(arch_rwlock_t *lock)
3134  {
3135         unsigned long tmp1, tmp2;
3136  
3137         __asm__ __volatile__ (
3138  "1:    ldsw            [%2], %0\n"
3139  "      brlz,pn         %0, 2f\n"
3140 -"4:     add            %0, 1, %1\n"
3141 +"4:     addcc          %0, 1, %1\n"
3142 +
3143 +#ifdef CONFIG_PAX_REFCOUNT
3144 +"      tvs             %%icc, 6\n"
3145 +#endif
3146 +
3147  "      cas             [%2], %0, %1\n"
3148  "      cmp             %0, %1\n"
3149  "      bne,pn          %%icc, 1b\n"
3150 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151  "      .previous"
3152         : "=&r" (tmp1), "=&r" (tmp2)
3153         : "r" (lock)
3154 -       : "memory");
3155 +       : "memory", "cc");
3156  }
3157  
3158 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3159 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3160  {
3161         int tmp1, tmp2;
3162  
3163 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164  "1:    ldsw            [%2], %0\n"
3165  "      brlz,a,pn       %0, 2f\n"
3166  "       mov            0, %0\n"
3167 -"      add             %0, 1, %1\n"
3168 +"      addcc           %0, 1, %1\n"
3169 +
3170 +#ifdef CONFIG_PAX_REFCOUNT
3171 +"      tvs             %%icc, 6\n"
3172 +#endif
3173 +
3174  "      cas             [%2], %0, %1\n"
3175  "      cmp             %0, %1\n"
3176  "      bne,pn          %%icc, 1b\n"
3177 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178         return tmp1;
3179  }
3180  
3181 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3182 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3183  {
3184         unsigned long tmp1, tmp2;
3185  
3186         __asm__ __volatile__(
3187  "1:    lduw    [%2], %0\n"
3188 -"      sub     %0, 1, %1\n"
3189 +"      subcc   %0, 1, %1\n"
3190 +
3191 +#ifdef CONFIG_PAX_REFCOUNT
3192 +"      tvs     %%icc, 6\n"
3193 +#endif
3194 +
3195  "      cas     [%2], %0, %1\n"
3196  "      cmp     %0, %1\n"
3197  "      bne,pn  %%xcc, 1b\n"
3198 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199         : "memory");
3200  }
3201  
3202 -static void inline arch_write_lock(arch_rwlock_t *lock)
3203 +static inline void arch_write_lock(arch_rwlock_t *lock)
3204  {
3205         unsigned long mask, tmp1, tmp2;
3206  
3207 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208         : "memory");
3209  }
3210  
3211 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3212 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3213  {
3214         __asm__ __volatile__(
3215  "      stw             %%g0, [%0]"
3216 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217         : "memory");
3218  }
3219  
3220 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3221 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3222  {
3223         unsigned long mask, tmp1, tmp2, result;
3224  
3225 diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_32.h linux-3.0.4/arch/sparc/include/asm/thread_info_32.h
3226 --- linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227 +++ linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228 @@ -50,6 +50,8 @@ struct thread_info {
3229         unsigned long           w_saved;
3230  
3231         struct restart_block    restart_block;
3232 +
3233 +       unsigned long           lowest_stack;
3234  };
3235  
3236  /*
3237 diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_64.h linux-3.0.4/arch/sparc/include/asm/thread_info_64.h
3238 --- linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239 +++ linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240 @@ -63,6 +63,8 @@ struct thread_info {
3241         struct pt_regs          *kern_una_regs;
3242         unsigned int            kern_una_insn;
3243  
3244 +       unsigned long           lowest_stack;
3245 +
3246         unsigned long           fpregs[0] __attribute__ ((aligned(64)));
3247  };
3248  
3249 diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_32.h linux-3.0.4/arch/sparc/include/asm/uaccess_32.h
3250 --- linux-3.0.4/arch/sparc/include/asm/uaccess_32.h     2011-07-21 22:17:23.000000000 -0400
3251 +++ linux-3.0.4/arch/sparc/include/asm/uaccess_32.h     2011-08-23 21:47:55.000000000 -0400
3252 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253  
3254  static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255  {
3256 -       if (n && __access_ok((unsigned long) to, n))
3257 +       if ((long)n < 0)
3258 +               return n;
3259 +
3260 +       if (n && __access_ok((unsigned long) to, n)) {
3261 +               if (!__builtin_constant_p(n))
3262 +                       check_object_size(from, n, true);
3263                 return __copy_user(to, (__force void __user *) from, n);
3264 -       else
3265 +       } else
3266                 return n;
3267  }
3268  
3269  static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270  {
3271 +       if ((long)n < 0)
3272 +               return n;
3273 +
3274 +       if (!__builtin_constant_p(n))
3275 +               check_object_size(from, n, true);
3276 +
3277         return __copy_user(to, (__force void __user *) from, n);
3278  }
3279  
3280  static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281  {
3282 -       if (n && __access_ok((unsigned long) from, n))
3283 +       if ((long)n < 0)
3284 +               return n;
3285 +
3286 +       if (n && __access_ok((unsigned long) from, n)) {
3287 +               if (!__builtin_constant_p(n))
3288 +                       check_object_size(to, n, false);
3289                 return __copy_user((__force void __user *) to, from, n);
3290 -       else
3291 +       } else
3292                 return n;
3293  }
3294  
3295  static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296  {
3297 +       if ((long)n < 0)
3298 +               return n;
3299 +
3300         return __copy_user((__force void __user *) to, from, n);
3301  }
3302  
3303 diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_64.h linux-3.0.4/arch/sparc/include/asm/uaccess_64.h
3304 --- linux-3.0.4/arch/sparc/include/asm/uaccess_64.h     2011-07-21 22:17:23.000000000 -0400
3305 +++ linux-3.0.4/arch/sparc/include/asm/uaccess_64.h     2011-08-23 21:47:55.000000000 -0400
3306 @@ -10,6 +10,7 @@
3307  #include <linux/compiler.h>
3308  #include <linux/string.h>
3309  #include <linux/thread_info.h>
3310 +#include <linux/kernel.h>
3311  #include <asm/asi.h>
3312  #include <asm/system.h>
3313  #include <asm/spitfire.h>
3314 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315  static inline unsigned long __must_check
3316  copy_from_user(void *to, const void __user *from, unsigned long size)
3317  {
3318 -       unsigned long ret = ___copy_from_user(to, from, size);
3319 +       unsigned long ret;
3320  
3321 +       if ((long)size < 0 || size > INT_MAX)
3322 +               return size;
3323 +
3324 +       if (!__builtin_constant_p(size))
3325 +               check_object_size(to, size, false);
3326 +
3327 +       ret = ___copy_from_user(to, from, size);
3328         if (unlikely(ret))
3329                 ret = copy_from_user_fixup(to, from, size);
3330  
3331 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332  static inline unsigned long __must_check
3333  copy_to_user(void __user *to, const void *from, unsigned long size)
3334  {
3335 -       unsigned long ret = ___copy_to_user(to, from, size);
3336 +       unsigned long ret;
3337 +
3338 +       if ((long)size < 0 || size > INT_MAX)
3339 +               return size;
3340 +
3341 +       if (!__builtin_constant_p(size))
3342 +               check_object_size(from, size, true);
3343  
3344 +       ret = ___copy_to_user(to, from, size);
3345         if (unlikely(ret))
3346                 ret = copy_to_user_fixup(to, from, size);
3347         return ret;
3348 diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess.h linux-3.0.4/arch/sparc/include/asm/uaccess.h
3349 --- linux-3.0.4/arch/sparc/include/asm/uaccess.h        2011-07-21 22:17:23.000000000 -0400
3350 +++ linux-3.0.4/arch/sparc/include/asm/uaccess.h        2011-08-23 21:47:55.000000000 -0400
3351 @@ -1,5 +1,13 @@
3352  #ifndef ___ASM_SPARC_UACCESS_H
3353  #define ___ASM_SPARC_UACCESS_H
3354 +
3355 +#ifdef __KERNEL__
3356 +#ifndef __ASSEMBLY__
3357 +#include <linux/types.h>
3358 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359 +#endif
3360 +#endif
3361 +
3362  #if defined(__sparc__) && defined(__arch64__)
3363  #include <asm/uaccess_64.h>
3364  #else
3365 diff -urNp linux-3.0.4/arch/sparc/kernel/Makefile linux-3.0.4/arch/sparc/kernel/Makefile
3366 --- linux-3.0.4/arch/sparc/kernel/Makefile      2011-07-21 22:17:23.000000000 -0400
3367 +++ linux-3.0.4/arch/sparc/kernel/Makefile      2011-08-23 21:47:55.000000000 -0400
3368 @@ -3,7 +3,7 @@
3369  #
3370  
3371  asflags-y := -ansi
3372 -ccflags-y := -Werror
3373 +#ccflags-y := -Werror
3374  
3375  extra-y     := head_$(BITS).o
3376  extra-y     += init_task.o
3377 diff -urNp linux-3.0.4/arch/sparc/kernel/process_32.c linux-3.0.4/arch/sparc/kernel/process_32.c
3378 --- linux-3.0.4/arch/sparc/kernel/process_32.c  2011-07-21 22:17:23.000000000 -0400
3379 +++ linux-3.0.4/arch/sparc/kernel/process_32.c  2011-08-23 21:48:14.000000000 -0400
3380 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381                        rw->ins[4], rw->ins[5],
3382                        rw->ins[6],
3383                        rw->ins[7]);
3384 -               printk("%pS\n", (void *) rw->ins[7]);
3385 +               printk("%pA\n", (void *) rw->ins[7]);
3386                 rw = (struct reg_window32 *) rw->ins[6];
3387         }
3388         spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390  
3391          printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx    %s\n",
3392                r->psr, r->pc, r->npc, r->y, print_tainted());
3393 -       printk("PC: <%pS>\n", (void *) r->pc);
3394 +       printk("PC: <%pA>\n", (void *) r->pc);
3395         printk("%%G: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
3396                r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397                r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398         printk("%%O: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
3399                r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400                r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401 -       printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402 +       printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403  
3404         printk("%%L: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
3405                rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407                 rw = (struct reg_window32 *) fp;
3408                 pc = rw->ins[7];
3409                 printk("[%08lx : ", pc);
3410 -               printk("%pS ] ", (void *) pc);
3411 +               printk("%pA ] ", (void *) pc);
3412                 fp = rw->ins[6];
3413         } while (++count < 16);
3414         printk("\n");
3415 diff -urNp linux-3.0.4/arch/sparc/kernel/process_64.c linux-3.0.4/arch/sparc/kernel/process_64.c
3416 --- linux-3.0.4/arch/sparc/kernel/process_64.c  2011-07-21 22:17:23.000000000 -0400
3417 +++ linux-3.0.4/arch/sparc/kernel/process_64.c  2011-08-23 21:48:14.000000000 -0400
3418 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419         printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420                rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421         if (regs->tstate & TSTATE_PRIV)
3422 -               printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423 +               printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424  }
3425  
3426  void show_regs(struct pt_regs *regs)
3427  {
3428         printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
3429                regs->tpc, regs->tnpc, regs->y, print_tainted());
3430 -       printk("TPC: <%pS>\n", (void *) regs->tpc);
3431 +       printk("TPC: <%pA>\n", (void *) regs->tpc);
3432         printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433                regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434                regs->u_regs[3]);
3435 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436         printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437                regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438                regs->u_regs[15]);
3439 -       printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440 +       printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441         show_regwindow(regs);
3442         show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443  }
3444 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445                        ((tp && tp->task) ? tp->task->pid : -1));
3446  
3447                 if (gp->tstate & TSTATE_PRIV) {
3448 -                       printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449 +                       printk("             TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450                                (void *) gp->tpc,
3451                                (void *) gp->o7,
3452                                (void *) gp->i7,
3453 diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c
3454 --- linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c        2011-07-21 22:17:23.000000000 -0400
3455 +++ linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c        2011-08-23 21:47:55.000000000 -0400
3456 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457         if (ARCH_SUN4C && len > 0x20000000)
3458                 return -ENOMEM;
3459         if (!addr)
3460 -               addr = TASK_UNMAPPED_BASE;
3461 +               addr = current->mm->mmap_base;
3462  
3463         if (flags & MAP_SHARED)
3464                 addr = COLOUR_ALIGN(addr);
3465 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466                 }
3467                 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468                         return -ENOMEM;
3469 -               if (!vmm || addr + len <= vmm->vm_start)
3470 +               if (check_heap_stack_gap(vmm, addr, len))
3471                         return addr;
3472                 addr = vmm->vm_end;
3473                 if (flags & MAP_SHARED)
3474 diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c
3475 --- linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c        2011-07-21 22:17:23.000000000 -0400
3476 +++ linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c        2011-08-23 21:47:55.000000000 -0400
3477 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478                 /* We do not accept a shared mapping if it would violate
3479                  * cache aliasing constraints.
3480                  */
3481 -               if ((flags & MAP_SHARED) &&
3482 +               if ((filp || (flags & MAP_SHARED)) &&
3483                     ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484                         return -EINVAL;
3485                 return addr;
3486 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487         if (filp || (flags & MAP_SHARED))
3488                 do_color_align = 1;
3489  
3490 +#ifdef CONFIG_PAX_RANDMMAP
3491 +       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492 +#endif
3493 +
3494         if (addr) {
3495                 if (do_color_align)
3496                         addr = COLOUR_ALIGN(addr, pgoff);
3497 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498                         addr = PAGE_ALIGN(addr);
3499  
3500                 vma = find_vma(mm, addr);
3501 -               if (task_size - len >= addr &&
3502 -                   (!vma || addr + len <= vma->vm_start))
3503 +               if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504                         return addr;
3505         }
3506  
3507         if (len > mm->cached_hole_size) {
3508 -               start_addr = addr = mm->free_area_cache;
3509 +               start_addr = addr = mm->free_area_cache;
3510         } else {
3511 -               start_addr = addr = TASK_UNMAPPED_BASE;
3512 +               start_addr = addr = mm->mmap_base;
3513                 mm->cached_hole_size = 0;
3514         }
3515  
3516 @@ -174,14 +177,14 @@ full_search:
3517                         vma = find_vma(mm, VA_EXCLUDE_END);
3518                 }
3519                 if (unlikely(task_size < addr)) {
3520 -                       if (start_addr != TASK_UNMAPPED_BASE) {
3521 -                               start_addr = addr = TASK_UNMAPPED_BASE;
3522 +                       if (start_addr != mm->mmap_base) {
3523 +                               start_addr = addr = mm->mmap_base;
3524                                 mm->cached_hole_size = 0;
3525                                 goto full_search;
3526                         }
3527                         return -ENOMEM;
3528                 }
3529 -               if (likely(!vma || addr + len <= vma->vm_start)) {
3530 +               if (likely(check_heap_stack_gap(vma, addr, len))) {
3531                         /*
3532                          * Remember the place where we stopped the search:
3533                          */
3534 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535                 /* We do not accept a shared mapping if it would violate
3536                  * cache aliasing constraints.
3537                  */
3538 -               if ((flags & MAP_SHARED) &&
3539 +               if ((filp || (flags & MAP_SHARED)) &&
3540                     ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541                         return -EINVAL;
3542                 return addr;
3543 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544                         addr = PAGE_ALIGN(addr);
3545  
3546                 vma = find_vma(mm, addr);
3547 -               if (task_size - len >= addr &&
3548 -                   (!vma || addr + len <= vma->vm_start))
3549 +               if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550                         return addr;
3551         }
3552  
3553 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554         /* make sure it can fit in the remaining address space */
3555         if (likely(addr > len)) {
3556                 vma = find_vma(mm, addr-len);
3557 -               if (!vma || addr <= vma->vm_start) {
3558 +               if (check_heap_stack_gap(vma, addr - len, len)) {
3559                         /* remember the address as a hint for next time */
3560                         return (mm->free_area_cache = addr-len);
3561                 }
3562 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563         if (unlikely(mm->mmap_base < len))
3564                 goto bottomup;
3565  
3566 -       addr = mm->mmap_base-len;
3567 -       if (do_color_align)
3568 -               addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569 +       addr = mm->mmap_base - len;
3570  
3571         do {
3572 +               if (do_color_align)
3573 +                       addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574                 /*
3575                  * Lookup failure means no vma is above this address,
3576                  * else if new region fits below vma->vm_start,
3577                  * return with success:
3578                  */
3579                 vma = find_vma(mm, addr);
3580 -               if (likely(!vma || addr+len <= vma->vm_start)) {
3581 +               if (likely(check_heap_stack_gap(vma, addr, len))) {
3582                         /* remember the address as a hint for next time */
3583                         return (mm->free_area_cache = addr);
3584                 }
3585 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586                         mm->cached_hole_size = vma->vm_start - addr;
3587  
3588                 /* try just below the current vma->vm_start */
3589 -               addr = vma->vm_start-len;
3590 -               if (do_color_align)
3591 -                       addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592 -       } while (likely(len < vma->vm_start));
3593 +               addr = skip_heap_stack_gap(vma, len);
3594 +       } while (!IS_ERR_VALUE(addr));
3595  
3596  bottomup:
3597         /*
3598 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599             gap == RLIM_INFINITY ||
3600             sysctl_legacy_va_layout) {
3601                 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602 +
3603 +#ifdef CONFIG_PAX_RANDMMAP
3604 +               if (mm->pax_flags & MF_PAX_RANDMMAP)
3605 +                       mm->mmap_base += mm->delta_mmap;
3606 +#endif
3607 +
3608                 mm->get_unmapped_area = arch_get_unmapped_area;
3609                 mm->unmap_area = arch_unmap_area;
3610         } else {
3611 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612                         gap = (task_size / 6 * 5);
3613  
3614                 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615 +
3616 +#ifdef CONFIG_PAX_RANDMMAP
3617 +               if (mm->pax_flags & MF_PAX_RANDMMAP)
3618 +                       mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619 +#endif
3620 +
3621                 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622                 mm->unmap_area = arch_unmap_area_topdown;
3623         }
3624 diff -urNp linux-3.0.4/arch/sparc/kernel/traps_32.c linux-3.0.4/arch/sparc/kernel/traps_32.c
3625 --- linux-3.0.4/arch/sparc/kernel/traps_32.c    2011-07-21 22:17:23.000000000 -0400
3626 +++ linux-3.0.4/arch/sparc/kernel/traps_32.c    2011-08-23 21:48:14.000000000 -0400
3627 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628  #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629  #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630  
3631 +extern void gr_handle_kernel_exploit(void);
3632 +
3633  void die_if_kernel(char *str, struct pt_regs *regs)
3634  {
3635         static int die_counter;
3636 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637                       count++ < 30                              &&
3638                        (((unsigned long) rw) >= PAGE_OFFSET)    &&
3639                       !(((unsigned long) rw) & 0x7)) {
3640 -                       printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641 +                       printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642                                (void *) rw->ins[7]);
3643                         rw = (struct reg_window32 *)rw->ins[6];
3644                 }
3645         }
3646         printk("Instruction DUMP:");
3647         instruction_dump ((unsigned long *) regs->pc);
3648 -       if(regs->psr & PSR_PS)
3649 +       if(regs->psr & PSR_PS) {
3650 +               gr_handle_kernel_exploit();
3651                 do_exit(SIGKILL);
3652 +       }
3653         do_exit(SIGSEGV);
3654  }
3655  
3656 diff -urNp linux-3.0.4/arch/sparc/kernel/traps_64.c linux-3.0.4/arch/sparc/kernel/traps_64.c
3657 --- linux-3.0.4/arch/sparc/kernel/traps_64.c    2011-07-21 22:17:23.000000000 -0400
3658 +++ linux-3.0.4/arch/sparc/kernel/traps_64.c    2011-08-23 21:48:14.000000000 -0400
3659 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660                        i + 1,
3661                        p->trapstack[i].tstate, p->trapstack[i].tpc,
3662                        p->trapstack[i].tnpc, p->trapstack[i].tt);
3663 -               printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664 +               printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665         }
3666  }
3667  
3668 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669  
3670         lvl -= 0x100;
3671         if (regs->tstate & TSTATE_PRIV) {
3672 +
3673 +#ifdef CONFIG_PAX_REFCOUNT
3674 +               if (lvl == 6)
3675 +                       pax_report_refcount_overflow(regs);
3676 +#endif
3677 +
3678                 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679                 die_if_kernel(buffer, regs);
3680         }
3681 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682  void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683  {
3684         char buffer[32];
3685 -       
3686 +
3687         if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688                        0, lvl, SIGTRAP) == NOTIFY_STOP)
3689                 return;
3690  
3691 +#ifdef CONFIG_PAX_REFCOUNT
3692 +       if (lvl == 6)
3693 +               pax_report_refcount_overflow(regs);
3694 +#endif
3695 +
3696         dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697  
3698         sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700                regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701         printk("%s" "ERROR(%d): ",
3702                (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703 -       printk("TPC<%pS>\n", (void *) regs->tpc);
3704 +       printk("TPC<%pA>\n", (void *) regs->tpc);
3705         printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
3706                (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707                (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709                        smp_processor_id(),
3710                        (type & 0x1) ? 'I' : 'D',
3711                        regs->tpc);
3712 -               printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713 +               printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714                 panic("Irrecoverable Cheetah+ parity error.");
3715         }
3716  
3717 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718                smp_processor_id(),
3719                (type & 0x1) ? 'I' : 'D',
3720                regs->tpc);
3721 -       printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722 +       printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723  }
3724  
3725  struct sun4v_error_entry {
3726 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727  
3728         printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729                regs->tpc, tl);
3730 -       printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731 +       printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732         printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733 -       printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734 +       printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735                (void *) regs->u_regs[UREG_I7]);
3736         printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737                "pte[%lx] error[%lx]\n",
3738 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739  
3740         printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741                regs->tpc, tl);
3742 -       printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743 +       printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744         printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745 -       printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746 +       printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747                (void *) regs->u_regs[UREG_I7]);
3748         printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749                "pte[%lx] error[%lx]\n",
3750 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751                         fp = (unsigned long)sf->fp + STACK_BIAS;
3752                 }
3753  
3754 -               printk(" [%016lx] %pS\n", pc, (void *) pc);
3755 +               printk(" [%016lx] %pA\n", pc, (void *) pc);
3756  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757                 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758                         int index = tsk->curr_ret_stack;
3759                         if (tsk->ret_stack && index >= graph) {
3760                                 pc = tsk->ret_stack[index - graph].ret;
3761 -                               printk(" [%016lx] %pS\n", pc, (void *) pc);
3762 +                               printk(" [%016lx] %pA\n", pc, (void *) pc);
3763                                 graph++;
3764                         }
3765                 }
3766 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767         return (struct reg_window *) (fp + STACK_BIAS);
3768  }
3769  
3770 +extern void gr_handle_kernel_exploit(void);
3771 +
3772  void die_if_kernel(char *str, struct pt_regs *regs)
3773  {
3774         static int die_counter;
3775 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776                 while (rw &&
3777                        count++ < 30 &&
3778                        kstack_valid(tp, (unsigned long) rw)) {
3779 -                       printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780 +                       printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781                                (void *) rw->ins[7]);
3782  
3783                         rw = kernel_stack_up(rw);
3784 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785                 }
3786                 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787         }
3788 -       if (regs->tstate & TSTATE_PRIV)
3789 +       if (regs->tstate & TSTATE_PRIV) {
3790 +               gr_handle_kernel_exploit();
3791                 do_exit(SIGKILL);
3792 +       }
3793         do_exit(SIGSEGV);
3794  }
3795  EXPORT_SYMBOL(die_if_kernel);
3796 diff -urNp linux-3.0.4/arch/sparc/kernel/unaligned_64.c linux-3.0.4/arch/sparc/kernel/unaligned_64.c
3797 --- linux-3.0.4/arch/sparc/kernel/unaligned_64.c        2011-08-23 21:44:40.000000000 -0400
3798 +++ linux-3.0.4/arch/sparc/kernel/unaligned_64.c        2011-08-23 21:48:14.000000000 -0400
3799 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800         static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801  
3802         if (__ratelimit(&ratelimit)) {
3803 -               printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804 +               printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805                        regs->tpc, (void *) regs->tpc);
3806         }
3807  }
3808 diff -urNp linux-3.0.4/arch/sparc/lib/atomic_64.S linux-3.0.4/arch/sparc/lib/atomic_64.S
3809 --- linux-3.0.4/arch/sparc/lib/atomic_64.S      2011-07-21 22:17:23.000000000 -0400
3810 +++ linux-3.0.4/arch/sparc/lib/atomic_64.S      2011-08-23 21:47:55.000000000 -0400
3811 @@ -18,7 +18,12 @@
3812  atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813         BACKOFF_SETUP(%o2)
3814  1:     lduw    [%o1], %g1
3815 -       add     %g1, %o0, %g7
3816 +       addcc   %g1, %o0, %g7
3817 +
3818 +#ifdef CONFIG_PAX_REFCOUNT
3819 +       tvs     %icc, 6
3820 +#endif
3821 +
3822         cas     [%o1], %g1, %g7
3823         cmp     %g1, %g7
3824         bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
3825 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826  2:     BACKOFF_SPIN(%o2, %o3, 1b)
3827         .size   atomic_add, .-atomic_add
3828  
3829 +       .globl  atomic_add_unchecked
3830 +       .type   atomic_add_unchecked,#function
3831 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832 +       BACKOFF_SETUP(%o2)
3833 +1:     lduw    [%o1], %g1
3834 +       add     %g1, %o0, %g7
3835 +       cas     [%o1], %g1, %g7
3836 +       cmp     %g1, %g7
3837 +       bne,pn  %icc, 2f
3838 +        nop
3839 +       retl
3840 +        nop
3841 +2:     BACKOFF_SPIN(%o2, %o3, 1b)
3842 +       .size   atomic_add_unchecked, .-atomic_add_unchecked
3843 +
3844         .globl  atomic_sub
3845         .type   atomic_sub,#function
3846  atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847         BACKOFF_SETUP(%o2)
3848  1:     lduw    [%o1], %g1
3849 -       sub     %g1, %o0, %g7
3850 +       subcc   %g1, %o0, %g7
3851 +
3852 +#ifdef CONFIG_PAX_REFCOUNT
3853 +       tvs     %icc, 6
3854 +#endif
3855 +
3856         cas     [%o1], %g1, %g7
3857         cmp     %g1, %g7
3858         bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
3859 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860  2:     BACKOFF_SPIN(%o2, %o3, 1b)
3861         .size   atomic_sub, .-atomic_sub
3862  
3863 +       .globl  atomic_sub_unchecked
3864 +       .type   atomic_sub_unchecked,#function
3865 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866 +       BACKOFF_SETUP(%o2)
3867 +1:     lduw    [%o1], %g1
3868 +       sub     %g1, %o0, %g7
3869 +       cas     [%o1], %g1, %g7
3870 +       cmp     %g1, %g7
3871 +       bne,pn  %icc, 2f
3872 +        nop
3873 +       retl
3874 +        nop
3875 +2:     BACKOFF_SPIN(%o2, %o3, 1b)
3876 +       .size   atomic_sub_unchecked, .-atomic_sub_unchecked
3877 +
3878         .globl  atomic_add_ret
3879         .type   atomic_add_ret,#function
3880  atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881         BACKOFF_SETUP(%o2)
3882  1:     lduw    [%o1], %g1
3883 -       add     %g1, %o0, %g7
3884 +       addcc   %g1, %o0, %g7
3885 +
3886 +#ifdef CONFIG_PAX_REFCOUNT
3887 +       tvs     %icc, 6
3888 +#endif
3889 +
3890         cas     [%o1], %g1, %g7
3891         cmp     %g1, %g7
3892         bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
3893 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 
3894  2:     BACKOFF_SPIN(%o2, %o3, 1b)
3895         .size   atomic_add_ret, .-atomic_add_ret
3896  
3897 +       .globl  atomic_add_ret_unchecked
3898 +       .type   atomic_add_ret_unchecked,#function
3899 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900 +       BACKOFF_SETUP(%o2)
3901 +1:     lduw    [%o1], %g1
3902 +       addcc   %g1, %o0, %g7
3903 +       cas     [%o1], %g1, %g7
3904 +       cmp     %g1, %g7
3905 +       bne,pn  %icc, 2f
3906 +        add    %g7, %o0, %g7
3907 +       sra     %g7, 0, %o0
3908 +       retl
3909 +        nop
3910 +2:     BACKOFF_SPIN(%o2, %o3, 1b)
3911 +       .size   atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912 +
3913         .globl  atomic_sub_ret
3914         .type   atomic_sub_ret,#function
3915  atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916         BACKOFF_SETUP(%o2)
3917  1:     lduw    [%o1], %g1
3918 -       sub     %g1, %o0, %g7
3919 +       subcc   %g1, %o0, %g7
3920 +
3921 +#ifdef CONFIG_PAX_REFCOUNT
3922 +       tvs     %icc, 6
3923 +#endif
3924 +
3925         cas     [%o1], %g1, %g7
3926         cmp     %g1, %g7
3927         bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
3928 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 
3929  atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930         BACKOFF_SETUP(%o2)
3931  1:     ldx     [%o1], %g1
3932 -       add     %g1, %o0, %g7
3933 +       addcc   %g1, %o0, %g7
3934 +
3935 +#ifdef CONFIG_PAX_REFCOUNT
3936 +       tvs     %xcc, 6
3937 +#endif
3938 +
3939         casx    [%o1], %g1, %g7
3940         cmp     %g1, %g7
3941         bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
3942 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = 
3943  2:     BACKOFF_SPIN(%o2, %o3, 1b)
3944         .size   atomic64_add, .-atomic64_add
3945  
3946 +       .globl  atomic64_add_unchecked
3947 +       .type   atomic64_add_unchecked,#function
3948 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949 +       BACKOFF_SETUP(%o2)
3950 +1:     ldx     [%o1], %g1
3951 +       addcc   %g1, %o0, %g7
3952 +       casx    [%o1], %g1, %g7
3953 +       cmp     %g1, %g7
3954 +       bne,pn  %xcc, 2f
3955 +        nop
3956 +       retl
3957 +        nop
3958 +2:     BACKOFF_SPIN(%o2, %o3, 1b)
3959 +       .size   atomic64_add_unchecked, .-atomic64_add_unchecked
3960 +
3961         .globl  atomic64_sub
3962         .type   atomic64_sub,#function
3963  atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964         BACKOFF_SETUP(%o2)
3965  1:     ldx     [%o1], %g1
3966 -       sub     %g1, %o0, %g7
3967 +       subcc   %g1, %o0, %g7
3968 +
3969 +#ifdef CONFIG_PAX_REFCOUNT
3970 +       tvs     %xcc, 6
3971 +#endif
3972 +
3973         casx    [%o1], %g1, %g7
3974         cmp     %g1, %g7
3975         bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
3976 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = 
3977  2:     BACKOFF_SPIN(%o2, %o3, 1b)
3978         .size   atomic64_sub, .-atomic64_sub
3979  
3980 +       .globl  atomic64_sub_unchecked
3981 +       .type   atomic64_sub_unchecked,#function
3982 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983 +       BACKOFF_SETUP(%o2)
3984 +1:     ldx     [%o1], %g1
3985 +       subcc   %g1, %o0, %g7
3986 +       casx    [%o1], %g1, %g7
3987 +       cmp     %g1, %g7
3988 +       bne,pn  %xcc, 2f
3989 +        nop
3990 +       retl
3991 +        nop
3992 +2:     BACKOFF_SPIN(%o2, %o3, 1b)
3993 +       .size   atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994 +
3995         .globl  atomic64_add_ret
3996         .type   atomic64_add_ret,#function
3997  atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998         BACKOFF_SETUP(%o2)
3999  1:     ldx     [%o1], %g1
4000 -       add     %g1, %o0, %g7
4001 +       addcc   %g1, %o0, %g7
4002 +
4003 +#ifdef CONFIG_PAX_REFCOUNT
4004 +       tvs     %xcc, 6
4005 +#endif
4006 +
4007         casx    [%o1], %g1, %g7
4008         cmp     %g1, %g7
4009         bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
4010 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011  2:     BACKOFF_SPIN(%o2, %o3, 1b)
4012         .size   atomic64_add_ret, .-atomic64_add_ret
4013  
4014 +       .globl  atomic64_add_ret_unchecked
4015 +       .type   atomic64_add_ret_unchecked,#function
4016 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017 +       BACKOFF_SETUP(%o2)
4018 +1:     ldx     [%o1], %g1
4019 +       addcc   %g1, %o0, %g7
4020 +       casx    [%o1], %g1, %g7
4021 +       cmp     %g1, %g7
4022 +       bne,pn  %xcc, 2f
4023 +        add    %g7, %o0, %g7
4024 +       mov     %g7, %o0
4025 +       retl
4026 +        nop
4027 +2:     BACKOFF_SPIN(%o2, %o3, 1b)
4028 +       .size   atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029 +
4030         .globl  atomic64_sub_ret
4031         .type   atomic64_sub_ret,#function
4032  atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033         BACKOFF_SETUP(%o2)
4034  1:     ldx     [%o1], %g1
4035 -       sub     %g1, %o0, %g7
4036 +       subcc   %g1, %o0, %g7
4037 +
4038 +#ifdef CONFIG_PAX_REFCOUNT
4039 +       tvs     %xcc, 6
4040 +#endif
4041 +
4042         casx    [%o1], %g1, %g7
4043         cmp     %g1, %g7
4044         bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
4045 diff -urNp linux-3.0.4/arch/sparc/lib/ksyms.c linux-3.0.4/arch/sparc/lib/ksyms.c
4046 --- linux-3.0.4/arch/sparc/lib/ksyms.c  2011-07-21 22:17:23.000000000 -0400
4047 +++ linux-3.0.4/arch/sparc/lib/ksyms.c  2011-08-23 21:48:14.000000000 -0400
4048 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049  
4050  /* Atomic counter implementation. */
4051  EXPORT_SYMBOL(atomic_add);
4052 +EXPORT_SYMBOL(atomic_add_unchecked);
4053  EXPORT_SYMBOL(atomic_add_ret);
4054 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055  EXPORT_SYMBOL(atomic_sub);
4056 +EXPORT_SYMBOL(atomic_sub_unchecked);
4057  EXPORT_SYMBOL(atomic_sub_ret);
4058  EXPORT_SYMBOL(atomic64_add);
4059 +EXPORT_SYMBOL(atomic64_add_unchecked);
4060  EXPORT_SYMBOL(atomic64_add_ret);
4061 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062  EXPORT_SYMBOL(atomic64_sub);
4063 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4064  EXPORT_SYMBOL(atomic64_sub_ret);
4065  
4066  /* Atomic bit operations. */
4067 diff -urNp linux-3.0.4/arch/sparc/lib/Makefile linux-3.0.4/arch/sparc/lib/Makefile
4068 --- linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:44:40.000000000 -0400
4069 +++ linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070 @@ -2,7 +2,7 @@
4071  #
4072  
4073  asflags-y := -ansi -DST_DIV0=0x02
4074 -ccflags-y := -Werror
4075 +#ccflags-y := -Werror
4076  
4077  lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078  lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079 diff -urNp linux-3.0.4/arch/sparc/Makefile linux-3.0.4/arch/sparc/Makefile
4080 --- linux-3.0.4/arch/sparc/Makefile     2011-07-21 22:17:23.000000000 -0400
4081 +++ linux-3.0.4/arch/sparc/Makefile     2011-08-23 21:48:14.000000000 -0400
4082 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE)    += arch/sparc
4083  # Export what is needed by arch/sparc/boot/Makefile
4084  export VMLINUX_INIT VMLINUX_MAIN
4085  VMLINUX_INIT := $(head-y) $(init-y)
4086 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088  VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089  VMLINUX_MAIN += $(drivers-y) $(net-y)
4090  
4091 diff -urNp linux-3.0.4/arch/sparc/mm/fault_32.c linux-3.0.4/arch/sparc/mm/fault_32.c
4092 --- linux-3.0.4/arch/sparc/mm/fault_32.c        2011-07-21 22:17:23.000000000 -0400
4093 +++ linux-3.0.4/arch/sparc/mm/fault_32.c        2011-08-23 21:47:55.000000000 -0400
4094 @@ -22,6 +22,9 @@
4095  #include <linux/interrupt.h>
4096  #include <linux/module.h>
4097  #include <linux/kdebug.h>
4098 +#include <linux/slab.h>
4099 +#include <linux/pagemap.h>
4100 +#include <linux/compiler.h>
4101  
4102  #include <asm/system.h>
4103  #include <asm/page.h>
4104 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105         return safe_compute_effective_address(regs, insn);
4106  }
4107  
4108 +#ifdef CONFIG_PAX_PAGEEXEC
4109 +#ifdef CONFIG_PAX_DLRESOLVE
4110 +static void pax_emuplt_close(struct vm_area_struct *vma)
4111 +{
4112 +       vma->vm_mm->call_dl_resolve = 0UL;
4113 +}
4114 +
4115 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116 +{
4117 +       unsigned int *kaddr;
4118 +
4119 +       vmf->page = alloc_page(GFP_HIGHUSER);
4120 +       if (!vmf->page)
4121 +               return VM_FAULT_OOM;
4122 +
4123 +       kaddr = kmap(vmf->page);
4124 +       memset(kaddr, 0, PAGE_SIZE);
4125 +       kaddr[0] = 0x9DE3BFA8U; /* save */
4126 +       flush_dcache_page(vmf->page);
4127 +       kunmap(vmf->page);
4128 +       return VM_FAULT_MAJOR;
4129 +}
4130 +
4131 +static const struct vm_operations_struct pax_vm_ops = {
4132 +       .close = pax_emuplt_close,
4133 +       .fault = pax_emuplt_fault
4134 +};
4135 +
4136 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137 +{
4138 +       int ret;
4139 +
4140 +       INIT_LIST_HEAD(&vma->anon_vma_chain);
4141 +       vma->vm_mm = current->mm;
4142 +       vma->vm_start = addr;
4143 +       vma->vm_end = addr + PAGE_SIZE;
4144 +       vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145 +       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146 +       vma->vm_ops = &pax_vm_ops;
4147 +
4148 +       ret = insert_vm_struct(current->mm, vma);
4149 +       if (ret)
4150 +               return ret;
4151 +
4152 +       ++current->mm->total_vm;
4153 +       return 0;
4154 +}
4155 +#endif
4156 +
4157 +/*
4158 + * PaX: decide what to do with offenders (regs->pc = fault address)
4159 + *
4160 + * returns 1 when task should be killed
4161 + *         2 when patched PLT trampoline was detected
4162 + *         3 when unpatched PLT trampoline was detected
4163 + */
4164 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4165 +{
4166 +
4167 +#ifdef CONFIG_PAX_EMUPLT
4168 +       int err;
4169 +
4170 +       do { /* PaX: patched PLT emulation #1 */
4171 +               unsigned int sethi1, sethi2, jmpl;
4172 +
4173 +               err = get_user(sethi1, (unsigned int *)regs->pc);
4174 +               err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175 +               err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176 +
4177 +               if (err)
4178 +                       break;
4179 +
4180 +               if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181 +                   (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182 +                   (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183 +               {
4184 +                       unsigned int addr;
4185 +
4186 +                       regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187 +                       addr = regs->u_regs[UREG_G1];
4188 +                       addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189 +                       regs->pc = addr;
4190 +                       regs->npc = addr+4;
4191 +                       return 2;
4192 +               }
4193 +       } while (0);
4194 +
4195 +       { /* PaX: patched PLT emulation #2 */
4196 +               unsigned int ba;
4197 +
4198 +               err = get_user(ba, (unsigned int *)regs->pc);
4199 +
4200 +               if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201 +                       unsigned int addr;
4202 +
4203 +                       addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204 +                       regs->pc = addr;
4205 +                       regs->npc = addr+4;
4206 +                       return 2;
4207 +               }
4208 +       }
4209 +
4210 +       do { /* PaX: patched PLT emulation #3 */
4211 +               unsigned int sethi, jmpl, nop;
4212 +
4213 +               err = get_user(sethi, (unsigned int *)regs->pc);
4214 +               err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215 +               err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216 +
4217 +               if (err)
4218 +                       break;
4219 +
4220 +               if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221 +                   (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222 +                   nop == 0x01000000U)
4223 +               {
4224 +                       unsigned int addr;
4225 +
4226 +                       addr = (sethi & 0x003FFFFFU) << 10;
4227 +                       regs->u_regs[UREG_G1] = addr;
4228 +                       addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229 +                       regs->pc = addr;
4230 +                       regs->npc = addr+4;
4231 +                       return 2;
4232 +               }
4233 +       } while (0);
4234 +
4235 +       do { /* PaX: unpatched PLT emulation step 1 */
4236 +               unsigned int sethi, ba, nop;
4237 +
4238 +               err = get_user(sethi, (unsigned int *)regs->pc);
4239 +               err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240 +               err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241 +
4242 +               if (err)
4243 +                       break;
4244 +
4245 +               if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246 +                   ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247 +                   nop == 0x01000000U)
4248 +               {
4249 +                       unsigned int addr, save, call;
4250 +
4251 +                       if ((ba & 0xFFC00000U) == 0x30800000U)
4252 +                               addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253 +                       else
4254 +                               addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255 +
4256 +                       err = get_user(save, (unsigned int *)addr);
4257 +                       err |= get_user(call, (unsigned int *)(addr+4));
4258 +                       err |= get_user(nop, (unsigned int *)(addr+8));
4259 +                       if (err)
4260 +                               break;
4261 +
4262 +#ifdef CONFIG_PAX_DLRESOLVE
4263 +                       if (save == 0x9DE3BFA8U &&
4264 +                           (call & 0xC0000000U) == 0x40000000U &&
4265 +                           nop == 0x01000000U)
4266 +                       {
4267 +                               struct vm_area_struct *vma;
4268 +                               unsigned long call_dl_resolve;
4269 +
4270 +                               down_read(&current->mm->mmap_sem);
4271 +                               call_dl_resolve = current->mm->call_dl_resolve;
4272 +                               up_read(&current->mm->mmap_sem);
4273 +                               if (likely(call_dl_resolve))
4274 +                                       goto emulate;
4275 +
4276 +                               vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277 +
4278 +                               down_write(&current->mm->mmap_sem);
4279 +                               if (current->mm->call_dl_resolve) {
4280 +                                       call_dl_resolve = current->mm->call_dl_resolve;
4281 +                                       up_write(&current->mm->mmap_sem);
4282 +                                       if (vma)
4283 +                                               kmem_cache_free(vm_area_cachep, vma);
4284 +                                       goto emulate;
4285 +                               }
4286 +
4287 +                               call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288 +                               if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289 +                                       up_write(&current->mm->mmap_sem);
4290 +                                       if (vma)
4291 +                                               kmem_cache_free(vm_area_cachep, vma);
4292 +                                       return 1;
4293 +                               }
4294 +
4295 +                               if (pax_insert_vma(vma, call_dl_resolve)) {
4296 +                                       up_write(&current->mm->mmap_sem);
4297 +                                       kmem_cache_free(vm_area_cachep, vma);
4298 +                                       return 1;
4299 +                               }
4300 +
4301 +                               current->mm->call_dl_resolve = call_dl_resolve;
4302 +                               up_write(&current->mm->mmap_sem);
4303 +
4304 +emulate:
4305 +                               regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306 +                               regs->pc = call_dl_resolve;
4307 +                               regs->npc = addr+4;
4308 +                               return 3;
4309 +                       }
4310 +#endif
4311 +
4312 +                       /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313 +                       if ((save & 0xFFC00000U) == 0x05000000U &&
4314 +                           (call & 0xFFFFE000U) == 0x85C0A000U &&
4315 +                           nop == 0x01000000U)
4316 +                       {
4317 +                               regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318 +                               regs->u_regs[UREG_G2] = addr + 4;
4319 +                               addr = (save & 0x003FFFFFU) << 10;
4320 +                               addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321 +                               regs->pc = addr;
4322 +                               regs->npc = addr+4;
4323 +                               return 3;
4324 +                       }
4325 +               }
4326 +       } while (0);
4327 +
4328 +       do { /* PaX: unpatched PLT emulation step 2 */
4329 +               unsigned int save, call, nop;
4330 +
4331 +               err = get_user(save, (unsigned int *)(regs->pc-4));
4332 +               err |= get_user(call, (unsigned int *)regs->pc);
4333 +               err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334 +               if (err)
4335 +                       break;
4336 +
4337 +               if (save == 0x9DE3BFA8U &&
4338 +                   (call & 0xC0000000U) == 0x40000000U &&
4339 +                   nop == 0x01000000U)
4340 +               {
4341 +                       unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342 +
4343 +                       regs->u_regs[UREG_RETPC] = regs->pc;
4344 +                       regs->pc = dl_resolve;
4345 +                       regs->npc = dl_resolve+4;
4346 +                       return 3;
4347 +               }
4348 +       } while (0);
4349 +#endif
4350 +
4351 +       return 1;
4352 +}
4353 +
4354 +void pax_report_insns(void *pc, void *sp)
4355 +{
4356 +       unsigned long i;
4357 +
4358 +       printk(KERN_ERR "PAX: bytes at PC: ");
4359 +       for (i = 0; i < 8; i++) {
4360 +               unsigned int c;
4361 +               if (get_user(c, (unsigned int *)pc+i))
4362 +                       printk(KERN_CONT "???????? ");
4363 +               else
4364 +                       printk(KERN_CONT "%08x ", c);
4365 +       }
4366 +       printk("\n");
4367 +}
4368 +#endif
4369 +
4370  static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371                                       int text_fault)
4372  {
4373 @@ -281,6 +546,24 @@ good_area:
4374                 if(!(vma->vm_flags & VM_WRITE))
4375                         goto bad_area;
4376         } else {
4377 +
4378 +#ifdef CONFIG_PAX_PAGEEXEC
4379 +               if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380 +                       up_read(&mm->mmap_sem);
4381 +                       switch (pax_handle_fetch_fault(regs)) {
4382 +
4383 +#ifdef CONFIG_PAX_EMUPLT
4384 +                       case 2:
4385 +                       case 3:
4386 +                               return;
4387 +#endif
4388 +
4389 +                       }
4390 +                       pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391 +                       do_group_exit(SIGKILL);
4392 +               }
4393 +#endif
4394 +
4395                 /* Allow reads even for write-only mappings */
4396                 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397                         goto bad_area;
4398 diff -urNp linux-3.0.4/arch/sparc/mm/fault_64.c linux-3.0.4/arch/sparc/mm/fault_64.c
4399 --- linux-3.0.4/arch/sparc/mm/fault_64.c        2011-07-21 22:17:23.000000000 -0400
4400 +++ linux-3.0.4/arch/sparc/mm/fault_64.c        2011-08-23 21:48:14.000000000 -0400
4401 @@ -21,6 +21,9 @@
4402  #include <linux/kprobes.h>
4403  #include <linux/kdebug.h>
4404  #include <linux/percpu.h>
4405 +#include <linux/slab.h>
4406 +#include <linux/pagemap.h>
4407 +#include <linux/compiler.h>
4408  
4409  #include <asm/page.h>
4410  #include <asm/pgtable.h>
4411 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412         printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413                regs->tpc);
4414         printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415 -       printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416 +       printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417         printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418         dump_stack();
4419         unhandled_fault(regs->tpc, current, regs);
4420 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421         show_regs(regs);
4422  }
4423  
4424 +#ifdef CONFIG_PAX_PAGEEXEC
4425 +#ifdef CONFIG_PAX_DLRESOLVE
4426 +static void pax_emuplt_close(struct vm_area_struct *vma)
4427 +{
4428 +       vma->vm_mm->call_dl_resolve = 0UL;
4429 +}
4430 +
4431 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432 +{
4433 +       unsigned int *kaddr;
4434 +
4435 +       vmf->page = alloc_page(GFP_HIGHUSER);
4436 +       if (!vmf->page)
4437 +               return VM_FAULT_OOM;
4438 +
4439 +       kaddr = kmap(vmf->page);
4440 +       memset(kaddr, 0, PAGE_SIZE);
4441 +       kaddr[0] = 0x9DE3BFA8U; /* save */
4442 +       flush_dcache_page(vmf->page);
4443 +       kunmap(vmf->page);
4444 +       return VM_FAULT_MAJOR;
4445 +}
4446 +
4447 +static const struct vm_operations_struct pax_vm_ops = {
4448 +       .close = pax_emuplt_close,
4449 +       .fault = pax_emuplt_fault
4450 +};
4451 +
4452 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453 +{
4454 +       int ret;
4455 +
4456 +       INIT_LIST_HEAD(&vma->anon_vma_chain);
4457 +       vma->vm_mm = current->mm;
4458 +       vma->vm_start = addr;
4459 +       vma->vm_end = addr + PAGE_SIZE;
4460 +       vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461 +       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462 +       vma->vm_ops = &pax_vm_ops;
4463 +
4464 +       ret = insert_vm_struct(current->mm, vma);
4465 +       if (ret)
4466 +               return ret;
4467 +
4468 +       ++current->mm->total_vm;
4469 +       return 0;
4470 +}
4471 +#endif
4472 +
4473 +/*
4474 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4475 + *
4476 + * returns 1 when task should be killed
4477 + *         2 when patched PLT trampoline was detected
4478 + *         3 when unpatched PLT trampoline was detected
4479 + */
4480 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4481 +{
4482 +
4483 +#ifdef CONFIG_PAX_EMUPLT
4484 +       int err;
4485 +
4486 +       do { /* PaX: patched PLT emulation #1 */
4487 +               unsigned int sethi1, sethi2, jmpl;
4488 +
4489 +               err = get_user(sethi1, (unsigned int *)regs->tpc);
4490 +               err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491 +               err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492 +
4493 +               if (err)
4494 +                       break;
4495 +
4496 +               if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497 +                   (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498 +                   (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499 +               {
4500 +                       unsigned long addr;
4501 +
4502 +                       regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503 +                       addr = regs->u_regs[UREG_G1];
4504 +                       addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505 +
4506 +                       if (test_thread_flag(TIF_32BIT))
4507 +                               addr &= 0xFFFFFFFFUL;
4508 +
4509 +                       regs->tpc = addr;
4510 +                       regs->tnpc = addr+4;
4511 +                       return 2;
4512 +               }
4513 +       } while (0);
4514 +
4515 +       { /* PaX: patched PLT emulation #2 */
4516 +               unsigned int ba;
4517 +
4518 +               err = get_user(ba, (unsigned int *)regs->tpc);
4519 +
4520 +               if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521 +                       unsigned long addr;
4522 +
4523 +                       addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524 +
4525 +                       if (test_thread_flag(TIF_32BIT))
4526 +                               addr &= 0xFFFFFFFFUL;
4527 +
4528 +                       regs->tpc = addr;
4529 +                       regs->tnpc = addr+4;
4530 +                       return 2;
4531 +               }
4532 +       }
4533 +
4534 +       do { /* PaX: patched PLT emulation #3 */
4535 +               unsigned int sethi, jmpl, nop;
4536 +
4537 +               err = get_user(sethi, (unsigned int *)regs->tpc);
4538 +               err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539 +               err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540 +
4541 +               if (err)
4542 +                       break;
4543 +
4544 +               if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545 +                   (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546 +                   nop == 0x01000000U)
4547 +               {
4548 +                       unsigned long addr;
4549 +
4550 +                       addr = (sethi & 0x003FFFFFU) << 10;
4551 +                       regs->u_regs[UREG_G1] = addr;
4552 +                       addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553 +
4554 +                       if (test_thread_flag(TIF_32BIT))
4555 +                               addr &= 0xFFFFFFFFUL;
4556 +
4557 +                       regs->tpc = addr;
4558 +                       regs->tnpc = addr+4;
4559 +                       return 2;
4560 +               }
4561 +       } while (0);
4562 +
4563 +       do { /* PaX: patched PLT emulation #4 */
4564 +               unsigned int sethi, mov1, call, mov2;
4565 +
4566 +               err = get_user(sethi, (unsigned int *)regs->tpc);
4567 +               err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568 +               err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569 +               err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570 +
4571 +               if (err)
4572 +                       break;
4573 +
4574 +               if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575 +                   mov1 == 0x8210000FU &&
4576 +                   (call & 0xC0000000U) == 0x40000000U &&
4577 +                   mov2 == 0x9E100001U)
4578 +               {
4579 +                       unsigned long addr;
4580 +
4581 +                       regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582 +                       addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583 +
4584 +                       if (test_thread_flag(TIF_32BIT))
4585 +                               addr &= 0xFFFFFFFFUL;
4586 +
4587 +                       regs->tpc = addr;
4588 +                       regs->tnpc = addr+4;
4589 +                       return 2;
4590 +               }
4591 +       } while (0);
4592 +
4593 +       do { /* PaX: patched PLT emulation #5 */
4594 +               unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595 +
4596 +               err = get_user(sethi, (unsigned int *)regs->tpc);
4597 +               err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598 +               err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599 +               err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600 +               err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601 +               err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602 +               err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603 +               err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604 +
4605 +               if (err)
4606 +                       break;
4607 +
4608 +               if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609 +                   (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610 +                   (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611 +                   (or1 & 0xFFFFE000U) == 0x82106000U &&
4612 +                   (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613 +                   sllx == 0x83287020U &&
4614 +                   jmpl == 0x81C04005U &&
4615 +                   nop == 0x01000000U)
4616 +               {
4617 +                       unsigned long addr;
4618 +
4619 +                       regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620 +                       regs->u_regs[UREG_G1] <<= 32;
4621 +                       regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622 +                       addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623 +                       regs->tpc = addr;
4624 +                       regs->tnpc = addr+4;
4625 +                       return 2;
4626 +               }
4627 +       } while (0);
4628 +
4629 +       do { /* PaX: patched PLT emulation #6 */
4630 +               unsigned int sethi, sethi1, sethi2, sllx, or,  jmpl, nop;
4631 +
4632 +               err = get_user(sethi, (unsigned int *)regs->tpc);
4633 +               err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634 +               err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635 +               err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636 +               err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637 +               err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638 +               err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639 +
4640 +               if (err)
4641 +                       break;
4642 +
4643 +               if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644 +                   (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645 +                   (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646 +                   sllx == 0x83287020U &&
4647 +                   (or & 0xFFFFE000U) == 0x8A116000U &&
4648 +                   jmpl == 0x81C04005U &&
4649 +                   nop == 0x01000000U)
4650 +               {
4651 +                       unsigned long addr;
4652 +
4653 +                       regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654 +                       regs->u_regs[UREG_G1] <<= 32;
4655 +                       regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656 +                       addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657 +                       regs->tpc = addr;
4658 +                       regs->tnpc = addr+4;
4659 +                       return 2;
4660 +               }
4661 +       } while (0);
4662 +
4663 +       do { /* PaX: unpatched PLT emulation step 1 */
4664 +               unsigned int sethi, ba, nop;
4665 +
4666 +               err = get_user(sethi, (unsigned int *)regs->tpc);
4667 +               err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668 +               err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669 +
4670 +               if (err)
4671 +                       break;
4672 +
4673 +               if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 +                   ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675 +                   nop == 0x01000000U)
4676 +               {
4677 +                       unsigned long addr;
4678 +                       unsigned int save, call;
4679 +                       unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680 +
4681 +                       if ((ba & 0xFFC00000U) == 0x30800000U)
4682 +                               addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683 +                       else
4684 +                               addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685 +
4686 +                       if (test_thread_flag(TIF_32BIT))
4687 +                               addr &= 0xFFFFFFFFUL;
4688 +
4689 +                       err = get_user(save, (unsigned int *)addr);
4690 +                       err |= get_user(call, (unsigned int *)(addr+4));
4691 +                       err |= get_user(nop, (unsigned int *)(addr+8));
4692 +                       if (err)
4693 +                               break;
4694 +
4695 +#ifdef CONFIG_PAX_DLRESOLVE
4696 +                       if (save == 0x9DE3BFA8U &&
4697 +                           (call & 0xC0000000U) == 0x40000000U &&
4698 +                           nop == 0x01000000U)
4699 +                       {
4700 +                               struct vm_area_struct *vma;
4701 +                               unsigned long call_dl_resolve;
4702 +
4703 +                               down_read(&current->mm->mmap_sem);
4704 +                               call_dl_resolve = current->mm->call_dl_resolve;
4705 +                               up_read(&current->mm->mmap_sem);
4706 +                               if (likely(call_dl_resolve))
4707 +                                       goto emulate;
4708 +
4709 +                               vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710 +
4711 +                               down_write(&current->mm->mmap_sem);
4712 +                               if (current->mm->call_dl_resolve) {
4713 +                                       call_dl_resolve = current->mm->call_dl_resolve;
4714 +                                       up_write(&current->mm->mmap_sem);
4715 +                                       if (vma)
4716 +                                               kmem_cache_free(vm_area_cachep, vma);
4717 +                                       goto emulate;
4718 +                               }
4719 +
4720 +                               call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721 +                               if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722 +                                       up_write(&current->mm->mmap_sem);
4723 +                                       if (vma)
4724 +                                               kmem_cache_free(vm_area_cachep, vma);
4725 +                                       return 1;
4726 +                               }
4727 +
4728 +                               if (pax_insert_vma(vma, call_dl_resolve)) {
4729 +                                       up_write(&current->mm->mmap_sem);
4730 +                                       kmem_cache_free(vm_area_cachep, vma);
4731 +                                       return 1;
4732 +                               }
4733 +
4734 +                               current->mm->call_dl_resolve = call_dl_resolve;
4735 +                               up_write(&current->mm->mmap_sem);
4736 +
4737 +emulate:
4738 +                               regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739 +                               regs->tpc = call_dl_resolve;
4740 +                               regs->tnpc = addr+4;
4741 +                               return 3;
4742 +                       }
4743 +#endif
4744 +
4745 +                       /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746 +                       if ((save & 0xFFC00000U) == 0x05000000U &&
4747 +                           (call & 0xFFFFE000U) == 0x85C0A000U &&
4748 +                           nop == 0x01000000U)
4749 +                       {
4750 +                               regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751 +                               regs->u_regs[UREG_G2] = addr + 4;
4752 +                               addr = (save & 0x003FFFFFU) << 10;
4753 +                               addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754 +
4755 +                               if (test_thread_flag(TIF_32BIT))
4756 +                                       addr &= 0xFFFFFFFFUL;
4757 +
4758 +                               regs->tpc = addr;
4759 +                               regs->tnpc = addr+4;
4760 +                               return 3;
4761 +                       }
4762 +
4763 +                       /* PaX: 64-bit PLT stub */
4764 +                       err = get_user(sethi1, (unsigned int *)addr);
4765 +                       err |= get_user(sethi2, (unsigned int *)(addr+4));
4766 +                       err |= get_user(or1, (unsigned int *)(addr+8));
4767 +                       err |= get_user(or2, (unsigned int *)(addr+12));
4768 +                       err |= get_user(sllx, (unsigned int *)(addr+16));
4769 +                       err |= get_user(add, (unsigned int *)(addr+20));
4770 +                       err |= get_user(jmpl, (unsigned int *)(addr+24));
4771 +                       err |= get_user(nop, (unsigned int *)(addr+28));
4772 +                       if (err)
4773 +                               break;
4774 +
4775 +                       if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776 +                           (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777 +                           (or1 & 0xFFFFE000U) == 0x88112000U &&
4778 +                           (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779 +                           sllx == 0x89293020U &&
4780 +                           add == 0x8A010005U &&
4781 +                           jmpl == 0x89C14000U &&
4782 +                           nop == 0x01000000U)
4783 +                       {
4784 +                               regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785 +                               regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786 +                               regs->u_regs[UREG_G4] <<= 32;
4787 +                               regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788 +                               regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789 +                               regs->u_regs[UREG_G4] = addr + 24;
4790 +                               addr = regs->u_regs[UREG_G5];
4791 +                               regs->tpc = addr;
4792 +                               regs->tnpc = addr+4;
4793 +                               return 3;
4794 +                       }
4795 +               }
4796 +       } while (0);
4797 +
4798 +#ifdef CONFIG_PAX_DLRESOLVE
4799 +       do { /* PaX: unpatched PLT emulation step 2 */
4800 +               unsigned int save, call, nop;
4801 +
4802 +               err = get_user(save, (unsigned int *)(regs->tpc-4));
4803 +               err |= get_user(call, (unsigned int *)regs->tpc);
4804 +               err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805 +               if (err)
4806 +                       break;
4807 +
4808 +               if (save == 0x9DE3BFA8U &&
4809 +                   (call & 0xC0000000U) == 0x40000000U &&
4810 +                   nop == 0x01000000U)
4811 +               {
4812 +                       unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813 +
4814 +                       if (test_thread_flag(TIF_32BIT))
4815 +                               dl_resolve &= 0xFFFFFFFFUL;
4816 +
4817 +                       regs->u_regs[UREG_RETPC] = regs->tpc;
4818 +                       regs->tpc = dl_resolve;
4819 +                       regs->tnpc = dl_resolve+4;
4820 +                       return 3;
4821 +               }
4822 +       } while (0);
4823 +#endif
4824 +
4825 +       do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826 +               unsigned int sethi, ba, nop;
4827 +
4828 +               err = get_user(sethi, (unsigned int *)regs->tpc);
4829 +               err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830 +               err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831 +
4832 +               if (err)
4833 +                       break;
4834 +
4835 +               if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836 +                   (ba & 0xFFF00000U) == 0x30600000U &&
4837 +                   nop == 0x01000000U)
4838 +               {
4839 +                       unsigned long addr;
4840 +
4841 +                       addr = (sethi & 0x003FFFFFU) << 10;
4842 +                       regs->u_regs[UREG_G1] = addr;
4843 +                       addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844 +
4845 +                       if (test_thread_flag(TIF_32BIT))
4846 +                               addr &= 0xFFFFFFFFUL;
4847 +
4848 +                       regs->tpc = addr;
4849 +                       regs->tnpc = addr+4;
4850 +                       return 2;
4851 +               }
4852 +       } while (0);
4853 +
4854 +#endif
4855 +
4856 +       return 1;
4857 +}
4858 +
4859 +void pax_report_insns(void *pc, void *sp)
4860 +{
4861 +       unsigned long i;
4862 +
4863 +       printk(KERN_ERR "PAX: bytes at PC: ");
4864 +       for (i = 0; i < 8; i++) {
4865 +               unsigned int c;
4866 +               if (get_user(c, (unsigned int *)pc+i))
4867 +                       printk(KERN_CONT "???????? ");
4868 +               else
4869 +                       printk(KERN_CONT "%08x ", c);
4870 +       }
4871 +       printk("\n");
4872 +}
4873 +#endif
4874 +
4875  asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876  {
4877         struct mm_struct *mm = current->mm;
4878 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879         if (!vma)
4880                 goto bad_area;
4881  
4882 +#ifdef CONFIG_PAX_PAGEEXEC
4883 +       /* PaX: detect ITLB misses on non-exec pages */
4884 +       if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885 +           !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886 +       {
4887 +               if (address != regs->tpc)
4888 +                       goto good_area;
4889 +
4890 +               up_read(&mm->mmap_sem);
4891 +               switch (pax_handle_fetch_fault(regs)) {
4892 +
4893 +#ifdef CONFIG_PAX_EMUPLT
4894 +               case 2:
4895 +               case 3:
4896 +                       return;
4897 +#endif
4898 +
4899 +               }
4900 +               pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901 +               do_group_exit(SIGKILL);
4902 +       }
4903 +#endif
4904 +
4905         /* Pure DTLB misses do not tell us whether the fault causing
4906          * load/store/atomic was a write or not, it only says that there
4907          * was no match.  So in such a case we (carefully) read the
4908 diff -urNp linux-3.0.4/arch/sparc/mm/hugetlbpage.c linux-3.0.4/arch/sparc/mm/hugetlbpage.c
4909 --- linux-3.0.4/arch/sparc/mm/hugetlbpage.c     2011-07-21 22:17:23.000000000 -0400
4910 +++ linux-3.0.4/arch/sparc/mm/hugetlbpage.c     2011-08-23 21:47:55.000000000 -0400
4911 @@ -68,7 +68,7 @@ full_search:
4912                         }
4913                         return -ENOMEM;
4914                 }
4915 -               if (likely(!vma || addr + len <= vma->vm_start)) {
4916 +               if (likely(check_heap_stack_gap(vma, addr, len))) {
4917                         /*
4918                          * Remember the place where we stopped the search:
4919                          */
4920 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921         /* make sure it can fit in the remaining address space */
4922         if (likely(addr > len)) {
4923                 vma = find_vma(mm, addr-len);
4924 -               if (!vma || addr <= vma->vm_start) {
4925 +               if (check_heap_stack_gap(vma, addr - len, len)) {
4926                         /* remember the address as a hint for next time */
4927                         return (mm->free_area_cache = addr-len);
4928                 }
4929 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930         if (unlikely(mm->mmap_base < len))
4931                 goto bottomup;
4932  
4933 -       addr = (mm->mmap_base-len) & HPAGE_MASK;
4934 +       addr = mm->mmap_base - len;
4935  
4936         do {
4937 +               addr &= HPAGE_MASK;
4938                 /*
4939                  * Lookup failure means no vma is above this address,
4940                  * else if new region fits below vma->vm_start,
4941                  * return with success:
4942                  */
4943                 vma = find_vma(mm, addr);
4944 -               if (likely(!vma || addr+len <= vma->vm_start)) {
4945 +               if (likely(check_heap_stack_gap(vma, addr, len))) {
4946                         /* remember the address as a hint for next time */
4947                         return (mm->free_area_cache = addr);
4948                 }
4949 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950                         mm->cached_hole_size = vma->vm_start - addr;
4951  
4952                 /* try just below the current vma->vm_start */
4953 -               addr = (vma->vm_start-len) & HPAGE_MASK;
4954 -       } while (likely(len < vma->vm_start));
4955 +               addr = skip_heap_stack_gap(vma, len);
4956 +       } while (!IS_ERR_VALUE(addr));
4957  
4958  bottomup:
4959         /*
4960 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961         if (addr) {
4962                 addr = ALIGN(addr, HPAGE_SIZE);
4963                 vma = find_vma(mm, addr);
4964 -               if (task_size - len >= addr &&
4965 -                   (!vma || addr + len <= vma->vm_start))
4966 +               if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967                         return addr;
4968         }
4969         if (mm->get_unmapped_area == arch_get_unmapped_area)
4970 diff -urNp linux-3.0.4/arch/sparc/mm/init_32.c linux-3.0.4/arch/sparc/mm/init_32.c
4971 --- linux-3.0.4/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972 +++ linux-3.0.4/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973 @@ -316,6 +316,9 @@ extern void device_scan(void);
4974  pgprot_t PAGE_SHARED __read_mostly;
4975  EXPORT_SYMBOL(PAGE_SHARED);
4976  
4977 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979 +
4980  void __init paging_init(void)
4981  {
4982         switch(sparc_cpu_model) {
4983 @@ -344,17 +347,17 @@ void __init paging_init(void)
4984  
4985         /* Initialize the protection map with non-constant, MMU dependent values. */
4986         protection_map[0] = PAGE_NONE;
4987 -       protection_map[1] = PAGE_READONLY;
4988 -       protection_map[2] = PAGE_COPY;
4989 -       protection_map[3] = PAGE_COPY;
4990 +       protection_map[1] = PAGE_READONLY_NOEXEC;
4991 +       protection_map[2] = PAGE_COPY_NOEXEC;
4992 +       protection_map[3] = PAGE_COPY_NOEXEC;
4993         protection_map[4] = PAGE_READONLY;
4994         protection_map[5] = PAGE_READONLY;
4995         protection_map[6] = PAGE_COPY;
4996         protection_map[7] = PAGE_COPY;
4997         protection_map[8] = PAGE_NONE;
4998 -       protection_map[9] = PAGE_READONLY;
4999 -       protection_map[10] = PAGE_SHARED;
5000 -       protection_map[11] = PAGE_SHARED;
5001 +       protection_map[9] = PAGE_READONLY_NOEXEC;
5002 +       protection_map[10] = PAGE_SHARED_NOEXEC;
5003 +       protection_map[11] = PAGE_SHARED_NOEXEC;
5004         protection_map[12] = PAGE_READONLY;
5005         protection_map[13] = PAGE_READONLY;
5006         protection_map[14] = PAGE_SHARED;
5007 diff -urNp linux-3.0.4/arch/sparc/mm/Makefile linux-3.0.4/arch/sparc/mm/Makefile
5008 --- linux-3.0.4/arch/sparc/mm/Makefile  2011-07-21 22:17:23.000000000 -0400
5009 +++ linux-3.0.4/arch/sparc/mm/Makefile  2011-08-23 21:47:55.000000000 -0400
5010 @@ -2,7 +2,7 @@
5011  #
5012  
5013  asflags-y := -ansi
5014 -ccflags-y := -Werror
5015 +#ccflags-y := -Werror
5016  
5017  obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o
5018  obj-y                   += fault_$(BITS).o
5019 diff -urNp linux-3.0.4/arch/sparc/mm/srmmu.c linux-3.0.4/arch/sparc/mm/srmmu.c
5020 --- linux-3.0.4/arch/sparc/mm/srmmu.c   2011-07-21 22:17:23.000000000 -0400
5021 +++ linux-3.0.4/arch/sparc/mm/srmmu.c   2011-08-23 21:47:55.000000000 -0400
5022 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023         PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024         BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025         BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026 +
5027 +#ifdef CONFIG_PAX_PAGEEXEC
5028 +       PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029 +       BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030 +       BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031 +#endif
5032 +
5033         BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034         page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035  
5036 diff -urNp linux-3.0.4/arch/um/include/asm/kmap_types.h linux-3.0.4/arch/um/include/asm/kmap_types.h
5037 --- linux-3.0.4/arch/um/include/asm/kmap_types.h        2011-07-21 22:17:23.000000000 -0400
5038 +++ linux-3.0.4/arch/um/include/asm/kmap_types.h        2011-08-23 21:47:55.000000000 -0400
5039 @@ -23,6 +23,7 @@ enum km_type {
5040         KM_IRQ1,
5041         KM_SOFTIRQ0,
5042         KM_SOFTIRQ1,
5043 +       KM_CLEARPAGE,
5044         KM_TYPE_NR
5045  };
5046  
5047 diff -urNp linux-3.0.4/arch/um/include/asm/page.h linux-3.0.4/arch/um/include/asm/page.h
5048 --- linux-3.0.4/arch/um/include/asm/page.h      2011-07-21 22:17:23.000000000 -0400
5049 +++ linux-3.0.4/arch/um/include/asm/page.h      2011-08-23 21:47:55.000000000 -0400
5050 @@ -14,6 +14,9 @@
5051  #define PAGE_SIZE      (_AC(1, UL) << PAGE_SHIFT)
5052  #define PAGE_MASK      (~(PAGE_SIZE-1))
5053  
5054 +#define ktla_ktva(addr)                        (addr)
5055 +#define ktva_ktla(addr)                        (addr)
5056 +
5057  #ifndef __ASSEMBLY__
5058  
5059  struct page;
5060 diff -urNp linux-3.0.4/arch/um/kernel/process.c linux-3.0.4/arch/um/kernel/process.c
5061 --- linux-3.0.4/arch/um/kernel/process.c        2011-07-21 22:17:23.000000000 -0400
5062 +++ linux-3.0.4/arch/um/kernel/process.c        2011-08-23 21:47:55.000000000 -0400
5063 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5064         return 2;
5065  }
5066  
5067 -/*
5068 - * Only x86 and x86_64 have an arch_align_stack().
5069 - * All other arches have "#define arch_align_stack(x) (x)"
5070 - * in their asm/system.h
5071 - * As this is included in UML from asm-um/system-generic.h,
5072 - * we can use it to behave as the subarch does.
5073 - */
5074 -#ifndef arch_align_stack
5075 -unsigned long arch_align_stack(unsigned long sp)
5076 -{
5077 -       if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078 -               sp -= get_random_int() % 8192;
5079 -       return sp & ~0xf;
5080 -}
5081 -#endif
5082 -
5083  unsigned long get_wchan(struct task_struct *p)
5084  {
5085         unsigned long stack_page, sp, ip;
5086 diff -urNp linux-3.0.4/arch/um/sys-i386/syscalls.c linux-3.0.4/arch/um/sys-i386/syscalls.c
5087 --- linux-3.0.4/arch/um/sys-i386/syscalls.c     2011-07-21 22:17:23.000000000 -0400
5088 +++ linux-3.0.4/arch/um/sys-i386/syscalls.c     2011-08-23 21:47:55.000000000 -0400
5089 @@ -11,6 +11,21 @@
5090  #include "asm/uaccess.h"
5091  #include "asm/unistd.h"
5092  
5093 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094 +{
5095 +       unsigned long pax_task_size = TASK_SIZE;
5096 +
5097 +#ifdef CONFIG_PAX_SEGMEXEC
5098 +       if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099 +               pax_task_size = SEGMEXEC_TASK_SIZE;
5100 +#endif
5101 +
5102 +       if (len > pax_task_size || addr > pax_task_size - len)
5103 +               return -EINVAL;
5104 +
5105 +       return 0;
5106 +}
5107 +
5108  /*
5109   * The prototype on i386 is:
5110   *
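
Note on the i386_mmap_check() hunk above: the range test is written as len > pax_task_size || addr > pax_task_size - len rather than the naive addr + len > pax_task_size, so an attacker-controlled addr/len pair cannot wrap the unsigned sum and slip past the limit; SEGMEXEC simply halves the limit first. A minimal stand-alone C sketch of the same check (TASK_LIMIT is an illustrative value, not the kernel constant):

    #include <errno.h>

    #define TASK_LIMIT 0xC0000000UL   /* illustrative stand-in for TASK_SIZE */

    /* Reject a mapping whose end would wrap around or exceed the task limit. */
    static int mmap_range_ok(unsigned long addr, unsigned long len)
    {
            if (len > TASK_LIMIT || addr > TASK_LIMIT - len)
                    return -EINVAL;
            return 0;
    }
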
5111 diff -urNp linux-3.0.4/arch/x86/boot/bitops.h linux-3.0.4/arch/x86/boot/bitops.h
5112 --- linux-3.0.4/arch/x86/boot/bitops.h  2011-07-21 22:17:23.000000000 -0400
5113 +++ linux-3.0.4/arch/x86/boot/bitops.h  2011-08-23 21:47:55.000000000 -0400
5114 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int 
5115         u8 v;
5116         const u32 *p = (const u32 *)addr;
5117  
5118 -       asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119 +       asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120         return v;
5121  }
5122  
5123 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int 
5124  
5125  static inline void set_bit(int nr, void *addr)
5126  {
5127 -       asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128 +       asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129  }
5130  
5131  #endif /* BOOT_BITOPS_H */
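
The asm-to-"asm volatile" conversions here and in the following boot files (boot.h, cpucheck.c) tell GCC that these statements must not be deleted or merged by the optimizer even when their outputs appear unused; the emitted instructions themselves are unchanged. A minimal sketch of the pattern, modelled on the ds() helper (read_ds is an illustrative name):

    static inline unsigned short read_ds(void)
    {
            unsigned short seg;

            /* volatile: keep the read even if "seg" looks dead to the optimizer */
            asm volatile("movw %%ds,%0" : "=rm" (seg));
            return seg;
    }
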
5132 diff -urNp linux-3.0.4/arch/x86/boot/boot.h linux-3.0.4/arch/x86/boot/boot.h
5133 --- linux-3.0.4/arch/x86/boot/boot.h    2011-07-21 22:17:23.000000000 -0400
5134 +++ linux-3.0.4/arch/x86/boot/boot.h    2011-08-23 21:47:55.000000000 -0400
5135 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5136  static inline u16 ds(void)
5137  {
5138         u16 seg;
5139 -       asm("movw %%ds,%0" : "=rm" (seg));
5140 +       asm volatile("movw %%ds,%0" : "=rm" (seg));
5141         return seg;
5142  }
5143  
5144 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t 
5145  static inline int memcmp(const void *s1, const void *s2, size_t len)
5146  {
5147         u8 diff;
5148 -       asm("repe; cmpsb; setnz %0"
5149 +       asm volatile("repe; cmpsb; setnz %0"
5150             : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151         return diff;
5152  }
5153 diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_32.S linux-3.0.4/arch/x86/boot/compressed/head_32.S
5154 --- linux-3.0.4/arch/x86/boot/compressed/head_32.S      2011-07-21 22:17:23.000000000 -0400
5155 +++ linux-3.0.4/arch/x86/boot/compressed/head_32.S      2011-08-23 21:47:55.000000000 -0400
5156 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5157         notl    %eax
5158         andl    %eax, %ebx
5159  #else
5160 -       movl    $LOAD_PHYSICAL_ADDR, %ebx
5161 +       movl    $____LOAD_PHYSICAL_ADDR, %ebx
5162  #endif
5163  
5164         /* Target address to relocate to for decompression */
5165 @@ -162,7 +162,7 @@ relocated:
5166   * and where it was actually loaded.
5167   */
5168         movl    %ebp, %ebx
5169 -       subl    $LOAD_PHYSICAL_ADDR, %ebx
5170 +       subl    $____LOAD_PHYSICAL_ADDR, %ebx
5171         jz      2f      /* Nothing to be done if loaded at compiled addr. */
5172  /*
5173   * Process relocations.
5174 @@ -170,8 +170,7 @@ relocated:
5175  
5176  1:     subl    $4, %edi
5177         movl    (%edi), %ecx
5178 -       testl   %ecx, %ecx
5179 -       jz      2f
5180 +       jecxz   2f
5181         addl    %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182         jmp     1b
5183  2:
5184 diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_64.S linux-3.0.4/arch/x86/boot/compressed/head_64.S
5185 --- linux-3.0.4/arch/x86/boot/compressed/head_64.S      2011-07-21 22:17:23.000000000 -0400
5186 +++ linux-3.0.4/arch/x86/boot/compressed/head_64.S      2011-08-23 21:47:55.000000000 -0400
5187 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5188         notl    %eax
5189         andl    %eax, %ebx
5190  #else
5191 -       movl    $LOAD_PHYSICAL_ADDR, %ebx
5192 +       movl    $____LOAD_PHYSICAL_ADDR, %ebx
5193  #endif
5194  
5195         /* Target address to relocate to for decompression */
5196 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5197         notq    %rax
5198         andq    %rax, %rbp
5199  #else
5200 -       movq    $LOAD_PHYSICAL_ADDR, %rbp
5201 +       movq    $____LOAD_PHYSICAL_ADDR, %rbp
5202  #endif
5203  
5204         /* Target address to relocate to for decompression */
5205 diff -urNp linux-3.0.4/arch/x86/boot/compressed/Makefile linux-3.0.4/arch/x86/boot/compressed/Makefile
5206 --- linux-3.0.4/arch/x86/boot/compressed/Makefile       2011-07-21 22:17:23.000000000 -0400
5207 +++ linux-3.0.4/arch/x86/boot/compressed/Makefile       2011-08-23 21:47:55.000000000 -0400
5208 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209  KBUILD_CFLAGS += $(cflags-y)
5210  KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211  KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212 +ifdef CONSTIFY_PLUGIN
5213 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214 +endif
5215  
5216  KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217  GCOV_PROFILE := n
5218 diff -urNp linux-3.0.4/arch/x86/boot/compressed/misc.c linux-3.0.4/arch/x86/boot/compressed/misc.c
5219 --- linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220 +++ linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222                 case PT_LOAD:
5223  #ifdef CONFIG_RELOCATABLE
5224                         dest = output;
5225 -                       dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226 +                       dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227  #else
5228                         dest = (void *)(phdr->p_paddr);
5229  #endif
5230 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231                 error("Destination address too large");
5232  #endif
5233  #ifndef CONFIG_RELOCATABLE
5234 -       if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235 +       if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236                 error("Wrong destination address");
5237  #endif
5238  
5239 diff -urNp linux-3.0.4/arch/x86/boot/compressed/relocs.c linux-3.0.4/arch/x86/boot/compressed/relocs.c
5240 --- linux-3.0.4/arch/x86/boot/compressed/relocs.c       2011-07-21 22:17:23.000000000 -0400
5241 +++ linux-3.0.4/arch/x86/boot/compressed/relocs.c       2011-08-23 21:47:55.000000000 -0400
5242 @@ -13,8 +13,11 @@
5243  
5244  static void die(char *fmt, ...);
5245  
5246 +#include "../../../../include/generated/autoconf.h"
5247 +
5248  #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249  static Elf32_Ehdr ehdr;
5250 +static Elf32_Phdr *phdr;
5251  static unsigned long reloc_count, reloc_idx;
5252  static unsigned long *relocs;
5253  
5254 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255         }
5256  }
5257  
5258 +static void read_phdrs(FILE *fp)
5259 +{
5260 +       unsigned int i;
5261 +
5262 +       phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263 +       if (!phdr) {
5264 +               die("Unable to allocate %d program headers\n",
5265 +                   ehdr.e_phnum);
5266 +       }
5267 +       if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268 +               die("Seek to %d failed: %s\n",
5269 +                       ehdr.e_phoff, strerror(errno));
5270 +       }
5271 +       if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272 +               die("Cannot read ELF program headers: %s\n",
5273 +                       strerror(errno));
5274 +       }
5275 +       for(i = 0; i < ehdr.e_phnum; i++) {
5276 +               phdr[i].p_type      = elf32_to_cpu(phdr[i].p_type);
5277 +               phdr[i].p_offset    = elf32_to_cpu(phdr[i].p_offset);
5278 +               phdr[i].p_vaddr     = elf32_to_cpu(phdr[i].p_vaddr);
5279 +               phdr[i].p_paddr     = elf32_to_cpu(phdr[i].p_paddr);
5280 +               phdr[i].p_filesz    = elf32_to_cpu(phdr[i].p_filesz);
5281 +               phdr[i].p_memsz     = elf32_to_cpu(phdr[i].p_memsz);
5282 +               phdr[i].p_flags     = elf32_to_cpu(phdr[i].p_flags);
5283 +               phdr[i].p_align     = elf32_to_cpu(phdr[i].p_align);
5284 +       }
5285 +
5286 +}
5287 +
5288  static void read_shdrs(FILE *fp)
5289  {
5290 -       int i;
5291 +       unsigned int i;
5292         Elf32_Shdr shdr;
5293  
5294         secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296  
5297  static void read_strtabs(FILE *fp)
5298  {
5299 -       int i;
5300 +       unsigned int i;
5301         for (i = 0; i < ehdr.e_shnum; i++) {
5302                 struct section *sec = &secs[i];
5303                 if (sec->shdr.sh_type != SHT_STRTAB) {
5304 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305  
5306  static void read_symtabs(FILE *fp)
5307  {
5308 -       int i,j;
5309 +       unsigned int i,j;
5310         for (i = 0; i < ehdr.e_shnum; i++) {
5311                 struct section *sec = &secs[i];
5312                 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314  
5315  static void read_relocs(FILE *fp)
5316  {
5317 -       int i,j;
5318 +       unsigned int i,j;
5319 +       uint32_t base;
5320 +
5321         for (i = 0; i < ehdr.e_shnum; i++) {
5322                 struct section *sec = &secs[i];
5323                 if (sec->shdr.sh_type != SHT_REL) {
5324 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325                         die("Cannot read symbol table: %s\n",
5326                                 strerror(errno));
5327                 }
5328 +               base = 0;
5329 +               for (j = 0; j < ehdr.e_phnum; j++) {
5330 +                       if (phdr[j].p_type != PT_LOAD )
5331 +                               continue;
5332 +                       if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333 +                               continue;
5334 +                       base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335 +                       break;
5336 +               }
5337                 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338                         Elf32_Rel *rel = &sec->reltab[j];
5339 -                       rel->r_offset = elf32_to_cpu(rel->r_offset);
5340 +                       rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341                         rel->r_info   = elf32_to_cpu(rel->r_info);
5342                 }
5343         }
5344 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345  
5346  static void print_absolute_symbols(void)
5347  {
5348 -       int i;
5349 +       unsigned int i;
5350         printf("Absolute symbols\n");
5351         printf(" Num:    Value Size  Type       Bind        Visibility  Name\n");
5352         for (i = 0; i < ehdr.e_shnum; i++) {
5353                 struct section *sec = &secs[i];
5354                 char *sym_strtab;
5355                 Elf32_Sym *sh_symtab;
5356 -               int j;
5357 +               unsigned int j;
5358  
5359                 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360                         continue;
5361 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362  
5363  static void print_absolute_relocs(void)
5364  {
5365 -       int i, printed = 0;
5366 +       unsigned int i, printed = 0;
5367  
5368         for (i = 0; i < ehdr.e_shnum; i++) {
5369                 struct section *sec = &secs[i];
5370                 struct section *sec_applies, *sec_symtab;
5371                 char *sym_strtab;
5372                 Elf32_Sym *sh_symtab;
5373 -               int j;
5374 +               unsigned int j;
5375                 if (sec->shdr.sh_type != SHT_REL) {
5376                         continue;
5377                 }
5378 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379  
5380  static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381  {
5382 -       int i;
5383 +       unsigned int i;
5384         /* Walk through the relocations */
5385         for (i = 0; i < ehdr.e_shnum; i++) {
5386                 char *sym_strtab;
5387                 Elf32_Sym *sh_symtab;
5388                 struct section *sec_applies, *sec_symtab;
5389 -               int j;
5390 +               unsigned int j;
5391                 struct section *sec = &secs[i];
5392  
5393                 if (sec->shdr.sh_type != SHT_REL) {
5394 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395                             !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396                                 continue;
5397                         }
5398 +                       /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399 +                       if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400 +                               continue;
5401 +
5402 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403 +                       /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404 +                       if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405 +                               continue;
5406 +                       if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407 +                               continue;
5408 +                       if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409 +                               continue;
5410 +                       if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411 +                               continue;
5412 +#endif
5413 +
5414                         switch (r_type) {
5415                         case R_386_NONE:
5416                         case R_386_PC32:
5417 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418  
5419  static void emit_relocs(int as_text)
5420  {
5421 -       int i;
5422 +       unsigned int i;
5423         /* Count how many relocations I have and allocate space for them. */
5424         reloc_count = 0;
5425         walk_relocs(count_reloc);
5426 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427                         fname, strerror(errno));
5428         }
5429         read_ehdr(fp);
5430 +       read_phdrs(fp);
5431         read_shdrs(fp);
5432         read_strtabs(fp);
5433         read_symtabs(fp);
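
Summary of the relocs.c changes above: the tool now reads the program headers, finds the PT_LOAD segment that contains each relocation section, and adds a base of CONFIG_PAGE_OFFSET plus that segment's physical/virtual delta to every relocation offset before emitting it; relocations that must not be touched at boot (per-cpu variables and, with KERNEXEC on 32-bit, the kernel text sections) are skipped in walk_relocs(). The base computation, restated as stand-alone C:

    #include <stdint.h>
    #include <elf.h>

    /* Does this PT_LOAD segment contain the given section file offset? */
    static int phdr_contains(const Elf32_Phdr *p, Elf32_Off sh_offset)
    {
            return p->p_type == PT_LOAD &&
                   sh_offset >= p->p_offset &&
                   sh_offset <  p->p_offset + p->p_filesz;
    }

    /* Base added to r_offset: page_offset plus the segment's paddr-vaddr delta. */
    static uint32_t reloc_base(const Elf32_Phdr *p, uint32_t page_offset)
    {
            return page_offset + p->p_paddr - p->p_vaddr;
    }
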
5434 diff -urNp linux-3.0.4/arch/x86/boot/cpucheck.c linux-3.0.4/arch/x86/boot/cpucheck.c
5435 --- linux-3.0.4/arch/x86/boot/cpucheck.c        2011-07-21 22:17:23.000000000 -0400
5436 +++ linux-3.0.4/arch/x86/boot/cpucheck.c        2011-08-23 21:47:55.000000000 -0400
5437 @@ -74,7 +74,7 @@ static int has_fpu(void)
5438         u16 fcw = -1, fsw = -1;
5439         u32 cr0;
5440  
5441 -       asm("movl %%cr0,%0" : "=r" (cr0));
5442 +       asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443         if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444                 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445                 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447  {
5448         u32 f0, f1;
5449  
5450 -       asm("pushfl ; "
5451 +       asm volatile("pushfl ; "
5452             "pushfl ; "
5453             "popl %0 ; "
5454             "movl %0,%1 ; "
5455 @@ -115,7 +115,7 @@ static void get_flags(void)
5456                 set_bit(X86_FEATURE_FPU, cpu.flags);
5457  
5458         if (has_eflag(X86_EFLAGS_ID)) {
5459 -               asm("cpuid"
5460 +               asm volatile("cpuid"
5461                     : "=a" (max_intel_level),
5462                       "=b" (cpu_vendor[0]),
5463                       "=d" (cpu_vendor[1]),
5464 @@ -124,7 +124,7 @@ static void get_flags(void)
5465  
5466                 if (max_intel_level >= 0x00000001 &&
5467                     max_intel_level <= 0x0000ffff) {
5468 -                       asm("cpuid"
5469 +                       asm volatile("cpuid"
5470                             : "=a" (tfms),
5471                               "=c" (cpu.flags[4]),
5472                               "=d" (cpu.flags[0])
5473 @@ -136,7 +136,7 @@ static void get_flags(void)
5474                                 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475                 }
5476  
5477 -               asm("cpuid"
5478 +               asm volatile("cpuid"
5479                     : "=a" (max_amd_level)
5480                     : "a" (0x80000000)
5481                     : "ebx", "ecx", "edx");
5482 @@ -144,7 +144,7 @@ static void get_flags(void)
5483                 if (max_amd_level >= 0x80000001 &&
5484                     max_amd_level <= 0x8000ffff) {
5485                         u32 eax = 0x80000001;
5486 -                       asm("cpuid"
5487 +                       asm volatile("cpuid"
5488                             : "+a" (eax),
5489                               "=c" (cpu.flags[6]),
5490                               "=d" (cpu.flags[1])
5491 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492                 u32 ecx = MSR_K7_HWCR;
5493                 u32 eax, edx;
5494  
5495 -               asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496 +               asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497                 eax &= ~(1 << 15);
5498 -               asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499 +               asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500  
5501                 get_flags();    /* Make sure it really did something */
5502                 err = check_flags();
5503 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504                 u32 ecx = MSR_VIA_FCR;
5505                 u32 eax, edx;
5506  
5507 -               asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508 +               asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509                 eax |= (1<<1)|(1<<7);
5510 -               asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511 +               asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512  
5513                 set_bit(X86_FEATURE_CX8, cpu.flags);
5514                 err = check_flags();
5515 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516                 u32 eax, edx;
5517                 u32 level = 1;
5518  
5519 -               asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520 -               asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521 -               asm("cpuid"
5522 +               asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523 +               asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524 +               asm volatile("cpuid"
5525                     : "+a" (level), "=d" (cpu.flags[0])
5526                     : : "ecx", "ebx");
5527 -               asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528 +               asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529  
5530                 err = check_flags();
5531         }
5532 diff -urNp linux-3.0.4/arch/x86/boot/header.S linux-3.0.4/arch/x86/boot/header.S
5533 --- linux-3.0.4/arch/x86/boot/header.S  2011-07-21 22:17:23.000000000 -0400
5534 +++ linux-3.0.4/arch/x86/boot/header.S  2011-08-23 21:47:55.000000000 -0400
5535 @@ -224,7 +224,7 @@ setup_data:         .quad 0                 # 64-bit physical
5536                                                 # single linked list of
5537                                                 # struct setup_data
5538  
5539 -pref_address:          .quad LOAD_PHYSICAL_ADDR        # preferred load addr
5540 +pref_address:          .quad ____LOAD_PHYSICAL_ADDR    # preferred load addr
5541  
5542  #define ZO_INIT_SIZE   (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543  #define VO_INIT_SIZE   (VO__end - VO__text)
5544 diff -urNp linux-3.0.4/arch/x86/boot/Makefile linux-3.0.4/arch/x86/boot/Makefile
5545 --- linux-3.0.4/arch/x86/boot/Makefile  2011-07-21 22:17:23.000000000 -0400
5546 +++ linux-3.0.4/arch/x86/boot/Makefile  2011-08-23 21:47:55.000000000 -0400
5547 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os 
5548                    $(call cc-option, -fno-stack-protector) \
5549                    $(call cc-option, -mpreferred-stack-boundary=2)
5550  KBUILD_CFLAGS  += $(call cc-option, -m32)
5551 +ifdef CONSTIFY_PLUGIN
5552 +KBUILD_CFLAGS  += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553 +endif
5554  KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555  GCOV_PROFILE := n
5556  
5557 diff -urNp linux-3.0.4/arch/x86/boot/memory.c linux-3.0.4/arch/x86/boot/memory.c
5558 --- linux-3.0.4/arch/x86/boot/memory.c  2011-07-21 22:17:23.000000000 -0400
5559 +++ linux-3.0.4/arch/x86/boot/memory.c  2011-08-23 21:47:55.000000000 -0400
5560 @@ -19,7 +19,7 @@
5561  
5562  static int detect_memory_e820(void)
5563  {
5564 -       int count = 0;
5565 +       unsigned int count = 0;
5566         struct biosregs ireg, oreg;
5567         struct e820entry *desc = boot_params.e820_map;
5568         static struct e820entry buf; /* static so it is zeroed */
5569 diff -urNp linux-3.0.4/arch/x86/boot/video.c linux-3.0.4/arch/x86/boot/video.c
5570 --- linux-3.0.4/arch/x86/boot/video.c   2011-07-21 22:17:23.000000000 -0400
5571 +++ linux-3.0.4/arch/x86/boot/video.c   2011-08-23 21:47:55.000000000 -0400
5572 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5573  static unsigned int get_entry(void)
5574  {
5575         char entry_buf[4];
5576 -       int i, len = 0;
5577 +       unsigned int i, len = 0;
5578         int key;
5579         unsigned int v;
5580  
5581 diff -urNp linux-3.0.4/arch/x86/boot/video-vesa.c linux-3.0.4/arch/x86/boot/video-vesa.c
5582 --- linux-3.0.4/arch/x86/boot/video-vesa.c      2011-07-21 22:17:23.000000000 -0400
5583 +++ linux-3.0.4/arch/x86/boot/video-vesa.c      2011-08-23 21:47:55.000000000 -0400
5584 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585  
5586         boot_params.screen_info.vesapm_seg = oreg.es;
5587         boot_params.screen_info.vesapm_off = oreg.di;
5588 +       boot_params.screen_info.vesapm_size = oreg.cx;
5589  }
5590  
5591  /*
5592 diff -urNp linux-3.0.4/arch/x86/ia32/ia32_aout.c linux-3.0.4/arch/x86/ia32/ia32_aout.c
5593 --- linux-3.0.4/arch/x86/ia32/ia32_aout.c       2011-07-21 22:17:23.000000000 -0400
5594 +++ linux-3.0.4/arch/x86/ia32/ia32_aout.c       2011-08-23 21:48:14.000000000 -0400
5595 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5596         unsigned long dump_start, dump_size;
5597         struct user32 dump;
5598  
5599 +       memset(&dump, 0, sizeof(dump));
5600 +
5601         fs = get_fs();
5602         set_fs(KERNEL_DS);
5603         has_dumped = 1;
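
The single memset() added to aout_core_dump() zeroes the struct user32 before the dumper fills in its fields, so structure padding and any members the code never sets cannot leak stale kernel stack bytes into the core file. The same defensive pattern applies to any structure copied out of the kernel; a minimal sketch with a hypothetical structure:

    #include <string.h>

    struct report {                    /* hypothetical record copied to user space */
            unsigned int id;
            unsigned int flags;
            char name[32];
    };

    static void fill_report(struct report *r, unsigned int id)
    {
            memset(r, 0, sizeof(*r));  /* clear padding and unset fields first */
            r->id = id;
            /* anything left unset now reads back as zero, not stack garbage */
    }
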
5604 diff -urNp linux-3.0.4/arch/x86/ia32/ia32entry.S linux-3.0.4/arch/x86/ia32/ia32entry.S
5605 --- linux-3.0.4/arch/x86/ia32/ia32entry.S       2011-07-21 22:17:23.000000000 -0400
5606 +++ linux-3.0.4/arch/x86/ia32/ia32entry.S       2011-08-25 17:36:37.000000000 -0400
5607 @@ -13,6 +13,7 @@
5608  #include <asm/thread_info.h>   
5609  #include <asm/segment.h>
5610  #include <asm/irqflags.h>
5611 +#include <asm/pgtable.h>
5612  #include <linux/linkage.h>
5613  
5614  /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
5615 @@ -95,6 +96,29 @@ ENTRY(native_irq_enable_sysexit)
5616  ENDPROC(native_irq_enable_sysexit)
5617  #endif
5618  
5619 +       .macro pax_enter_kernel_user
5620 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5621 +       call pax_enter_kernel_user
5622 +#endif
5623 +       .endm
5624 +
5625 +       .macro pax_exit_kernel_user
5626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5627 +       call pax_exit_kernel_user
5628 +#endif
5629 +#ifdef CONFIG_PAX_RANDKSTACK
5630 +       pushq %rax
5631 +       call pax_randomize_kstack
5632 +       popq %rax
5633 +#endif
5634 +       .endm
5635 +
5636 +       .macro pax_erase_kstack
5637 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5638 +       call pax_erase_kstack
5639 +#endif
5640 +       .endm
5641 +
5642  /*
5643   * 32bit SYSENTER instruction entry.
5644   *
5645 @@ -121,7 +145,7 @@ ENTRY(ia32_sysenter_target)
5646         CFI_REGISTER    rsp,rbp
5647         SWAPGS_UNSAFE_STACK
5648         movq    PER_CPU_VAR(kernel_stack), %rsp
5649 -       addq    $(KERNEL_STACK_OFFSET),%rsp
5650 +       pax_enter_kernel_user
5651         /*
5652          * No need to follow this irqs on/off section: the syscall
5653          * disabled irqs, here we enable it straight after entry:
5654 @@ -134,7 +158,8 @@ ENTRY(ia32_sysenter_target)
5655         CFI_REL_OFFSET rsp,0
5656         pushfq_cfi
5657         /*CFI_REL_OFFSET rflags,0*/
5658 -       movl    8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5659 +       GET_THREAD_INFO(%r10)
5660 +       movl    TI_sysenter_return(%r10), %r10d
5661         CFI_REGISTER rip,r10
5662         pushq_cfi $__USER32_CS
5663         /*CFI_REL_OFFSET cs,0*/
5664 @@ -146,6 +171,12 @@ ENTRY(ia32_sysenter_target)
5665         SAVE_ARGS 0,0,1
5666         /* no need to do an access_ok check here because rbp has been
5667            32bit zero extended */ 
5668 +
5669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5670 +       mov $PAX_USER_SHADOW_BASE,%r10
5671 +       add %r10,%rbp
5672 +#endif
5673 +
5674  1:     movl    (%rbp),%ebp
5675         .section __ex_table,"a"
5676         .quad 1b,ia32_badarg
5677 @@ -168,6 +199,8 @@ sysenter_dispatch:
5678         testl   $_TIF_ALLWORK_MASK,TI_flags(%r10)
5679         jnz     sysexit_audit
5680  sysexit_from_sys_call:
5681 +       pax_exit_kernel_user
5682 +       pax_erase_kstack
5683         andl    $~TS_COMPAT,TI_status(%r10)
5684         /* clear IF, that popfq doesn't enable interrupts early */
5685         andl  $~0x200,EFLAGS-R11(%rsp) 
5686 @@ -194,6 +227,9 @@ sysexit_from_sys_call:
5687         movl %eax,%esi                  /* 2nd arg: syscall number */
5688         movl $AUDIT_ARCH_I386,%edi      /* 1st arg: audit arch */
5689         call audit_syscall_entry
5690 +
5691 +       pax_erase_kstack
5692 +
5693         movl RAX-ARGOFFSET(%rsp),%eax   /* reload syscall number */
5694         cmpq $(IA32_NR_syscalls-1),%rax
5695         ja ia32_badsys
5696 @@ -246,6 +282,9 @@ sysenter_tracesys:
5697         movq    $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5698         movq    %rsp,%rdi        /* &pt_regs -> arg1 */
5699         call    syscall_trace_enter
5700 +
5701 +       pax_erase_kstack
5702 +
5703         LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
5704         RESTORE_REST
5705         cmpq    $(IA32_NR_syscalls-1),%rax
5706 @@ -277,19 +316,24 @@ ENDPROC(ia32_sysenter_target)
5707  ENTRY(ia32_cstar_target)
5708         CFI_STARTPROC32 simple
5709         CFI_SIGNAL_FRAME
5710 -       CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
5711 +       CFI_DEF_CFA     rsp,0
5712         CFI_REGISTER    rip,rcx
5713         /*CFI_REGISTER  rflags,r11*/
5714         SWAPGS_UNSAFE_STACK
5715         movl    %esp,%r8d
5716         CFI_REGISTER    rsp,r8
5717         movq    PER_CPU_VAR(kernel_stack),%rsp
5718 +
5719 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5720 +       pax_enter_kernel_user
5721 +#endif
5722 +
5723         /*
5724          * No need to follow this irqs on/off section: the syscall
5725          * disabled irqs and here we enable it straight after entry:
5726          */
5727         ENABLE_INTERRUPTS(CLBR_NONE)
5728 -       SAVE_ARGS 8,1,1
5729 +       SAVE_ARGS 8*6,1,1
5730         movl    %eax,%eax       /* zero extension */
5731         movq    %rax,ORIG_RAX-ARGOFFSET(%rsp)
5732         movq    %rcx,RIP-ARGOFFSET(%rsp)
5733 @@ -305,6 +349,12 @@ ENTRY(ia32_cstar_target)
5734         /* no need to do an access_ok check here because r8 has been
5735            32bit zero extended */ 
5736         /* hardware stack frame is complete now */      
5737 +
5738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5739 +       mov $PAX_USER_SHADOW_BASE,%r10
5740 +       add %r10,%r8
5741 +#endif
5742 +
5743  1:     movl    (%r8),%r9d
5744         .section __ex_table,"a"
5745         .quad 1b,ia32_badarg
5746 @@ -327,6 +377,8 @@ cstar_dispatch:
5747         testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5748         jnz sysretl_audit
5749  sysretl_from_sys_call:
5750 +       pax_exit_kernel_user
5751 +       pax_erase_kstack
5752         andl $~TS_COMPAT,TI_status(%r10)
5753         RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5754         movl RIP-ARGOFFSET(%rsp),%ecx
5755 @@ -364,6 +416,9 @@ cstar_tracesys:
5756         movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5757         movq %rsp,%rdi        /* &pt_regs -> arg1 */
5758         call syscall_trace_enter
5759 +
5760 +       pax_erase_kstack
5761 +
5762         LOAD_ARGS32 ARGOFFSET, 1  /* reload args from stack in case ptrace changed it */
5763         RESTORE_REST
5764         xchgl %ebp,%r9d
5765 @@ -409,6 +464,7 @@ ENTRY(ia32_syscall)
5766         CFI_REL_OFFSET  rip,RIP-RIP
5767         PARAVIRT_ADJUST_EXCEPTION_FRAME
5768         SWAPGS
5769 +       pax_enter_kernel_user
5770         /*
5771          * No need to follow this irqs on/off section: the syscall
5772          * disabled irqs and here we enable it straight after entry:
5773 @@ -441,6 +497,9 @@ ia32_tracesys:                       
5774         movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5775         movq %rsp,%rdi        /* &pt_regs -> arg1 */
5776         call syscall_trace_enter
5777 +
5778 +       pax_erase_kstack
5779 +
5780         LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
5781         RESTORE_REST
5782         cmpq $(IA32_NR_syscalls-1),%rax
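
The pax_enter_kernel_user, pax_exit_kernel_user and pax_erase_kstack macros used throughout this ia32entry.S diff expand to a call only when the matching PaX option (UDEREF, RANDKSTACK, STACKLEAK) is configured in, so the syscall entry and exit paths carry no extra instructions otherwise. The same compile-time hook idea in C, sketched with a hypothetical config symbol:

    /* CONFIG_MY_HOOK and my_hook() are illustrative, not from the patch. */
    #ifdef CONFIG_MY_HOOK
    void my_hook(void);                        /* real implementation elsewhere */
    #else
    static inline void my_hook(void) { }       /* compiles away entirely */
    #endif

    static long syscall_entry_common(long nr)
    {
            my_hook();        /* zero cost when CONFIG_MY_HOOK is not defined */
            return nr;
    }
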
5783 diff -urNp linux-3.0.4/arch/x86/ia32/ia32_signal.c linux-3.0.4/arch/x86/ia32/ia32_signal.c
5784 --- linux-3.0.4/arch/x86/ia32/ia32_signal.c     2011-07-21 22:17:23.000000000 -0400
5785 +++ linux-3.0.4/arch/x86/ia32/ia32_signal.c     2011-08-23 21:47:55.000000000 -0400
5786 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct 
5787         sp -= frame_size;
5788         /* Align the stack pointer according to the i386 ABI,
5789          * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5790 -       sp = ((sp + 4) & -16ul) - 4;
5791 +       sp = ((sp - 12) & -16ul) - 4;
5792         return (void __user *) sp;
5793  }
5794  
5795 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5796                  * These are actually not used anymore, but left because some
5797                  * gdb versions depend on them as a marker.
5798                  */
5799 -               put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5800 +               put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5801         } put_user_catch(err);
5802  
5803         if (err)
5804 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct 
5805                 0xb8,
5806                 __NR_ia32_rt_sigreturn,
5807                 0x80cd,
5808 -               0,
5809 +               0
5810         };
5811  
5812         frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5813 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct 
5814  
5815                 if (ka->sa.sa_flags & SA_RESTORER)
5816                         restorer = ka->sa.sa_restorer;
5817 +               else if (current->mm->context.vdso)
5818 +                       /* Return stub is in 32bit vsyscall page */
5819 +                       restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5820                 else
5821 -                       restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5822 -                                                rt_sigreturn);
5823 +                       restorer = &frame->retcode;
5824                 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5825  
5826                 /*
5827                  * Not actually used anymore, but left because some gdb
5828                  * versions need it.
5829                  */
5830 -               put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831 +               put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832         } put_user_catch(err);
5833  
5834         if (err)
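
The ia32_setup_rt_frame() hunk above makes the return-trampoline choice explicit: a handler registered with SA_RESTORER wins, otherwise the rt_sigreturn stub in the 32-bit vsyscall page is used when the process has one mapped, and only as a last resort does pretcode point at the retcode bytes written onto the user stack. Roughly, as a stand-alone C sketch of the precedence (names are illustrative):

    #include <stddef.h>

    #ifndef SA_RESTORER
    #define SA_RESTORER 0x04000000     /* x86 value */
    #endif

    static void *pick_restorer(unsigned long sa_flags, void *sa_restorer,
                               void *vdso_rt_sigreturn, void *stack_retcode)
    {
            if (sa_flags & SA_RESTORER)
                    return sa_restorer;          /* userland-supplied stub */
            if (vdso_rt_sigreturn)
                    return vdso_rt_sigreturn;    /* stub in the vsyscall/vDSO page */
            return stack_retcode;                /* on-stack sigreturn sequence */
    }
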
5835 diff -urNp linux-3.0.4/arch/x86/include/asm/alternative.h linux-3.0.4/arch/x86/include/asm/alternative.h
5836 --- linux-3.0.4/arch/x86/include/asm/alternative.h      2011-07-21 22:17:23.000000000 -0400
5837 +++ linux-3.0.4/arch/x86/include/asm/alternative.h      2011-08-23 21:47:55.000000000 -0400
5838 @@ -93,7 +93,7 @@ static inline int alternatives_text_rese
5839        ".section .discard,\"aw\",@progbits\n"                           \
5840        "         .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */   \
5841        ".previous\n"                                                    \
5842 -      ".section .altinstr_replacement, \"ax\"\n"                       \
5843 +      ".section .altinstr_replacement, \"a\"\n"                        \
5844        "663:\n\t" newinstr "\n664:\n"           /* replacement     */   \
5845        ".previous"
5846  
5847 diff -urNp linux-3.0.4/arch/x86/include/asm/apic.h linux-3.0.4/arch/x86/include/asm/apic.h
5848 --- linux-3.0.4/arch/x86/include/asm/apic.h     2011-07-21 22:17:23.000000000 -0400
5849 +++ linux-3.0.4/arch/x86/include/asm/apic.h     2011-08-23 21:48:14.000000000 -0400
5850 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5851  
5852  #ifdef CONFIG_X86_LOCAL_APIC
5853  
5854 -extern unsigned int apic_verbosity;
5855 +extern int apic_verbosity;
5856  extern int local_apic_timer_c2_ok;
5857  
5858  extern int disable_apic;
5859 diff -urNp linux-3.0.4/arch/x86/include/asm/apm.h linux-3.0.4/arch/x86/include/asm/apm.h
5860 --- linux-3.0.4/arch/x86/include/asm/apm.h      2011-07-21 22:17:23.000000000 -0400
5861 +++ linux-3.0.4/arch/x86/include/asm/apm.h      2011-08-23 21:47:55.000000000 -0400
5862 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5863         __asm__ __volatile__(APM_DO_ZERO_SEGS
5864                 "pushl %%edi\n\t"
5865                 "pushl %%ebp\n\t"
5866 -               "lcall *%%cs:apm_bios_entry\n\t"
5867 +               "lcall *%%ss:apm_bios_entry\n\t"
5868                 "setc %%al\n\t"
5869                 "popl %%ebp\n\t"
5870                 "popl %%edi\n\t"
5871 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5872         __asm__ __volatile__(APM_DO_ZERO_SEGS
5873                 "pushl %%edi\n\t"
5874                 "pushl %%ebp\n\t"
5875 -               "lcall *%%cs:apm_bios_entry\n\t"
5876 +               "lcall *%%ss:apm_bios_entry\n\t"
5877                 "setc %%bl\n\t"
5878                 "popl %%ebp\n\t"
5879                 "popl %%edi\n\t"
5880 diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_32.h linux-3.0.4/arch/x86/include/asm/atomic64_32.h
5881 --- linux-3.0.4/arch/x86/include/asm/atomic64_32.h      2011-07-21 22:17:23.000000000 -0400
5882 +++ linux-3.0.4/arch/x86/include/asm/atomic64_32.h      2011-08-23 21:47:55.000000000 -0400
5883 @@ -12,6 +12,14 @@ typedef struct {
5884         u64 __aligned(8) counter;
5885  } atomic64_t;
5886  
5887 +#ifdef CONFIG_PAX_REFCOUNT
5888 +typedef struct {
5889 +       u64 __aligned(8) counter;
5890 +} atomic64_unchecked_t;
5891 +#else
5892 +typedef atomic64_t atomic64_unchecked_t;
5893 +#endif
5894 +
5895  #define ATOMIC64_INIT(val)     { (val) }
5896  
5897  #ifdef CONFIG_X86_CMPXCHG64
5898 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5899  }
5900  
5901  /**
5902 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5903 + * @v: pointer to type atomic64_unchecked_t
5904 + * @o: expected value
5905 + * @n: new value
5906 + *
5907 + * Atomically sets @v to @n if it was equal to @o and returns
5908 + * the old value.
5909 + */
5910 +
5911 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5912 +{
5913 +       return cmpxchg64(&v->counter, o, n);
5914 +}
5915 +
5916 +/**
5917   * atomic64_xchg - xchg atomic64 variable
5918   * @v: pointer to type atomic64_t
5919   * @n: value to assign
5920 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5921  }
5922  
5923  /**
5924 + * atomic64_set_unchecked - set atomic64 variable
5925 + * @v: pointer to type atomic64_unchecked_t
5926 + * @n: value to assign
5927 + *
5928 + * Atomically sets the value of @v to @n.
5929 + */
5930 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5931 +{
5932 +       unsigned high = (unsigned)(i >> 32);
5933 +       unsigned low = (unsigned)i;
5934 +       asm volatile(ATOMIC64_ALTERNATIVE(set)
5935 +                    : "+b" (low), "+c" (high)
5936 +                    : "S" (v)
5937 +                    : "eax", "edx", "memory"
5938 +                    );
5939 +}
5940 +
5941 +/**
5942   * atomic64_read - read atomic64 variable
5943   * @v: pointer to type atomic64_t
5944   *
5945 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5946   }
5947  
5948  /**
5949 + * atomic64_read_unchecked - read atomic64 variable
5950 + * @v: pointer to type atomic64_unchecked_t
5951 + *
5952 + * Atomically reads the value of @v and returns it.
5953 + */
5954 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5955 +{
5956 +       long long r;
5957 +       asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5958 +                    : "=A" (r), "+c" (v)
5959 +                    : : "memory"
5960 +                    );
5961 +       return r;
5962 + }
5963 +
5964 +/**
5965   * atomic64_add_return - add and return
5966   * @i: integer value to add
5967   * @v: pointer to type atomic64_t
5968 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5969         return i;
5970  }
5971  
5972 +/**
5973 + * atomic64_add_return_unchecked - add and return
5974 + * @i: integer value to add
5975 + * @v: pointer to type atomic64_unchecked_t
5976 + *
5977 + * Atomically adds @i to @v and returns @i + *@v
5978 + */
5979 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5980 +{
5981 +       asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
5982 +                    : "+A" (i), "+c" (v)
5983 +                    : : "memory"
5984 +                    );
5985 +       return i;
5986 +}
5987 +
5988  /*
5989   * Other variants with different arithmetic operators:
5990   */
5991 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
5992         return a;
5993  }
5994  
5995 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5996 +{
5997 +       long long a;
5998 +       asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
5999 +                    : "=A" (a)
6000 +                    : "S" (v)
6001 +                    : "memory", "ecx"
6002 +                    );
6003 +       return a;
6004 +}
6005 +
6006  static inline long long atomic64_dec_return(atomic64_t *v)
6007  {
6008         long long a;
6009 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6010  }
6011  
6012  /**
6013 + * atomic64_add_unchecked - add integer to atomic64 variable
6014 + * @i: integer value to add
6015 + * @v: pointer to type atomic64_unchecked_t
6016 + *
6017 + * Atomically adds @i to @v.
6018 + */
6019 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6020 +{
6021 +       asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6022 +                    : "+A" (i), "+c" (v)
6023 +                    : : "memory"
6024 +                    );
6025 +       return i;
6026 +}
6027 +
6028 +/**
6029   * atomic64_sub - subtract the atomic64 variable
6030   * @i: integer value to subtract
6031   * @v: pointer to type atomic64_t
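
The atomic64_unchecked_t type introduced in this header is the escape hatch for CONFIG_PAX_REFCOUNT: ordinary atomic64_t operations gain an overflow trap (visible in the atomic64_64.h hunks that follow), so counters that may legitimately wrap are meant to use the _unchecked type and its accessors, and when REFCOUNT is disabled the type simply aliases atomic64_t. The shape of that split, restated as a stand-alone sketch (the my_* names are illustrative):

    typedef struct { long long counter; } my_atomic64_t;

    #ifdef CONFIG_PAX_REFCOUNT
    /* Distinct type: its accessors skip the overflow trap. */
    typedef struct { long long counter; } my_atomic64_unchecked_t;
    #else
    /* No checking anywhere, so an alias costs nothing. */
    typedef my_atomic64_t my_atomic64_unchecked_t;
    #endif

    /* A wrap-tolerant statistics counter would use the unchecked type: */
    static my_atomic64_unchecked_t tx_bytes = { 0 };
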
6032 diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_64.h linux-3.0.4/arch/x86/include/asm/atomic64_64.h
6033 --- linux-3.0.4/arch/x86/include/asm/atomic64_64.h      2011-07-21 22:17:23.000000000 -0400
6034 +++ linux-3.0.4/arch/x86/include/asm/atomic64_64.h      2011-08-23 21:47:55.000000000 -0400
6035 @@ -18,7 +18,19 @@
6036   */
6037  static inline long atomic64_read(const atomic64_t *v)
6038  {
6039 -       return (*(volatile long *)&(v)->counter);
6040 +       return (*(volatile const long *)&(v)->counter);
6041 +}
6042 +
6043 +/**
6044 + * atomic64_read_unchecked - read atomic64 variable
6045 + * @v: pointer of type atomic64_unchecked_t
6046 + *
6047 + * Atomically reads the value of @v.
6048 + * Doesn't imply a read memory barrier.
6049 + */
6050 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6051 +{
6052 +       return (*(volatile const long *)&(v)->counter);
6053  }
6054  
6055  /**
6056 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6057  }
6058  
6059  /**
6060 + * atomic64_set_unchecked - set atomic64 variable
6061 + * @v: pointer to type atomic64_unchecked_t
6062 + * @i: required value
6063 + *
6064 + * Atomically sets the value of @v to @i.
6065 + */
6066 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6067 +{
6068 +       v->counter = i;
6069 +}
6070 +
6071 +/**
6072   * atomic64_add - add integer to atomic64 variable
6073   * @i: integer value to add
6074   * @v: pointer to type atomic64_t
6075 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6076   */
6077  static inline void atomic64_add(long i, atomic64_t *v)
6078  {
6079 +       asm volatile(LOCK_PREFIX "addq %1,%0\n"
6080 +
6081 +#ifdef CONFIG_PAX_REFCOUNT
6082 +                    "jno 0f\n"
6083 +                    LOCK_PREFIX "subq %1,%0\n"
6084 +                    "int $4\n0:\n"
6085 +                    _ASM_EXTABLE(0b, 0b)
6086 +#endif
6087 +
6088 +                    : "=m" (v->counter)
6089 +                    : "er" (i), "m" (v->counter));
6090 +}
6091 +
6092 +/**
6093 + * atomic64_add_unchecked - add integer to atomic64 variable
6094 + * @i: integer value to add
6095 + * @v: pointer to type atomic64_unchecked_t
6096 + *
6097 + * Atomically adds @i to @v.
6098 + */
6099 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6100 +{
6101         asm volatile(LOCK_PREFIX "addq %1,%0"
6102                      : "=m" (v->counter)
6103                      : "er" (i), "m" (v->counter));
6104 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, 
6105   */
6106  static inline void atomic64_sub(long i, atomic64_t *v)
6107  {
6108 -       asm volatile(LOCK_PREFIX "subq %1,%0"
6109 +       asm volatile(LOCK_PREFIX "subq %1,%0\n"
6110 +
6111 +#ifdef CONFIG_PAX_REFCOUNT
6112 +                    "jno 0f\n"
6113 +                    LOCK_PREFIX "addq %1,%0\n"
6114 +                    "int $4\n0:\n"
6115 +                    _ASM_EXTABLE(0b, 0b)
6116 +#endif
6117 +
6118 +                    : "=m" (v->counter)
6119 +                    : "er" (i), "m" (v->counter));
6120 +}
6121 +
6122 +/**
6123 + * atomic64_sub_unchecked - subtract the atomic64 variable
6124 + * @i: integer value to subtract
6125 + * @v: pointer to type atomic64_unchecked_t
6126 + *
6127 + * Atomically subtracts @i from @v.
6128 + */
6129 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6130 +{
6131 +       asm volatile(LOCK_PREFIX "subq %1,%0\n"
6132                      : "=m" (v->counter)
6133                      : "er" (i), "m" (v->counter));
6134  }
6135 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6136  {
6137         unsigned char c;
6138  
6139 -       asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6140 +       asm volatile(LOCK_PREFIX "subq %2,%0\n"
6141 +
6142 +#ifdef CONFIG_PAX_REFCOUNT
6143 +                    "jno 0f\n"
6144 +                    LOCK_PREFIX "addq %2,%0\n"
6145 +                    "int $4\n0:\n"
6146 +                    _ASM_EXTABLE(0b, 0b)
6147 +#endif
6148 +
6149 +                    "sete %1\n"
6150                      : "=m" (v->counter), "=qm" (c)
6151                      : "er" (i), "m" (v->counter) : "memory");
6152         return c;
6153 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6154   */
6155  static inline void atomic64_inc(atomic64_t *v)
6156  {
6157 +       asm volatile(LOCK_PREFIX "incq %0\n"
6158 +
6159 +#ifdef CONFIG_PAX_REFCOUNT
6160 +                    "jno 0f\n"
6161 +                    LOCK_PREFIX "decq %0\n"
6162 +                    "int $4\n0:\n"
6163 +                    _ASM_EXTABLE(0b, 0b)
6164 +#endif
6165 +
6166 +                    : "=m" (v->counter)
6167 +                    : "m" (v->counter));
6168 +}
6169 +
6170 +/**
6171 + * atomic64_inc_unchecked - increment atomic64 variable
6172 + * @v: pointer to type atomic64_unchecked_t
6173 + *
6174 + * Atomically increments @v by 1.
6175 + */
6176 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6177 +{
6178         asm volatile(LOCK_PREFIX "incq %0"
6179                      : "=m" (v->counter)
6180                      : "m" (v->counter));
6181 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6182   */
6183  static inline void atomic64_dec(atomic64_t *v)
6184  {
6185 -       asm volatile(LOCK_PREFIX "decq %0"
6186 +       asm volatile(LOCK_PREFIX "decq %0\n"
6187 +
6188 +#ifdef CONFIG_PAX_REFCOUNT
6189 +                    "jno 0f\n"
6190 +                    LOCK_PREFIX "incq %0\n"
6191 +                    "int $4\n0:\n"
6192 +                    _ASM_EXTABLE(0b, 0b)
6193 +#endif
6194 +
6195 +                    : "=m" (v->counter)
6196 +                    : "m" (v->counter));
6197 +}
6198 +
6199 +/**
6200 + * atomic64_dec_unchecked - decrement atomic64 variable
6201 + * @v: pointer to type atomic64_unchecked_t
6202 + *
6203 + * Atomically decrements @v by 1.
6204 + */
6205 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6206 +{
6207 +       asm volatile(LOCK_PREFIX "decq %0\n"
6208                      : "=m" (v->counter)
6209                      : "m" (v->counter));
6210  }
6211 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6212  {
6213         unsigned char c;
6214  
6215 -       asm volatile(LOCK_PREFIX "decq %0; sete %1"
6216 +       asm volatile(LOCK_PREFIX "decq %0\n"
6217 +
6218 +#ifdef CONFIG_PAX_REFCOUNT
6219 +                    "jno 0f\n"
6220 +                    LOCK_PREFIX "incq %0\n"
6221 +                    "int $4\n0:\n"
6222 +                    _ASM_EXTABLE(0b, 0b)
6223 +#endif
6224 +
6225 +                    "sete %1\n"
6226                      : "=m" (v->counter), "=qm" (c)
6227                      : "m" (v->counter) : "memory");
6228         return c != 0;
6229 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6230  {
6231         unsigned char c;
6232  
6233 -       asm volatile(LOCK_PREFIX "incq %0; sete %1"
6234 +       asm volatile(LOCK_PREFIX "incq %0\n"
6235 +
6236 +#ifdef CONFIG_PAX_REFCOUNT
6237 +                    "jno 0f\n"
6238 +                    LOCK_PREFIX "decq %0\n"
6239 +                    "int $4\n0:\n"
6240 +                    _ASM_EXTABLE(0b, 0b)
6241 +#endif
6242 +
6243 +                    "sete %1\n"
6244                      : "=m" (v->counter), "=qm" (c)
6245                      : "m" (v->counter) : "memory");
6246         return c != 0;
6247 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6248  {
6249         unsigned char c;
6250  
6251 -       asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6252 +       asm volatile(LOCK_PREFIX "addq %2,%0\n"
6253 +
6254 +#ifdef CONFIG_PAX_REFCOUNT
6255 +                    "jno 0f\n"
6256 +                    LOCK_PREFIX "subq %2,%0\n"
6257 +                    "int $4\n0:\n"
6258 +                    _ASM_EXTABLE(0b, 0b)
6259 +#endif
6260 +
6261 +                    "sets %1\n"
6262                      : "=m" (v->counter), "=qm" (c)
6263                      : "er" (i), "m" (v->counter) : "memory");
6264         return c;
6265 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6266  static inline long atomic64_add_return(long i, atomic64_t *v)
6267  {
6268         long __i = i;
6269 -       asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6270 +       asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6271 +
6272 +#ifdef CONFIG_PAX_REFCOUNT
6273 +                    "jno 0f\n"
6274 +                    "movq %0, %1\n"
6275 +                    "int $4\n0:\n"
6276 +                    _ASM_EXTABLE(0b, 0b)
6277 +#endif
6278 +
6279 +                    : "+r" (i), "+m" (v->counter)
6280 +                    : : "memory");
6281 +       return i + __i;
6282 +}
6283 +
6284 +/**
6285 + * atomic64_add_return_unchecked - add and return
6286 + * @i: integer value to add
6287 + * @v: pointer to type atomic64_unchecked_t
6288 + *
6289 + * Atomically adds @i to @v and returns @i + @v
6290 + */
6291 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6292 +{
6293 +       long __i = i;
6294 +       asm volatile(LOCK_PREFIX "xaddq %0, %1"
6295                      : "+r" (i), "+m" (v->counter)
6296                      : : "memory");
6297         return i + __i;
6298 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6299  }
6300  
6301  #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
6302 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6303 +{
6304 +       return atomic64_add_return_unchecked(1, v);
6305 +}
6306  #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
6307  
6308  static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6309 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6310         return cmpxchg(&v->counter, old, new);
6311  }
6312  
6313 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6314 +{
6315 +       return cmpxchg(&v->counter, old, new);
6316 +}
6317 +
6318  static inline long atomic64_xchg(atomic64_t *v, long new)
6319  {
6320         return xchg(&v->counter, new);
6321 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6322   */
6323  static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6324  {
6325 -       long c, old;
6326 +       long c, old, new;
6327         c = atomic64_read(v);
6328         for (;;) {
6329 -               if (unlikely(c == (u)))
6330 +               if (unlikely(c == u))
6331                         break;
6332 -               old = atomic64_cmpxchg((v), c, c + (a));
6333 +
6334 +               asm volatile("add %2,%0\n"
6335 +
6336 +#ifdef CONFIG_PAX_REFCOUNT
6337 +                            "jno 0f\n"
6338 +                            "sub %2,%0\n"
6339 +                            "int $4\n0:\n"
6340 +                            _ASM_EXTABLE(0b, 0b)
6341 +#endif
6342 +
6343 +                            : "=r" (new)
6344 +                            : "0" (c), "ir" (a));
6345 +
6346 +               old = atomic64_cmpxchg(v, c, new);
6347                 if (likely(old == c))
6348                         break;
6349                 c = old;
6350         }
6351 -       return c != (u);
6352 +       return c != u;
6353  }
6354  
6355  #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
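
The pattern repeated throughout the atomic64_64.h hunks above is the PAX_REFCOUNT overflow check: do the LOCK-prefixed arithmetic, skip ahead with jno when the overflow flag is clear, otherwise undo the operation and raise int $4 (the x86 overflow exception), with an _ASM_EXTABLE entry used by the handler to resume past the trap. A minimal user-space x86-64 sketch of the same undo-on-overflow idea, using a return value instead of the trap and exception table (add_checked is an illustrative name):

    /* Add i to *v; on signed overflow, roll the value back and report it. */
    static inline int add_checked(long *v, long i)
    {
            int overflowed = 0;

            asm volatile("addq %2, %0\n\t"
                         "jno 1f\n\t"
                         "subq %2, %0\n\t"     /* undo the wrapped addition */
                         "movl $1, %1\n"
                         "1:"
                         : "+m" (*v), "+r" (overflowed)
                         : "er" (i)
                         : "cc", "memory");
            return overflowed;
    }

A caller would treat a non-zero return as a reference-count bug instead of continuing with the wrapped value, which is what the int $4 handler enforces in the kernel version.
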
6356 diff -urNp linux-3.0.4/arch/x86/include/asm/atomic.h linux-3.0.4/arch/x86/include/asm/atomic.h
6357 --- linux-3.0.4/arch/x86/include/asm/atomic.h   2011-07-21 22:17:23.000000000 -0400
6358 +++ linux-3.0.4/arch/x86/include/asm/atomic.h   2011-08-23 21:47:55.000000000 -0400
6359 @@ -22,7 +22,18 @@
6360   */
6361  static inline int atomic_read(const atomic_t *v)
6362  {
6363 -       return (*(volatile int *)&(v)->counter);
6364 +       return (*(volatile const int *)&(v)->counter);
6365 +}
6366 +
6367 +/**
6368 + * atomic_read_unchecked - read atomic variable
6369 + * @v: pointer of type atomic_unchecked_t
6370 + *
6371 + * Atomically reads the value of @v.
6372 + */
6373 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6374 +{
6375 +       return (*(volatile const int *)&(v)->counter);
6376  }
6377  
6378  /**
6379 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6380  }
6381  
6382  /**
6383 + * atomic_set_unchecked - set atomic variable
6384 + * @v: pointer of type atomic_unchecked_t
6385 + * @i: required value
6386 + *
6387 + * Atomically sets the value of @v to @i.
6388 + */
6389 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6390 +{
6391 +       v->counter = i;
6392 +}
6393 +
6394 +/**
6395   * atomic_add - add integer to atomic variable
6396   * @i: integer value to add
6397   * @v: pointer of type atomic_t
6398 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6399   */
6400  static inline void atomic_add(int i, atomic_t *v)
6401  {
6402 -       asm volatile(LOCK_PREFIX "addl %1,%0"
6403 +       asm volatile(LOCK_PREFIX "addl %1,%0\n"
6404 +
6405 +#ifdef CONFIG_PAX_REFCOUNT
6406 +                    "jno 0f\n"
6407 +                    LOCK_PREFIX "subl %1,%0\n"
6408 +                    "int $4\n0:\n"
6409 +                    _ASM_EXTABLE(0b, 0b)
6410 +#endif
6411 +
6412 +                    : "+m" (v->counter)
6413 +                    : "ir" (i));
6414 +}
6415 +
6416 +/**
6417 + * atomic_add_unchecked - add integer to atomic variable
6418 + * @i: integer value to add
6419 + * @v: pointer of type atomic_unchecked_t
6420 + *
6421 + * Atomically adds @i to @v.
6422 + */
6423 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6424 +{
6425 +       asm volatile(LOCK_PREFIX "addl %1,%0\n"
6426                      : "+m" (v->counter)
6427                      : "ir" (i));
6428  }
6429 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6430   */
6431  static inline void atomic_sub(int i, atomic_t *v)
6432  {
6433 -       asm volatile(LOCK_PREFIX "subl %1,%0"
6434 +       asm volatile(LOCK_PREFIX "subl %1,%0\n"
6435 +
6436 +#ifdef CONFIG_PAX_REFCOUNT
6437 +                    "jno 0f\n"
6438 +                    LOCK_PREFIX "addl %1,%0\n"
6439 +                    "int $4\n0:\n"
6440 +                    _ASM_EXTABLE(0b, 0b)
6441 +#endif
6442 +
6443 +                    : "+m" (v->counter)
6444 +                    : "ir" (i));
6445 +}
6446 +
6447 +/**
6448 + * atomic_sub_unchecked - subtract integer from atomic variable
6449 + * @i: integer value to subtract
6450 + * @v: pointer of type atomic_unchecked_t
6451 + *
6452 + * Atomically subtracts @i from @v.
6453 + */
6454 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6455 +{
6456 +       asm volatile(LOCK_PREFIX "subl %1,%0\n"
6457                      : "+m" (v->counter)
6458                      : "ir" (i));
6459  }
6460 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6461  {
6462         unsigned char c;
6463  
6464 -       asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6465 +       asm volatile(LOCK_PREFIX "subl %2,%0\n"
6466 +
6467 +#ifdef CONFIG_PAX_REFCOUNT
6468 +                    "jno 0f\n"
6469 +                    LOCK_PREFIX "addl %2,%0\n"
6470 +                    "int $4\n0:\n"
6471 +                    _ASM_EXTABLE(0b, 0b)
6472 +#endif
6473 +
6474 +                    "sete %1\n"
6475                      : "+m" (v->counter), "=qm" (c)
6476                      : "ir" (i) : "memory");
6477         return c;
6478 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6479   */
6480  static inline void atomic_inc(atomic_t *v)
6481  {
6482 -       asm volatile(LOCK_PREFIX "incl %0"
6483 +       asm volatile(LOCK_PREFIX "incl %0\n"
6484 +
6485 +#ifdef CONFIG_PAX_REFCOUNT
6486 +                    "jno 0f\n"
6487 +                    LOCK_PREFIX "decl %0\n"
6488 +                    "int $4\n0:\n"
6489 +                    _ASM_EXTABLE(0b, 0b)
6490 +#endif
6491 +
6492 +                    : "+m" (v->counter));
6493 +}
6494 +
6495 +/**
6496 + * atomic_inc_unchecked - increment atomic variable
6497 + * @v: pointer of type atomic_unchecked_t
6498 + *
6499 + * Atomically increments @v by 1.
6500 + */
6501 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6502 +{
6503 +       asm volatile(LOCK_PREFIX "incl %0\n"
6504                      : "+m" (v->counter));
6505  }
6506  
6507 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6508   */
6509  static inline void atomic_dec(atomic_t *v)
6510  {
6511 -       asm volatile(LOCK_PREFIX "decl %0"
6512 +       asm volatile(LOCK_PREFIX "decl %0\n"
6513 +
6514 +#ifdef CONFIG_PAX_REFCOUNT
6515 +                    "jno 0f\n"
6516 +                    LOCK_PREFIX "incl %0\n"
6517 +                    "int $4\n0:\n"
6518 +                    _ASM_EXTABLE(0b, 0b)
6519 +#endif
6520 +
6521 +                    : "+m" (v->counter));
6522 +}
6523 +
6524 +/**
6525 + * atomic_dec_unchecked - decrement atomic variable
6526 + * @v: pointer of type atomic_unchecked_t
6527 + *
6528 + * Atomically decrements @v by 1.
6529 + */
6530 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6531 +{
6532 +       asm volatile(LOCK_PREFIX "decl %0\n"
6533                      : "+m" (v->counter));
6534  }
6535  
6536 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6537  {
6538         unsigned char c;
6539  
6540 -       asm volatile(LOCK_PREFIX "decl %0; sete %1"
6541 +       asm volatile(LOCK_PREFIX "decl %0\n"
6542 +
6543 +#ifdef CONFIG_PAX_REFCOUNT
6544 +                    "jno 0f\n"
6545 +                    LOCK_PREFIX "incl %0\n"
6546 +                    "int $4\n0:\n"
6547 +                    _ASM_EXTABLE(0b, 0b)
6548 +#endif
6549 +
6550 +                    "sete %1\n"
6551                      : "+m" (v->counter), "=qm" (c)
6552                      : : "memory");
6553         return c != 0;
6554 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6555  {
6556         unsigned char c;
6557  
6558 -       asm volatile(LOCK_PREFIX "incl %0; sete %1"
6559 +       asm volatile(LOCK_PREFIX "incl %0\n"
6560 +
6561 +#ifdef CONFIG_PAX_REFCOUNT
6562 +                    "jno 0f\n"
6563 +                    LOCK_PREFIX "decl %0\n"
6564 +                    "int $4\n0:\n"
6565 +                    _ASM_EXTABLE(0b, 0b)
6566 +#endif
6567 +
6568 +                    "sete %1\n"
6569 +                    : "+m" (v->counter), "=qm" (c)
6570 +                    : : "memory");
6571 +       return c != 0;
6572 +}
6573 +
6574 +/**
6575 + * atomic_inc_and_test_unchecked - increment and test
6576 + * @v: pointer of type atomic_unchecked_t
6577 + *
6578 + * Atomically increments @v by 1
6579 + * and returns true if the result is zero, or false for all
6580 + * other cases.
6581 + */
6582 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6583 +{
6584 +       unsigned char c;
6585 +
6586 +       asm volatile(LOCK_PREFIX "incl %0\n"
6587 +                    "sete %1\n"
6588                      : "+m" (v->counter), "=qm" (c)
6589                      : : "memory");
6590         return c != 0;
6591 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6592  {
6593         unsigned char c;
6594  
6595 -       asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6596 +       asm volatile(LOCK_PREFIX "addl %2,%0\n"
6597 +
6598 +#ifdef CONFIG_PAX_REFCOUNT
6599 +                    "jno 0f\n"
6600 +                    LOCK_PREFIX "subl %2,%0\n"
6601 +                    "int $4\n0:\n"
6602 +                    _ASM_EXTABLE(0b, 0b)
6603 +#endif
6604 +
6605 +                    "sets %1\n"
6606                      : "+m" (v->counter), "=qm" (c)
6607                      : "ir" (i) : "memory");
6608         return c;
6609 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int 
6610  #endif
6611         /* Modern 486+ processor */
6612         __i = i;
6613 +       asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6614 +
6615 +#ifdef CONFIG_PAX_REFCOUNT
6616 +                    "jno 0f\n"
6617 +                    "movl %0, %1\n"
6618 +                    "int $4\n0:\n"
6619 +                    _ASM_EXTABLE(0b, 0b)
6620 +#endif
6621 +
6622 +                    : "+r" (i), "+m" (v->counter)
6623 +                    : : "memory");
6624 +       return i + __i;
6625 +
6626 +#ifdef CONFIG_M386
6627 +no_xadd: /* Legacy 386 processor */
6628 +       local_irq_save(flags);
6629 +       __i = atomic_read(v);
6630 +       atomic_set(v, i + __i);
6631 +       local_irq_restore(flags);
6632 +       return i + __i;
6633 +#endif
6634 +}
6635 +
6636 +/**
6637 + * atomic_add_return_unchecked - add integer and return
6638 + * @v: pointer of type atomic_unchecked_t
6639 + * @i: integer value to add
6640 + *
6641 + * Atomically adds @i to @v and returns @i + @v
6642 + */
6643 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6644 +{
6645 +       int __i;
6646 +#ifdef CONFIG_M386
6647 +       unsigned long flags;
6648 +       if (unlikely(boot_cpu_data.x86 <= 3))
6649 +               goto no_xadd;
6650 +#endif
6651 +       /* Modern 486+ processor */
6652 +       __i = i;
6653         asm volatile(LOCK_PREFIX "xaddl %0, %1"
6654                      : "+r" (i), "+m" (v->counter)
6655                      : : "memory");
6656 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int 
6657  }
6658  
6659  #define atomic_inc_return(v)  (atomic_add_return(1, v))
6660 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6661 +{
6662 +       return atomic_add_return_unchecked(1, v);
6663 +}
6664  #define atomic_dec_return(v)  (atomic_sub_return(1, v))
6665  
6666  static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6667 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6668         return cmpxchg(&v->counter, old, new);
6669  }
6670  
6671 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6672 +{
6673 +       return cmpxchg(&v->counter, old, new);
6674 +}
6675 +
6676  static inline int atomic_xchg(atomic_t *v, int new)
6677  {
6678         return xchg(&v->counter, new);
6679  }
6680  
6681 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6682 +{
6683 +       return xchg(&v->counter, new);
6684 +}
6685 +
6686  /**
6687   * atomic_add_unless - add unless the number is already a given value
6688   * @v: pointer of type atomic_t
6689 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6690   */
6691  static inline int atomic_add_unless(atomic_t *v, int a, int u)
6692  {
6693 -       int c, old;
6694 +       int c, old, new;
6695         c = atomic_read(v);
6696         for (;;) {
6697 -               if (unlikely(c == (u)))
6698 +               if (unlikely(c == u))
6699                         break;
6700 -               old = atomic_cmpxchg((v), c, c + (a));
6701 +
6702 +               asm volatile("addl %2,%0\n"
6703 +
6704 +#ifdef CONFIG_PAX_REFCOUNT
6705 +                            "jno 0f\n"
6706 +                            "subl %2,%0\n"
6707 +                            "int $4\n0:\n"
6708 +                            _ASM_EXTABLE(0b, 0b)
6709 +#endif
6710 +
6711 +                            : "=r" (new)
6712 +                            : "0" (c), "ir" (a));
6713 +
6714 +               old = atomic_cmpxchg(v, c, new);
6715                 if (likely(old == c))
6716                         break;
6717                 c = old;
6718         }
6719 -       return c != (u);
6720 +       return c != u;
6721  }
6722  
6723  #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6724  
6725 +/**
6726 + * atomic_inc_not_zero_hint - increment if not null
6727 + * @v: pointer of type atomic_t
6728 + * @hint: probable value of the atomic before the increment
6729 + *
6730 + * This version of atomic_inc_not_zero() gives a hint of probable
6731 + * value of the atomic. This helps processor to not read the memory
6732 + * before doing the atomic read/modify/write cycle, lowering
6733 + * number of bus transactions on some arches.
6734 + *
6735 + * Returns: 0 if increment was not done, 1 otherwise.
6736 + */
6737 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6738 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6739 +{
6740 +       int val, c = hint, new;
6741 +
6742 +       /* sanity test, should be removed by compiler if hint is a constant */
6743 +       if (!hint)
6744 +               return atomic_inc_not_zero(v);
6745 +
6746 +       do {
6747 +               asm volatile("incl %0\n"
6748 +
6749 +#ifdef CONFIG_PAX_REFCOUNT
6750 +                            "jno 0f\n"
6751 +                            "decl %0\n"
6752 +                            "int $4\n0:\n"
6753 +                            _ASM_EXTABLE(0b, 0b)
6754 +#endif
6755 +
6756 +                            : "=r" (new)
6757 +                            : "0" (c));
6758 +
6759 +               val = atomic_cmpxchg(v, c, new);
6760 +               if (val == c)
6761 +                       return 1;
6762 +               c = val;
6763 +       } while (c);
6764 +
6765 +       return 0;
6766 +}
6767 +
6768  /*
6769   * atomic_dec_if_positive - decrement by 1 if old value positive
6770   * @v: pointer of type atomic_t
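
The PAX_REFCOUNT hunks above all follow one pattern: do the locked arithmetic, let "jno" skip the recovery path when no signed overflow occurred, and on overflow undo the operation and raise "int $4" (the x86 overflow trap) so the kernel can report the event instead of letting a reference count wrap. The *_unchecked variants on atomic_unchecked_t keep the old wrap-around behaviour for counters where wrapping is intentional. Below is a stand-alone user-space sketch of that detect-and-undo idea, not the kernel code: checked_add is a made-up name and GCC's __builtin_add_overflow stands in for the inline assembly.

#include <limits.h>
#include <stdio.h>

/* Hypothetical user-space model of the checked atomics above: perform the
 * add only if it does not overflow a signed int, otherwise leave the
 * counter untouched and report, roughly what the undo + "int $4" path does. */
static int checked_add(int *counter, int i)
{
        int result;

        if (__builtin_add_overflow(*counter, i, &result)) {
                fprintf(stderr, "refcount overflow blocked\n");
                return -1;
        }
        *counter = result;
        return 0;
}

int main(void)
{
        int refs = INT_MAX - 1;

        checked_add(&refs, 1);          /* fine: counter reaches INT_MAX  */
        checked_add(&refs, 1);          /* would wrap: refused, unchanged */
        printf("counter = %d\n", refs); /* still INT_MAX                  */
        return 0;
}
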
6771 diff -urNp linux-3.0.4/arch/x86/include/asm/bitops.h linux-3.0.4/arch/x86/include/asm/bitops.h
6772 --- linux-3.0.4/arch/x86/include/asm/bitops.h   2011-07-21 22:17:23.000000000 -0400
6773 +++ linux-3.0.4/arch/x86/include/asm/bitops.h   2011-08-23 21:47:55.000000000 -0400
6774 @@ -38,7 +38,7 @@
6775   * a mask operation on a byte.
6776   */
6777  #define IS_IMMEDIATE(nr)               (__builtin_constant_p(nr))
6778 -#define CONST_MASK_ADDR(nr, addr)      BITOP_ADDR((void *)(addr) + ((nr)>>3))
6779 +#define CONST_MASK_ADDR(nr, addr)      BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6780  #define CONST_MASK(nr)                 (1 << ((nr) & 7))
6781  
6782  /**
6783 diff -urNp linux-3.0.4/arch/x86/include/asm/boot.h linux-3.0.4/arch/x86/include/asm/boot.h
6784 --- linux-3.0.4/arch/x86/include/asm/boot.h     2011-07-21 22:17:23.000000000 -0400
6785 +++ linux-3.0.4/arch/x86/include/asm/boot.h     2011-08-23 21:47:55.000000000 -0400
6786 @@ -11,10 +11,15 @@
6787  #include <asm/pgtable_types.h>
6788  
6789  /* Physical address where kernel should be loaded. */
6790 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6791 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6792                                 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6793                                 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6794  
6795 +#ifndef __ASSEMBLY__
6796 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6797 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6798 +#endif
6799 +
6800  /* Minimum kernel alignment, as a power of two */
6801  #ifdef CONFIG_X86_64
6802  #define MIN_KERNEL_ALIGN_LG2   PMD_SHIFT
6803 diff -urNp linux-3.0.4/arch/x86/include/asm/cacheflush.h linux-3.0.4/arch/x86/include/asm/cacheflush.h
6804 --- linux-3.0.4/arch/x86/include/asm/cacheflush.h       2011-07-21 22:17:23.000000000 -0400
6805 +++ linux-3.0.4/arch/x86/include/asm/cacheflush.h       2011-08-23 21:47:55.000000000 -0400
6806 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6807         unsigned long pg_flags = pg->flags & _PGMT_MASK;
6808  
6809         if (pg_flags == _PGMT_DEFAULT)
6810 -               return -1;
6811 +               return ~0UL;
6812         else if (pg_flags == _PGMT_WC)
6813                 return _PAGE_CACHE_WC;
6814         else if (pg_flags == _PGMT_UC_MINUS)
6815 diff -urNp linux-3.0.4/arch/x86/include/asm/cache.h linux-3.0.4/arch/x86/include/asm/cache.h
6816 --- linux-3.0.4/arch/x86/include/asm/cache.h    2011-07-21 22:17:23.000000000 -0400
6817 +++ linux-3.0.4/arch/x86/include/asm/cache.h    2011-08-23 21:47:55.000000000 -0400
6818 @@ -5,12 +5,13 @@
6819  
6820  /* L1 cache line size */
6821  #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6822 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6823 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6824  
6825  #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6826 +#define __read_only __attribute__((__section__(".data..read_only")))
6827  
6828  #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6829 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6830 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6831  
6832  #ifdef CONFIG_X86_VSMP
6833  #ifdef CONFIG_SMP
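
The new __read_only annotation above steers a variable into the ".data..read_only" section, which the rest of the patch is meant to keep write-protected after boot. A minimal user-space sketch of the same attribute mechanism follows; the section name and variable are invented for the demo, and nothing here enforces read-only protection by itself.

#include <stdio.h>

/* Sketch of the section-placement attribute behind __read_only above.
 * The section name is arbitrary; actually mapping it read-only is a
 * separate step done by the kernel/linker in the real patch. */
#define demo_read_only __attribute__((__section__(".data.demo_ro")))

static int answer demo_read_only = 42;

int main(void)
{
        printf("answer = %d\n", answer);
        return 0;
}
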
6834 diff -urNp linux-3.0.4/arch/x86/include/asm/checksum_32.h linux-3.0.4/arch/x86/include/asm/checksum_32.h
6835 --- linux-3.0.4/arch/x86/include/asm/checksum_32.h      2011-07-21 22:17:23.000000000 -0400
6836 +++ linux-3.0.4/arch/x86/include/asm/checksum_32.h      2011-08-23 21:47:55.000000000 -0400
6837 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6838                                             int len, __wsum sum,
6839                                             int *src_err_ptr, int *dst_err_ptr);
6840  
6841 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6842 +                                                 int len, __wsum sum,
6843 +                                                 int *src_err_ptr, int *dst_err_ptr);
6844 +
6845 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6846 +                                                 int len, __wsum sum,
6847 +                                                 int *src_err_ptr, int *dst_err_ptr);
6848 +
6849  /*
6850   *     Note: when you get a NULL pointer exception here this means someone
6851   *     passed in an incorrect kernel address to one of these functions.
6852 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6853                                                  int *err_ptr)
6854  {
6855         might_sleep();
6856 -       return csum_partial_copy_generic((__force void *)src, dst,
6857 +       return csum_partial_copy_generic_from_user((__force void *)src, dst,
6858                                          len, sum, err_ptr, NULL);
6859  }
6860  
6861 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6862  {
6863         might_sleep();
6864         if (access_ok(VERIFY_WRITE, dst, len))
6865 -               return csum_partial_copy_generic(src, (__force void *)dst,
6866 +               return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6867                                                  len, sum, NULL, err_ptr);
6868  
6869         if (len)
6870 diff -urNp linux-3.0.4/arch/x86/include/asm/cpufeature.h linux-3.0.4/arch/x86/include/asm/cpufeature.h
6871 --- linux-3.0.4/arch/x86/include/asm/cpufeature.h       2011-07-21 22:17:23.000000000 -0400
6872 +++ linux-3.0.4/arch/x86/include/asm/cpufeature.h       2011-08-23 21:47:55.000000000 -0400
6873 @@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
6874                              ".section .discard,\"aw\",@progbits\n"
6875                              " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6876                              ".previous\n"
6877 -                            ".section .altinstr_replacement,\"ax\"\n"
6878 +                            ".section .altinstr_replacement,\"a\"\n"
6879                              "3: movb $1,%0\n"
6880                              "4:\n"
6881                              ".previous\n"
6882 diff -urNp linux-3.0.4/arch/x86/include/asm/desc_defs.h linux-3.0.4/arch/x86/include/asm/desc_defs.h
6883 --- linux-3.0.4/arch/x86/include/asm/desc_defs.h        2011-07-21 22:17:23.000000000 -0400
6884 +++ linux-3.0.4/arch/x86/include/asm/desc_defs.h        2011-08-23 21:47:55.000000000 -0400
6885 @@ -31,6 +31,12 @@ struct desc_struct {
6886                         unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6887                         unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6888                 };
6889 +               struct {
6890 +                       u16 offset_low;
6891 +                       u16 seg;
6892 +                       unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6893 +                       unsigned offset_high: 16;
6894 +               } gate;
6895         };
6896  } __attribute__((packed));
6897  
6898 diff -urNp linux-3.0.4/arch/x86/include/asm/desc.h linux-3.0.4/arch/x86/include/asm/desc.h
6899 --- linux-3.0.4/arch/x86/include/asm/desc.h     2011-07-21 22:17:23.000000000 -0400
6900 +++ linux-3.0.4/arch/x86/include/asm/desc.h     2011-08-23 21:47:55.000000000 -0400
6901 @@ -4,6 +4,7 @@
6902  #include <asm/desc_defs.h>
6903  #include <asm/ldt.h>
6904  #include <asm/mmu.h>
6905 +#include <asm/pgtable.h>
6906  
6907  #include <linux/smp.h>
6908  
6909 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
6910  
6911         desc->type              = (info->read_exec_only ^ 1) << 1;
6912         desc->type             |= info->contents << 2;
6913 +       desc->type             |= info->seg_not_present ^ 1;
6914  
6915         desc->s                 = 1;
6916         desc->dpl               = 0x3;
6917 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
6918  }
6919  
6920  extern struct desc_ptr idt_descr;
6921 -extern gate_desc idt_table[];
6922 -
6923 -struct gdt_page {
6924 -       struct desc_struct gdt[GDT_ENTRIES];
6925 -} __attribute__((aligned(PAGE_SIZE)));
6926 -
6927 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6928 +extern gate_desc idt_table[256];
6929  
6930 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6931  static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6932  {
6933 -       return per_cpu(gdt_page, cpu).gdt;
6934 +       return cpu_gdt_table[cpu];
6935  }
6936  
6937  #ifdef CONFIG_X86_64
6938 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
6939                              unsigned long base, unsigned dpl, unsigned flags,
6940                              unsigned short seg)
6941  {
6942 -       gate->a = (seg << 16) | (base & 0xffff);
6943 -       gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6944 +       gate->gate.offset_low   = base;
6945 +       gate->gate.seg          = seg;
6946 +       gate->gate.reserved     = 0;
6947 +       gate->gate.type         = type;
6948 +       gate->gate.s            = 0;
6949 +       gate->gate.dpl          = dpl;
6950 +       gate->gate.p            = 1;
6951 +       gate->gate.offset_high  = base >> 16;
6952  }
6953  
6954  #endif
6955 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
6956  
6957  static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
6958  {
6959 +       pax_open_kernel();
6960         memcpy(&idt[entry], gate, sizeof(*gate));
6961 +       pax_close_kernel();
6962  }
6963  
6964  static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
6965  {
6966 +       pax_open_kernel();
6967         memcpy(&ldt[entry], desc, 8);
6968 +       pax_close_kernel();
6969  }
6970  
6971  static inline void
6972 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
6973         default:        size = sizeof(*gdt);            break;
6974         }
6975  
6976 +       pax_open_kernel();
6977         memcpy(&gdt[entry], desc, size);
6978 +       pax_close_kernel();
6979  }
6980  
6981  static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
6982 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const 
6983  
6984  static inline void native_load_tr_desc(void)
6985  {
6986 +       pax_open_kernel();
6987         asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
6988 +       pax_close_kernel();
6989  }
6990  
6991  static inline void native_load_gdt(const struct desc_ptr *dtr)
6992 @@ -244,8 +255,10 @@ static inline void native_load_tls(struc
6993         struct desc_struct *gdt = get_cpu_gdt_table(cpu);
6994         unsigned int i;
6995  
6996 +       pax_open_kernel();
6997         for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
6998                 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
6999 +       pax_close_kernel();
7000  }
7001  
7002  #define _LDT_empty(info)                               \
7003 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7004         desc->limit = (limit >> 16) & 0xf;
7005  }
7006  
7007 -static inline void _set_gate(int gate, unsigned type, void *addr,
7008 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7009                              unsigned dpl, unsigned ist, unsigned seg)
7010  {
7011         gate_desc s;
7012 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7013   * Pentium F0 0F bugfix can have resulted in the mapped
7014   * IDT being write-protected.
7015   */
7016 -static inline void set_intr_gate(unsigned int n, void *addr)
7017 +static inline void set_intr_gate(unsigned int n, const void *addr)
7018  {
7019         BUG_ON((unsigned)n > 0xFF);
7020         _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7021 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7022  /*
7023   * This routine sets up an interrupt gate at directory privilege level 3.
7024   */
7025 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7026 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7027  {
7028         BUG_ON((unsigned)n > 0xFF);
7029         _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7030  }
7031  
7032 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7033 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7034  {
7035         BUG_ON((unsigned)n > 0xFF);
7036         _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7037  }
7038  
7039 -static inline void set_trap_gate(unsigned int n, void *addr)
7040 +static inline void set_trap_gate(unsigned int n, const void *addr)
7041  {
7042         BUG_ON((unsigned)n > 0xFF);
7043         _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7044 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7045  static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7046  {
7047         BUG_ON((unsigned)n > 0xFF);
7048 -       _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7049 +       _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7050  }
7051  
7052 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7053 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7054  {
7055         BUG_ON((unsigned)n > 0xFF);
7056         _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7057  }
7058  
7059 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7060 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7061  {
7062         BUG_ON((unsigned)n > 0xFF);
7063         _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7064  }
7065  
7066 +#ifdef CONFIG_X86_32
7067 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7068 +{
7069 +       struct desc_struct d;
7070 +
7071 +       if (likely(limit))
7072 +               limit = (limit - 1UL) >> PAGE_SHIFT;
7073 +       pack_descriptor(&d, base, limit, 0xFB, 0xC);
7074 +       write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7075 +}
7076 +#endif
7077 +
7078  #endif /* _ASM_X86_DESC_H */
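
Two things happen in the desc.h hunks above: every descriptor-table write is bracketed with pax_open_kernel()/pax_close_kernel() so the tables can stay read-only the rest of the time, and pack_gate() is rewritten in terms of the new "gate" bitfield view added to desc_struct, which splits the handler address into two 16-bit halves. The stand-alone sketch below mirrors only the second part; struct gate32 and pack_gate32 are made-up names modelled on the 8-byte gate layout shown in the patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-alone model of the "gate" view: an 8-byte gate stores
 * its handler address as offset_low and offset_high. */
struct gate32 {
        uint16_t offset_low;
        uint16_t seg;
        unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
        uint16_t offset_high;
} __attribute__((packed));

static void pack_gate32(struct gate32 *g, unsigned type, uint32_t base,
                        unsigned dpl, uint16_t seg)
{
        g->offset_low  = base;          /* low 16 bits of the handler  */
        g->seg         = seg;
        g->reserved    = 0;
        g->type        = type;
        g->s           = 0;
        g->dpl         = dpl;
        g->p           = 1;
        g->offset_high = base >> 16;    /* high 16 bits of the handler */
}

int main(void)
{
        struct gate32 g;

        pack_gate32(&g, 0xE /* interrupt gate */, 0xC1234567u, 0, 0x10);
        printf("low=%#x high=%#x size=%zu\n",
               g.offset_low, g.offset_high, sizeof(g));
        return 0;
}
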
7079 diff -urNp linux-3.0.4/arch/x86/include/asm/e820.h linux-3.0.4/arch/x86/include/asm/e820.h
7080 --- linux-3.0.4/arch/x86/include/asm/e820.h     2011-07-21 22:17:23.000000000 -0400
7081 +++ linux-3.0.4/arch/x86/include/asm/e820.h     2011-08-23 21:47:55.000000000 -0400
7082 @@ -69,7 +69,7 @@ struct e820map {
7083  #define ISA_START_ADDRESS      0xa0000
7084  #define ISA_END_ADDRESS                0x100000
7085  
7086 -#define BIOS_BEGIN             0x000a0000
7087 +#define BIOS_BEGIN             0x000c0000
7088  #define BIOS_END               0x00100000
7089  
7090  #define BIOS_ROM_BASE          0xffe00000
7091 diff -urNp linux-3.0.4/arch/x86/include/asm/elf.h linux-3.0.4/arch/x86/include/asm/elf.h
7092 --- linux-3.0.4/arch/x86/include/asm/elf.h      2011-07-21 22:17:23.000000000 -0400
7093 +++ linux-3.0.4/arch/x86/include/asm/elf.h      2011-08-23 21:47:55.000000000 -0400
7094 @@ -237,7 +237,25 @@ extern int force_personality32;
7095     the loader.  We need to make sure that it is out of the way of the program
7096     that it will "exec", and that there is sufficient room for the brk.  */
7097  
7098 +#ifdef CONFIG_PAX_SEGMEXEC
7099 +#define ELF_ET_DYN_BASE                ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7100 +#else
7101  #define ELF_ET_DYN_BASE                (TASK_SIZE / 3 * 2)
7102 +#endif
7103 +
7104 +#ifdef CONFIG_PAX_ASLR
7105 +#ifdef CONFIG_X86_32
7106 +#define PAX_ELF_ET_DYN_BASE    0x10000000UL
7107 +
7108 +#define PAX_DELTA_MMAP_LEN     (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7109 +#define PAX_DELTA_STACK_LEN    (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7110 +#else
7111 +#define PAX_ELF_ET_DYN_BASE    0x400000UL
7112 +
7113 +#define PAX_DELTA_MMAP_LEN     ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7114 +#define PAX_DELTA_STACK_LEN    ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7115 +#endif
7116 +#endif
7117  
7118  /* This yields a mask that user programs can use to figure out what
7119     instruction set this CPU supports.  This could be done in user space,
7120 @@ -290,9 +308,7 @@ do {                                                                        \
7121  
7122  #define ARCH_DLINFO                                                    \
7123  do {                                                                   \
7124 -       if (vdso_enabled)                                               \
7125 -               NEW_AUX_ENT(AT_SYSINFO_EHDR,                            \
7126 -                           (unsigned long)current->mm->context.vdso);  \
7127 +       NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);        \
7128  } while (0)
7129  
7130  #define AT_SYSINFO             32
7131 @@ -303,7 +319,7 @@ do {                                                                        \
7132  
7133  #endif /* !CONFIG_X86_32 */
7134  
7135 -#define VDSO_CURRENT_BASE      ((unsigned long)current->mm->context.vdso)
7136 +#define VDSO_CURRENT_BASE      (current->mm->context.vdso)
7137  
7138  #define VDSO_ENTRY                                                     \
7139         ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7140 @@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7141  extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7142  #define compat_arch_setup_additional_pages     syscall32_setup_pages
7143  
7144 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7145 -#define arch_randomize_brk arch_randomize_brk
7146 -
7147  #endif /* _ASM_X86_ELF_H */
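
The PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN values above are bit counts: elsewhere in the patch PaX draws that many random bits and applies them at page granularity, so the randomized window spans (1 << bits) * PAGE_SIZE bytes. A quick stand-alone calculation for the 32-bit values (15 with SEGMEXEC, 16 without), assuming 4 KiB pages; the 64-bit case is derived from TASK_SIZE_MAX_SHIFT and is not computed here.

#include <stdio.h>

/* Back-of-the-envelope size of the mmap randomization window implied by a
 * PAX_DELTA_*_LEN value of `bits`, assuming 4 KiB pages.  Stand-alone
 * sketch, not kernel code. */
int main(void)
{
        const unsigned long long page_size = 4096;
        const unsigned int bits[] = { 15, 16 };

        for (unsigned int i = 0; i < sizeof(bits) / sizeof(bits[0]); i++) {
                unsigned long long span = (1ULL << bits[i]) * page_size;
                printf("%u bits -> %llu MiB window\n", bits[i], span >> 20);
        }
        return 0;
}
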
7148 diff -urNp linux-3.0.4/arch/x86/include/asm/emergency-restart.h linux-3.0.4/arch/x86/include/asm/emergency-restart.h
7149 --- linux-3.0.4/arch/x86/include/asm/emergency-restart.h        2011-07-21 22:17:23.000000000 -0400
7150 +++ linux-3.0.4/arch/x86/include/asm/emergency-restart.h        2011-08-23 21:47:55.000000000 -0400
7151 @@ -15,6 +15,6 @@ enum reboot_type {
7152  
7153  extern enum reboot_type reboot_type;
7154  
7155 -extern void machine_emergency_restart(void);
7156 +extern void machine_emergency_restart(void) __noreturn;
7157  
7158  #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7159 diff -urNp linux-3.0.4/arch/x86/include/asm/futex.h linux-3.0.4/arch/x86/include/asm/futex.h
7160 --- linux-3.0.4/arch/x86/include/asm/futex.h    2011-07-21 22:17:23.000000000 -0400
7161 +++ linux-3.0.4/arch/x86/include/asm/futex.h    2011-08-23 21:47:55.000000000 -0400
7162 @@ -12,16 +12,18 @@
7163  #include <asm/system.h>
7164  
7165  #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)    \
7166 +       typecheck(u32 *, uaddr);                                \
7167         asm volatile("1:\t" insn "\n"                           \
7168                      "2:\t.section .fixup,\"ax\"\n"             \
7169                      "3:\tmov\t%3, %1\n"                        \
7170                      "\tjmp\t2b\n"                              \
7171                      "\t.previous\n"                            \
7172                      _ASM_EXTABLE(1b, 3b)                       \
7173 -                    : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7174 +                    : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7175                      : "i" (-EFAULT), "0" (oparg), "1" (0))
7176  
7177  #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)    \
7178 +       typecheck(u32 *, uaddr);                                \
7179         asm volatile("1:\tmovl  %2, %0\n"                       \
7180                      "\tmovl\t%0, %3\n"                         \
7181                      "\t" insn "\n"                             \
7182 @@ -34,7 +36,7 @@
7183                      _ASM_EXTABLE(1b, 4b)                       \
7184                      _ASM_EXTABLE(2b, 4b)                       \
7185                      : "=&a" (oldval), "=&r" (ret),             \
7186 -                      "+m" (*uaddr), "=&r" (tem)               \
7187 +                      "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7188                      : "r" (oparg), "i" (-EFAULT), "1" (0))
7189  
7190  static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7191 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7192  
7193         switch (op) {
7194         case FUTEX_OP_SET:
7195 -               __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7196 +               __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7197                 break;
7198         case FUTEX_OP_ADD:
7199 -               __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7200 +               __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7201                                    uaddr, oparg);
7202                 break;
7203         case FUTEX_OP_OR:
7204 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7205         if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7206                 return -EFAULT;
7207  
7208 -       asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7209 +       asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7210                      "2:\t.section .fixup, \"ax\"\n"
7211                      "3:\tmov     %3, %0\n"
7212                      "\tjmp     2b\n"
7213                      "\t.previous\n"
7214                      _ASM_EXTABLE(1b, 3b)
7215 -                    : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7216 +                    : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7217                      : "i" (-EFAULT), "r" (newval), "1" (oldval)
7218                      : "memory"
7219         );
7220 diff -urNp linux-3.0.4/arch/x86/include/asm/hw_irq.h linux-3.0.4/arch/x86/include/asm/hw_irq.h
7221 --- linux-3.0.4/arch/x86/include/asm/hw_irq.h   2011-07-21 22:17:23.000000000 -0400
7222 +++ linux-3.0.4/arch/x86/include/asm/hw_irq.h   2011-08-23 21:47:55.000000000 -0400
7223 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7224  extern void enable_IO_APIC(void);
7225  
7226  /* Statistics */
7227 -extern atomic_t irq_err_count;
7228 -extern atomic_t irq_mis_count;
7229 +extern atomic_unchecked_t irq_err_count;
7230 +extern atomic_unchecked_t irq_mis_count;
7231  
7232  /* EISA */
7233  extern void eisa_set_level_irq(unsigned int irq);
7234 diff -urNp linux-3.0.4/arch/x86/include/asm/i387.h linux-3.0.4/arch/x86/include/asm/i387.h
7235 --- linux-3.0.4/arch/x86/include/asm/i387.h     2011-07-21 22:17:23.000000000 -0400
7236 +++ linux-3.0.4/arch/x86/include/asm/i387.h     2011-08-23 21:47:55.000000000 -0400
7237 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7238  {
7239         int err;
7240  
7241 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7242 +       if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7243 +               fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7244 +#endif
7245 +
7246         /* See comment in fxsave() below. */
7247  #ifdef CONFIG_AS_FXSAVEQ
7248         asm volatile("1:  fxrstorq %[fx]\n\t"
7249 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7250  {
7251         int err;
7252  
7253 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7254 +       if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7255 +               fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7256 +#endif
7257 +
7258         /*
7259          * Clear the bytes not touched by the fxsave and reserved
7260          * for the SW usage.
7261 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7262  #endif /* CONFIG_X86_64 */
7263  
7264  /* We need a safe address that is cheap to find and that is already
7265 -   in L1 during context switch. The best choices are unfortunately
7266 -   different for UP and SMP */
7267 -#ifdef CONFIG_SMP
7268 -#define safe_address (__per_cpu_offset[0])
7269 -#else
7270 -#define safe_address (kstat_cpu(0).cpustat.user)
7271 -#endif
7272 +   in L1 during context switch. */
7273 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7274  
7275  /*
7276   * These must be called with preempt disabled
7277 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7278         struct thread_info *me = current_thread_info();
7279         preempt_disable();
7280         if (me->status & TS_USEDFPU)
7281 -               __save_init_fpu(me->task);
7282 +               __save_init_fpu(current);
7283         else
7284                 clts();
7285  }
7286 diff -urNp linux-3.0.4/arch/x86/include/asm/io.h linux-3.0.4/arch/x86/include/asm/io.h
7287 --- linux-3.0.4/arch/x86/include/asm/io.h       2011-07-21 22:17:23.000000000 -0400
7288 +++ linux-3.0.4/arch/x86/include/asm/io.h       2011-08-23 21:47:55.000000000 -0400
7289 @@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7290  
7291  #include <linux/vmalloc.h>
7292  
7293 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7294 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7295 +{
7296 +       return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7297 +}
7298 +
7299 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7300 +{
7301 +       return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7302 +}
7303 +
7304  /*
7305   * Convert a virtual cached pointer to an uncached pointer
7306   */
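
The valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers added above reject /dev/mem-style accesses whose last page would fall at or beyond 2^x86_phys_bits. Below is a stand-alone restatement of that arithmetic; phys_range_ok, PAGE_SHIFT of 12 and the 36-bit physical address space are assumptions made for the demo.

#include <stdio.h>

/* Rough user-space restatement of the range check: an access of `count`
 * bytes at `addr` is allowed only if its last page still falls below
 * 2^phys_bits. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int phys_range_ok(unsigned long long addr, unsigned long long count,
                         unsigned int phys_bits)
{
        unsigned long long last_pfn = (addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long long max_pfn  = 1ULL << (phys_bits - PAGE_SHIFT);

        return last_pfn < max_pfn;
}

int main(void)
{
        printf("%d\n", phys_range_ok(0x1000, 4096, 36));          /* 1 */
        printf("%d\n", phys_range_ok(0xFFFFFFFFFULL, 4096, 36));  /* 0 */
        return 0;
}
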
7307 diff -urNp linux-3.0.4/arch/x86/include/asm/irqflags.h linux-3.0.4/arch/x86/include/asm/irqflags.h
7308 --- linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7309 +++ linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7310 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7311         sti;                                    \
7312         sysexit
7313  
7314 +#define GET_CR0_INTO_RDI               mov %cr0, %rdi
7315 +#define SET_RDI_INTO_CR0               mov %rdi, %cr0
7316 +#define GET_CR3_INTO_RDI               mov %cr3, %rdi
7317 +#define SET_RDI_INTO_CR3               mov %rdi, %cr3
7318 +
7319  #else
7320  #define INTERRUPT_RETURN               iret
7321  #define ENABLE_INTERRUPTS_SYSEXIT      sti; sysexit
7322 diff -urNp linux-3.0.4/arch/x86/include/asm/kprobes.h linux-3.0.4/arch/x86/include/asm/kprobes.h
7323 --- linux-3.0.4/arch/x86/include/asm/kprobes.h  2011-07-21 22:17:23.000000000 -0400
7324 +++ linux-3.0.4/arch/x86/include/asm/kprobes.h  2011-08-23 21:47:55.000000000 -0400
7325 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7326  #define RELATIVEJUMP_SIZE 5
7327  #define RELATIVECALL_OPCODE 0xe8
7328  #define RELATIVE_ADDR_SIZE 4
7329 -#define MAX_STACK_SIZE 64
7330 -#define MIN_STACK_SIZE(ADDR)                                          \
7331 -       (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7332 -                             THREAD_SIZE - (unsigned long)(ADDR)))    \
7333 -        ? (MAX_STACK_SIZE)                                            \
7334 -        : (((unsigned long)current_thread_info()) +                   \
7335 -           THREAD_SIZE - (unsigned long)(ADDR)))
7336 +#define MAX_STACK_SIZE 64UL
7337 +#define MIN_STACK_SIZE(ADDR)   min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7338  
7339  #define flush_insn_slot(p)     do { } while (0)
7340  
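
The simplified MIN_STACK_SIZE() above clamps the number of stack bytes kprobes will copy to the smaller of MAX_STACK_SIZE and the distance from the probed address to the top of the kernel stack (thread.sp0). A stand-alone sketch of that clamping, with a local buffer standing in for the kernel stack; min_stack_size and the sizes are illustrative only.

#include <stdio.h>
#include <string.h>

/* Never copy more than MAX_STACK_SIZE bytes, and never copy past the top
 * of the stack (modelled here by the end of a local buffer). */
#define MAX_STACK_SIZE 64UL

static unsigned long min_stack_size(unsigned long stack_top, unsigned long addr)
{
        unsigned long room = stack_top - addr;

        return room < MAX_STACK_SIZE ? room : MAX_STACK_SIZE;
}

int main(void)
{
        char stack[256];
        char copy[MAX_STACK_SIZE];
        unsigned long top  = (unsigned long)(stack + sizeof(stack));
        unsigned long addr = (unsigned long)(stack + 200);  /* 56 bytes of room */
        unsigned long n    = min_stack_size(top, addr);

        memcpy(copy, (void *)addr, n);       /* copies 56 bytes, not 64 */
        printf("copied %lu bytes\n", n);
        return 0;
}
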
7341 diff -urNp linux-3.0.4/arch/x86/include/asm/kvm_host.h linux-3.0.4/arch/x86/include/asm/kvm_host.h
7342 --- linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7343 +++ linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7344 @@ -441,7 +441,7 @@ struct kvm_arch {
7345         unsigned int n_used_mmu_pages;
7346         unsigned int n_requested_mmu_pages;
7347         unsigned int n_max_mmu_pages;
7348 -       atomic_t invlpg_counter;
7349 +       atomic_unchecked_t invlpg_counter;
7350         struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7351         /*
7352          * Hash table of struct kvm_mmu_page.
7353 @@ -619,7 +619,7 @@ struct kvm_x86_ops {
7354                                enum x86_intercept_stage stage);
7355  
7356         const struct trace_print_flags *exit_reasons_str;
7357 -};
7358 +} __do_const;
7359  
7360  struct kvm_arch_async_pf {
7361         u32 token;
7362 diff -urNp linux-3.0.4/arch/x86/include/asm/local.h linux-3.0.4/arch/x86/include/asm/local.h
7363 --- linux-3.0.4/arch/x86/include/asm/local.h    2011-07-21 22:17:23.000000000 -0400
7364 +++ linux-3.0.4/arch/x86/include/asm/local.h    2011-08-23 21:47:55.000000000 -0400
7365 @@ -18,26 +18,58 @@ typedef struct {
7366  
7367  static inline void local_inc(local_t *l)
7368  {
7369 -       asm volatile(_ASM_INC "%0"
7370 +       asm volatile(_ASM_INC "%0\n"
7371 +
7372 +#ifdef CONFIG_PAX_REFCOUNT
7373 +                    "jno 0f\n"
7374 +                    _ASM_DEC "%0\n"
7375 +                    "int $4\n0:\n"
7376 +                    _ASM_EXTABLE(0b, 0b)
7377 +#endif
7378 +
7379                      : "+m" (l->a.counter));
7380  }
7381  
7382  static inline void local_dec(local_t *l)
7383  {
7384 -       asm volatile(_ASM_DEC "%0"
7385 +       asm volatile(_ASM_DEC "%0\n"
7386 +
7387 +#ifdef CONFIG_PAX_REFCOUNT
7388 +                    "jno 0f\n"
7389 +                    _ASM_INC "%0\n"
7390 +                    "int $4\n0:\n"
7391 +                    _ASM_EXTABLE(0b, 0b)
7392 +#endif
7393 +
7394                      : "+m" (l->a.counter));
7395  }
7396  
7397  static inline void local_add(long i, local_t *l)
7398  {
7399 -       asm volatile(_ASM_ADD "%1,%0"
7400 +       asm volatile(_ASM_ADD "%1,%0\n"
7401 +
7402 +#ifdef CONFIG_PAX_REFCOUNT
7403 +                    "jno 0f\n"
7404 +                    _ASM_SUB "%1,%0\n"
7405 +                    "int $4\n0:\n"
7406 +                    _ASM_EXTABLE(0b, 0b)
7407 +#endif
7408 +
7409                      : "+m" (l->a.counter)
7410                      : "ir" (i));
7411  }
7412  
7413  static inline void local_sub(long i, local_t *l)
7414  {
7415 -       asm volatile(_ASM_SUB "%1,%0"
7416 +       asm volatile(_ASM_SUB "%1,%0\n"
7417 +
7418 +#ifdef CONFIG_PAX_REFCOUNT
7419 +                    "jno 0f\n"
7420 +                    _ASM_ADD "%1,%0\n"
7421 +                    "int $4\n0:\n"
7422 +                    _ASM_EXTABLE(0b, 0b)
7423 +#endif
7424 +
7425                      : "+m" (l->a.counter)
7426                      : "ir" (i));
7427  }
7428 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7429  {
7430         unsigned char c;
7431  
7432 -       asm volatile(_ASM_SUB "%2,%0; sete %1"
7433 +       asm volatile(_ASM_SUB "%2,%0\n"
7434 +
7435 +#ifdef CONFIG_PAX_REFCOUNT
7436 +                    "jno 0f\n"
7437 +                    _ASM_ADD "%2,%0\n"
7438 +                    "int $4\n0:\n"
7439 +                    _ASM_EXTABLE(0b, 0b)
7440 +#endif
7441 +
7442 +                    "sete %1\n"
7443                      : "+m" (l->a.counter), "=qm" (c)
7444                      : "ir" (i) : "memory");
7445         return c;
7446 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7447  {
7448         unsigned char c;
7449  
7450 -       asm volatile(_ASM_DEC "%0; sete %1"
7451 +       asm volatile(_ASM_DEC "%0\n"
7452 +
7453 +#ifdef CONFIG_PAX_REFCOUNT
7454 +                    "jno 0f\n"
7455 +                    _ASM_INC "%0\n"
7456 +                    "int $4\n0:\n"
7457 +                    _ASM_EXTABLE(0b, 0b)
7458 +#endif
7459 +
7460 +                    "sete %1\n"
7461                      : "+m" (l->a.counter), "=qm" (c)
7462                      : : "memory");
7463         return c != 0;
7464 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7465  {
7466         unsigned char c;
7467  
7468 -       asm volatile(_ASM_INC "%0; sete %1"
7469 +       asm volatile(_ASM_INC "%0\n"
7470 +
7471 +#ifdef CONFIG_PAX_REFCOUNT
7472 +                    "jno 0f\n"
7473 +                    _ASM_DEC "%0\n"
7474 +                    "int $4\n0:\n"
7475 +                    _ASM_EXTABLE(0b, 0b)
7476 +#endif
7477 +
7478 +                    "sete %1\n"
7479                      : "+m" (l->a.counter), "=qm" (c)
7480                      : : "memory");
7481         return c != 0;
7482 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7483  {
7484         unsigned char c;
7485  
7486 -       asm volatile(_ASM_ADD "%2,%0; sets %1"
7487 +       asm volatile(_ASM_ADD "%2,%0\n"
7488 +
7489 +#ifdef CONFIG_PAX_REFCOUNT
7490 +                    "jno 0f\n"
7491 +                    _ASM_SUB "%2,%0\n"
7492 +                    "int $4\n0:\n"
7493 +                    _ASM_EXTABLE(0b, 0b)
7494 +#endif
7495 +
7496 +                    "sets %1\n"
7497                      : "+m" (l->a.counter), "=qm" (c)
7498                      : "ir" (i) : "memory");
7499         return c;
7500 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7501  #endif
7502         /* Modern 486+ processor */
7503         __i = i;
7504 -       asm volatile(_ASM_XADD "%0, %1;"
7505 +       asm volatile(_ASM_XADD "%0, %1\n"
7506 +
7507 +#ifdef CONFIG_PAX_REFCOUNT
7508 +                    "jno 0f\n"
7509 +                    _ASM_MOV "%0,%1\n"
7510 +                    "int $4\n0:\n"
7511 +                    _ASM_EXTABLE(0b, 0b)
7512 +#endif
7513 +
7514                      : "+r" (i), "+m" (l->a.counter)
7515                      : : "memory");
7516         return i + __i;
7517 diff -urNp linux-3.0.4/arch/x86/include/asm/mman.h linux-3.0.4/arch/x86/include/asm/mman.h
7518 --- linux-3.0.4/arch/x86/include/asm/mman.h     2011-07-21 22:17:23.000000000 -0400
7519 +++ linux-3.0.4/arch/x86/include/asm/mman.h     2011-08-23 21:47:55.000000000 -0400
7520 @@ -5,4 +5,14 @@
7521  
7522  #include <asm-generic/mman.h>
7523  
7524 +#ifdef __KERNEL__
7525 +#ifndef __ASSEMBLY__
7526 +#ifdef CONFIG_X86_32
7527 +#define arch_mmap_check        i386_mmap_check
7528 +int i386_mmap_check(unsigned long addr, unsigned long len,
7529 +               unsigned long flags);
7530 +#endif
7531 +#endif
7532 +#endif
7533 +
7534  #endif /* _ASM_X86_MMAN_H */
7535 diff -urNp linux-3.0.4/arch/x86/include/asm/mmu_context.h linux-3.0.4/arch/x86/include/asm/mmu_context.h
7536 --- linux-3.0.4/arch/x86/include/asm/mmu_context.h      2011-07-21 22:17:23.000000000 -0400
7537 +++ linux-3.0.4/arch/x86/include/asm/mmu_context.h      2011-08-23 21:48:14.000000000 -0400
7538 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7539  
7540  static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7541  {
7542 +
7543 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7544 +       unsigned int i;
7545 +       pgd_t *pgd;
7546 +
7547 +       pax_open_kernel();
7548 +       pgd = get_cpu_pgd(smp_processor_id());
7549 +       for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7550 +               set_pgd_batched(pgd+i, native_make_pgd(0));
7551 +       pax_close_kernel();
7552 +#endif
7553 +
7554  #ifdef CONFIG_SMP
7555         if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7556                 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7557 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7558                              struct task_struct *tsk)
7559  {
7560         unsigned cpu = smp_processor_id();
7561 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7562 +       int tlbstate = TLBSTATE_OK;
7563 +#endif
7564  
7565         if (likely(prev != next)) {
7566  #ifdef CONFIG_SMP
7567 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7568 +               tlbstate = percpu_read(cpu_tlbstate.state);
7569 +#endif
7570                 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7571                 percpu_write(cpu_tlbstate.active_mm, next);
7572  #endif
7573                 cpumask_set_cpu(cpu, mm_cpumask(next));
7574  
7575                 /* Re-load page tables */
7576 +#ifdef CONFIG_PAX_PER_CPU_PGD
7577 +               pax_open_kernel();
7578 +               __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7579 +               __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7580 +               pax_close_kernel();
7581 +               load_cr3(get_cpu_pgd(cpu));
7582 +#else
7583                 load_cr3(next->pgd);
7584 +#endif
7585  
7586                 /* stop flush ipis for the previous mm */
7587                 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7588 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7589                  */
7590                 if (unlikely(prev->context.ldt != next->context.ldt))
7591                         load_LDT_nolock(&next->context);
7592 -       }
7593 +
7594 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7595 +               if (!(__supported_pte_mask & _PAGE_NX)) {
7596 +                       smp_mb__before_clear_bit();
7597 +                       cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7598 +                       smp_mb__after_clear_bit();
7599 +                       cpu_set(cpu, next->context.cpu_user_cs_mask);
7600 +               }
7601 +#endif
7602 +
7603 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7604 +               if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7605 +                            prev->context.user_cs_limit != next->context.user_cs_limit))
7606 +                       set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7607  #ifdef CONFIG_SMP
7608 +               else if (unlikely(tlbstate != TLBSTATE_OK))
7609 +                       set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7610 +#endif
7611 +#endif
7612 +
7613 +       }
7614         else {
7615 +
7616 +#ifdef CONFIG_PAX_PER_CPU_PGD
7617 +               pax_open_kernel();
7618 +               __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7619 +               __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7620 +               pax_close_kernel();
7621 +               load_cr3(get_cpu_pgd(cpu));
7622 +#endif
7623 +
7624 +#ifdef CONFIG_SMP
7625                 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7626                 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7627  
7628 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7629                          * tlb flush IPI delivery. We must reload CR3
7630                          * to make sure to use no freed page tables.
7631                          */
7632 +
7633 +#ifndef CONFIG_PAX_PER_CPU_PGD
7634                         load_cr3(next->pgd);
7635 +#endif
7636 +
7637                         load_LDT_nolock(&next->context);
7638 +
7639 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7640 +                       if (!(__supported_pte_mask & _PAGE_NX))
7641 +                               cpu_set(cpu, next->context.cpu_user_cs_mask);
7642 +#endif
7643 +
7644 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7645 +#ifdef CONFIG_PAX_PAGEEXEC
7646 +                       if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7647 +#endif
7648 +                               set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7649 +#endif
7650 +
7651                 }
7652 -       }
7653  #endif
7654 +       }
7655  }
7656  
7657  #define activate_mm(prev, next)                        \
7658 diff -urNp linux-3.0.4/arch/x86/include/asm/mmu.h linux-3.0.4/arch/x86/include/asm/mmu.h
7659 --- linux-3.0.4/arch/x86/include/asm/mmu.h      2011-07-21 22:17:23.000000000 -0400
7660 +++ linux-3.0.4/arch/x86/include/asm/mmu.h      2011-08-23 21:47:55.000000000 -0400
7661 @@ -9,7 +9,7 @@
7662   * we put the segment information here.
7663   */
7664  typedef struct {
7665 -       void *ldt;
7666 +       struct desc_struct *ldt;
7667         int size;
7668  
7669  #ifdef CONFIG_X86_64
7670 @@ -18,7 +18,19 @@ typedef struct {
7671  #endif
7672  
7673         struct mutex lock;
7674 -       void *vdso;
7675 +       unsigned long vdso;
7676 +
7677 +#ifdef CONFIG_X86_32
7678 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7679 +       unsigned long user_cs_base;
7680 +       unsigned long user_cs_limit;
7681 +
7682 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7683 +       cpumask_t cpu_user_cs_mask;
7684 +#endif
7685 +
7686 +#endif
7687 +#endif
7688  } mm_context_t;
7689  
7690  #ifdef CONFIG_SMP
7691 diff -urNp linux-3.0.4/arch/x86/include/asm/module.h linux-3.0.4/arch/x86/include/asm/module.h
7692 --- linux-3.0.4/arch/x86/include/asm/module.h   2011-07-21 22:17:23.000000000 -0400
7693 +++ linux-3.0.4/arch/x86/include/asm/module.h   2011-08-23 21:48:14.000000000 -0400
7694 @@ -5,6 +5,7 @@
7695  
7696  #ifdef CONFIG_X86_64
7697  /* X86_64 does not define MODULE_PROC_FAMILY */
7698 +#define MODULE_PROC_FAMILY ""
7699  #elif defined CONFIG_M386
7700  #define MODULE_PROC_FAMILY "386 "
7701  #elif defined CONFIG_M486
7702 @@ -59,8 +60,30 @@
7703  #error unknown processor family
7704  #endif
7705  
7706 -#ifdef CONFIG_X86_32
7707 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7708 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7709 +#define MODULE_PAX_UDEREF "UDEREF "
7710 +#else
7711 +#define MODULE_PAX_UDEREF ""
7712 +#endif
7713 +
7714 +#ifdef CONFIG_PAX_KERNEXEC
7715 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7716 +#else
7717 +#define MODULE_PAX_KERNEXEC ""
7718  #endif
7719  
7720 +#ifdef CONFIG_PAX_REFCOUNT
7721 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
7722 +#else
7723 +#define MODULE_PAX_REFCOUNT ""
7724 +#endif
7725 +
7726 +#ifdef CONFIG_GRKERNSEC
7727 +#define MODULE_GRSEC "GRSECURITY "
7728 +#else
7729 +#define MODULE_GRSEC ""
7730 +#endif
7731 +
7732 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7733 +
7734  #endif /* _ASM_X86_MODULE_H */
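
MODULE_ARCH_VERMAGIC above becomes part of each module's version-magic string, so a module built without the same grsecurity/PaX options as the running kernel no longer matches and is refused at load time. The string is assembled purely by preprocessor pasting of adjacent literals, as the stand-alone demo below shows; the particular option mix is invented for the example.

#include <stdio.h>

/* Demo of how the vermagic suffix is assembled: each enabled option
 * contributes a string literal (with a trailing space) and the compiler
 * concatenates adjacent literals. */
#define MODULE_PROC_FAMILY  ""            /* x86_64 keeps this empty */
#define MODULE_GRSEC        "GRSECURITY "
#define MODULE_PAX_KERNEXEC "KERNEXEC "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_PAX_REFCOUNT "REFCOUNT "

#define MODULE_ARCH_VERMAGIC \
        MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC \
        MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
        /* Prints: "GRSECURITY KERNEXEC UDEREF REFCOUNT " */
        printf("\"%s\"\n", MODULE_ARCH_VERMAGIC);
        return 0;
}
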
7735 diff -urNp linux-3.0.4/arch/x86/include/asm/page_64_types.h linux-3.0.4/arch/x86/include/asm/page_64_types.h
7736 --- linux-3.0.4/arch/x86/include/asm/page_64_types.h    2011-07-21 22:17:23.000000000 -0400
7737 +++ linux-3.0.4/arch/x86/include/asm/page_64_types.h    2011-08-23 21:47:55.000000000 -0400
7738 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7739  
7740  /* duplicated to the one in bootmem.h */
7741  extern unsigned long max_pfn;
7742 -extern unsigned long phys_base;
7743 +extern const unsigned long phys_base;
7744  
7745  extern unsigned long __phys_addr(unsigned long);
7746  #define __phys_reloc_hide(x)   (x)
7747 diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt.h linux-3.0.4/arch/x86/include/asm/paravirt.h
7748 --- linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7749 +++ linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7750 @@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp, 
7751                             val);
7752  }
7753  
7754 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7755 +{
7756 +       pgdval_t val = native_pgd_val(pgd);
7757 +
7758 +       if (sizeof(pgdval_t) > sizeof(long))
7759 +               PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7760 +                           val, (u64)val >> 32);
7761 +       else
7762 +               PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7763 +                           val);
7764 +}
7765 +
7766  static inline void pgd_clear(pgd_t *pgdp)
7767  {
7768         set_pgd(pgdp, __pgd(0));
7769 @@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7770         pv_mmu_ops.set_fixmap(idx, phys, flags);
7771  }
7772  
7773 +#ifdef CONFIG_PAX_KERNEXEC
7774 +static inline unsigned long pax_open_kernel(void)
7775 +{
7776 +       return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7777 +}
7778 +
7779 +static inline unsigned long pax_close_kernel(void)
7780 +{
7781 +       return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7782 +}
7783 +#else
7784 +static inline unsigned long pax_open_kernel(void) { return 0; }
7785 +static inline unsigned long pax_close_kernel(void) { return 0; }
7786 +#endif
7787 +
7788  #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7789  
7790  static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7791 @@ -955,7 +982,7 @@ extern void default_banner(void);
7792  
7793  #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
7794  #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7795 -#define PARA_INDIRECT(addr)    *%cs:addr
7796 +#define PARA_INDIRECT(addr)    *%ss:addr
7797  #endif
7798  
7799  #define INTERRUPT_RETURN                                               \
7800 @@ -1032,6 +1059,21 @@ extern void default_banner(void);
7801         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
7802                   CLBR_NONE,                                            \
7803                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7804 +
7805 +#define GET_CR0_INTO_RDI                               \
7806 +       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7807 +       mov %rax,%rdi
7808 +
7809 +#define SET_RDI_INTO_CR0                               \
7810 +       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7811 +
7812 +#define GET_CR3_INTO_RDI                               \
7813 +       call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7814 +       mov %rax,%rdi
7815 +
7816 +#define SET_RDI_INTO_CR3                               \
7817 +       call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7818 +
7819  #endif /* CONFIG_X86_32 */
7820  
7821  #endif /* __ASSEMBLY__ */
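
pax_open_kernel()/pax_close_kernel(), defined above for the paravirt case and used throughout this patch (see the desc.h hunks), briefly allow writes to data the kernel otherwise keeps read-only and then restore the protection. The snippet below is only a loose user-space analogy built on mprotect(); the kernel-side implementation works at the control-register and page-table level instead, which nothing here reproduces.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Loose analogy for the open/close-kernel bracket: keep a page read-only,
 * open a short write window to update it, then close it again. */
static char table[4096] __attribute__((aligned(4096))) = "initial";

int main(void)
{
        mprotect(table, sizeof(table), PROT_READ);                 /* closed */

        mprotect(table, sizeof(table), PROT_READ | PROT_WRITE);    /* open   */
        strcpy(table, "updated");
        mprotect(table, sizeof(table), PROT_READ);                 /* close  */

        printf("%s\n", table);
        return 0;
}
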
7822 diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt_types.h linux-3.0.4/arch/x86/include/asm/paravirt_types.h
7823 --- linux-3.0.4/arch/x86/include/asm/paravirt_types.h   2011-07-21 22:17:23.000000000 -0400
7824 +++ linux-3.0.4/arch/x86/include/asm/paravirt_types.h   2011-08-23 21:47:55.000000000 -0400
7825 @@ -78,19 +78,19 @@ struct pv_init_ops {
7826          */
7827         unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7828                           unsigned long addr, unsigned len);
7829 -};
7830 +} __no_const;
7831  
7832  
7833  struct pv_lazy_ops {
7834         /* Set deferred update mode, used for batching operations. */
7835         void (*enter)(void);
7836         void (*leave)(void);
7837 -};
7838 +} __no_const;
7839  
7840  struct pv_time_ops {
7841         unsigned long long (*sched_clock)(void);
7842         unsigned long (*get_tsc_khz)(void);
7843 -};
7844 +} __no_const;
7845  
7846  struct pv_cpu_ops {
7847         /* hooks for various privileged instructions */
7848 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
7849  
7850         void (*start_context_switch)(struct task_struct *prev);
7851         void (*end_context_switch)(struct task_struct *next);
7852 -};
7853 +} __no_const;
7854  
7855  struct pv_irq_ops {
7856         /*
7857 @@ -217,7 +217,7 @@ struct pv_apic_ops {
7858                                  unsigned long start_eip,
7859                                  unsigned long start_esp);
7860  #endif
7861 -};
7862 +} __no_const;
7863  
7864  struct pv_mmu_ops {
7865         unsigned long (*read_cr2)(void);
7866 @@ -306,6 +306,7 @@ struct pv_mmu_ops {
7867         struct paravirt_callee_save make_pud;
7868  
7869         void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
7870 +       void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
7871  #endif /* PAGETABLE_LEVELS == 4 */
7872  #endif /* PAGETABLE_LEVELS >= 3 */
7873  
7874 @@ -317,6 +318,12 @@ struct pv_mmu_ops {
7875            an mfn.  We can tell which is which from the index. */
7876         void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7877                            phys_addr_t phys, pgprot_t flags);
7878 +
7879 +#ifdef CONFIG_PAX_KERNEXEC
7880 +       unsigned long (*pax_open_kernel)(void);
7881 +       unsigned long (*pax_close_kernel)(void);
7882 +#endif
7883 +
7884  };
7885  
7886  struct arch_spinlock;
7887 @@ -327,7 +334,7 @@ struct pv_lock_ops {
7888         void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7889         int (*spin_trylock)(struct arch_spinlock *lock);
7890         void (*spin_unlock)(struct arch_spinlock *lock);
7891 -};
7892 +} __no_const;
7893  
7894  /* This contains all the paravirt structures: we get a convenient
7895   * number for each function using the offset which we use to indicate
7896 diff -urNp linux-3.0.4/arch/x86/include/asm/pgalloc.h linux-3.0.4/arch/x86/include/asm/pgalloc.h
7897 --- linux-3.0.4/arch/x86/include/asm/pgalloc.h  2011-07-21 22:17:23.000000000 -0400
7898 +++ linux-3.0.4/arch/x86/include/asm/pgalloc.h  2011-08-23 21:47:55.000000000 -0400
7899 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7900                                        pmd_t *pmd, pte_t *pte)
7901  {
7902         paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7903 +       set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7904 +}
7905 +
7906 +static inline void pmd_populate_user(struct mm_struct *mm,
7907 +                                      pmd_t *pmd, pte_t *pte)
7908 +{
7909 +       paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7910         set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7911  }
7912  
7913 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-2level.h linux-3.0.4/arch/x86/include/asm/pgtable-2level.h
7914 --- linux-3.0.4/arch/x86/include/asm/pgtable-2level.h   2011-07-21 22:17:23.000000000 -0400
7915 +++ linux-3.0.4/arch/x86/include/asm/pgtable-2level.h   2011-08-23 21:47:55.000000000 -0400
7916 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t 
7917  
7918  static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7919  {
7920 +       pax_open_kernel();
7921         *pmdp = pmd;
7922 +       pax_close_kernel();
7923  }
7924  
7925  static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7926 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32.h linux-3.0.4/arch/x86/include/asm/pgtable_32.h
7927 --- linux-3.0.4/arch/x86/include/asm/pgtable_32.h       2011-07-21 22:17:23.000000000 -0400
7928 +++ linux-3.0.4/arch/x86/include/asm/pgtable_32.h       2011-08-23 21:47:55.000000000 -0400
7929 @@ -25,9 +25,6 @@
7930  struct mm_struct;
7931  struct vm_area_struct;
7932  
7933 -extern pgd_t swapper_pg_dir[1024];
7934 -extern pgd_t initial_page_table[1024];
7935 -
7936  static inline void pgtable_cache_init(void) { }
7937  static inline void check_pgt_cache(void) { }
7938  void paging_init(void);
7939 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7940  # include <asm/pgtable-2level.h>
7941  #endif
7942  
7943 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7944 +extern pgd_t initial_page_table[PTRS_PER_PGD];
7945 +#ifdef CONFIG_X86_PAE
7946 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7947 +#endif
7948 +
7949  #if defined(CONFIG_HIGHPTE)
7950  #define pte_offset_map(dir, address)                                   \
7951         ((pte_t *)kmap_atomic(pmd_page(*(dir))) +               \
7952 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7953  /* Clear a kernel PTE and flush it from the TLB */
7954  #define kpte_clear_flush(ptep, vaddr)          \
7955  do {                                           \
7956 +       pax_open_kernel();                      \
7957         pte_clear(&init_mm, (vaddr), (ptep));   \
7958 +       pax_close_kernel();                     \
7959         __flush_tlb_one((vaddr));               \
7960  } while (0)
7961  
7962 @@ -74,6 +79,9 @@ do {                                          \
7963  
7964  #endif /* !__ASSEMBLY__ */
7965  
7966 +#define HAVE_ARCH_UNMAPPED_AREA
7967 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7968 +
7969  /*
7970   * kern_addr_valid() is (1) for FLATMEM and (0) for
7971   * SPARSEMEM and DISCONTIGMEM
7972 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h
7973 --- linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
7974 +++ linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
7975 @@ -8,7 +8,7 @@
7976   */
7977  #ifdef CONFIG_X86_PAE
7978  # include <asm/pgtable-3level_types.h>
7979 -# define PMD_SIZE      (1UL << PMD_SHIFT)
7980 +# define PMD_SIZE      (_AC(1, UL) << PMD_SHIFT)
7981  # define PMD_MASK      (~(PMD_SIZE - 1))
7982  #else
7983  # include <asm/pgtable-2level_types.h>
7984 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set 
7985  # define VMALLOC_END   (FIXADDR_START - 2 * PAGE_SIZE)
7986  #endif
7987  
7988 +#ifdef CONFIG_PAX_KERNEXEC
7989 +#ifndef __ASSEMBLY__
7990 +extern unsigned char MODULES_EXEC_VADDR[];
7991 +extern unsigned char MODULES_EXEC_END[];
7992 +#endif
7993 +#include <asm/boot.h>
7994 +#define ktla_ktva(addr)                (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7995 +#define ktva_ktla(addr)                (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7996 +#else
7997 +#define ktla_ktva(addr)                (addr)
7998 +#define ktva_ktla(addr)                (addr)
7999 +#endif
8000 +
8001  #define MODULES_VADDR  VMALLOC_START
8002  #define MODULES_END    VMALLOC_END
8003  #define MODULES_LEN    (MODULES_VADDR - MODULES_END)
8004 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-3level.h linux-3.0.4/arch/x86/include/asm/pgtable-3level.h
8005 --- linux-3.0.4/arch/x86/include/asm/pgtable-3level.h   2011-07-21 22:17:23.000000000 -0400
8006 +++ linux-3.0.4/arch/x86/include/asm/pgtable-3level.h   2011-08-23 21:47:55.000000000 -0400
8007 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8008  
8009  static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8010  {
8011 +       pax_open_kernel();
8012         set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8013 +       pax_close_kernel();
8014  }
8015  
8016  static inline void native_set_pud(pud_t *pudp, pud_t pud)
8017  {
8018 +       pax_open_kernel();
8019         set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8020 +       pax_close_kernel();
8021  }
8022  
8023  /*
8024 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64.h linux-3.0.4/arch/x86/include/asm/pgtable_64.h
8025 --- linux-3.0.4/arch/x86/include/asm/pgtable_64.h       2011-07-21 22:17:23.000000000 -0400
8026 +++ linux-3.0.4/arch/x86/include/asm/pgtable_64.h       2011-08-23 21:47:55.000000000 -0400
8027 @@ -16,10 +16,13 @@
8028  
8029  extern pud_t level3_kernel_pgt[512];
8030  extern pud_t level3_ident_pgt[512];
8031 +extern pud_t level3_vmalloc_pgt[512];
8032 +extern pud_t level3_vmemmap_pgt[512];
8033 +extern pud_t level2_vmemmap_pgt[512];
8034  extern pmd_t level2_kernel_pgt[512];
8035  extern pmd_t level2_fixmap_pgt[512];
8036 -extern pmd_t level2_ident_pgt[512];
8037 -extern pgd_t init_level4_pgt[];
8038 +extern pmd_t level2_ident_pgt[512*2];
8039 +extern pgd_t init_level4_pgt[512];
8040  
8041  #define swapper_pg_dir init_level4_pgt
8042  
8043 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8044  
8045  static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8046  {
8047 +       pax_open_kernel();
8048         *pmdp = pmd;
8049 +       pax_close_kernel();
8050  }
8051  
8052  static inline void native_pmd_clear(pmd_t *pmd)
8053 @@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8054  
8055  static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8056  {
8057 +       pax_open_kernel();
8058 +       *pgdp = pgd;
8059 +       pax_close_kernel();
8060 +}
8061 +
8062 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8063 +{
8064         *pgdp = pgd;
8065  }
8066  
8067 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h
8068 --- linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8069 +++ linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8070 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8071  #define MODULES_VADDR    _AC(0xffffffffa0000000, UL)
8072  #define MODULES_END      _AC(0xffffffffff000000, UL)
8073  #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
8074 +#define MODULES_EXEC_VADDR MODULES_VADDR
8075 +#define MODULES_EXEC_END MODULES_END
8076 +
8077 +#define ktla_ktva(addr)                (addr)
8078 +#define ktva_ktla(addr)                (addr)
8079  
8080  #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8081 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable.h linux-3.0.4/arch/x86/include/asm/pgtable.h
8082 --- linux-3.0.4/arch/x86/include/asm/pgtable.h  2011-07-21 22:17:23.000000000 -0400
8083 +++ linux-3.0.4/arch/x86/include/asm/pgtable.h  2011-08-23 21:47:55.000000000 -0400
8084 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8085  
8086  #ifndef __PAGETABLE_PUD_FOLDED
8087  #define set_pgd(pgdp, pgd)             native_set_pgd(pgdp, pgd)
8088 +#define set_pgd_batched(pgdp, pgd)     native_set_pgd_batched(pgdp, pgd)
8089  #define pgd_clear(pgd)                 native_pgd_clear(pgd)
8090  #endif
8091  
8092 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8093  
8094  #define arch_end_context_switch(prev)  do {} while(0)
8095  
8096 +#define pax_open_kernel()      native_pax_open_kernel()
8097 +#define pax_close_kernel()     native_pax_close_kernel()
8098  #endif /* CONFIG_PARAVIRT */
8099  
8100 +#define  __HAVE_ARCH_PAX_OPEN_KERNEL
8101 +#define  __HAVE_ARCH_PAX_CLOSE_KERNEL
8102 +
8103 +#ifdef CONFIG_PAX_KERNEXEC
8104 +static inline unsigned long native_pax_open_kernel(void)
8105 +{
8106 +       unsigned long cr0;
8107 +
8108 +       preempt_disable();
8109 +       barrier();
8110 +       cr0 = read_cr0() ^ X86_CR0_WP;
8111 +       BUG_ON(unlikely(cr0 & X86_CR0_WP));
8112 +       write_cr0(cr0);
8113 +       return cr0 ^ X86_CR0_WP;
8114 +}
8115 +
8116 +static inline unsigned long native_pax_close_kernel(void)
8117 +{
8118 +       unsigned long cr0;
8119 +
8120 +       cr0 = read_cr0() ^ X86_CR0_WP;
8121 +       BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8122 +       write_cr0(cr0);
8123 +       barrier();
8124 +       preempt_enable_no_resched();
8125 +       return cr0 ^ X86_CR0_WP;
8126 +}
8127 +#else
8128 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8129 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8130 +#endif
8131 +
8132  /*
8133   * The following only work if pte_present() is true.
8134   * Undefined behaviour if not..
8135   */
8136 +static inline int pte_user(pte_t pte)
8137 +{
8138 +       return pte_val(pte) & _PAGE_USER;
8139 +}
8140 +
8141  static inline int pte_dirty(pte_t pte)
8142  {
8143         return pte_flags(pte) & _PAGE_DIRTY;
8144 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t 
8145         return pte_clear_flags(pte, _PAGE_RW);
8146  }
8147  
8148 +static inline pte_t pte_mkread(pte_t pte)
8149 +{
8150 +       return __pte(pte_val(pte) | _PAGE_USER);
8151 +}
8152 +
8153  static inline pte_t pte_mkexec(pte_t pte)
8154  {
8155 -       return pte_clear_flags(pte, _PAGE_NX);
8156 +#ifdef CONFIG_X86_PAE
8157 +       if (__supported_pte_mask & _PAGE_NX)
8158 +               return pte_clear_flags(pte, _PAGE_NX);
8159 +       else
8160 +#endif
8161 +               return pte_set_flags(pte, _PAGE_USER);
8162 +}
8163 +
8164 +static inline pte_t pte_exprotect(pte_t pte)
8165 +{
8166 +#ifdef CONFIG_X86_PAE
8167 +       if (__supported_pte_mask & _PAGE_NX)
8168 +               return pte_set_flags(pte, _PAGE_NX);
8169 +       else
8170 +#endif
8171 +               return pte_clear_flags(pte, _PAGE_USER);
8172  }
8173  
8174  static inline pte_t pte_mkdirty(pte_t pte)
8175 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long 
8176  #endif
8177  
8178  #ifndef __ASSEMBLY__
8179 +
8180 +#ifdef CONFIG_PAX_PER_CPU_PGD
8181 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8182 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8183 +{
8184 +       return cpu_pgd[cpu];
8185 +}
8186 +#endif
8187 +
8188  #include <linux/mm_types.h>
8189  
8190  static inline int pte_none(pte_t pte)
8191 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8192  
8193  static inline int pgd_bad(pgd_t pgd)
8194  {
8195 -       return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8196 +       return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8197  }
8198  
8199  static inline int pgd_none(pgd_t pgd)
8200 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8201   * pgd_offset() returns a (pgd_t *)
8202   * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8203   */
8204 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8205 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8206 +
8207 +#ifdef CONFIG_PAX_PER_CPU_PGD
8208 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8209 +#endif
8210 +
8211  /*
8212   * a shortcut which implies the use of the kernel's pgd, instead
8213   * of a process's
8214 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8215  #define KERNEL_PGD_BOUNDARY    pgd_index(PAGE_OFFSET)
8216  #define KERNEL_PGD_PTRS                (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8217  
8218 +#ifdef CONFIG_X86_32
8219 +#define USER_PGD_PTRS          KERNEL_PGD_BOUNDARY
8220 +#else
8221 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8222 +#define USER_PGD_PTRS          (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8223 +
8224 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8225 +#define PAX_USER_SHADOW_BASE   (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8226 +#else
8227 +#define PAX_USER_SHADOW_BASE   (_AC(0,UL))
8228 +#endif
8229 +
8230 +#endif
8231 +
8232  #ifndef __ASSEMBLY__
8233  
8234  extern int direct_gbpages;
8235 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8236   * dst and src can be on the same page, but the range must not overlap,
8237   * and must not cross a page boundary.
8238   */
8239 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8240 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8241  {
8242 -       memcpy(dst, src, count * sizeof(pgd_t));
8243 +       pax_open_kernel();
8244 +       while (count--)
8245 +               *dst++ = *src++;
8246 +       pax_close_kernel();
8247  }
8248  
8249 +#ifdef CONFIG_PAX_PER_CPU_PGD
8250 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8251 +#endif
8252 +
8253 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8254 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8255 +#else
8256 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8257 +#endif
8258  
8259  #include <asm-generic/pgtable.h>
8260  #endif /* __ASSEMBLY__ */
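The pgtable.h hunk above is the core of the KERNEXEC write gate: native_pax_open_kernel() clears CR0.WP with preemption disabled (the BUG_ON catches nested or unbalanced use) and native_pax_close_kernel() restores it, which is why the page-table setters elsewhere in this patch are bracketed with pax_open_kernel()/pax_close_kernel(). A minimal usage sketch of that pattern, assuming kernel context; update_ro_word and ro_word are illustrative names, not symbols added by the patch:

/* Sketch only: bracket a write to otherwise read-only kernel data
 * with the CR0.WP gate introduced in the hunk above.
 */
#include <asm/pgtable.h>        /* pax_open_kernel()/pax_close_kernel() */

static void update_ro_word(unsigned long *ro_word, unsigned long val)
{
        pax_open_kernel();      /* clears CR0.WP, preemption disabled  */
        *ro_word = val;         /* write is permitted in this window   */
        pax_close_kernel();     /* restores CR0.WP, preemption enabled */
}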
8261 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_types.h linux-3.0.4/arch/x86/include/asm/pgtable_types.h
8262 --- linux-3.0.4/arch/x86/include/asm/pgtable_types.h    2011-07-21 22:17:23.000000000 -0400
8263 +++ linux-3.0.4/arch/x86/include/asm/pgtable_types.h    2011-08-23 21:47:55.000000000 -0400
8264 @@ -16,13 +16,12 @@
8265  #define _PAGE_BIT_PSE          7       /* 4 MB (or 2MB) page */
8266  #define _PAGE_BIT_PAT          7       /* on 4KB pages */
8267  #define _PAGE_BIT_GLOBAL       8       /* Global TLB entry PPro+ */
8268 -#define _PAGE_BIT_UNUSED1      9       /* available for programmer */
8269 +#define _PAGE_BIT_SPECIAL      9       /* special mappings, no associated struct page */
8270  #define _PAGE_BIT_IOMAP                10      /* flag used to indicate IO mapping */
8271  #define _PAGE_BIT_HIDDEN       11      /* hidden by kmemcheck */
8272  #define _PAGE_BIT_PAT_LARGE    12      /* On 2MB or 1GB pages */
8273 -#define _PAGE_BIT_SPECIAL      _PAGE_BIT_UNUSED1
8274 -#define _PAGE_BIT_CPA_TEST     _PAGE_BIT_UNUSED1
8275 -#define _PAGE_BIT_SPLITTING    _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8276 +#define _PAGE_BIT_CPA_TEST     _PAGE_BIT_SPECIAL
8277 +#define _PAGE_BIT_SPLITTING    _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8278  #define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
8279  
8280  /* If _PAGE_BIT_PRESENT is clear, we use these: */
8281 @@ -40,7 +39,6 @@
8282  #define _PAGE_DIRTY    (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8283  #define _PAGE_PSE      (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8284  #define _PAGE_GLOBAL   (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8285 -#define _PAGE_UNUSED1  (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8286  #define _PAGE_IOMAP    (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8287  #define _PAGE_PAT      (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8288  #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8289 @@ -57,8 +55,10 @@
8290  
8291  #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8292  #define _PAGE_NX       (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8293 -#else
8294 +#elif defined(CONFIG_KMEMCHECK)
8295  #define _PAGE_NX       (_AT(pteval_t, 0))
8296 +#else
8297 +#define _PAGE_NX       (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8298  #endif
8299  
8300  #define _PAGE_FILE     (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8301 @@ -96,6 +96,9 @@
8302  #define PAGE_READONLY_EXEC     __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
8303                                          _PAGE_ACCESSED)
8304  
8305 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8306 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8307 +
8308  #define __PAGE_KERNEL_EXEC                                             \
8309         (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8310  #define __PAGE_KERNEL          (__PAGE_KERNEL_EXEC | _PAGE_NX)
8311 @@ -106,8 +109,8 @@
8312  #define __PAGE_KERNEL_WC               (__PAGE_KERNEL | _PAGE_CACHE_WC)
8313  #define __PAGE_KERNEL_NOCACHE          (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8314  #define __PAGE_KERNEL_UC_MINUS         (__PAGE_KERNEL | _PAGE_PCD)
8315 -#define __PAGE_KERNEL_VSYSCALL         (__PAGE_KERNEL_RX | _PAGE_USER)
8316 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8317 +#define __PAGE_KERNEL_VSYSCALL         (__PAGE_KERNEL_RO | _PAGE_USER)
8318 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8319  #define __PAGE_KERNEL_LARGE            (__PAGE_KERNEL | _PAGE_PSE)
8320  #define __PAGE_KERNEL_LARGE_NOCACHE    (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8321  #define __PAGE_KERNEL_LARGE_EXEC       (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8322 @@ -166,8 +169,8 @@
8323   * bits are combined, this will alow user to access the high address mapped
8324   * VDSO in the presence of CONFIG_COMPAT_VDSO
8325   */
8326 -#define PTE_IDENT_ATTR  0x003          /* PRESENT+RW */
8327 -#define PDE_IDENT_ATTR  0x067          /* PRESENT+RW+USER+DIRTY+ACCESSED */
8328 +#define PTE_IDENT_ATTR  0x063          /* PRESENT+RW+DIRTY+ACCESSED */
8329 +#define PDE_IDENT_ATTR  0x063          /* PRESENT+RW+DIRTY+ACCESSED */
8330  #define PGD_IDENT_ATTR  0x001          /* PRESENT (no other attributes) */
8331  #endif
8332  
8333 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8334  {
8335         return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8336  }
8337 +#endif
8338  
8339 +#if PAGETABLE_LEVELS == 3
8340 +#include <asm-generic/pgtable-nopud.h>
8341 +#endif
8342 +
8343 +#if PAGETABLE_LEVELS == 2
8344 +#include <asm-generic/pgtable-nopmd.h>
8345 +#endif
8346 +
8347 +#ifndef __ASSEMBLY__
8348  #if PAGETABLE_LEVELS > 3
8349  typedef struct { pudval_t pud; } pud_t;
8350  
8351 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8352         return pud.pud;
8353  }
8354  #else
8355 -#include <asm-generic/pgtable-nopud.h>
8356 -
8357  static inline pudval_t native_pud_val(pud_t pud)
8358  {
8359         return native_pgd_val(pud.pgd);
8360 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8361         return pmd.pmd;
8362  }
8363  #else
8364 -#include <asm-generic/pgtable-nopmd.h>
8365 -
8366  static inline pmdval_t native_pmd_val(pmd_t pmd)
8367  {
8368         return native_pgd_val(pmd.pud.pgd);
8369 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8370  
8371  extern pteval_t __supported_pte_mask;
8372  extern void set_nx(void);
8373 -extern int nx_enabled;
8374  
8375  #define pgprot_writecombine    pgprot_writecombine
8376  extern pgprot_t pgprot_writecombine(pgprot_t prot);
8377 diff -urNp linux-3.0.4/arch/x86/include/asm/processor.h linux-3.0.4/arch/x86/include/asm/processor.h
8378 --- linux-3.0.4/arch/x86/include/asm/processor.h        2011-07-21 22:17:23.000000000 -0400
8379 +++ linux-3.0.4/arch/x86/include/asm/processor.h        2011-08-23 21:47:55.000000000 -0400
8380 @@ -266,7 +266,7 @@ struct tss_struct {
8381  
8382  } ____cacheline_aligned;
8383  
8384 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8385 +extern struct tss_struct init_tss[NR_CPUS];
8386  
8387  /*
8388   * Save the original ist values for checking stack pointers during debugging
8389 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8390   */
8391  #define TASK_SIZE              PAGE_OFFSET
8392  #define TASK_SIZE_MAX          TASK_SIZE
8393 +
8394 +#ifdef CONFIG_PAX_SEGMEXEC
8395 +#define SEGMEXEC_TASK_SIZE     (TASK_SIZE / 2)
8396 +#define STACK_TOP              ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8397 +#else
8398  #define STACK_TOP              TASK_SIZE
8399 -#define STACK_TOP_MAX          STACK_TOP
8400 +#endif
8401 +
8402 +#define STACK_TOP_MAX          TASK_SIZE
8403  
8404  #define INIT_THREAD  {                                                   \
8405 -       .sp0                    = sizeof(init_stack) + (long)&init_stack, \
8406 +       .sp0                    = sizeof(init_stack) + (long)&init_stack - 8, \
8407         .vm86_info              = NULL,                                   \
8408         .sysenter_cs            = __KERNEL_CS,                            \
8409         .io_bitmap_ptr          = NULL,                                   \
8410 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8411   */
8412  #define INIT_TSS  {                                                      \
8413         .x86_tss = {                                                      \
8414 -               .sp0            = sizeof(init_stack) + (long)&init_stack, \
8415 +               .sp0            = sizeof(init_stack) + (long)&init_stack - 8, \
8416                 .ss0            = __KERNEL_DS,                            \
8417                 .ss1            = __KERNEL_CS,                            \
8418                 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,               \
8419 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8420  extern unsigned long thread_saved_pc(struct task_struct *tsk);
8421  
8422  #define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
8423 -#define KSTK_TOP(info)                                                 \
8424 -({                                                                     \
8425 -       unsigned long *__ptr = (unsigned long *)(info);                 \
8426 -       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
8427 -})
8428 +#define KSTK_TOP(info)         ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8429  
8430  /*
8431   * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8432 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8433  #define task_pt_regs(task)                                             \
8434  ({                                                                     \
8435         struct pt_regs *__regs__;                                       \
8436 -       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8437 +       __regs__ = (struct pt_regs *)((task)->thread.sp0);              \
8438         __regs__ - 1;                                                   \
8439  })
8440  
8441 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8442  /*
8443   * User space process size. 47bits minus one guard page.
8444   */
8445 -#define TASK_SIZE_MAX  ((1UL << 47) - PAGE_SIZE)
8446 +#define TASK_SIZE_MAX  ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8447  
8448  /* This decides where the kernel will search for a free chunk of vm
8449   * space during mmap's.
8450   */
8451  #define IA32_PAGE_OFFSET       ((current->personality & ADDR_LIMIT_3GB) ? \
8452 -                                       0xc0000000 : 0xFFFFe000)
8453 +                                       0xc0000000 : 0xFFFFf000)
8454  
8455  #define TASK_SIZE              (test_thread_flag(TIF_IA32) ? \
8456                                         IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8457 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8458  #define STACK_TOP_MAX          TASK_SIZE_MAX
8459  
8460  #define INIT_THREAD  { \
8461 -       .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8462 +       .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8463  }
8464  
8465  #define INIT_TSS  { \
8466 -       .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8467 +       .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8468  }
8469  
8470  /*
8471 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs 
8472   */
8473  #define TASK_UNMAPPED_BASE     (PAGE_ALIGN(TASK_SIZE / 3))
8474  
8475 +#ifdef CONFIG_PAX_SEGMEXEC
8476 +#define SEGMEXEC_TASK_UNMAPPED_BASE    (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8477 +#endif
8478 +
8479  #define KSTK_EIP(task)         (task_pt_regs(task)->ip)
8480  
8481  /* Get/set a process' ability to use the timestamp counter instruction */
8482 diff -urNp linux-3.0.4/arch/x86/include/asm/ptrace.h linux-3.0.4/arch/x86/include/asm/ptrace.h
8483 --- linux-3.0.4/arch/x86/include/asm/ptrace.h   2011-07-21 22:17:23.000000000 -0400
8484 +++ linux-3.0.4/arch/x86/include/asm/ptrace.h   2011-08-23 21:47:55.000000000 -0400
8485 @@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8486  }
8487  
8488  /*
8489 - * user_mode_vm(regs) determines whether a register set came from user mode.
8490 + * user_mode(regs) determines whether a register set came from user mode.
8491   * This is true if V8086 mode was enabled OR if the register set was from
8492   * protected mode with RPL-3 CS value.  This tricky test checks that with
8493   * one comparison.  Many places in the kernel can bypass this full check
8494 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8495 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8496 + * be used.
8497   */
8498 -static inline int user_mode(struct pt_regs *regs)
8499 +static inline int user_mode_novm(struct pt_regs *regs)
8500  {
8501  #ifdef CONFIG_X86_32
8502         return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8503  #else
8504 -       return !!(regs->cs & 3);
8505 +       return !!(regs->cs & SEGMENT_RPL_MASK);
8506  #endif
8507  }
8508  
8509 -static inline int user_mode_vm(struct pt_regs *regs)
8510 +static inline int user_mode(struct pt_regs *regs)
8511  {
8512  #ifdef CONFIG_X86_32
8513         return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8514                 USER_RPL;
8515  #else
8516 -       return user_mode(regs);
8517 +       return user_mode_novm(regs);
8518  #endif
8519  }
8520  
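The ptrace.h hunk swaps the two predicates: the old VM86-aware user_mode_vm() becomes plain user_mode(), and the old RPL-only test is renamed user_mode_novm(), so callers that never ruled out VM86 get the safer check by default. A sketch of how a caller uses the renamed helpers; example_fault_path is an illustrative function, not one added by the patch:

/* Sketch: after the rename, the default test also catches VM86 frames. */
static void example_fault_path(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                /* RPL-3 or VM86 register frame: deliver a signal */
        } else {
                /* kernel-mode frame: attempt exception fixup / oops */
        }
}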
8521 diff -urNp linux-3.0.4/arch/x86/include/asm/reboot.h linux-3.0.4/arch/x86/include/asm/reboot.h
8522 --- linux-3.0.4/arch/x86/include/asm/reboot.h   2011-07-21 22:17:23.000000000 -0400
8523 +++ linux-3.0.4/arch/x86/include/asm/reboot.h   2011-08-23 21:47:55.000000000 -0400
8524 @@ -6,19 +6,19 @@
8525  struct pt_regs;
8526  
8527  struct machine_ops {
8528 -       void (*restart)(char *cmd);
8529 -       void (*halt)(void);
8530 -       void (*power_off)(void);
8531 +       void (* __noreturn restart)(char *cmd);
8532 +       void (* __noreturn halt)(void);
8533 +       void (* __noreturn power_off)(void);
8534         void (*shutdown)(void);
8535         void (*crash_shutdown)(struct pt_regs *);
8536 -       void (*emergency_restart)(void);
8537 -};
8538 +       void (* __noreturn emergency_restart)(void);
8539 +} __no_const;
8540  
8541  extern struct machine_ops machine_ops;
8542  
8543  void native_machine_crash_shutdown(struct pt_regs *regs);
8544  void native_machine_shutdown(void);
8545 -void machine_real_restart(unsigned int type);
8546 +void machine_real_restart(unsigned int type) __noreturn;
8547  /* These must match dispatch_table in reboot_32.S */
8548  #define MRR_BIOS       0
8549  #define MRR_APM                1
8550 diff -urNp linux-3.0.4/arch/x86/include/asm/rwsem.h linux-3.0.4/arch/x86/include/asm/rwsem.h
8551 --- linux-3.0.4/arch/x86/include/asm/rwsem.h    2011-07-21 22:17:23.000000000 -0400
8552 +++ linux-3.0.4/arch/x86/include/asm/rwsem.h    2011-08-23 21:47:55.000000000 -0400
8553 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8554  {
8555         asm volatile("# beginning down_read\n\t"
8556                      LOCK_PREFIX _ASM_INC "(%1)\n\t"
8557 +
8558 +#ifdef CONFIG_PAX_REFCOUNT
8559 +                    "jno 0f\n"
8560 +                    LOCK_PREFIX _ASM_DEC "(%1)\n"
8561 +                    "int $4\n0:\n"
8562 +                    _ASM_EXTABLE(0b, 0b)
8563 +#endif
8564 +
8565                      /* adds 0x00000001 */
8566                      "  jns        1f\n"
8567                      "  call call_rwsem_down_read_failed\n"
8568 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8569                      "1:\n\t"
8570                      "  mov          %1,%2\n\t"
8571                      "  add          %3,%2\n\t"
8572 +
8573 +#ifdef CONFIG_PAX_REFCOUNT
8574 +                    "jno 0f\n"
8575 +                    "sub %3,%2\n"
8576 +                    "int $4\n0:\n"
8577 +                    _ASM_EXTABLE(0b, 0b)
8578 +#endif
8579 +
8580                      "  jle          2f\n\t"
8581                      LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
8582                      "  jnz          1b\n\t"
8583 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8584         long tmp;
8585         asm volatile("# beginning down_write\n\t"
8586                      LOCK_PREFIX "  xadd      %1,(%2)\n\t"
8587 +
8588 +#ifdef CONFIG_PAX_REFCOUNT
8589 +                    "jno 0f\n"
8590 +                    "mov %1,(%2)\n"
8591 +                    "int $4\n0:\n"
8592 +                    _ASM_EXTABLE(0b, 0b)
8593 +#endif
8594 +
8595                      /* adds 0xffff0001, returns the old value */
8596                      "  test      %1,%1\n\t"
8597                      /* was the count 0 before? */
8598 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8599         long tmp;
8600         asm volatile("# beginning __up_read\n\t"
8601                      LOCK_PREFIX "  xadd      %1,(%2)\n\t"
8602 +
8603 +#ifdef CONFIG_PAX_REFCOUNT
8604 +                    "jno 0f\n"
8605 +                    "mov %1,(%2)\n"
8606 +                    "int $4\n0:\n"
8607 +                    _ASM_EXTABLE(0b, 0b)
8608 +#endif
8609 +
8610                      /* subtracts 1, returns the old value */
8611                      "  jns        1f\n\t"
8612                      "  call call_rwsem_wake\n" /* expects old value in %edx */
8613 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8614         long tmp;
8615         asm volatile("# beginning __up_write\n\t"
8616                      LOCK_PREFIX "  xadd      %1,(%2)\n\t"
8617 +
8618 +#ifdef CONFIG_PAX_REFCOUNT
8619 +                    "jno 0f\n"
8620 +                    "mov %1,(%2)\n"
8621 +                    "int $4\n0:\n"
8622 +                    _ASM_EXTABLE(0b, 0b)
8623 +#endif
8624 +
8625                      /* subtracts 0xffff0001, returns the old value */
8626                      "  jns        1f\n\t"
8627                      "  call call_rwsem_wake\n" /* expects old value in %edx */
8628 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8629  {
8630         asm volatile("# beginning __downgrade_write\n\t"
8631                      LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8632 +
8633 +#ifdef CONFIG_PAX_REFCOUNT
8634 +                    "jno 0f\n"
8635 +                    LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8636 +                    "int $4\n0:\n"
8637 +                    _ASM_EXTABLE(0b, 0b)
8638 +#endif
8639 +
8640                      /*
8641                       * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8642                       *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8643 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8644   */
8645  static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8646  {
8647 -       asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8648 +       asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8649 +
8650 +#ifdef CONFIG_PAX_REFCOUNT
8651 +                    "jno 0f\n"
8652 +                    LOCK_PREFIX _ASM_SUB "%1,%0\n"
8653 +                    "int $4\n0:\n"
8654 +                    _ASM_EXTABLE(0b, 0b)
8655 +#endif
8656 +
8657                      : "+m" (sem->count)
8658                      : "er" (delta));
8659  }
8660 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8661  {
8662         long tmp = delta;
8663  
8664 -       asm volatile(LOCK_PREFIX "xadd %0,%1"
8665 +       asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8666 +
8667 +#ifdef CONFIG_PAX_REFCOUNT
8668 +                    "jno 0f\n"
8669 +                    "mov %0,%1\n"
8670 +                    "int $4\n0:\n"
8671 +                    _ASM_EXTABLE(0b, 0b)
8672 +#endif
8673 +
8674                      : "+r" (tmp), "+m" (sem->count)
8675                      : : "memory");
8676  
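Each rwsem hunk above follows the same PAX_REFCOUNT idiom: after the locked arithmetic, "jno" falls through when no signed overflow occurred; otherwise the operation is undone, "int $4" raises the x86 overflow vector so the handler can report the wraparound, and the _ASM_EXTABLE entry resumes execution right after the trap. The same idiom on a bare counter, as a sketch; refcount_inc_checked and example_count are illustrative names, not part of the patch:

/* Sketch of the overflow-checked increment pattern used above. */
#include <asm/alternative.h>    /* LOCK_PREFIX   */
#include <asm/asm.h>            /* _ASM_EXTABLE  */
#include <asm/atomic.h>

static inline void refcount_inc_checked(atomic_t *example_count)
{
        asm volatile(LOCK_PREFIX "incl %0\n"
                     "jno 0f\n"                 /* no overflow: skip   */
                     LOCK_PREFIX "decl %0\n"    /* undo the increment  */
                     "int $4\n0:\n"             /* report via #OF trap */
                     _ASM_EXTABLE(0b, 0b)
                     : "+m" (example_count->counter));
}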
8677 diff -urNp linux-3.0.4/arch/x86/include/asm/segment.h linux-3.0.4/arch/x86/include/asm/segment.h
8678 --- linux-3.0.4/arch/x86/include/asm/segment.h  2011-07-21 22:17:23.000000000 -0400
8679 +++ linux-3.0.4/arch/x86/include/asm/segment.h  2011-08-23 21:47:55.000000000 -0400
8680 @@ -64,8 +64,8 @@
8681   *  26 - ESPFIX small SS
8682   *  27 - per-cpu                       [ offset to per-cpu data area ]
8683   *  28 - stack_canary-20               [ for stack protector ]
8684 - *  29 - unused
8685 - *  30 - unused
8686 + *  29 - PCI BIOS CS
8687 + *  30 - PCI BIOS DS
8688   *  31 - TSS for double fault handler
8689   */
8690  #define GDT_ENTRY_TLS_MIN      6
8691 @@ -79,6 +79,8 @@
8692  
8693  #define GDT_ENTRY_KERNEL_CS            (GDT_ENTRY_KERNEL_BASE+0)
8694  
8695 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS   (4)
8696 +
8697  #define GDT_ENTRY_KERNEL_DS            (GDT_ENTRY_KERNEL_BASE+1)
8698  
8699  #define GDT_ENTRY_TSS                  (GDT_ENTRY_KERNEL_BASE+4)
8700 @@ -104,6 +106,12 @@
8701  #define __KERNEL_STACK_CANARY          0
8702  #endif
8703  
8704 +#define GDT_ENTRY_PCIBIOS_CS           (GDT_ENTRY_KERNEL_BASE+17)
8705 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8706 +
8707 +#define GDT_ENTRY_PCIBIOS_DS           (GDT_ENTRY_KERNEL_BASE+18)
8708 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8709 +
8710  #define GDT_ENTRY_DOUBLEFAULT_TSS      31
8711  
8712  /*
8713 @@ -141,7 +149,7 @@
8714   */
8715  
8716  /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8717 -#define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8718 +#define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8719  
8720  
8721  #else
8722 @@ -165,6 +173,8 @@
8723  #define __USER32_CS   (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8724  #define __USER32_DS    __USER_DS
8725  
8726 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8727 +
8728  #define GDT_ENTRY_TSS 8        /* needs two entries */
8729  #define GDT_ENTRY_LDT 10 /* needs two entries */
8730  #define GDT_ENTRY_TLS_MIN 12
8731 @@ -185,6 +195,7 @@
8732  #endif
8733  
8734  #define __KERNEL_CS    (GDT_ENTRY_KERNEL_CS*8)
8735 +#define __KERNEXEC_KERNEL_CS   (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8736  #define __KERNEL_DS    (GDT_ENTRY_KERNEL_DS*8)
8737  #define __USER_DS      (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8738  #define __USER_CS      (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8739 diff -urNp linux-3.0.4/arch/x86/include/asm/smp.h linux-3.0.4/arch/x86/include/asm/smp.h
8740 --- linux-3.0.4/arch/x86/include/asm/smp.h      2011-07-21 22:17:23.000000000 -0400
8741 +++ linux-3.0.4/arch/x86/include/asm/smp.h      2011-08-23 21:47:55.000000000 -0400
8742 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8743  /* cpus sharing the last level cache: */
8744  DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8745  DECLARE_PER_CPU(u16, cpu_llc_id);
8746 -DECLARE_PER_CPU(int, cpu_number);
8747 +DECLARE_PER_CPU(unsigned int, cpu_number);
8748  
8749  static inline struct cpumask *cpu_sibling_mask(int cpu)
8750  {
8751 @@ -77,7 +77,7 @@ struct smp_ops {
8752  
8753         void (*send_call_func_ipi)(const struct cpumask *mask);
8754         void (*send_call_func_single_ipi)(int cpu);
8755 -};
8756 +} __no_const;
8757  
8758  /* Globals due to paravirt */
8759  extern void set_cpu_sibling_map(int cpu);
8760 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8761  extern int safe_smp_processor_id(void);
8762  
8763  #elif defined(CONFIG_X86_64_SMP)
8764 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8765 -
8766 -#define stack_smp_processor_id()                                       \
8767 -({                                                             \
8768 -       struct thread_info *ti;                                         \
8769 -       __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));      \
8770 -       ti->cpu;                                                        \
8771 -})
8772 +#define raw_smp_processor_id()         (percpu_read(cpu_number))
8773 +#define stack_smp_processor_id()       raw_smp_processor_id()
8774  #define safe_smp_processor_id()                smp_processor_id()
8775  
8776  #endif
8777 diff -urNp linux-3.0.4/arch/x86/include/asm/spinlock.h linux-3.0.4/arch/x86/include/asm/spinlock.h
8778 --- linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8779 +++ linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8780 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8781  static inline void arch_read_lock(arch_rwlock_t *rw)
8782  {
8783         asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8784 +
8785 +#ifdef CONFIG_PAX_REFCOUNT
8786 +                    "jno 0f\n"
8787 +                    LOCK_PREFIX " addl $1,(%0)\n"
8788 +                    "int $4\n0:\n"
8789 +                    _ASM_EXTABLE(0b, 0b)
8790 +#endif
8791 +
8792                      "jns 1f\n"
8793                      "call __read_lock_failed\n\t"
8794                      "1:\n"
8795 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8796  static inline void arch_write_lock(arch_rwlock_t *rw)
8797  {
8798         asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8799 +
8800 +#ifdef CONFIG_PAX_REFCOUNT
8801 +                    "jno 0f\n"
8802 +                    LOCK_PREFIX " addl %1,(%0)\n"
8803 +                    "int $4\n0:\n"
8804 +                    _ASM_EXTABLE(0b, 0b)
8805 +#endif
8806 +
8807                      "jz 1f\n"
8808                      "call __write_lock_failed\n\t"
8809                      "1:\n"
8810 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8811  
8812  static inline void arch_read_unlock(arch_rwlock_t *rw)
8813  {
8814 -       asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8815 +       asm volatile(LOCK_PREFIX "incl %0\n"
8816 +
8817 +#ifdef CONFIG_PAX_REFCOUNT
8818 +                    "jno 0f\n"
8819 +                    LOCK_PREFIX "decl %0\n"
8820 +                    "int $4\n0:\n"
8821 +                    _ASM_EXTABLE(0b, 0b)
8822 +#endif
8823 +
8824 +                    :"+m" (rw->lock) : : "memory");
8825  }
8826  
8827  static inline void arch_write_unlock(arch_rwlock_t *rw)
8828  {
8829 -       asm volatile(LOCK_PREFIX "addl %1, %0"
8830 +       asm volatile(LOCK_PREFIX "addl %1, %0\n"
8831 +
8832 +#ifdef CONFIG_PAX_REFCOUNT
8833 +                    "jno 0f\n"
8834 +                    LOCK_PREFIX "subl %1, %0\n"
8835 +                    "int $4\n0:\n"
8836 +                    _ASM_EXTABLE(0b, 0b)
8837 +#endif
8838 +
8839                      : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8840  }
8841  
8842 diff -urNp linux-3.0.4/arch/x86/include/asm/stackprotector.h linux-3.0.4/arch/x86/include/asm/stackprotector.h
8843 --- linux-3.0.4/arch/x86/include/asm/stackprotector.h   2011-07-21 22:17:23.000000000 -0400
8844 +++ linux-3.0.4/arch/x86/include/asm/stackprotector.h   2011-08-23 21:47:55.000000000 -0400
8845 @@ -48,7 +48,7 @@
8846   * head_32 for boot CPU and setup_per_cpu_areas() for others.
8847   */
8848  #define GDT_STACK_CANARY_INIT                                          \
8849 -       [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8850 +       [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8851  
8852  /*
8853   * Initialize the stackprotector canary value.
8854 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8855  
8856  static inline void load_stack_canary_segment(void)
8857  {
8858 -#ifdef CONFIG_X86_32
8859 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8860         asm volatile ("mov %0, %%gs" : : "r" (0));
8861  #endif
8862  }
8863 diff -urNp linux-3.0.4/arch/x86/include/asm/stacktrace.h linux-3.0.4/arch/x86/include/asm/stacktrace.h
8864 --- linux-3.0.4/arch/x86/include/asm/stacktrace.h       2011-07-21 22:17:23.000000000 -0400
8865 +++ linux-3.0.4/arch/x86/include/asm/stacktrace.h       2011-08-23 21:47:55.000000000 -0400
8866 @@ -11,28 +11,20 @@
8867  
8868  extern int kstack_depth_to_print;
8869  
8870 -struct thread_info;
8871 +struct task_struct;
8872  struct stacktrace_ops;
8873  
8874 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8875 -                                     unsigned long *stack,
8876 -                                     unsigned long bp,
8877 -                                     const struct stacktrace_ops *ops,
8878 -                                     void *data,
8879 -                                     unsigned long *end,
8880 -                                     int *graph);
8881 -
8882 -extern unsigned long
8883 -print_context_stack(struct thread_info *tinfo,
8884 -                   unsigned long *stack, unsigned long bp,
8885 -                   const struct stacktrace_ops *ops, void *data,
8886 -                   unsigned long *end, int *graph);
8887 -
8888 -extern unsigned long
8889 -print_context_stack_bp(struct thread_info *tinfo,
8890 -                      unsigned long *stack, unsigned long bp,
8891 -                      const struct stacktrace_ops *ops, void *data,
8892 -                      unsigned long *end, int *graph);
8893 +typedef unsigned long walk_stack_t(struct task_struct *task,
8894 +                                  void *stack_start,
8895 +                                  unsigned long *stack,
8896 +                                  unsigned long bp,
8897 +                                  const struct stacktrace_ops *ops,
8898 +                                  void *data,
8899 +                                  unsigned long *end,
8900 +                                  int *graph);
8901 +
8902 +extern walk_stack_t print_context_stack;
8903 +extern walk_stack_t print_context_stack_bp;
8904  
8905  /* Generic stack tracer with callbacks */
8906  
8907 @@ -40,7 +32,7 @@ struct stacktrace_ops {
8908         void (*address)(void *data, unsigned long address, int reliable);
8909         /* On negative return stop dumping */
8910         int (*stack)(void *data, char *name);
8911 -       walk_stack_t    walk_stack;
8912 +       walk_stack_t    *walk_stack;
8913  };
8914  
8915  void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8916 diff -urNp linux-3.0.4/arch/x86/include/asm/system.h linux-3.0.4/arch/x86/include/asm/system.h
8917 --- linux-3.0.4/arch/x86/include/asm/system.h   2011-07-21 22:17:23.000000000 -0400
8918 +++ linux-3.0.4/arch/x86/include/asm/system.h   2011-08-23 21:47:55.000000000 -0400
8919 @@ -129,7 +129,7 @@ do {                                                                        \
8920              "call __switch_to\n\t"                                       \
8921              "movq "__percpu_arg([current_task])",%%rsi\n\t"              \
8922              __switch_canary                                              \
8923 -            "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
8924 +            "movq "__percpu_arg([thread_info])",%%r8\n\t"                \
8925              "movq %%rax,%%rdi\n\t"                                       \
8926              "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"                 \
8927              "jnz   ret_from_fork\n\t"                                    \
8928 @@ -140,7 +140,7 @@ do {                                                                        \
8929                [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8930                [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
8931                [_tif_fork] "i" (_TIF_FORK),                               \
8932 -              [thread_info] "i" (offsetof(struct task_struct, stack)),   \
8933 +              [thread_info] "m" (current_tinfo),                         \
8934                [current_task] "m" (current_task)                          \
8935                __switch_canary_iparam                                     \
8936              : "memory", "cc" __EXTRA_CLOBBER)
8937 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8938  {
8939         unsigned long __limit;
8940         asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8941 -       return __limit + 1;
8942 +       return __limit;
8943  }
8944  
8945  static inline void native_clts(void)
8946 @@ -397,12 +397,12 @@ void enable_hlt(void);
8947  
8948  void cpu_idle_wait(void);
8949  
8950 -extern unsigned long arch_align_stack(unsigned long sp);
8951 +#define arch_align_stack(x) ((x) & ~0xfUL)
8952  extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8953  
8954  void default_idle(void);
8955  
8956 -void stop_this_cpu(void *dummy);
8957 +void stop_this_cpu(void *dummy) __noreturn;
8958  
8959  /*
8960   * Force strict CPU ordering.
8961 diff -urNp linux-3.0.4/arch/x86/include/asm/thread_info.h linux-3.0.4/arch/x86/include/asm/thread_info.h
8962 --- linux-3.0.4/arch/x86/include/asm/thread_info.h      2011-07-21 22:17:23.000000000 -0400
8963 +++ linux-3.0.4/arch/x86/include/asm/thread_info.h      2011-08-23 21:47:55.000000000 -0400
8964 @@ -10,6 +10,7 @@
8965  #include <linux/compiler.h>
8966  #include <asm/page.h>
8967  #include <asm/types.h>
8968 +#include <asm/percpu.h>
8969  
8970  /*
8971   * low level task data that entry.S needs immediate access to
8972 @@ -24,7 +25,6 @@ struct exec_domain;
8973  #include <asm/atomic.h>
8974  
8975  struct thread_info {
8976 -       struct task_struct      *task;          /* main task structure */
8977         struct exec_domain      *exec_domain;   /* execution domain */
8978         __u32                   flags;          /* low level flags */
8979         __u32                   status;         /* thread synchronous flags */
8980 @@ -34,18 +34,12 @@ struct thread_info {
8981         mm_segment_t            addr_limit;
8982         struct restart_block    restart_block;
8983         void __user             *sysenter_return;
8984 -#ifdef CONFIG_X86_32
8985 -       unsigned long           previous_esp;   /* ESP of the previous stack in
8986 -                                                  case of nested (IRQ) stacks
8987 -                                               */
8988 -       __u8                    supervisor_stack[0];
8989 -#endif
8990 +       unsigned long           lowest_stack;
8991         int                     uaccess_err;
8992  };
8993  
8994 -#define INIT_THREAD_INFO(tsk)                  \
8995 +#define INIT_THREAD_INFO                       \
8996  {                                              \
8997 -       .task           = &tsk,                 \
8998         .exec_domain    = &default_exec_domain, \
8999         .flags          = 0,                    \
9000         .cpu            = 0,                    \
9001 @@ -56,7 +50,7 @@ struct thread_info {
9002         },                                      \
9003  }
9004  
9005 -#define init_thread_info       (init_thread_union.thread_info)
9006 +#define init_thread_info       (init_thread_union.stack)
9007  #define init_stack             (init_thread_union.stack)
9008  
9009  #else /* !__ASSEMBLY__ */
9010 @@ -170,6 +164,23 @@ struct thread_info {
9011         ret;                                                            \
9012  })
9013  
9014 +#ifdef __ASSEMBLY__
9015 +/* how to get the thread information struct from ASM */
9016 +#define GET_THREAD_INFO(reg)    \
9017 +       mov PER_CPU_VAR(current_tinfo), reg
9018 +
9019 +/* use this one if reg already contains %esp */
9020 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9021 +#else
9022 +/* how to get the thread information struct from C */
9023 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9024 +
9025 +static __always_inline struct thread_info *current_thread_info(void)
9026 +{
9027 +       return percpu_read_stable(current_tinfo);
9028 +}
9029 +#endif
9030 +
9031  #ifdef CONFIG_X86_32
9032  
9033  #define STACK_WARN     (THREAD_SIZE/8)
9034 @@ -180,35 +191,13 @@ struct thread_info {
9035   */
9036  #ifndef __ASSEMBLY__
9037  
9038 -
9039  /* how to get the current stack pointer from C */
9040  register unsigned long current_stack_pointer asm("esp") __used;
9041  
9042 -/* how to get the thread information struct from C */
9043 -static inline struct thread_info *current_thread_info(void)
9044 -{
9045 -       return (struct thread_info *)
9046 -               (current_stack_pointer & ~(THREAD_SIZE - 1));
9047 -}
9048 -
9049 -#else /* !__ASSEMBLY__ */
9050 -
9051 -/* how to get the thread information struct from ASM */
9052 -#define GET_THREAD_INFO(reg)    \
9053 -       movl $-THREAD_SIZE, reg; \
9054 -       andl %esp, reg
9055 -
9056 -/* use this one if reg already contains %esp */
9057 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9058 -       andl $-THREAD_SIZE, reg
9059 -
9060  #endif
9061  
9062  #else /* X86_32 */
9063  
9064 -#include <asm/percpu.h>
9065 -#define KERNEL_STACK_OFFSET (5*8)
9066 -
9067  /*
9068   * macros/functions for gaining access to the thread information structure
9069   * preempt_count needs to be 1 initially, until the scheduler is functional.
9070 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9071  #ifndef __ASSEMBLY__
9072  DECLARE_PER_CPU(unsigned long, kernel_stack);
9073  
9074 -static inline struct thread_info *current_thread_info(void)
9075 -{
9076 -       struct thread_info *ti;
9077 -       ti = (void *)(percpu_read_stable(kernel_stack) +
9078 -                     KERNEL_STACK_OFFSET - THREAD_SIZE);
9079 -       return ti;
9080 -}
9081 -
9082 -#else /* !__ASSEMBLY__ */
9083 -
9084 -/* how to get the thread information struct from ASM */
9085 -#define GET_THREAD_INFO(reg) \
9086 -       movq PER_CPU_VAR(kernel_stack),reg ; \
9087 -       subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9088 -
9089 +/* how to get the current stack pointer from C */
9090 +register unsigned long current_stack_pointer asm("rsp") __used;
9091  #endif
9092  
9093  #endif /* !X86_32 */
9094 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9095  extern void free_thread_info(struct thread_info *ti);
9096  extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9097  #define arch_task_cache_init arch_task_cache_init
9098 +
9099 +#define __HAVE_THREAD_FUNCTIONS
9100 +#define task_thread_info(task) (&(task)->tinfo)
9101 +#define task_stack_page(task)  ((task)->stack)
9102 +#define setup_thread_stack(p, org) do {} while (0)
9103 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9104 +
9105 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9106 +extern struct task_struct *alloc_task_struct_node(int node);
9107 +extern void free_task_struct(struct task_struct *);
9108 +
9109  #endif
9110  #endif /* _ASM_X86_THREAD_INFO_H */
9111 diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_32.h linux-3.0.4/arch/x86/include/asm/uaccess_32.h
9112 --- linux-3.0.4/arch/x86/include/asm/uaccess_32.h       2011-07-21 22:17:23.000000000 -0400
9113 +++ linux-3.0.4/arch/x86/include/asm/uaccess_32.h       2011-08-23 21:48:14.000000000 -0400
9114 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9115  static __always_inline unsigned long __must_check
9116  __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9117  {
9118 +       pax_track_stack();
9119 +
9120 +       if ((long)n < 0)
9121 +               return n;
9122 +
9123         if (__builtin_constant_p(n)) {
9124                 unsigned long ret;
9125  
9126 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9127                         return ret;
9128                 }
9129         }
9130 +       if (!__builtin_constant_p(n))
9131 +               check_object_size(from, n, true);
9132         return __copy_to_user_ll(to, from, n);
9133  }
9134  
9135 @@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9136  __copy_to_user(void __user *to, const void *from, unsigned long n)
9137  {
9138         might_fault();
9139 +
9140         return __copy_to_user_inatomic(to, from, n);
9141  }
9142  
9143  static __always_inline unsigned long
9144  __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9145  {
9146 +       if ((long)n < 0)
9147 +               return n;
9148 +
9149         /* Avoid zeroing the tail if the copy fails..
9150          * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9151          * but as the zeroing behaviour is only significant when n is not
9152 @@ -137,6 +148,12 @@ static __always_inline unsigned long
9153  __copy_from_user(void *to, const void __user *from, unsigned long n)
9154  {
9155         might_fault();
9156 +
9157 +       pax_track_stack();
9158 +
9159 +       if ((long)n < 0)
9160 +               return n;
9161 +
9162         if (__builtin_constant_p(n)) {
9163                 unsigned long ret;
9164  
9165 @@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9166                         return ret;
9167                 }
9168         }
9169 +       if (!__builtin_constant_p(n))
9170 +               check_object_size(to, n, false);
9171         return __copy_from_user_ll(to, from, n);
9172  }
9173  
9174 @@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9175                                 const void __user *from, unsigned long n)
9176  {
9177         might_fault();
9178 +
9179 +       if ((long)n < 0)
9180 +               return n;
9181 +
9182         if (__builtin_constant_p(n)) {
9183                 unsigned long ret;
9184  
9185 @@ -181,15 +204,19 @@ static __always_inline unsigned long
9186  __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9187                                   unsigned long n)
9188  {
9189 -       return __copy_from_user_ll_nocache_nozero(to, from, n);
9190 -}
9191 +       if ((long)n < 0)
9192 +               return n;
9193  
9194 -unsigned long __must_check copy_to_user(void __user *to,
9195 -                                       const void *from, unsigned long n);
9196 -unsigned long __must_check _copy_from_user(void *to,
9197 -                                         const void __user *from,
9198 -                                         unsigned long n);
9199 +       return __copy_from_user_ll_nocache_nozero(to, from, n);
9200 +}
9201  
9202 +extern void copy_to_user_overflow(void)
9203 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9204 +       __compiletime_error("copy_to_user() buffer size is not provably correct")
9205 +#else
9206 +       __compiletime_warning("copy_to_user() buffer size is not provably correct")
9207 +#endif
9208 +;
9209  
9210  extern void copy_from_user_overflow(void)
9211  #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9212 @@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9213  #endif
9214  ;
9215  
9216 -static inline unsigned long __must_check copy_from_user(void *to,
9217 -                                         const void __user *from,
9218 -                                         unsigned long n)
9219 +/**
9220 + * copy_to_user: - Copy a block of data into user space.
9221 + * @to:   Destination address, in user space.
9222 + * @from: Source address, in kernel space.
9223 + * @n:    Number of bytes to copy.
9224 + *
9225 + * Context: User context only.  This function may sleep.
9226 + *
9227 + * Copy data from kernel space to user space.
9228 + *
9229 + * Returns number of bytes that could not be copied.
9230 + * On success, this will be zero.
9231 + */
9232 +static inline unsigned long __must_check
9233 +copy_to_user(void __user *to, const void *from, unsigned long n)
9234 +{
9235 +       int sz = __compiletime_object_size(from);
9236 +
9237 +       if (unlikely(sz != -1 && sz < n))
9238 +               copy_to_user_overflow();
9239 +       else if (access_ok(VERIFY_WRITE, to, n))
9240 +               n = __copy_to_user(to, from, n);
9241 +       return n;
9242 +}
9243 +
9244 +/**
9245 + * copy_from_user: - Copy a block of data from user space.
9246 + * @to:   Destination address, in kernel space.
9247 + * @from: Source address, in user space.
9248 + * @n:    Number of bytes to copy.
9249 + *
9250 + * Context: User context only.  This function may sleep.
9251 + *
9252 + * Copy data from user space to kernel space.
9253 + *
9254 + * Returns number of bytes that could not be copied.
9255 + * On success, this will be zero.
9256 + *
9257 + * If some data could not be copied, this function will pad the copied
9258 + * data to the requested size using zero bytes.
9259 + */
9260 +static inline unsigned long __must_check
9261 +copy_from_user(void *to, const void __user *from, unsigned long n)
9262  {
9263         int sz = __compiletime_object_size(to);
9264  
9265 -       if (likely(sz == -1 || sz >= n))
9266 -               n = _copy_from_user(to, from, n);
9267 -       else
9268 +       if (unlikely(sz != -1 && sz < n))
9269                 copy_from_user_overflow();
9270 -
9271 +       else if (access_ok(VERIFY_READ, from, n))
9272 +               n = __copy_from_user(to, from, n);
9273 +       else if ((long)n > 0) {
9274 +               if (!__builtin_constant_p(n))
9275 +                       check_object_size(to, n, false);
9276 +               memset(to, 0, n);
9277 +       }
9278         return n;
9279  }
9280  
9281 diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_64.h linux-3.0.4/arch/x86/include/asm/uaccess_64.h
9282 --- linux-3.0.4/arch/x86/include/asm/uaccess_64.h       2011-07-21 22:17:23.000000000 -0400
9283 +++ linux-3.0.4/arch/x86/include/asm/uaccess_64.h       2011-08-23 21:48:14.000000000 -0400
9284 @@ -10,6 +10,9 @@
9285  #include <asm/alternative.h>
9286  #include <asm/cpufeature.h>
9287  #include <asm/page.h>
9288 +#include <asm/pgtable.h>
9289 +
9290 +#define set_fs(x)      (current_thread_info()->addr_limit = (x))
9291  
9292  /*
9293   * Copy To/From Userspace
9294 @@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9295         return ret;
9296  }
9297  
9298 -__must_check unsigned long
9299 -_copy_to_user(void __user *to, const void *from, unsigned len);
9300 -__must_check unsigned long
9301 -_copy_from_user(void *to, const void __user *from, unsigned len);
9302 +static __always_inline __must_check unsigned long
9303 +__copy_to_user(void __user *to, const void *from, unsigned len);
9304 +static __always_inline __must_check unsigned long
9305 +__copy_from_user(void *to, const void __user *from, unsigned len);
9306  __must_check unsigned long
9307  copy_in_user(void __user *to, const void __user *from, unsigned len);
9308  
9309  static inline unsigned long __must_check copy_from_user(void *to,
9310                                           const void __user *from,
9311 -                                         unsigned long n)
9312 +                                         unsigned n)
9313  {
9314 -       int sz = __compiletime_object_size(to);
9315 -
9316         might_fault();
9317 -       if (likely(sz == -1 || sz >= n))
9318 -               n = _copy_from_user(to, from, n);
9319 -#ifdef CONFIG_DEBUG_VM
9320 -       else
9321 -               WARN(1, "Buffer overflow detected!\n");
9322 -#endif
9323 +
9324 +       if (access_ok(VERIFY_READ, from, n))
9325 +               n = __copy_from_user(to, from, n);
9326 +       else if ((int)n > 0) {
9327 +               if (!__builtin_constant_p(n))
9328 +                       check_object_size(to, n, false);
9329 +               memset(to, 0, n);
9330 +       }
9331         return n;
9332  }
9333  
9334 @@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9335  {
9336         might_fault();
9337  
9338 -       return _copy_to_user(dst, src, size);
9339 +       if (access_ok(VERIFY_WRITE, dst, size))
9340 +               size = __copy_to_user(dst, src, size);
9341 +       return size;
9342  }
9343  
9344  static __always_inline __must_check
9345 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9346 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9347  {
9348 -       int ret = 0;
9349 +       int sz = __compiletime_object_size(dst);
9350 +       unsigned ret = 0;
9351  
9352         might_fault();
9353 -       if (!__builtin_constant_p(size))
9354 -               return copy_user_generic(dst, (__force void *)src, size);
9355 +
9356 +       pax_track_stack();
9357 +
9358 +       if ((int)size < 0)
9359 +               return size;
9360 +
9361 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9362 +       if (!__access_ok(VERIFY_READ, src, size))
9363 +               return size;
9364 +#endif
9365 +
9366 +       if (unlikely(sz != -1 && sz < size)) {
9367 +#ifdef CONFIG_DEBUG_VM
9368 +               WARN(1, "Buffer overflow detected!\n");
9369 +#endif
9370 +               return size;
9371 +       }
9372 +
9373 +       if (!__builtin_constant_p(size)) {
9374 +               check_object_size(dst, size, false);
9375 +
9376 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9377 +               if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9378 +                       src += PAX_USER_SHADOW_BASE;
9379 +#endif
9380 +
9381 +               return copy_user_generic(dst, (__force const void *)src, size);
9382 +       }
9383         switch (size) {
9384 -       case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9385 +       case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9386                               ret, "b", "b", "=q", 1);
9387                 return ret;
9388 -       case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9389 +       case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9390                               ret, "w", "w", "=r", 2);
9391                 return ret;
9392 -       case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9393 +       case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9394                               ret, "l", "k", "=r", 4);
9395                 return ret;
9396 -       case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9397 +       case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9398                               ret, "q", "", "=r", 8);
9399                 return ret;
9400         case 10:
9401 -               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9402 +               __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9403                                ret, "q", "", "=r", 10);
9404                 if (unlikely(ret))
9405                         return ret;
9406                 __get_user_asm(*(u16 *)(8 + (char *)dst),
9407 -                              (u16 __user *)(8 + (char __user *)src),
9408 +                              (const u16 __user *)(8 + (const char __user *)src),
9409                                ret, "w", "w", "=r", 2);
9410                 return ret;
9411         case 16:
9412 -               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9413 +               __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9414                                ret, "q", "", "=r", 16);
9415                 if (unlikely(ret))
9416                         return ret;
9417                 __get_user_asm(*(u64 *)(8 + (char *)dst),
9418 -                              (u64 __user *)(8 + (char __user *)src),
9419 +                              (const u64 __user *)(8 + (const char __user *)src),
9420                                ret, "q", "", "=r", 8);
9421                 return ret;
9422         default:
9423 -               return copy_user_generic(dst, (__force void *)src, size);
9424 +
9425 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9426 +               if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9427 +                       src += PAX_USER_SHADOW_BASE;
9428 +#endif
9429 +
9430 +               return copy_user_generic(dst, (__force const void *)src, size);
9431         }
9432  }
9433  
9434  static __always_inline __must_check
9435 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9436 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9437  {
9438 -       int ret = 0;
9439 +       int sz = __compiletime_object_size(src);
9440 +       unsigned ret = 0;
9441  
9442         might_fault();
9443 -       if (!__builtin_constant_p(size))
9444 +
9445 +       pax_track_stack();
9446 +
9447 +       if ((int)size < 0)
9448 +               return size;
9449 +
9450 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9451 +       if (!__access_ok(VERIFY_WRITE, dst, size))
9452 +               return size;
9453 +#endif
9454 +
9455 +       if (unlikely(sz != -1 && sz < size)) {
9456 +#ifdef CONFIG_DEBUG_VM
9457 +               WARN(1, "Buffer overflow detected!\n");
9458 +#endif
9459 +               return size;
9460 +       }
9461 +
9462 +       if (!__builtin_constant_p(size)) {
9463 +               check_object_size(src, size, true);
9464 +
9465 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9466 +               if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9467 +                       dst += PAX_USER_SHADOW_BASE;
9468 +#endif
9469 +
9470                 return copy_user_generic((__force void *)dst, src, size);
9471 +       }
9472         switch (size) {
9473 -       case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9474 +       case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9475                               ret, "b", "b", "iq", 1);
9476                 return ret;
9477 -       case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9478 +       case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9479                               ret, "w", "w", "ir", 2);
9480                 return ret;
9481 -       case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9482 +       case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9483                               ret, "l", "k", "ir", 4);
9484                 return ret;
9485 -       case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9486 +       case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9487                               ret, "q", "", "er", 8);
9488                 return ret;
9489         case 10:
9490 -               __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9491 +               __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9492                                ret, "q", "", "er", 10);
9493                 if (unlikely(ret))
9494                         return ret;
9495                 asm("":::"memory");
9496 -               __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9497 +               __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9498                                ret, "w", "w", "ir", 2);
9499                 return ret;
9500         case 16:
9501 -               __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9502 +               __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9503                                ret, "q", "", "er", 16);
9504                 if (unlikely(ret))
9505                         return ret;
9506                 asm("":::"memory");
9507 -               __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9508 +               __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9509                                ret, "q", "", "er", 8);
9510                 return ret;
9511         default:
9512 +
9513 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9514 +               if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9515 +                       dst += PAX_USER_SHADOW_BASE;
9516 +#endif
9517 +
9518                 return copy_user_generic((__force void *)dst, src, size);
9519         }
9520  }
9521  
9522  static __always_inline __must_check
9523 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9524 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9525  {
9526 -       int ret = 0;
9527 +       unsigned ret = 0;
9528  
9529         might_fault();
9530 -       if (!__builtin_constant_p(size))
9531 +
9532 +       if ((int)size < 0)
9533 +               return size;
9534 +
9535 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9536 +       if (!__access_ok(VERIFY_READ, src, size))
9537 +               return size;
9538 +       if (!__access_ok(VERIFY_WRITE, dst, size))
9539 +               return size;
9540 +#endif
9541 +
9542 +       if (!__builtin_constant_p(size)) {
9543 +
9544 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9545 +               if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9546 +                       src += PAX_USER_SHADOW_BASE;
9547 +               if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9548 +                       dst += PAX_USER_SHADOW_BASE;
9549 +#endif
9550 +
9551                 return copy_user_generic((__force void *)dst,
9552 -                                        (__force void *)src, size);
9553 +                                        (__force const void *)src, size);
9554 +       }
9555         switch (size) {
9556         case 1: {
9557                 u8 tmp;
9558 -               __get_user_asm(tmp, (u8 __user *)src,
9559 +               __get_user_asm(tmp, (const u8 __user *)src,
9560                                ret, "b", "b", "=q", 1);
9561                 if (likely(!ret))
9562                         __put_user_asm(tmp, (u8 __user *)dst,
9563 @@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9564         }
9565         case 2: {
9566                 u16 tmp;
9567 -               __get_user_asm(tmp, (u16 __user *)src,
9568 +               __get_user_asm(tmp, (const u16 __user *)src,
9569                                ret, "w", "w", "=r", 2);
9570                 if (likely(!ret))
9571                         __put_user_asm(tmp, (u16 __user *)dst,
9572 @@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9573  
9574         case 4: {
9575                 u32 tmp;
9576 -               __get_user_asm(tmp, (u32 __user *)src,
9577 +               __get_user_asm(tmp, (const u32 __user *)src,
9578                                ret, "l", "k", "=r", 4);
9579                 if (likely(!ret))
9580                         __put_user_asm(tmp, (u32 __user *)dst,
9581 @@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9582         }
9583         case 8: {
9584                 u64 tmp;
9585 -               __get_user_asm(tmp, (u64 __user *)src,
9586 +               __get_user_asm(tmp, (const u64 __user *)src,
9587                                ret, "q", "", "=r", 8);
9588                 if (likely(!ret))
9589                         __put_user_asm(tmp, (u64 __user *)dst,
9590 @@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9591                 return ret;
9592         }
9593         default:
9594 +
9595 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9596 +               if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9597 +                       src += PAX_USER_SHADOW_BASE;
9598 +               if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9599 +                       dst += PAX_USER_SHADOW_BASE;
9600 +#endif
9601 +
9602                 return copy_user_generic((__force void *)dst,
9603 -                                        (__force void *)src, size);
9604 +                                        (__force const void *)src, size);
9605         }
9606  }
9607  
9608 @@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9609  static __must_check __always_inline int
9610  __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9611  {
9612 +       pax_track_stack();
9613 +
9614 +       if ((int)size < 0)
9615 +               return size;
9616 +
9617 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9618 +       if (!__access_ok(VERIFY_READ, src, size))
9619 +               return size;
9620 +
9621 +       if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9622 +               src += PAX_USER_SHADOW_BASE;
9623 +#endif
9624 +
9625         return copy_user_generic(dst, (__force const void *)src, size);
9626  }
9627  
9628 -static __must_check __always_inline int
9629 +static __must_check __always_inline unsigned long
9630  __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9631  {
9632 +       if ((int)size < 0)
9633 +               return size;
9634 +
9635 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9636 +       if (!__access_ok(VERIFY_WRITE, dst, size))
9637 +               return size;
9638 +
9639 +       if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9640 +               dst += PAX_USER_SHADOW_BASE;
9641 +#endif
9642 +
9643         return copy_user_generic((__force void *)dst, src, size);
9644  }
9645  
9646 -extern long __copy_user_nocache(void *dst, const void __user *src,
9647 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9648                                 unsigned size, int zerorest);
9649  
9650 -static inline int
9651 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9652 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9653  {
9654         might_sleep();
9655 +
9656 +       if ((int)size < 0)
9657 +               return size;
9658 +
9659 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9660 +       if (!__access_ok(VERIFY_READ, src, size))
9661 +               return size;
9662 +#endif
9663 +
9664         return __copy_user_nocache(dst, src, size, 1);
9665  }
9666  
9667 -static inline int
9668 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9669 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9670                                   unsigned size)
9671  {
9672 +       if ((int)size < 0)
9673 +               return size;
9674 +
9675 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9676 +       if (!__access_ok(VERIFY_READ, src, size))
9677 +               return size;
9678 +#endif
9679 +
9680         return __copy_user_nocache(dst, src, size, 0);
9681  }
9682  
9683 -unsigned long
9684 +extern unsigned long
9685  copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9686  
9687  #endif /* _ASM_X86_UACCESS_64_H */
9688 diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess.h linux-3.0.4/arch/x86/include/asm/uaccess.h
9689 --- linux-3.0.4/arch/x86/include/asm/uaccess.h  2011-07-21 22:17:23.000000000 -0400
9690 +++ linux-3.0.4/arch/x86/include/asm/uaccess.h  2011-08-23 21:47:55.000000000 -0400
9691 @@ -7,12 +7,15 @@
9692  #include <linux/compiler.h>
9693  #include <linux/thread_info.h>
9694  #include <linux/string.h>
9695 +#include <linux/sched.h>
9696  #include <asm/asm.h>
9697  #include <asm/page.h>
9698  
9699  #define VERIFY_READ 0
9700  #define VERIFY_WRITE 1
9701  
9702 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9703 +
9704  /*
9705   * The fs value determines whether argument validity checking should be
9706   * performed or not.  If get_fs() == USER_DS, checking is performed, with
9707 @@ -28,7 +31,12 @@
9708  
9709  #define get_ds()       (KERNEL_DS)
9710  #define get_fs()       (current_thread_info()->addr_limit)
9711 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9712 +void __set_fs(mm_segment_t x);
9713 +void set_fs(mm_segment_t x);
9714 +#else
9715  #define set_fs(x)      (current_thread_info()->addr_limit = (x))
9716 +#endif
9717  
9718  #define segment_eq(a, b)       ((a).seg == (b).seg)
9719  
9720 @@ -76,7 +84,33 @@
9721   * checks that the pointer is in the user space range - after calling
9722   * this function, memory access functions may still return -EFAULT.
9723   */
9724 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9725 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9726 +#define access_ok(type, addr, size)                                    \
9727 +({                                                                     \
9728 +       long __size = size;                                             \
9729 +       unsigned long __addr = (unsigned long)addr;                     \
9730 +       unsigned long __addr_ao = __addr & PAGE_MASK;                   \
9731 +       unsigned long __end_ao = __addr + __size - 1;                   \
9732 +       bool __ret_ao = __range_not_ok(__addr, __size) == 0;            \
9733 +       if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9734 +               while(__addr_ao <= __end_ao) {                          \
9735 +                       char __c_ao;                                    \
9736 +                       __addr_ao += PAGE_SIZE;                         \
9737 +                       if (__size > PAGE_SIZE)                         \
9738 +                               cond_resched();                         \
9739 +                       if (__get_user(__c_ao, (char __user *)__addr))  \
9740 +                               break;                                  \
9741 +                       if (type != VERIFY_WRITE) {                     \
9742 +                               __addr = __addr_ao;                     \
9743 +                               continue;                               \
9744 +                       }                                               \
9745 +                       if (__put_user(__c_ao, (char __user *)__addr))  \
9746 +                               break;                                  \
9747 +                       __addr = __addr_ao;                             \
9748 +               }                                                       \
9749 +       }                                                               \
9750 +       __ret_ao;                                                       \
9751 +})
9752  
9753  /*
9754   * The exception table consists of pairs of addresses: the first is the
9755 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9756         asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9757                      : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9758  
9759 -
9760 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9761 +#define __copyuser_seg "gs;"
9762 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9763 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9764 +#else
9765 +#define __copyuser_seg
9766 +#define __COPYUSER_SET_ES
9767 +#define __COPYUSER_RESTORE_ES
9768 +#endif
9769  
9770  #ifdef CONFIG_X86_32
9771  #define __put_user_asm_u64(x, addr, err, errret)                       \
9772 -       asm volatile("1:        movl %%eax,0(%2)\n"                     \
9773 -                    "2:        movl %%edx,4(%2)\n"                     \
9774 +       asm volatile("1:        "__copyuser_seg"movl %%eax,0(%2)\n"     \
9775 +                    "2:        "__copyuser_seg"movl %%edx,4(%2)\n"     \
9776                      "3:\n"                                             \
9777                      ".section .fixup,\"ax\"\n"                         \
9778                      "4:        movl %3,%0\n"                           \
9779 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9780                      : "A" (x), "r" (addr), "i" (errret), "0" (err))
9781  
9782  #define __put_user_asm_ex_u64(x, addr)                                 \
9783 -       asm volatile("1:        movl %%eax,0(%1)\n"                     \
9784 -                    "2:        movl %%edx,4(%1)\n"                     \
9785 +       asm volatile("1:        "__copyuser_seg"movl %%eax,0(%1)\n"     \
9786 +                    "2:        "__copyuser_seg"movl %%edx,4(%1)\n"     \
9787                      "3:\n"                                             \
9788                      _ASM_EXTABLE(1b, 2b - 1b)                          \
9789                      _ASM_EXTABLE(2b, 3b - 2b)                          \
9790 @@ -373,7 +415,7 @@ do {                                                                        \
9791  } while (0)
9792  
9793  #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
9794 -       asm volatile("1:        mov"itype" %2,%"rtype"1\n"              \
9795 +       asm volatile("1:        "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9796                      "2:\n"                                             \
9797                      ".section .fixup,\"ax\"\n"                         \
9798                      "3:        mov %3,%0\n"                            \
9799 @@ -381,7 +423,7 @@ do {                                                                        \
9800                      "  jmp 2b\n"                                       \
9801                      ".previous\n"                                      \
9802                      _ASM_EXTABLE(1b, 3b)                               \
9803 -                    : "=r" (err), ltype(x)                             \
9804 +                    : "=r" (err), ltype (x)                            \
9805                      : "m" (__m(addr)), "i" (errret), "0" (err))
9806  
9807  #define __get_user_size_ex(x, ptr, size)                               \
9808 @@ -406,7 +448,7 @@ do {                                                                        \
9809  } while (0)
9810  
9811  #define __get_user_asm_ex(x, addr, itype, rtype, ltype)                        \
9812 -       asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
9813 +       asm volatile("1:        "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9814                      "2:\n"                                             \
9815                      _ASM_EXTABLE(1b, 2b - 1b)                          \
9816                      : ltype(x) : "m" (__m(addr)))
9817 @@ -423,13 +465,24 @@ do {                                                                      \
9818         int __gu_err;                                                   \
9819         unsigned long __gu_val;                                         \
9820         __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
9821 -       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
9822 +       (x) = (__typeof__(*(ptr)))__gu_val;                             \
9823         __gu_err;                                                       \
9824  })
9825  
9826  /* FIXME: this hack is definitely wrong -AK */
9827  struct __large_struct { unsigned long buf[100]; };
9828 -#define __m(x) (*(struct __large_struct __user *)(x))
9829 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9830 +#define ____m(x)                                       \
9831 +({                                                     \
9832 +       unsigned long ____x = (unsigned long)(x);       \
9833 +       if (____x < PAX_USER_SHADOW_BASE)               \
9834 +               ____x += PAX_USER_SHADOW_BASE;          \
9835 +       (void __user *)____x;                           \
9836 +})
9837 +#else
9838 +#define ____m(x) (x)
9839 +#endif
9840 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9841  
9842  /*
9843   * Tell gcc we read from memory instead of writing: this is because
9844 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9845   * aliasing issues.
9846   */
9847  #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
9848 -       asm volatile("1:        mov"itype" %"rtype"1,%2\n"              \
9849 +       asm volatile("1:        "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9850                      "2:\n"                                             \
9851                      ".section .fixup,\"ax\"\n"                         \
9852                      "3:        mov %3,%0\n"                            \
9853 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9854                      ".previous\n"                                      \
9855                      _ASM_EXTABLE(1b, 3b)                               \
9856                      : "=r"(err)                                        \
9857 -                    : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9858 +                    : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9859  
9860  #define __put_user_asm_ex(x, addr, itype, rtype, ltype)                        \
9861 -       asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
9862 +       asm volatile("1:        "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9863                      "2:\n"                                             \
9864                      _ASM_EXTABLE(1b, 2b - 1b)                          \
9865                      : : ltype(x), "m" (__m(addr)))
9866 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9867   * On error, the variable @x is set to zero.
9868   */
9869  
9870 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9871 +#define __get_user(x, ptr)     get_user((x), (ptr))
9872 +#else
9873  #define __get_user(x, ptr)                                             \
9874         __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9875 +#endif
9876  
9877  /**
9878   * __put_user: - Write a simple value into user space, with less checking.
9879 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9880   * Returns zero on success, or -EFAULT on error.
9881   */
9882  
9883 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9884 +#define __put_user(x, ptr)     put_user((x), (ptr))
9885 +#else
9886  #define __put_user(x, ptr)                                             \
9887         __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9888 +#endif
9889  
9890  #define __get_user_unaligned __get_user
9891  #define __put_user_unaligned __put_user
9892 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
9893  #define get_user_ex(x, ptr)    do {                                    \
9894         unsigned long __gue_val;                                        \
9895         __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
9896 -       (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
9897 +       (x) = (__typeof__(*(ptr)))__gue_val;                            \
9898  } while (0)
9899  
9900  #ifdef CONFIG_X86_WP_WORKS_OK
9901 diff -urNp linux-3.0.4/arch/x86/include/asm/vgtod.h linux-3.0.4/arch/x86/include/asm/vgtod.h
9902 --- linux-3.0.4/arch/x86/include/asm/vgtod.h    2011-07-21 22:17:23.000000000 -0400
9903 +++ linux-3.0.4/arch/x86/include/asm/vgtod.h    2011-08-23 21:47:55.000000000 -0400
9904 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9905         int             sysctl_enabled;
9906         struct timezone sys_tz;
9907         struct { /* extract of a clocksource struct */
9908 +               char    name[8];
9909                 cycle_t (*vread)(void);
9910                 cycle_t cycle_last;
9911                 cycle_t mask;
9912 diff -urNp linux-3.0.4/arch/x86/include/asm/x86_init.h linux-3.0.4/arch/x86/include/asm/x86_init.h
9913 --- linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
9914 +++ linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
9915 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
9916         void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9917         void (*find_smp_config)(void);
9918         void (*get_smp_config)(unsigned int early);
9919 -};
9920 +} __no_const;
9921  
9922  /**
9923   * struct x86_init_resources - platform specific resource related ops
9924 @@ -42,7 +42,7 @@ struct x86_init_resources {
9925         void (*probe_roms)(void);
9926         void (*reserve_resources)(void);
9927         char *(*memory_setup)(void);
9928 -};
9929 +} __no_const;
9930  
9931  /**
9932   * struct x86_init_irqs - platform specific interrupt setup
9933 @@ -55,7 +55,7 @@ struct x86_init_irqs {
9934         void (*pre_vector_init)(void);
9935         void (*intr_init)(void);
9936         void (*trap_init)(void);
9937 -};
9938 +} __no_const;
9939  
9940  /**
9941   * struct x86_init_oem - oem platform specific customizing functions
9942 @@ -65,7 +65,7 @@ struct x86_init_irqs {
9943  struct x86_init_oem {
9944         void (*arch_setup)(void);
9945         void (*banner)(void);
9946 -};
9947 +} __no_const;
9948  
9949  /**
9950   * struct x86_init_mapping - platform specific initial kernel pagetable setup
9951 @@ -76,7 +76,7 @@ struct x86_init_oem {
9952   */
9953  struct x86_init_mapping {
9954         void (*pagetable_reserve)(u64 start, u64 end);
9955 -};
9956 +} __no_const;
9957  
9958  /**
9959   * struct x86_init_paging - platform specific paging functions
9960 @@ -86,7 +86,7 @@ struct x86_init_mapping {
9961  struct x86_init_paging {
9962         void (*pagetable_setup_start)(pgd_t *base);
9963         void (*pagetable_setup_done)(pgd_t *base);
9964 -};
9965 +} __no_const;
9966  
9967  /**
9968   * struct x86_init_timers - platform specific timer setup
9969 @@ -101,7 +101,7 @@ struct x86_init_timers {
9970         void (*tsc_pre_init)(void);
9971         void (*timer_init)(void);
9972         void (*wallclock_init)(void);
9973 -};
9974 +} __no_const;
9975  
9976  /**
9977   * struct x86_init_iommu - platform specific iommu setup
9978 @@ -109,7 +109,7 @@ struct x86_init_timers {
9979   */
9980  struct x86_init_iommu {
9981         int (*iommu_init)(void);
9982 -};
9983 +} __no_const;
9984  
9985  /**
9986   * struct x86_init_pci - platform specific pci init functions
9987 @@ -123,7 +123,7 @@ struct x86_init_pci {
9988         int (*init)(void);
9989         void (*init_irq)(void);
9990         void (*fixup_irqs)(void);
9991 -};
9992 +} __no_const;
9993  
9994  /**
9995   * struct x86_init_ops - functions for platform specific setup
9996 @@ -139,7 +139,7 @@ struct x86_init_ops {
9997         struct x86_init_timers          timers;
9998         struct x86_init_iommu           iommu;
9999         struct x86_init_pci             pci;
10000 -};
10001 +} __no_const;
10002  
10003  /**
10004   * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10005 @@ -147,7 +147,7 @@ struct x86_init_ops {
10006   */
10007  struct x86_cpuinit_ops {
10008         void (*setup_percpu_clockev)(void);
10009 -};
10010 +} __no_const;
10011  
10012  /**
10013   * struct x86_platform_ops - platform specific runtime functions
10014 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10015         bool (*is_untracked_pat_range)(u64 start, u64 end);
10016         void (*nmi_init)(void);
10017         int (*i8042_detect)(void);
10018 -};
10019 +} __no_const;
10020  
10021  struct pci_dev;
10022  
10023 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10024         int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10025         void (*teardown_msi_irq)(unsigned int irq);
10026         void (*teardown_msi_irqs)(struct pci_dev *dev);
10027 -};
10028 +} __no_const;
10029  
10030  extern struct x86_init_ops x86_init;
10031  extern struct x86_cpuinit_ops x86_cpuinit;
10032 diff -urNp linux-3.0.4/arch/x86/include/asm/xsave.h linux-3.0.4/arch/x86/include/asm/xsave.h
10033 --- linux-3.0.4/arch/x86/include/asm/xsave.h    2011-07-21 22:17:23.000000000 -0400
10034 +++ linux-3.0.4/arch/x86/include/asm/xsave.h    2011-08-23 21:47:55.000000000 -0400
10035 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10036  {
10037         int err;
10038  
10039 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10040 +       if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10041 +               buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10042 +#endif
10043 +
10044         /*
10045          * Clear the xsave header first, so that reserved fields are
10046          * initialized to zero.
10047 @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10048         u32 lmask = mask;
10049         u32 hmask = mask >> 32;
10050  
10051 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10052 +       if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10053 +               xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10054 +#endif
10055 +
10056         __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10057                              "2:\n"
10058                              ".section .fixup,\"ax\"\n"
10059 diff -urNp linux-3.0.4/arch/x86/Kconfig linux-3.0.4/arch/x86/Kconfig
10060 --- linux-3.0.4/arch/x86/Kconfig        2011-07-21 22:17:23.000000000 -0400
10061 +++ linux-3.0.4/arch/x86/Kconfig        2011-08-23 21:48:14.000000000 -0400
10062 @@ -229,7 +229,7 @@ config X86_HT
10063  
10064  config X86_32_LAZY_GS
10065         def_bool y
10066 -       depends on X86_32 && !CC_STACKPROTECTOR
10067 +       depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10068  
10069  config ARCH_HWEIGHT_CFLAGS
10070         string
10071 @@ -1018,7 +1018,7 @@ choice
10072  
10073  config NOHIGHMEM
10074         bool "off"
10075 -       depends on !X86_NUMAQ
10076 +       depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10077         ---help---
10078           Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10079           However, the address space of 32-bit x86 processors is only 4
10080 @@ -1055,7 +1055,7 @@ config NOHIGHMEM
10081  
10082  config HIGHMEM4G
10083         bool "4GB"
10084 -       depends on !X86_NUMAQ
10085 +       depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10086         ---help---
10087           Select this if you have a 32-bit processor and between 1 and 4
10088           gigabytes of physical RAM.
10089 @@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10090         hex
10091         default 0xB0000000 if VMSPLIT_3G_OPT
10092         default 0x80000000 if VMSPLIT_2G
10093 -       default 0x78000000 if VMSPLIT_2G_OPT
10094 +       default 0x70000000 if VMSPLIT_2G_OPT
10095         default 0x40000000 if VMSPLIT_1G
10096         default 0xC0000000
10097         depends on X86_32
10098 @@ -1453,7 +1453,7 @@ config ARCH_USES_PG_UNCACHED
10099  
10100  config EFI
10101         bool "EFI runtime service support"
10102 -       depends on ACPI
10103 +       depends on ACPI && !PAX_KERNEXEC
10104         ---help---
10105           This enables the kernel to use EFI runtime services that are
10106           available (such as the EFI variable services).
10107 @@ -1483,6 +1483,7 @@ config SECCOMP
10108  
10109  config CC_STACKPROTECTOR
10110         bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10111 +       depends on X86_64 || !PAX_MEMORY_UDEREF
10112         ---help---
10113           This option turns on the -fstack-protector GCC feature. This
10114           feature puts, at the beginning of functions, a canary value on
10115 @@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10116  config PHYSICAL_START
10117         hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10118         default "0x1000000"
10119 +       range 0x400000 0x40000000
10120         ---help---
10121           This gives the physical address where the kernel is loaded.
10122  
10123 @@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10124  config PHYSICAL_ALIGN
10125         hex "Alignment value to which kernel should be aligned" if X86_32
10126         default "0x1000000"
10127 +       range 0x400000 0x1000000 if PAX_KERNEXEC
10128         range 0x2000 0x1000000
10129         ---help---
10130           This value puts the alignment restrictions on physical address
10131 @@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10132           Say N if you want to disable CPU hotplug.
10133  
10134  config COMPAT_VDSO
10135 -       def_bool y
10136 +       def_bool n
10137         prompt "Compat VDSO support"
10138         depends on X86_32 || IA32_EMULATION
10139 +       depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10140         ---help---
10141           Map the 32-bit VDSO to the predictable old-style address too.
10142  
10143 diff -urNp linux-3.0.4/arch/x86/Kconfig.cpu linux-3.0.4/arch/x86/Kconfig.cpu
10144 --- linux-3.0.4/arch/x86/Kconfig.cpu    2011-07-21 22:17:23.000000000 -0400
10145 +++ linux-3.0.4/arch/x86/Kconfig.cpu    2011-08-23 21:47:55.000000000 -0400
10146 @@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10147  
10148  config X86_F00F_BUG
10149         def_bool y
10150 -       depends on M586MMX || M586TSC || M586 || M486 || M386
10151 +       depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10152  
10153  config X86_INVD_BUG
10154         def_bool y
10155 @@ -362,7 +362,7 @@ config X86_POPAD_OK
10156  
10157  config X86_ALIGNMENT_16
10158         def_bool y
10159 -       depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10160 +       depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10161  
10162  config X86_INTEL_USERCOPY
10163         def_bool y
10164 @@ -408,7 +408,7 @@ config X86_CMPXCHG64
10165  # generates cmov.
10166  config X86_CMOV
10167         def_bool y
10168 -       depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10169 +       depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10170  
10171  config X86_MINIMUM_CPU_FAMILY
10172         int
10173 diff -urNp linux-3.0.4/arch/x86/Kconfig.debug linux-3.0.4/arch/x86/Kconfig.debug
10174 --- linux-3.0.4/arch/x86/Kconfig.debug  2011-07-21 22:17:23.000000000 -0400
10175 +++ linux-3.0.4/arch/x86/Kconfig.debug  2011-08-23 21:47:55.000000000 -0400
10176 @@ -81,7 +81,7 @@ config X86_PTDUMP
10177  config DEBUG_RODATA
10178         bool "Write protect kernel read-only data structures"
10179         default y
10180 -       depends on DEBUG_KERNEL
10181 +       depends on DEBUG_KERNEL && BROKEN
10182         ---help---
10183           Mark the kernel read-only data as write-protected in the pagetables,
10184           in order to catch accidental (and incorrect) writes to such const
10185 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10186  
10187  config DEBUG_SET_MODULE_RONX
10188         bool "Set loadable kernel module data as NX and text as RO"
10189 -       depends on MODULES
10190 +       depends on MODULES && BROKEN
10191         ---help---
10192           This option helps catch unintended modifications to loadable
10193           kernel module's text and read-only data. It also prevents execution
10194 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile
10195 --- linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile  2011-07-21 22:17:23.000000000 -0400
10196 +++ linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile  2011-08-23 21:47:55.000000000 -0400
10197 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os 
10198                    $(call cc-option, -fno-stack-protector) \
10199                    $(call cc-option, -mpreferred-stack-boundary=2)
10200  KBUILD_CFLAGS  += $(call cc-option, -m32)
10201 +ifdef CONSTIFY_PLUGIN
10202 +KBUILD_CFLAGS  += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10203 +endif
10204  KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10205  GCOV_PROFILE := n
10206  
10207 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S
10208 --- linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S  2011-07-21 22:17:23.000000000 -0400
10209 +++ linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S  2011-08-23 21:48:14.000000000 -0400
10210 @@ -108,6 +108,9 @@ wakeup_code:
10211         /* Do any other stuff... */
10212  
10213  #ifndef CONFIG_64BIT
10214 +       /* Recheck NX bit overrides (64bit path does this in trampoline) */
10215 +       call    verify_cpu
10216 +
10217         /* This could also be done in C code... */
10218         movl    pmode_cr3, %eax
10219         movl    %eax, %cr3
10220 @@ -131,6 +134,7 @@ wakeup_code:
10221         movl    pmode_cr0, %eax
10222         movl    %eax, %cr0
10223         jmp     pmode_return
10224 +# include "../../verify_cpu.S"
10225  #else
10226         pushw   $0
10227         pushw   trampoline_segment
10228 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/sleep.c linux-3.0.4/arch/x86/kernel/acpi/sleep.c
10229 --- linux-3.0.4/arch/x86/kernel/acpi/sleep.c    2011-07-21 22:17:23.000000000 -0400
10230 +++ linux-3.0.4/arch/x86/kernel/acpi/sleep.c    2011-08-23 21:47:55.000000000 -0400
10231 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10232         header->trampoline_segment = trampoline_address() >> 4;
10233  #ifdef CONFIG_SMP
10234         stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10235 +
10236 +       pax_open_kernel();
10237         early_gdt_descr.address =
10238                         (unsigned long)get_cpu_gdt_table(smp_processor_id());
10239 +       pax_close_kernel();
10240 +
10241         initial_gs = per_cpu_offset(smp_processor_id());
10242  #endif
10243         initial_code = (unsigned long)wakeup_long64;
10244 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S
10245 --- linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S        2011-07-21 22:17:23.000000000 -0400
10246 +++ linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S        2011-08-23 21:47:55.000000000 -0400
10247 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10248         # and restore the stack ... but you need gdt for this to work
10249         movl    saved_context_esp, %esp
10250  
10251 -       movl    %cs:saved_magic, %eax
10252 -       cmpl    $0x12345678, %eax
10253 +       cmpl    $0x12345678, saved_magic
10254         jne     bogus_magic
10255  
10256         # jump to place where we left off
10257 -       movl    saved_eip, %eax
10258 -       jmp     *%eax
10259 +       jmp     *(saved_eip)
10260  
10261  bogus_magic:
10262         jmp     bogus_magic
10263 diff -urNp linux-3.0.4/arch/x86/kernel/alternative.c linux-3.0.4/arch/x86/kernel/alternative.c
10264 --- linux-3.0.4/arch/x86/kernel/alternative.c   2011-07-21 22:17:23.000000000 -0400
10265 +++ linux-3.0.4/arch/x86/kernel/alternative.c   2011-08-23 21:47:55.000000000 -0400
10266 @@ -313,7 +313,7 @@ static void alternatives_smp_lock(const 
10267                 if (!*poff || ptr < text || ptr >= text_end)
10268                         continue;
10269                 /* turn DS segment override prefix into lock prefix */
10270 -               if (*ptr == 0x3e)
10271 +               if (*ktla_ktva(ptr) == 0x3e)
10272                         text_poke(ptr, ((unsigned char []){0xf0}), 1);
10273         };
10274         mutex_unlock(&text_mutex);
10275 @@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10276                 if (!*poff || ptr < text || ptr >= text_end)
10277                         continue;
10278                 /* turn lock prefix into DS segment override prefix */
10279 -               if (*ptr == 0xf0)
10280 +               if (*ktla_ktva(ptr) == 0xf0)
10281                         text_poke(ptr, ((unsigned char []){0x3E}), 1);
10282         };
10283         mutex_unlock(&text_mutex);
10284 @@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10285  
10286                 BUG_ON(p->len > MAX_PATCH_LEN);
10287                 /* prep the buffer with the original instructions */
10288 -               memcpy(insnbuf, p->instr, p->len);
10289 +               memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10290                 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10291                                          (unsigned long)p->instr, p->len);
10292  
10293 @@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10294         if (smp_alt_once)
10295                 free_init_pages("SMP alternatives",
10296                                 (unsigned long)__smp_locks,
10297 -                               (unsigned long)__smp_locks_end);
10298 +                               PAGE_ALIGN((unsigned long)__smp_locks_end));
10299  
10300         restart_nmi();
10301  }
10302 @@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10303   * instructions. And on the local CPU you need to be protected again NMI or MCE
10304   * handlers seeing an inconsistent instruction while you patch.
10305   */
10306 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10307 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10308                                               size_t len)
10309  {
10310         unsigned long flags;
10311         local_irq_save(flags);
10312 -       memcpy(addr, opcode, len);
10313 +
10314 +       pax_open_kernel();
10315 +       memcpy(ktla_ktva(addr), opcode, len);
10316         sync_core();
10317 +       pax_close_kernel();
10318 +
10319         local_irq_restore(flags);
10320         /* Could also do a CLFLUSH here to speed up CPU recovery; but
10321            that causes hangs on some VIA CPUs. */
10322 @@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10323   */
10324  void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10325  {
10326 -       unsigned long flags;
10327 -       char *vaddr;
10328 +       unsigned char *vaddr = ktla_ktva(addr);
10329         struct page *pages[2];
10330 -       int i;
10331 +       size_t i;
10332  
10333         if (!core_kernel_text((unsigned long)addr)) {
10334 -               pages[0] = vmalloc_to_page(addr);
10335 -               pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10336 +               pages[0] = vmalloc_to_page(vaddr);
10337 +               pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10338         } else {
10339 -               pages[0] = virt_to_page(addr);
10340 +               pages[0] = virt_to_page(vaddr);
10341                 WARN_ON(!PageReserved(pages[0]));
10342 -               pages[1] = virt_to_page(addr + PAGE_SIZE);
10343 +               pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10344         }
10345         BUG_ON(!pages[0]);
10346 -       local_irq_save(flags);
10347 -       set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10348 -       if (pages[1])
10349 -               set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10350 -       vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10351 -       memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10352 -       clear_fixmap(FIX_TEXT_POKE0);
10353 -       if (pages[1])
10354 -               clear_fixmap(FIX_TEXT_POKE1);
10355 -       local_flush_tlb();
10356 -       sync_core();
10357 -       /* Could also do a CLFLUSH here to speed up CPU recovery; but
10358 -          that causes hangs on some VIA CPUs. */
10359 +       text_poke_early(addr, opcode, len);
10360         for (i = 0; i < len; i++)
10361 -               BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10362 -       local_irq_restore(flags);
10363 +               BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10364         return addr;
10365  }
10366  
10367 diff -urNp linux-3.0.4/arch/x86/kernel/apic/apic.c linux-3.0.4/arch/x86/kernel/apic/apic.c
10368 --- linux-3.0.4/arch/x86/kernel/apic/apic.c     2011-07-21 22:17:23.000000000 -0400
10369 +++ linux-3.0.4/arch/x86/kernel/apic/apic.c     2011-08-23 21:48:14.000000000 -0400
10370 @@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10371  /*
10372   * Debug level, exported for io_apic.c
10373   */
10374 -unsigned int apic_verbosity;
10375 +int apic_verbosity;
10376  
10377  int pic_mode;
10378  
10379 @@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs 
10380         apic_write(APIC_ESR, 0);
10381         v1 = apic_read(APIC_ESR);
10382         ack_APIC_irq();
10383 -       atomic_inc(&irq_err_count);
10384 +       atomic_inc_unchecked(&irq_err_count);
10385  
10386         apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10387                     smp_processor_id(), v0 , v1);
10388 @@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10389         u16 *bios_cpu_apicid;
10390         DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10391  
10392 +       pax_track_stack();
10393 +
10394         bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10395         bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10396  
10397 diff -urNp linux-3.0.4/arch/x86/kernel/apic/io_apic.c linux-3.0.4/arch/x86/kernel/apic/io_apic.c
10398 --- linux-3.0.4/arch/x86/kernel/apic/io_apic.c  2011-07-21 22:17:23.000000000 -0400
10399 +++ linux-3.0.4/arch/x86/kernel/apic/io_apic.c  2011-08-23 21:47:55.000000000 -0400
10400 @@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, 
10401  }
10402  EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10403  
10404 -void lock_vector_lock(void)
10405 +void lock_vector_lock(void) __acquires(vector_lock)
10406  {
10407         /* Used to the online set of cpus does not change
10408          * during assign_irq_vector.
10409 @@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10410         raw_spin_lock(&vector_lock);
10411  }
10412  
10413 -void unlock_vector_lock(void)
10414 +void unlock_vector_lock(void) __releases(vector_lock)
10415  {
10416         raw_spin_unlock(&vector_lock);
10417  }
10418 @@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10419         ack_APIC_irq();
10420  }
10421  
10422 -atomic_t irq_mis_count;
10423 +atomic_unchecked_t irq_mis_count;
10424  
10425  /*
10426   * IO-APIC versions below 0x20 don't support EOI register.
10427 @@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10428          * at the cpu.
10429          */
10430         if (!(v & (1 << (i & 0x1f)))) {
10431 -               atomic_inc(&irq_mis_count);
10432 +               atomic_inc_unchecked(&irq_mis_count);
10433  
10434                 eoi_ioapic_irq(irq, cfg);
10435         }
10436 diff -urNp linux-3.0.4/arch/x86/kernel/apm_32.c linux-3.0.4/arch/x86/kernel/apm_32.c
10437 --- linux-3.0.4/arch/x86/kernel/apm_32.c        2011-07-21 22:17:23.000000000 -0400
10438 +++ linux-3.0.4/arch/x86/kernel/apm_32.c        2011-08-23 21:47:55.000000000 -0400
10439 @@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10440   * This is for buggy BIOS's that refer to (real mode) segment 0x40
10441   * even though they are called in protected mode.
10442   */
10443 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10444 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10445                         (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10446  
10447  static const char driver_version[] = "1.16ac"; /* no spaces */
10448 @@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10449         BUG_ON(cpu != 0);
10450         gdt = get_cpu_gdt_table(cpu);
10451         save_desc_40 = gdt[0x40 / 8];
10452 +
10453 +       pax_open_kernel();
10454         gdt[0x40 / 8] = bad_bios_desc;
10455 +       pax_close_kernel();
10456  
10457         apm_irq_save(flags);
10458         APM_DO_SAVE_SEGS;
10459 @@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10460                           &call->esi);
10461         APM_DO_RESTORE_SEGS;
10462         apm_irq_restore(flags);
10463 +
10464 +       pax_open_kernel();
10465         gdt[0x40 / 8] = save_desc_40;
10466 +       pax_close_kernel();
10467 +
10468         put_cpu();
10469  
10470         return call->eax & 0xff;
10471 @@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void 
10472         BUG_ON(cpu != 0);
10473         gdt = get_cpu_gdt_table(cpu);
10474         save_desc_40 = gdt[0x40 / 8];
10475 +
10476 +       pax_open_kernel();
10477         gdt[0x40 / 8] = bad_bios_desc;
10478 +       pax_close_kernel();
10479  
10480         apm_irq_save(flags);
10481         APM_DO_SAVE_SEGS;
10482 @@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void 
10483                                          &call->eax);
10484         APM_DO_RESTORE_SEGS;
10485         apm_irq_restore(flags);
10486 +
10487 +       pax_open_kernel();
10488         gdt[0x40 / 8] = save_desc_40;
10489 +       pax_close_kernel();
10490 +
10491         put_cpu();
10492         return error;
10493  }
10494 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10495          * code to that CPU.
10496          */
10497         gdt = get_cpu_gdt_table(0);
10498 +
10499 +       pax_open_kernel();
10500         set_desc_base(&gdt[APM_CS >> 3],
10501                  (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10502         set_desc_base(&gdt[APM_CS_16 >> 3],
10503                  (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10504         set_desc_base(&gdt[APM_DS >> 3],
10505                  (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10506 +       pax_close_kernel();
10507  
10508         proc_create("apm", 0, NULL, &apm_file_ops);
10509  
10510 diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets_64.c linux-3.0.4/arch/x86/kernel/asm-offsets_64.c
10511 --- linux-3.0.4/arch/x86/kernel/asm-offsets_64.c        2011-07-21 22:17:23.000000000 -0400
10512 +++ linux-3.0.4/arch/x86/kernel/asm-offsets_64.c        2011-08-23 21:47:55.000000000 -0400
10513 @@ -69,6 +69,7 @@ int main(void)
10514         BLANK();
10515  #undef ENTRY
10516  
10517 +       DEFINE(TSS_size, sizeof(struct tss_struct));
10518         OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10519         BLANK();
10520  
10521 diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets.c linux-3.0.4/arch/x86/kernel/asm-offsets.c
10522 --- linux-3.0.4/arch/x86/kernel/asm-offsets.c   2011-07-21 22:17:23.000000000 -0400
10523 +++ linux-3.0.4/arch/x86/kernel/asm-offsets.c   2011-08-23 21:47:55.000000000 -0400
10524 @@ -33,6 +33,8 @@ void common(void) {
10525         OFFSET(TI_status, thread_info, status);
10526         OFFSET(TI_addr_limit, thread_info, addr_limit);
10527         OFFSET(TI_preempt_count, thread_info, preempt_count);
10528 +       OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10529 +       DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10530  
10531         BLANK();
10532         OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10533 @@ -53,8 +55,26 @@ void common(void) {
10534         OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10535         OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10536         OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10537 +
10538 +#ifdef CONFIG_PAX_KERNEXEC
10539 +       OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10540 +#endif
10541 +
10542 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10543 +       OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10544 +       OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10545 +#ifdef CONFIG_X86_64
10546 +       OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10547 +#endif
10548  #endif
10549  
10550 +#endif
10551 +
10552 +       BLANK();
10553 +       DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10554 +       DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10555 +       DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10556 +
10557  #ifdef CONFIG_XEN
10558         BLANK();
10559         OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10560 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/amd.c linux-3.0.4/arch/x86/kernel/cpu/amd.c
10561 --- linux-3.0.4/arch/x86/kernel/cpu/amd.c       2011-07-21 22:17:23.000000000 -0400
10562 +++ linux-3.0.4/arch/x86/kernel/cpu/amd.c       2011-08-23 21:47:55.000000000 -0400
10563 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10564                                                         unsigned int size)
10565  {
10566         /* AMD errata T13 (order #21922) */
10567 -       if ((c->x86 == 6)) {
10568 +       if (c->x86 == 6) {
10569                 /* Duron Rev A0 */
10570                 if (c->x86_model == 3 && c->x86_mask == 0)
10571                         size = 64;
10572 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/common.c linux-3.0.4/arch/x86/kernel/cpu/common.c
10573 --- linux-3.0.4/arch/x86/kernel/cpu/common.c    2011-07-21 22:17:23.000000000 -0400
10574 +++ linux-3.0.4/arch/x86/kernel/cpu/common.c    2011-08-23 21:47:55.000000000 -0400
10575 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10576  
10577  static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10578  
10579 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10580 -#ifdef CONFIG_X86_64
10581 -       /*
10582 -        * We need valid kernel segments for data and code in long mode too
10583 -        * IRET will check the segment types  kkeil 2000/10/28
10584 -        * Also sysret mandates a special GDT layout
10585 -        *
10586 -        * TLS descriptors are currently at a different place compared to i386.
10587 -        * Hopefully nobody expects them at a fixed place (Wine?)
10588 -        */
10589 -       [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10590 -       [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10591 -       [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10592 -       [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10593 -       [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10594 -       [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10595 -#else
10596 -       [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10597 -       [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10598 -       [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10599 -       [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10600 -       /*
10601 -        * Segments used for calling PnP BIOS have byte granularity.
10602 -        * They code segments and data segments have fixed 64k limits,
10603 -        * the transfer segment sizes are set at run time.
10604 -        */
10605 -       /* 32-bit code */
10606 -       [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10607 -       /* 16-bit code */
10608 -       [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10609 -       /* 16-bit data */
10610 -       [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10611 -       /* 16-bit data */
10612 -       [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(0x0092, 0, 0),
10613 -       /* 16-bit data */
10614 -       [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(0x0092, 0, 0),
10615 -       /*
10616 -        * The APM segments have byte granularity and their bases
10617 -        * are set at run time.  All have 64k limits.
10618 -        */
10619 -       /* 32-bit code */
10620 -       [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10621 -       /* 16-bit code */
10622 -       [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10623 -       /* data */
10624 -       [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10625 -
10626 -       [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10627 -       [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10628 -       GDT_STACK_CANARY_INIT
10629 -#endif
10630 -} };
10631 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10632 -
10633  static int __init x86_xsave_setup(char *s)
10634  {
10635         setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10636 @@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10637  {
10638         struct desc_ptr gdt_descr;
10639  
10640 -       gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10641 +       gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10642         gdt_descr.size = GDT_SIZE - 1;
10643         load_gdt(&gdt_descr);
10644         /* Reload the per-cpu base */
10645 @@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10646         /* Filter out anything that depends on CPUID levels we don't have */
10647         filter_cpuid_features(c, true);
10648  
10649 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10650 +       setup_clear_cpu_cap(X86_FEATURE_SEP);
10651 +#endif
10652 +
10653         /* If the model name is still unset, do table lookup. */
10654         if (!c->x86_model_id[0]) {
10655                 const char *p;
10656 @@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10657  }
10658  __setup("clearcpuid=", setup_disablecpuid);
10659  
10660 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10661 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10662 +
10663  #ifdef CONFIG_X86_64
10664  struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10665  
10666 @@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10667  EXPORT_PER_CPU_SYMBOL(current_task);
10668  
10669  DEFINE_PER_CPU(unsigned long, kernel_stack) =
10670 -       (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10671 +       (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10672  EXPORT_PER_CPU_SYMBOL(kernel_stack);
10673  
10674  DEFINE_PER_CPU(char *, irq_stack_ptr) =
10675 @@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10676  {
10677         memset(regs, 0, sizeof(struct pt_regs));
10678         regs->fs = __KERNEL_PERCPU;
10679 -       regs->gs = __KERNEL_STACK_CANARY;
10680 +       savesegment(gs, regs->gs);
10681  
10682         return regs;
10683  }
10684 @@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10685         int i;
10686  
10687         cpu = stack_smp_processor_id();
10688 -       t = &per_cpu(init_tss, cpu);
10689 +       t = init_tss + cpu;
10690         oist = &per_cpu(orig_ist, cpu);
10691  
10692  #ifdef CONFIG_NUMA
10693 @@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10694         switch_to_new_gdt(cpu);
10695         loadsegment(fs, 0);
10696  
10697 -       load_idt((const struct desc_ptr *)&idt_descr);
10698 +       load_idt(&idt_descr);
10699  
10700         memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10701         syscall_init();
10702 @@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10703         wrmsrl(MSR_KERNEL_GS_BASE, 0);
10704         barrier();
10705  
10706 -       x86_configure_nx();
10707         if (cpu != 0)
10708                 enable_x2apic();
10709  
10710 @@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10711  {
10712         int cpu = smp_processor_id();
10713         struct task_struct *curr = current;
10714 -       struct tss_struct *t = &per_cpu(init_tss, cpu);
10715 +       struct tss_struct *t = init_tss + cpu;
10716         struct thread_struct *thread = &curr->thread;
10717  
10718         if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10719 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/intel.c linux-3.0.4/arch/x86/kernel/cpu/intel.c
10720 --- linux-3.0.4/arch/x86/kernel/cpu/intel.c     2011-08-29 23:26:13.000000000 -0400
10721 +++ linux-3.0.4/arch/x86/kernel/cpu/intel.c     2011-08-29 23:30:14.000000000 -0400
10722 @@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10723          * Update the IDT descriptor and reload the IDT so that
10724          * it uses the read-only mapped virtual address.
10725          */
10726 -       idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10727 +       idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10728         load_idt(&idt_descr);
10729  }
10730  #endif
10731 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/Makefile linux-3.0.4/arch/x86/kernel/cpu/Makefile
10732 --- linux-3.0.4/arch/x86/kernel/cpu/Makefile    2011-07-21 22:17:23.000000000 -0400
10733 +++ linux-3.0.4/arch/x86/kernel/cpu/Makefile    2011-08-23 21:47:55.000000000 -0400
10734 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10735  CFLAGS_REMOVE_perf_event.o = -pg
10736  endif
10737  
10738 -# Make sure load_percpu_segment has no stackprotector
10739 -nostackp := $(call cc-option, -fno-stack-protector)
10740 -CFLAGS_common.o                := $(nostackp)
10741 -
10742  obj-y                  := intel_cacheinfo.o scattered.o topology.o
10743  obj-y                  += proc.o capflags.o powerflags.o common.o
10744  obj-y                  += vmware.o hypervisor.o sched.o mshyperv.o
10745 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c
10746 --- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c        2011-07-21 22:17:23.000000000 -0400
10747 +++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c        2011-08-23 21:47:55.000000000 -0400
10748 @@ -46,6 +46,7 @@
10749  #include <asm/ipi.h>
10750  #include <asm/mce.h>
10751  #include <asm/msr.h>
10752 +#include <asm/local.h>
10753  
10754  #include "mce-internal.h"
10755  
10756 @@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10757                         !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10758                                 m->cs, m->ip);
10759  
10760 -               if (m->cs == __KERNEL_CS)
10761 +               if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10762                         print_symbol("{%s}", m->ip);
10763                 pr_cont("\n");
10764         }
10765 @@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10766  
10767  #define PANIC_TIMEOUT 5 /* 5 seconds */
10768  
10769 -static atomic_t mce_paniced;
10770 +static atomic_unchecked_t mce_paniced;
10771  
10772  static int fake_panic;
10773 -static atomic_t mce_fake_paniced;
10774 +static atomic_unchecked_t mce_fake_paniced;
10775  
10776  /* Panic in progress. Enable interrupts and wait for final IPI */
10777  static void wait_for_panic(void)
10778 @@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct 
10779                 /*
10780                  * Make sure only one CPU runs in machine check panic
10781                  */
10782 -               if (atomic_inc_return(&mce_paniced) > 1)
10783 +               if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10784                         wait_for_panic();
10785                 barrier();
10786  
10787 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct 
10788                 console_verbose();
10789         } else {
10790                 /* Don't log too much for fake panic */
10791 -               if (atomic_inc_return(&mce_fake_paniced) > 1)
10792 +               if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10793                         return;
10794         }
10795         /* First print corrected ones that are still unlogged */
10796 @@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
10797          * might have been modified by someone else.
10798          */
10799         rmb();
10800 -       if (atomic_read(&mce_paniced))
10801 +       if (atomic_read_unchecked(&mce_paniced))
10802                 wait_for_panic();
10803         if (!monarch_timeout)
10804                 goto out;
10805 @@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10806   */
10807  
10808  static DEFINE_SPINLOCK(mce_state_lock);
10809 -static int             open_count;             /* #times opened */
10810 +static local_t         open_count;             /* #times opened */
10811  static int             open_exclu;             /* already open exclusive? */
10812  
10813  static int mce_open(struct inode *inode, struct file *file)
10814  {
10815         spin_lock(&mce_state_lock);
10816  
10817 -       if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10818 +       if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10819                 spin_unlock(&mce_state_lock);
10820  
10821                 return -EBUSY;
10822 @@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
10823  
10824         if (file->f_flags & O_EXCL)
10825                 open_exclu = 1;
10826 -       open_count++;
10827 +       local_inc(&open_count);
10828  
10829         spin_unlock(&mce_state_lock);
10830  
10831 @@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
10832  {
10833         spin_lock(&mce_state_lock);
10834  
10835 -       open_count--;
10836 +       local_dec(&open_count);
10837         open_exclu = 0;
10838  
10839         spin_unlock(&mce_state_lock);
10840 @@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
10841  static void mce_reset(void)
10842  {
10843         cpu_missing = 0;
10844 -       atomic_set(&mce_fake_paniced, 0);
10845 +       atomic_set_unchecked(&mce_fake_paniced, 0);
10846         atomic_set(&mce_executing, 0);
10847         atomic_set(&mce_callin, 0);
10848         atomic_set(&global_nwo, 0);
10849 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10850 --- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
10851 +++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
10852 @@ -215,7 +215,9 @@ static int inject_init(void)
10853         if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10854                 return -ENOMEM;
10855         printk(KERN_INFO "Machine check injector initialized\n");
10856 -       mce_chrdev_ops.write = mce_write;
10857 +       pax_open_kernel();
10858 +       *(void **)&mce_chrdev_ops.write = mce_write;
10859 +       pax_close_kernel();
10860         register_die_notifier(&mce_raise_nb);
10861         return 0;
10862  }
10863 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c
10864 --- linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:13.000000000 -0400
10865 +++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
10866 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10867  u64 size_or_mask, size_and_mask;
10868  static bool mtrr_aps_delayed_init;
10869  
10870 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10871 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10872  
10873  const struct mtrr_ops *mtrr_if;
10874  
10875 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10876 --- linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
10877 +++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
10878 @@ -25,7 +25,7 @@ struct mtrr_ops {
10879         int     (*validate_add_page)(unsigned long base, unsigned long size,
10880                                      unsigned int type);
10881         int     (*have_wrcomb)(void);
10882 -};
10883 +} __do_const;
10884  
10885  extern int generic_get_free_region(unsigned long base, unsigned long size,
10886                                    int replace_reg);
10887 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/perf_event.c linux-3.0.4/arch/x86/kernel/cpu/perf_event.c
10888 --- linux-3.0.4/arch/x86/kernel/cpu/perf_event.c        2011-07-21 22:17:23.000000000 -0400
10889 +++ linux-3.0.4/arch/x86/kernel/cpu/perf_event.c        2011-08-23 21:48:14.000000000 -0400
10890 @@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
10891         int i, j, w, wmax, num = 0;
10892         struct hw_perf_event *hwc;
10893  
10894 +       pax_track_stack();
10895 +
10896         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10897  
10898         for (i = 0; i < n; i++) {
10899 @@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
10900                         break;
10901  
10902                 perf_callchain_store(entry, frame.return_address);
10903 -               fp = frame.next_frame;
10904 +               fp = (__force const void __user *)frame.next_frame;
10905         }
10906  }
10907  
10908 diff -urNp linux-3.0.4/arch/x86/kernel/crash.c linux-3.0.4/arch/x86/kernel/crash.c
10909 --- linux-3.0.4/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
10910 +++ linux-3.0.4/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
10911 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu, 
10912         regs = args->regs;
10913  
10914  #ifdef CONFIG_X86_32
10915 -       if (!user_mode_vm(regs)) {
10916 +       if (!user_mode(regs)) {
10917                 crash_fixup_ss_esp(&fixed_regs, regs);
10918                 regs = &fixed_regs;
10919         }
10920 diff -urNp linux-3.0.4/arch/x86/kernel/doublefault_32.c linux-3.0.4/arch/x86/kernel/doublefault_32.c
10921 --- linux-3.0.4/arch/x86/kernel/doublefault_32.c        2011-07-21 22:17:23.000000000 -0400
10922 +++ linux-3.0.4/arch/x86/kernel/doublefault_32.c        2011-08-23 21:47:55.000000000 -0400
10923 @@ -11,7 +11,7 @@
10924  
10925  #define DOUBLEFAULT_STACKSIZE (1024)
10926  static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10927 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10928 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10929  
10930  #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10931  
10932 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
10933         unsigned long gdt, tss;
10934  
10935         store_gdt(&gdt_desc);
10936 -       gdt = gdt_desc.address;
10937 +       gdt = (unsigned long)gdt_desc.address;
10938  
10939         printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10940  
10941 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10942                 /* 0x2 bit is always set */
10943                 .flags          = X86_EFLAGS_SF | 0x2,
10944                 .sp             = STACK_START,
10945 -               .es             = __USER_DS,
10946 +               .es             = __KERNEL_DS,
10947                 .cs             = __KERNEL_CS,
10948                 .ss             = __KERNEL_DS,
10949 -               .ds             = __USER_DS,
10950 +               .ds             = __KERNEL_DS,
10951                 .fs             = __KERNEL_PERCPU,
10952  
10953                 .__cr3          = __pa_nodebug(swapper_pg_dir),
10954 diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_32.c linux-3.0.4/arch/x86/kernel/dumpstack_32.c
10955 --- linux-3.0.4/arch/x86/kernel/dumpstack_32.c  2011-07-21 22:17:23.000000000 -0400
10956 +++ linux-3.0.4/arch/x86/kernel/dumpstack_32.c  2011-08-23 21:47:55.000000000 -0400
10957 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
10958                 bp = stack_frame(task, regs);
10959  
10960         for (;;) {
10961 -               struct thread_info *context;
10962 +               void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
10963  
10964 -               context = (struct thread_info *)
10965 -                       ((unsigned long)stack & (~(THREAD_SIZE - 1)));
10966 -               bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
10967 +               bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
10968  
10969 -               stack = (unsigned long *)context->previous_esp;
10970 -               if (!stack)
10971 +               if (stack_start == task_stack_page(task))
10972                         break;
10973 +               stack = *(unsigned long **)stack_start;
10974                 if (ops->stack(data, "IRQ") < 0)
10975                         break;
10976                 touch_nmi_watchdog();
10977 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
10978          * When in-kernel, we also print out the stack and code at the
10979          * time of the fault..
10980          */
10981 -       if (!user_mode_vm(regs)) {
10982 +       if (!user_mode(regs)) {
10983                 unsigned int code_prologue = code_bytes * 43 / 64;
10984                 unsigned int code_len = code_bytes;
10985                 unsigned char c;
10986                 u8 *ip;
10987 +               unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
10988  
10989                 printk(KERN_EMERG "Stack:\n");
10990                 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
10991  
10992                 printk(KERN_EMERG "Code: ");
10993  
10994 -               ip = (u8 *)regs->ip - code_prologue;
10995 +               ip = (u8 *)regs->ip - code_prologue + cs_base;
10996                 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
10997                         /* try starting at IP */
10998 -                       ip = (u8 *)regs->ip;
10999 +                       ip = (u8 *)regs->ip + cs_base;
11000                         code_len = code_len - code_prologue + 1;
11001                 }
11002                 for (i = 0; i < code_len; i++, ip++) {
11003 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11004                                 printk(" Bad EIP value.");
11005                                 break;
11006                         }
11007 -                       if (ip == (u8 *)regs->ip)
11008 +                       if (ip == (u8 *)regs->ip + cs_base)
11009                                 printk("<%02x> ", c);
11010                         else
11011                                 printk("%02x ", c);
11012 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11013  {
11014         unsigned short ud2;
11015  
11016 +       ip = ktla_ktva(ip);
11017         if (ip < PAGE_OFFSET)
11018                 return 0;
11019         if (probe_kernel_address((unsigned short *)ip, ud2))
11020 diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_64.c linux-3.0.4/arch/x86/kernel/dumpstack_64.c
11021 --- linux-3.0.4/arch/x86/kernel/dumpstack_64.c  2011-07-21 22:17:23.000000000 -0400
11022 +++ linux-3.0.4/arch/x86/kernel/dumpstack_64.c  2011-08-23 21:47:55.000000000 -0400
11023 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11024         unsigned long *irq_stack_end =
11025                 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11026         unsigned used = 0;
11027 -       struct thread_info *tinfo;
11028         int graph = 0;
11029         unsigned long dummy;
11030 +       void *stack_start;
11031  
11032         if (!task)
11033                 task = current;
11034 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11035          * current stack address. If the stacks consist of nested
11036          * exceptions
11037          */
11038 -       tinfo = task_thread_info(task);
11039         for (;;) {
11040                 char *id;
11041                 unsigned long *estack_end;
11042 +
11043                 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11044                                                 &used, &id);
11045  
11046 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11047                         if (ops->stack(data, id) < 0)
11048                                 break;
11049  
11050 -                       bp = ops->walk_stack(tinfo, stack, bp, ops,
11051 +                       bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11052                                              data, estack_end, &graph);
11053                         ops->stack(data, "<EOE>");
11054                         /*
11055 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11056                         if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11057                                 if (ops->stack(data, "IRQ") < 0)
11058                                         break;
11059 -                               bp = ops->walk_stack(tinfo, stack, bp,
11060 +                               bp = ops->walk_stack(task, irq_stack, stack, bp,
11061                                         ops, data, irq_stack_end, &graph);
11062                                 /*
11063                                  * We link to the next stack (which would be
11064 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11065         /*
11066          * This handles the process stack:
11067          */
11068 -       bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11069 +       stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11070 +       bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11071         put_cpu();
11072  }
11073  EXPORT_SYMBOL(dump_trace);
11074 diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack.c linux-3.0.4/arch/x86/kernel/dumpstack.c
11075 --- linux-3.0.4/arch/x86/kernel/dumpstack.c     2011-07-21 22:17:23.000000000 -0400
11076 +++ linux-3.0.4/arch/x86/kernel/dumpstack.c     2011-08-23 21:48:14.000000000 -0400
11077 @@ -2,6 +2,9 @@
11078   *  Copyright (C) 1991, 1992  Linus Torvalds
11079   *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11080   */
11081 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11082 +#define __INCLUDED_BY_HIDESYM 1
11083 +#endif
11084  #include <linux/kallsyms.h>
11085  #include <linux/kprobes.h>
11086  #include <linux/uaccess.h>
11087 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11088  static void
11089  print_ftrace_graph_addr(unsigned long addr, void *data,
11090                         const struct stacktrace_ops *ops,
11091 -                       struct thread_info *tinfo, int *graph)
11092 +                       struct task_struct *task, int *graph)
11093  {
11094 -       struct task_struct *task = tinfo->task;
11095         unsigned long ret_addr;
11096         int index = task->curr_ret_stack;
11097  
11098 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11099  static inline void
11100  print_ftrace_graph_addr(unsigned long addr, void *data,
11101                         const struct stacktrace_ops *ops,
11102 -                       struct thread_info *tinfo, int *graph)
11103 +                       struct task_struct *task, int *graph)
11104  { }
11105  #endif
11106  
11107 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11108   * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11109   */
11110  
11111 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11112 -                       void *p, unsigned int size, void *end)
11113 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11114  {
11115 -       void *t = tinfo;
11116         if (end) {
11117                 if (p < end && p >= (end-THREAD_SIZE))
11118                         return 1;
11119 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11120  }
11121  
11122  unsigned long
11123 -print_context_stack(struct thread_info *tinfo,
11124 +print_context_stack(struct task_struct *task, void *stack_start,
11125                 unsigned long *stack, unsigned long bp,
11126                 const struct stacktrace_ops *ops, void *data,
11127                 unsigned long *end, int *graph)
11128  {
11129         struct stack_frame *frame = (struct stack_frame *)bp;
11130  
11131 -       while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11132 +       while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11133                 unsigned long addr;
11134  
11135                 addr = *stack;
11136 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11137                         } else {
11138                                 ops->address(data, addr, 0);
11139                         }
11140 -                       print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11141 +                       print_ftrace_graph_addr(addr, data, ops, task, graph);
11142                 }
11143                 stack++;
11144         }
11145 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11146  EXPORT_SYMBOL_GPL(print_context_stack);
11147  
11148  unsigned long
11149 -print_context_stack_bp(struct thread_info *tinfo,
11150 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11151                        unsigned long *stack, unsigned long bp,
11152                        const struct stacktrace_ops *ops, void *data,
11153                        unsigned long *end, int *graph)
11154 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11155         struct stack_frame *frame = (struct stack_frame *)bp;
11156         unsigned long *ret_addr = &frame->return_address;
11157  
11158 -       while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11159 +       while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11160                 unsigned long addr = *ret_addr;
11161  
11162                 if (!__kernel_text_address(addr))
11163 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11164                 ops->address(data, addr, 1);
11165                 frame = frame->next_frame;
11166                 ret_addr = &frame->return_address;
11167 -               print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11168 +               print_ftrace_graph_addr(addr, data, ops, task, graph);
11169         }
11170  
11171         return (unsigned long)frame;
11172 @@ -186,7 +186,7 @@ void dump_stack(void)
11173  
11174         bp = stack_frame(current, NULL);
11175         printk("Pid: %d, comm: %.20s xid: #%u %s %s %.*s\n",
11176 -               current->pid, current->comm, current->xid, print_tainted(),
11177 +               task_pid_nr(current), current->comm, current->xid, print_tainted(),
11178                 init_utsname()->release,
11179                 (int)strcspn(init_utsname()->version, " "),
11180                 init_utsname()->version);
11181 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11182  }
11183  EXPORT_SYMBOL_GPL(oops_begin);
11184  
11185 +extern void gr_handle_kernel_exploit(void);
11186 +
11187  void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11188  {
11189         if (regs && kexec_should_crash(current))
11190 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11191                 panic("Fatal exception in interrupt");
11192         if (panic_on_oops)
11193                 panic("Fatal exception");
11194 -       do_exit(signr);
11195 +
11196 +       gr_handle_kernel_exploit();
11197 +
11198 +       do_group_exit(signr);
11199  }
11200  
11201  int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11202 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11203  
11204         show_registers(regs);
11205  #ifdef CONFIG_X86_32
11206 -       if (user_mode_vm(regs)) {
11207 +       if (user_mode(regs)) {
11208                 sp = regs->sp;
11209                 ss = regs->ss & 0xffff;
11210         } else {
11211 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11212         unsigned long flags = oops_begin();
11213         int sig = SIGSEGV;
11214  
11215 -       if (!user_mode_vm(regs))
11216 +       if (!user_mode(regs))
11217                 report_bug(regs->ip, regs);
11218  
11219         if (__die(str, regs, err))
11220 diff -urNp linux-3.0.4/arch/x86/kernel/early_printk.c linux-3.0.4/arch/x86/kernel/early_printk.c
11221 --- linux-3.0.4/arch/x86/kernel/early_printk.c  2011-07-21 22:17:23.000000000 -0400
11222 +++ linux-3.0.4/arch/x86/kernel/early_printk.c  2011-08-23 21:48:14.000000000 -0400
11223 @@ -7,6 +7,7 @@
11224  #include <linux/pci_regs.h>
11225  #include <linux/pci_ids.h>
11226  #include <linux/errno.h>
11227 +#include <linux/sched.h>
11228  #include <asm/io.h>
11229  #include <asm/processor.h>
11230  #include <asm/fcntl.h>
11231 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char 
11232         int n;
11233         va_list ap;
11234  
11235 +       pax_track_stack();
11236 +
11237         va_start(ap, fmt);
11238         n = vscnprintf(buf, sizeof(buf), fmt, ap);
11239         early_console->write(early_console, buf, n);
11240 diff -urNp linux-3.0.4/arch/x86/kernel/entry_32.S linux-3.0.4/arch/x86/kernel/entry_32.S
11241 --- linux-3.0.4/arch/x86/kernel/entry_32.S      2011-07-21 22:17:23.000000000 -0400
11242 +++ linux-3.0.4/arch/x86/kernel/entry_32.S      2011-08-23 21:48:14.000000000 -0400
11243 @@ -185,13 +185,146 @@
11244         /*CFI_REL_OFFSET gs, PT_GS*/
11245  .endm
11246  .macro SET_KERNEL_GS reg
11247 +
11248 +#ifdef CONFIG_CC_STACKPROTECTOR
11249         movl $(__KERNEL_STACK_CANARY), \reg
11250 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11251 +       movl $(__USER_DS), \reg
11252 +#else
11253 +       xorl \reg, \reg
11254 +#endif
11255 +
11256         movl \reg, %gs
11257  .endm
11258  
11259  #endif /* CONFIG_X86_32_LAZY_GS */
11260  
11261 -.macro SAVE_ALL
11262 +.macro pax_enter_kernel
11263 +#ifdef CONFIG_PAX_KERNEXEC
11264 +       call pax_enter_kernel
11265 +#endif
11266 +.endm
11267 +
11268 +.macro pax_exit_kernel
11269 +#ifdef CONFIG_PAX_KERNEXEC
11270 +       call pax_exit_kernel
11271 +#endif
11272 +.endm
11273 +
11274 +#ifdef CONFIG_PAX_KERNEXEC
11275 +ENTRY(pax_enter_kernel)
11276 +#ifdef CONFIG_PARAVIRT
11277 +       pushl %eax
11278 +       pushl %ecx
11279 +       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11280 +       mov %eax, %esi
11281 +#else
11282 +       mov %cr0, %esi
11283 +#endif
11284 +       bts $16, %esi
11285 +       jnc 1f
11286 +       mov %cs, %esi
11287 +       cmp $__KERNEL_CS, %esi
11288 +       jz 3f
11289 +       ljmp $__KERNEL_CS, $3f
11290 +1:     ljmp $__KERNEXEC_KERNEL_CS, $2f
11291 +2:
11292 +#ifdef CONFIG_PARAVIRT
11293 +       mov %esi, %eax
11294 +       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11295 +#else
11296 +       mov %esi, %cr0
11297 +#endif
11298 +3:
11299 +#ifdef CONFIG_PARAVIRT
11300 +       popl %ecx
11301 +       popl %eax
11302 +#endif
11303 +       ret
11304 +ENDPROC(pax_enter_kernel)
11305 +
11306 +ENTRY(pax_exit_kernel)
11307 +#ifdef CONFIG_PARAVIRT
11308 +       pushl %eax
11309 +       pushl %ecx
11310 +#endif
11311 +       mov %cs, %esi
11312 +       cmp $__KERNEXEC_KERNEL_CS, %esi
11313 +       jnz 2f
11314 +#ifdef CONFIG_PARAVIRT
11315 +       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11316 +       mov %eax, %esi
11317 +#else
11318 +       mov %cr0, %esi
11319 +#endif
11320 +       btr $16, %esi
11321 +       ljmp $__KERNEL_CS, $1f
11322 +1:
11323 +#ifdef CONFIG_PARAVIRT
11324 +       mov %esi, %eax
11325 +       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11326 +#else
11327 +       mov %esi, %cr0
11328 +#endif
11329 +2:
11330 +#ifdef CONFIG_PARAVIRT
11331 +       popl %ecx
11332 +       popl %eax
11333 +#endif
11334 +       ret
11335 +ENDPROC(pax_exit_kernel)
11336 +#endif
11337 +
11338 +.macro pax_erase_kstack
11339 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11340 +       call pax_erase_kstack
11341 +#endif
11342 +.endm
11343 +
11344 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11345 +/*
11346 + * ebp: thread_info
11347 + * ecx, edx: can be clobbered
11348 + */
11349 +ENTRY(pax_erase_kstack)
11350 +       pushl %edi
11351 +       pushl %eax
11352 +
11353 +       mov TI_lowest_stack(%ebp), %edi
11354 +       mov $-0xBEEF, %eax
11355 +       std
11356 +
11357 +1:     mov %edi, %ecx
11358 +       and $THREAD_SIZE_asm - 1, %ecx
11359 +       shr $2, %ecx
11360 +       repne scasl
11361 +       jecxz 2f
11362 +
11363 +       cmp $2*16, %ecx
11364 +       jc 2f
11365 +
11366 +       mov $2*16, %ecx
11367 +       repe scasl
11368 +       jecxz 2f
11369 +       jne 1b
11370 +
11371 +2:     cld
11372 +       mov %esp, %ecx
11373 +       sub %edi, %ecx
11374 +       shr $2, %ecx
11375 +       rep stosl
11376 +
11377 +       mov TI_task_thread_sp0(%ebp), %edi
11378 +       sub $128, %edi
11379 +       mov %edi, TI_lowest_stack(%ebp)
11380 +
11381 +       popl %eax
11382 +       popl %edi
11383 +       ret
11384 +ENDPROC(pax_erase_kstack)
11385 +#endif
11386 +
11387 +.macro __SAVE_ALL _DS
11388         cld
11389         PUSH_GS
11390         pushl_cfi %fs
11391 @@ -214,7 +347,7 @@
11392         CFI_REL_OFFSET ecx, 0
11393         pushl_cfi %ebx
11394         CFI_REL_OFFSET ebx, 0
11395 -       movl $(__USER_DS), %edx
11396 +       movl $\_DS, %edx
11397         movl %edx, %ds
11398         movl %edx, %es
11399         movl $(__KERNEL_PERCPU), %edx
11400 @@ -222,6 +355,15 @@
11401         SET_KERNEL_GS %edx
11402  .endm
11403  
11404 +.macro SAVE_ALL
11405 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11406 +       __SAVE_ALL __KERNEL_DS
11407 +       pax_enter_kernel
11408 +#else
11409 +       __SAVE_ALL __USER_DS
11410 +#endif
11411 +.endm
11412 +
11413  .macro RESTORE_INT_REGS
11414         popl_cfi %ebx
11415         CFI_RESTORE ebx
11416 @@ -332,7 +474,15 @@ check_userspace:
11417         movb PT_CS(%esp), %al
11418         andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11419         cmpl $USER_RPL, %eax
11420 +
11421 +#ifdef CONFIG_PAX_KERNEXEC
11422 +       jae resume_userspace
11423 +
11424 +       PAX_EXIT_KERNEL
11425 +       jmp resume_kernel
11426 +#else
11427         jb resume_kernel                # not returning to v8086 or userspace
11428 +#endif
11429  
11430  ENTRY(resume_userspace)
11431         LOCKDEP_SYS_EXIT
11432 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11433         andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
11434                                         # int/exception return?
11435         jne work_pending
11436 -       jmp restore_all
11437 +       jmp restore_all_pax
11438  END(ret_from_exception)
11439  
11440  #ifdef CONFIG_PREEMPT
11441 @@ -394,23 +544,34 @@ sysenter_past_esp:
11442         /*CFI_REL_OFFSET cs, 0*/
11443         /*
11444          * Push current_thread_info()->sysenter_return to the stack.
11445 -        * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11446 -        * pushed above; +8 corresponds to copy_thread's esp0 setting.
11447          */
11448 -       pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11449 +       pushl_cfi $0
11450         CFI_REL_OFFSET eip, 0
11451  
11452         pushl_cfi %eax
11453         SAVE_ALL
11454 +       GET_THREAD_INFO(%ebp)
11455 +       movl TI_sysenter_return(%ebp),%ebp
11456 +       movl %ebp,PT_EIP(%esp)
11457         ENABLE_INTERRUPTS(CLBR_NONE)
11458  
11459  /*
11460   * Load the potential sixth argument from user stack.
11461   * Careful about security.
11462   */
11463 +       movl PT_OLDESP(%esp),%ebp
11464 +
11465 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11466 +       mov PT_OLDSS(%esp),%ds
11467 +1:     movl %ds:(%ebp),%ebp
11468 +       push %ss
11469 +       pop %ds
11470 +#else
11471         cmpl $__PAGE_OFFSET-3,%ebp
11472         jae syscall_fault
11473  1:     movl (%ebp),%ebp
11474 +#endif
11475 +
11476         movl %ebp,PT_EBP(%esp)
11477  .section __ex_table,"a"
11478         .align 4
11479 @@ -433,12 +594,23 @@ sysenter_do_call:
11480         testl $_TIF_ALLWORK_MASK, %ecx
11481         jne sysexit_audit
11482  sysenter_exit:
11483 +
11484 +#ifdef CONFIG_PAX_RANDKSTACK
11485 +       pushl_cfi %eax
11486 +       call pax_randomize_kstack
11487 +       popl_cfi %eax
11488 +#endif
11489 +
11490 +       pax_erase_kstack
11491 +
11492  /* if something modifies registers it must also disable sysexit */
11493         movl PT_EIP(%esp), %edx
11494         movl PT_OLDESP(%esp), %ecx
11495         xorl %ebp,%ebp
11496         TRACE_IRQS_ON
11497  1:     mov  PT_FS(%esp), %fs
11498 +2:     mov  PT_DS(%esp), %ds
11499 +3:     mov  PT_ES(%esp), %es
11500         PTGS_TO_GS
11501         ENABLE_INTERRUPTS_SYSEXIT
11502  
11503 @@ -455,6 +627,9 @@ sysenter_audit:
11504         movl %eax,%edx                  /* 2nd arg: syscall number */
11505         movl $AUDIT_ARCH_I386,%eax      /* 1st arg: audit arch */
11506         call audit_syscall_entry
11507 +
11508 +       pax_erase_kstack
11509 +
11510         pushl_cfi %ebx
11511         movl PT_EAX(%esp),%eax          /* reload syscall number */
11512         jmp sysenter_do_call
11513 @@ -481,11 +656,17 @@ sysexit_audit:
11514  
11515         CFI_ENDPROC
11516  .pushsection .fixup,"ax"
11517 -2:     movl $0,PT_FS(%esp)
11518 +4:     movl $0,PT_FS(%esp)
11519 +       jmp 1b
11520 +5:     movl $0,PT_DS(%esp)
11521 +       jmp 1b
11522 +6:     movl $0,PT_ES(%esp)
11523         jmp 1b
11524  .section __ex_table,"a"
11525         .align 4
11526 -       .long 1b,2b
11527 +       .long 1b,4b
11528 +       .long 2b,5b
11529 +       .long 3b,6b
11530  .popsection
11531         PTGS_TO_GS_EX
11532  ENDPROC(ia32_sysenter_target)
11533 @@ -518,6 +699,14 @@ syscall_exit:
11534         testl $_TIF_ALLWORK_MASK, %ecx  # current->work
11535         jne syscall_exit_work
11536  
11537 +restore_all_pax:
11538 +
11539 +#ifdef CONFIG_PAX_RANDKSTACK
11540 +       call pax_randomize_kstack
11541 +#endif
11542 +
11543 +       pax_erase_kstack
11544 +
11545  restore_all:
11546         TRACE_IRQS_IRET
11547  restore_all_notrace:
11548 @@ -577,14 +766,34 @@ ldt_ss:
11549   * compensating for the offset by changing to the ESPFIX segment with
11550   * a base address that matches for the difference.
11551   */
11552 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11553 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11554         mov %esp, %edx                  /* load kernel esp */
11555         mov PT_OLDESP(%esp), %eax       /* load userspace esp */
11556         mov %dx, %ax                    /* eax: new kernel esp */
11557         sub %eax, %edx                  /* offset (low word is 0) */
11558 +#ifdef CONFIG_SMP
11559 +       movl PER_CPU_VAR(cpu_number), %ebx
11560 +       shll $PAGE_SHIFT_asm, %ebx
11561 +       addl $cpu_gdt_table, %ebx
11562 +#else
11563 +       movl $cpu_gdt_table, %ebx
11564 +#endif
11565         shr $16, %edx
11566 -       mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11567 -       mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11568 +
11569 +#ifdef CONFIG_PAX_KERNEXEC
11570 +       mov %cr0, %esi
11571 +       btr $16, %esi
11572 +       mov %esi, %cr0
11573 +#endif
11574 +
11575 +       mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11576 +       mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11577 +
11578 +#ifdef CONFIG_PAX_KERNEXEC
11579 +       bts $16, %esi
11580 +       mov %esi, %cr0
11581 +#endif
11582 +
11583         pushl_cfi $__ESPFIX_SS
11584         pushl_cfi %eax                  /* new kernel esp */
11585         /* Disable interrupts, but do not irqtrace this section: we
11586 @@ -613,29 +822,23 @@ work_resched:
11587         movl TI_flags(%ebp), %ecx
11588         andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
11589                                         # than syscall tracing?
11590 -       jz restore_all
11591 +       jz restore_all_pax
11592         testb $_TIF_NEED_RESCHED, %cl
11593         jnz work_resched
11594  
11595  work_notifysig:                                # deal with pending signals and
11596                                         # notify-resume requests
11597 +       movl %esp, %eax
11598  #ifdef CONFIG_VM86
11599         testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11600 -       movl %esp, %eax
11601 -       jne work_notifysig_v86          # returning to kernel-space or
11602 +       jz 1f                           # returning to kernel-space or
11603                                         # vm86-space
11604 -       xorl %edx, %edx
11605 -       call do_notify_resume
11606 -       jmp resume_userspace_sig
11607  
11608 -       ALIGN
11609 -work_notifysig_v86:
11610         pushl_cfi %ecx                  # save ti_flags for do_notify_resume
11611         call save_v86_state             # %eax contains pt_regs pointer
11612         popl_cfi %ecx
11613         movl %eax, %esp
11614 -#else
11615 -       movl %esp, %eax
11616 +1:
11617  #endif
11618         xorl %edx, %edx
11619         call do_notify_resume
11620 @@ -648,6 +851,9 @@ syscall_trace_entry:
11621         movl $-ENOSYS,PT_EAX(%esp)
11622         movl %esp, %eax
11623         call syscall_trace_enter
11624 +
11625 +       pax_erase_kstack
11626 +
11627         /* What it returned is what we'll actually use.  */
11628         cmpl $(nr_syscalls), %eax
11629         jnae syscall_call
11630 @@ -670,6 +876,10 @@ END(syscall_exit_work)
11631  
11632         RING0_INT_FRAME                 # can't unwind into user space anyway
11633  syscall_fault:
11634 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11635 +       push %ss
11636 +       pop %ds
11637 +#endif
11638         GET_THREAD_INFO(%ebp)
11639         movl $-EFAULT,PT_EAX(%esp)
11640         jmp resume_userspace
11641 @@ -752,6 +962,36 @@ ptregs_clone:
11642         CFI_ENDPROC
11643  ENDPROC(ptregs_clone)
11644  
11645 +       ALIGN;
11646 +ENTRY(kernel_execve)
11647 +       CFI_STARTPROC
11648 +       pushl_cfi %ebp
11649 +       sub $PT_OLDSS+4,%esp
11650 +       pushl_cfi %edi
11651 +       pushl_cfi %ecx
11652 +       pushl_cfi %eax
11653 +       lea 3*4(%esp),%edi
11654 +       mov $PT_OLDSS/4+1,%ecx
11655 +       xorl %eax,%eax
11656 +       rep stosl
11657 +       popl_cfi %eax
11658 +       popl_cfi %ecx
11659 +       popl_cfi %edi
11660 +       movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11661 +       pushl_cfi %esp
11662 +       call sys_execve
11663 +       add $4,%esp
11664 +       CFI_ADJUST_CFA_OFFSET -4
11665 +       GET_THREAD_INFO(%ebp)
11666 +       test %eax,%eax
11667 +       jz syscall_exit
11668 +       add $PT_OLDSS+4,%esp
11669 +       CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11670 +       popl_cfi %ebp
11671 +       ret
11672 +       CFI_ENDPROC
11673 +ENDPROC(kernel_execve)
11674 +
11675  .macro FIXUP_ESPFIX_STACK
11676  /*
11677   * Switch back for ESPFIX stack to the normal zerobased stack
11678 @@ -761,8 +1001,15 @@ ENDPROC(ptregs_clone)
11679   * normal stack and adjusts ESP with the matching offset.
11680   */
11681         /* fixup the stack */
11682 -       mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11683 -       mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11684 +#ifdef CONFIG_SMP
11685 +       movl PER_CPU_VAR(cpu_number), %ebx
11686 +       shll $PAGE_SHIFT_asm, %ebx
11687 +       addl $cpu_gdt_table, %ebx
11688 +#else
11689 +       movl $cpu_gdt_table, %ebx
11690 +#endif
11691 +       mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11692 +       mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11693         shl $16, %eax
11694         addl %esp, %eax                 /* the adjusted stack pointer */
11695         pushl_cfi $__KERNEL_DS
11696 @@ -1213,7 +1460,6 @@ return_to_handler:
11697         jmp *%ecx
11698  #endif
11699  
11700 -.section .rodata,"a"
11701  #include "syscall_table_32.S"
11702  
11703  syscall_table_size=(.-sys_call_table)
11704 @@ -1259,9 +1505,12 @@ error_code:
11705         movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
11706         REG_TO_PTGS %ecx
11707         SET_KERNEL_GS %ecx
11708 -       movl $(__USER_DS), %ecx
11709 +       movl $(__KERNEL_DS), %ecx
11710         movl %ecx, %ds
11711         movl %ecx, %es
11712 +
11713 +       pax_enter_kernel
11714 +
11715         TRACE_IRQS_OFF
11716         movl %esp,%eax                  # pt_regs pointer
11717         call *%edi
11718 @@ -1346,6 +1595,9 @@ nmi_stack_correct:
11719         xorl %edx,%edx          # zero error code
11720         movl %esp,%eax          # pt_regs pointer
11721         call do_nmi
11722 +
11723 +       pax_exit_kernel
11724 +
11725         jmp restore_all_notrace
11726         CFI_ENDPROC
11727  
11728 @@ -1382,6 +1634,9 @@ nmi_espfix_stack:
11729         FIXUP_ESPFIX_STACK              # %eax == %esp
11730         xorl %edx,%edx                  # zero error code
11731         call do_nmi
11732 +
11733 +       pax_exit_kernel
11734 +
11735         RESTORE_REGS
11736         lss 12+4(%esp), %esp            # back to espfix stack
11737         CFI_ADJUST_CFA_OFFSET -24
11738 diff -urNp linux-3.0.4/arch/x86/kernel/entry_64.S linux-3.0.4/arch/x86/kernel/entry_64.S
11739 --- linux-3.0.4/arch/x86/kernel/entry_64.S      2011-07-21 22:17:23.000000000 -0400
11740 +++ linux-3.0.4/arch/x86/kernel/entry_64.S      2011-08-26 19:49:56.000000000 -0400
11741 @@ -53,6 +53,7 @@
11742  #include <asm/paravirt.h>
11743  #include <asm/ftrace.h>
11744  #include <asm/percpu.h>
11745 +#include <asm/pgtable.h>
11746  
11747  /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
11748  #include <linux/elf-em.h>
11749 @@ -176,6 +177,264 @@ ENTRY(native_usergs_sysret64)
11750  ENDPROC(native_usergs_sysret64)
11751  #endif /* CONFIG_PARAVIRT */
11752  
11753 +       .macro ljmpq sel, off
11754 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11755 +       .byte 0x48; ljmp *1234f(%rip)
11756 +       .pushsection .rodata
11757 +       .align 16
11758 +       1234: .quad \off; .word \sel
11759 +       .popsection
11760 +#else
11761 +       pushq $\sel
11762 +       pushq $\off
11763 +       lretq
11764 +#endif
11765 +       .endm
11766 +
11767 +       .macro pax_enter_kernel
11768 +#ifdef CONFIG_PAX_KERNEXEC
11769 +       call pax_enter_kernel
11770 +#endif
11771 +       .endm
11772 +
11773 +       .macro pax_exit_kernel
11774 +#ifdef CONFIG_PAX_KERNEXEC
11775 +       call pax_exit_kernel
11776 +#endif
11777 +       .endm
11778 +
11779 +#ifdef CONFIG_PAX_KERNEXEC
11780 +ENTRY(pax_enter_kernel)
11781 +       pushq %rdi
11782 +
11783 +#ifdef CONFIG_PARAVIRT
11784 +       PV_SAVE_REGS(CLBR_RDI)
11785 +#endif
11786 +
11787 +       GET_CR0_INTO_RDI
11788 +       bts $16,%rdi
11789 +       jnc 1f
11790 +       mov %cs,%edi
11791 +       cmp $__KERNEL_CS,%edi
11792 +       jz 3f
11793 +       ljmpq __KERNEL_CS,3f
11794 +1:     ljmpq __KERNEXEC_KERNEL_CS,2f
11795 +2:     SET_RDI_INTO_CR0
11796 +3:
11797 +
11798 +#ifdef CONFIG_PARAVIRT
11799 +       PV_RESTORE_REGS(CLBR_RDI)
11800 +#endif
11801 +
11802 +       popq %rdi
11803 +       retq
11804 +ENDPROC(pax_enter_kernel)
11805 +
11806 +ENTRY(pax_exit_kernel)
11807 +       pushq %rdi
11808 +
11809 +#ifdef CONFIG_PARAVIRT
11810 +       PV_SAVE_REGS(CLBR_RDI)
11811 +#endif
11812 +
11813 +       mov %cs,%rdi
11814 +       cmp $__KERNEXEC_KERNEL_CS,%edi
11815 +       jnz 2f
11816 +       GET_CR0_INTO_RDI
11817 +       btr $16,%rdi
11818 +       ljmpq __KERNEL_CS,1f
11819 +1:     SET_RDI_INTO_CR0
11820 +2:
11821 +
11822 +#ifdef CONFIG_PARAVIRT
11823 +       PV_RESTORE_REGS(CLBR_RDI);
11824 +#endif
11825 +
11826 +       popq %rdi
11827 +       retq
11828 +ENDPROC(pax_exit_kernel)
11829 +#endif
11830 +
11831 +       .macro pax_enter_kernel_user
11832 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11833 +       call pax_enter_kernel_user
11834 +#endif
11835 +       .endm
11836 +
11837 +       .macro pax_exit_kernel_user
11838 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11839 +       call pax_exit_kernel_user
11840 +#endif
11841 +#ifdef CONFIG_PAX_RANDKSTACK
11842 +       push %rax
11843 +       call pax_randomize_kstack
11844 +       pop %rax
11845 +#endif
11846 +       .endm
11847 +
11848 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11849 +ENTRY(pax_enter_kernel_user)
11850 +       pushq %rdi
11851 +       pushq %rbx
11852 +
11853 +#ifdef CONFIG_PARAVIRT
11854 +       PV_SAVE_REGS(CLBR_RDI)
11855 +#endif
11856 +
11857 +       GET_CR3_INTO_RDI
11858 +       mov %rdi,%rbx
11859 +       add $__START_KERNEL_map,%rbx
11860 +       sub phys_base(%rip),%rbx
11861 +
11862 +#ifdef CONFIG_PARAVIRT
11863 +       pushq %rdi
11864 +       cmpl $0, pv_info+PARAVIRT_enabled
11865 +       jz 1f
11866 +       i = 0
11867 +       .rept USER_PGD_PTRS
11868 +       mov i*8(%rbx),%rsi
11869 +       mov $0,%sil
11870 +       lea i*8(%rbx),%rdi
11871 +       call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11872 +       i = i + 1
11873 +       .endr
11874 +       jmp 2f
11875 +1:
11876 +#endif
11877 +
11878 +       i = 0
11879 +       .rept USER_PGD_PTRS
11880 +       movb $0,i*8(%rbx)
11881 +       i = i + 1
11882 +       .endr
11883 +
11884 +#ifdef CONFIG_PARAVIRT
11885 +2:     popq %rdi
11886 +#endif
11887 +       SET_RDI_INTO_CR3
11888 +
11889 +#ifdef CONFIG_PAX_KERNEXEC
11890 +       GET_CR0_INTO_RDI
11891 +       bts $16,%rdi
11892 +       SET_RDI_INTO_CR0
11893 +#endif
11894 +
11895 +#ifdef CONFIG_PARAVIRT
11896 +       PV_RESTORE_REGS(CLBR_RDI)
11897 +#endif
11898 +
11899 +       popq %rbx
11900 +       popq %rdi
11901 +       retq
11902 +ENDPROC(pax_enter_kernel_user)
11903 +
11904 +ENTRY(pax_exit_kernel_user)
11905 +       push %rdi
11906 +
11907 +#ifdef CONFIG_PARAVIRT
11908 +       pushq %rbx
11909 +       PV_SAVE_REGS(CLBR_RDI)
11910 +#endif
11911 +
11912 +#ifdef CONFIG_PAX_KERNEXEC
11913 +       GET_CR0_INTO_RDI
11914 +       btr $16,%rdi
11915 +       SET_RDI_INTO_CR0
11916 +#endif
11917 +
11918 +       GET_CR3_INTO_RDI
11919 +       add $__START_KERNEL_map,%rdi
11920 +       sub phys_base(%rip),%rdi
11921 +
11922 +#ifdef CONFIG_PARAVIRT
11923 +       cmpl $0, pv_info+PARAVIRT_enabled
11924 +       jz 1f
11925 +       mov %rdi,%rbx
11926 +       i = 0
11927 +       .rept USER_PGD_PTRS
11928 +       mov i*8(%rbx),%rsi
11929 +       mov $0x67,%sil
11930 +       lea i*8(%rbx),%rdi
11931 +       call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11932 +       i = i + 1
11933 +       .endr
11934 +       jmp 2f
11935 +1:
11936 +#endif
11937 +
11938 +       i = 0
11939 +       .rept USER_PGD_PTRS
11940 +       movb $0x67,i*8(%rdi)
11941 +       i = i + 1
11942 +       .endr
11943 +
11944 +#ifdef CONFIG_PARAVIRT
11945 +2:     PV_RESTORE_REGS(CLBR_RDI)
11946 +       popq %rbx
11947 +#endif
11948 +
11949 +       popq %rdi
11950 +       retq
11951 +ENDPROC(pax_exit_kernel_user)
11952 +#endif
11953 +
11954 +       .macro pax_erase_kstack
11955 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11956 +       call pax_erase_kstack
11957 +#endif
11958 +       .endm
11959 +
11960 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11961 +/*
11962 + * r10: thread_info
11963 + * rcx, rdx: can be clobbered
11964 + */
11965 +ENTRY(pax_erase_kstack)
11966 +       pushq %rdi
11967 +       pushq %rax
11968 +       pushq %r10
11969 +
11970 +       GET_THREAD_INFO(%r10)
11971 +       mov TI_lowest_stack(%r10), %rdi
11972 +       mov $-0xBEEF, %rax
11973 +       std
11974 +
11975 +1:     mov %edi, %ecx
11976 +       and $THREAD_SIZE_asm - 1, %ecx
11977 +       shr $3, %ecx
11978 +       repne scasq
11979 +       jecxz 2f
11980 +
11981 +       cmp $2*8, %ecx
11982 +       jc 2f
11983 +
11984 +       mov $2*8, %ecx
11985 +       repe scasq
11986 +       jecxz 2f
11987 +       jne 1b
11988 +
11989 +2:     cld
11990 +       mov %esp, %ecx
11991 +       sub %edi, %ecx
11992 +
11993 +       cmp $THREAD_SIZE_asm, %rcx
11994 +       jb 3f
11995 +       ud2
11996 +3:
11997 +
11998 +       shr $3, %ecx
11999 +       rep stosq
12000 +
12001 +       mov TI_task_thread_sp0(%r10), %rdi
12002 +       sub $256, %rdi
12003 +       mov %rdi, TI_lowest_stack(%r10)
12004 +
12005 +       popq %r10
12006 +       popq %rax
12007 +       popq %rdi
12008 +       ret
12009 +ENDPROC(pax_erase_kstack)
12010 +#endif
12011  
12012  .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12013  #ifdef CONFIG_TRACE_IRQFLAGS
12014 @@ -318,7 +577,7 @@ ENTRY(save_args)
12015         leaq -RBP+8(%rsp),%rdi  /* arg1 for handler */
12016         movq_cfi rbp, 8         /* push %rbp */
12017         leaq 8(%rsp), %rbp              /* mov %rsp, %ebp */
12018 -       testl $3, CS(%rdi)
12019 +       testb $3, CS(%rdi)
12020         je 1f
12021         SWAPGS
12022         /*
12023 @@ -409,7 +668,7 @@ ENTRY(ret_from_fork)
12024  
12025         RESTORE_REST
12026  
12027 -       testl $3, CS-ARGOFFSET(%rsp)            # from kernel_thread?
12028 +       testb $3, CS-ARGOFFSET(%rsp)            # from kernel_thread?
12029         je   int_ret_from_sys_call
12030  
12031         testl $_TIF_IA32, TI_flags(%rcx)        # 32-bit compat task needs IRET
12032 @@ -455,7 +714,7 @@ END(ret_from_fork)
12033  ENTRY(system_call)
12034         CFI_STARTPROC   simple
12035         CFI_SIGNAL_FRAME
12036 -       CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
12037 +       CFI_DEF_CFA     rsp,0
12038         CFI_REGISTER    rip,rcx
12039         /*CFI_REGISTER  rflags,r11*/
12040         SWAPGS_UNSAFE_STACK
12041 @@ -468,12 +727,13 @@ ENTRY(system_call_after_swapgs)
12042  
12043         movq    %rsp,PER_CPU_VAR(old_rsp)
12044         movq    PER_CPU_VAR(kernel_stack),%rsp
12045 +       pax_enter_kernel_user
12046         /*
12047          * No need to follow this irqs off/on section - it's straight
12048          * and short:
12049          */
12050         ENABLE_INTERRUPTS(CLBR_NONE)
12051 -       SAVE_ARGS 8,1
12052 +       SAVE_ARGS 8*6,1
12053         movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
12054         movq  %rcx,RIP-ARGOFFSET(%rsp)
12055         CFI_REL_OFFSET rip,RIP-ARGOFFSET
12056 @@ -502,6 +762,8 @@ sysret_check:
12057         andl %edi,%edx
12058         jnz  sysret_careful
12059         CFI_REMEMBER_STATE
12060 +       pax_exit_kernel_user
12061 +       pax_erase_kstack
12062         /*
12063          * sysretq will re-enable interrupts:
12064          */
12065 @@ -560,6 +822,9 @@ auditsys:
12066         movq %rax,%rsi                  /* 2nd arg: syscall number */
12067         movl $AUDIT_ARCH_X86_64,%edi    /* 1st arg: audit arch */
12068         call audit_syscall_entry
12069 +
12070 +       pax_erase_kstack
12071 +
12072         LOAD_ARGS 0             /* reload call-clobbered registers */
12073         jmp system_call_fastpath
12074  
12075 @@ -590,6 +855,9 @@ tracesys:
12076         FIXUP_TOP_OF_STACK %rdi
12077         movq %rsp,%rdi
12078         call syscall_trace_enter
12079 +
12080 +       pax_erase_kstack
12081 +
12082         /*
12083          * Reload arg registers from stack in case ptrace changed them.
12084          * We don't reload %rax because syscall_trace_enter() returned
12085 @@ -611,7 +879,7 @@ tracesys:
12086  GLOBAL(int_ret_from_sys_call)
12087         DISABLE_INTERRUPTS(CLBR_NONE)
12088         TRACE_IRQS_OFF
12089 -       testl $3,CS-ARGOFFSET(%rsp)
12090 +       testb $3,CS-ARGOFFSET(%rsp)
12091         je retint_restore_args
12092         movl $_TIF_ALLWORK_MASK,%edi
12093         /* edi: mask to check */
12094 @@ -793,6 +1061,16 @@ END(interrupt)
12095         CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12096         call save_args
12097         PARTIAL_FRAME 0
12098 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12099 +       testb $3, CS(%rdi)
12100 +       jnz 1f
12101 +       pax_enter_kernel
12102 +       jmp 2f
12103 +1:     pax_enter_kernel_user
12104 +2:
12105 +#else
12106 +       pax_enter_kernel
12107 +#endif
12108         call \func
12109         .endm
12110  
12111 @@ -825,7 +1103,7 @@ ret_from_intr:
12112         CFI_ADJUST_CFA_OFFSET   -8
12113  exit_intr:
12114         GET_THREAD_INFO(%rcx)
12115 -       testl $3,CS-ARGOFFSET(%rsp)
12116 +       testb $3,CS-ARGOFFSET(%rsp)
12117         je retint_kernel
12118  
12119         /* Interrupt came from user space */
12120 @@ -847,12 +1125,15 @@ retint_swapgs:           /* return to user-space 
12121          * The iretq could re-enable interrupts:
12122          */
12123         DISABLE_INTERRUPTS(CLBR_ANY)
12124 +       pax_exit_kernel_user
12125 +       pax_erase_kstack
12126         TRACE_IRQS_IRETQ
12127         SWAPGS
12128         jmp restore_args
12129  
12130  retint_restore_args:   /* return to kernel space */
12131         DISABLE_INTERRUPTS(CLBR_ANY)
12132 +       pax_exit_kernel
12133         /*
12134          * The iretq could re-enable interrupts:
12135          */
12136 @@ -1027,6 +1308,16 @@ ENTRY(\sym)
12137         CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12138         call error_entry
12139         DEFAULT_FRAME 0
12140 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12141 +       testb $3, CS(%rsp)
12142 +       jnz 1f
12143 +       pax_enter_kernel
12144 +       jmp 2f
12145 +1:     pax_enter_kernel_user
12146 +2:
12147 +#else
12148 +       pax_enter_kernel
12149 +#endif
12150         movq %rsp,%rdi          /* pt_regs pointer */
12151         xorl %esi,%esi          /* no error code */
12152         call \do_sym
12153 @@ -1044,6 +1335,16 @@ ENTRY(\sym)
12154         CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12155         call save_paranoid
12156         TRACE_IRQS_OFF
12157 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12158 +       testb $3, CS(%rsp)
12159 +       jnz 1f
12160 +       pax_enter_kernel
12161 +       jmp 2f
12162 +1:     pax_enter_kernel_user
12163 +2:
12164 +#else
12165 +       pax_enter_kernel
12166 +#endif
12167         movq %rsp,%rdi          /* pt_regs pointer */
12168         xorl %esi,%esi          /* no error code */
12169         call \do_sym
12170 @@ -1052,7 +1353,7 @@ ENTRY(\sym)
12171  END(\sym)
12172  .endm
12173  
12174 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12175 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12176  .macro paranoidzeroentry_ist sym do_sym ist
12177  ENTRY(\sym)
12178         INTR_FRAME
12179 @@ -1062,8 +1363,24 @@ ENTRY(\sym)
12180         CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12181         call save_paranoid
12182         TRACE_IRQS_OFF
12183 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12184 +       testb $3, CS(%rsp)
12185 +       jnz 1f
12186 +       pax_enter_kernel
12187 +       jmp 2f
12188 +1:     pax_enter_kernel_user
12189 +2:
12190 +#else
12191 +       pax_enter_kernel
12192 +#endif
12193         movq %rsp,%rdi          /* pt_regs pointer */
12194         xorl %esi,%esi          /* no error code */
12195 +#ifdef CONFIG_SMP
12196 +       imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12197 +       lea init_tss(%r12), %r12
12198 +#else
12199 +       lea init_tss(%rip), %r12
12200 +#endif
12201         subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12202         call \do_sym
12203         addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12204 @@ -1080,6 +1397,16 @@ ENTRY(\sym)
12205         CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12206         call error_entry
12207         DEFAULT_FRAME 0
12208 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12209 +       testb $3, CS(%rsp)
12210 +       jnz 1f
12211 +       pax_enter_kernel
12212 +       jmp 2f
12213 +1:     pax_enter_kernel_user
12214 +2:
12215 +#else
12216 +       pax_enter_kernel
12217 +#endif
12218         movq %rsp,%rdi                  /* pt_regs pointer */
12219         movq ORIG_RAX(%rsp),%rsi        /* get error code */
12220         movq $-1,ORIG_RAX(%rsp)         /* no syscall to restart */
12221 @@ -1099,6 +1426,16 @@ ENTRY(\sym)
12222         call save_paranoid
12223         DEFAULT_FRAME 0
12224         TRACE_IRQS_OFF
12225 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12226 +       testb $3, CS(%rsp)
12227 +       jnz 1f
12228 +       pax_enter_kernel
12229 +       jmp 2f
12230 +1:     pax_enter_kernel_user
12231 +2:
12232 +#else
12233 +       pax_enter_kernel
12234 +#endif
12235         movq %rsp,%rdi                  /* pt_regs pointer */
12236         movq ORIG_RAX(%rsp),%rsi        /* get error code */
12237         movq $-1,ORIG_RAX(%rsp)         /* no syscall to restart */
12238 @@ -1361,14 +1698,27 @@ ENTRY(paranoid_exit)
12239         TRACE_IRQS_OFF
12240         testl %ebx,%ebx                         /* swapgs needed? */
12241         jnz paranoid_restore
12242 -       testl $3,CS(%rsp)
12243 +       testb $3,CS(%rsp)
12244         jnz   paranoid_userspace
12245 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12246 +       pax_exit_kernel
12247 +       TRACE_IRQS_IRETQ 0
12248 +       SWAPGS_UNSAFE_STACK
12249 +       RESTORE_ALL 8
12250 +       jmp irq_return
12251 +#endif
12252  paranoid_swapgs:
12253 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12254 +       pax_exit_kernel_user
12255 +#else
12256 +       pax_exit_kernel
12257 +#endif
12258         TRACE_IRQS_IRETQ 0
12259         SWAPGS_UNSAFE_STACK
12260         RESTORE_ALL 8
12261         jmp irq_return
12262  paranoid_restore:
12263 +       pax_exit_kernel
12264         TRACE_IRQS_IRETQ 0
12265         RESTORE_ALL 8
12266         jmp irq_return
12267 @@ -1426,7 +1776,7 @@ ENTRY(error_entry)
12268         movq_cfi r14, R14+8
12269         movq_cfi r15, R15+8
12270         xorl %ebx,%ebx
12271 -       testl $3,CS+8(%rsp)
12272 +       testb $3,CS+8(%rsp)
12273         je error_kernelspace
12274  error_swapgs:
12275         SWAPGS
12276 @@ -1490,6 +1840,16 @@ ENTRY(nmi)
12277         CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12278         call save_paranoid
12279         DEFAULT_FRAME 0
12280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12281 +       testb $3, CS(%rsp)
12282 +       jnz 1f
12283 +       pax_enter_kernel
12284 +       jmp 2f
12285 +1:     pax_enter_kernel_user
12286 +2:
12287 +#else
12288 +       pax_enter_kernel
12289 +#endif
12290         /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12291         movq %rsp,%rdi
12292         movq $-1,%rsi
12293 @@ -1500,11 +1860,25 @@ ENTRY(nmi)
12294         DISABLE_INTERRUPTS(CLBR_NONE)
12295         testl %ebx,%ebx                         /* swapgs needed? */
12296         jnz nmi_restore
12297 -       testl $3,CS(%rsp)
12298 +       testb $3,CS(%rsp)
12299         jnz nmi_userspace
12300 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12301 +       pax_exit_kernel
12302 +       SWAPGS_UNSAFE_STACK
12303 +       RESTORE_ALL 8
12304 +       jmp irq_return
12305 +#endif
12306  nmi_swapgs:
12307 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12308 +       pax_exit_kernel_user
12309 +#else
12310 +       pax_exit_kernel
12311 +#endif
12312         SWAPGS_UNSAFE_STACK
12313 +       RESTORE_ALL 8
12314 +       jmp irq_return
12315  nmi_restore:
12316 +       pax_exit_kernel
12317         RESTORE_ALL 8
12318         jmp irq_return
12319  nmi_userspace:
12320 diff -urNp linux-3.0.4/arch/x86/kernel/ftrace.c linux-3.0.4/arch/x86/kernel/ftrace.c
12321 --- linux-3.0.4/arch/x86/kernel/ftrace.c        2011-07-21 22:17:23.000000000 -0400
12322 +++ linux-3.0.4/arch/x86/kernel/ftrace.c        2011-08-23 21:47:55.000000000 -0400
12323 @@ -126,7 +126,7 @@ static void *mod_code_ip;           /* holds the 
12324  static const void *mod_code_newcode;   /* holds the text to write to the IP */
12325  
12326  static unsigned nmi_wait_count;
12327 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12328 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12329  
12330  int ftrace_arch_read_dyn_info(char *buf, int size)
12331  {
12332 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12333  
12334         r = snprintf(buf, size, "%u %u",
12335                      nmi_wait_count,
12336 -                    atomic_read(&nmi_update_count));
12337 +                    atomic_read_unchecked(&nmi_update_count));
12338         return r;
12339  }
12340  
12341 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12342  
12343         if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12344                 smp_rmb();
12345 +               pax_open_kernel();
12346                 ftrace_mod_code();
12347 -               atomic_inc(&nmi_update_count);
12348 +               pax_close_kernel();
12349 +               atomic_inc_unchecked(&nmi_update_count);
12350         }
12351         /* Must have previous changes seen before executions */
12352         smp_mb();
12353 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12354  {
12355         unsigned char replaced[MCOUNT_INSN_SIZE];
12356  
12357 +       ip = ktla_ktva(ip);
12358 +
12359         /*
12360          * Note: Due to modules and __init, code can
12361          *  disappear and change, we need to protect against faulting
12362 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12363         unsigned char old[MCOUNT_INSN_SIZE], *new;
12364         int ret;
12365  
12366 -       memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12367 +       memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12368         new = ftrace_call_replace(ip, (unsigned long)func);
12369         ret = ftrace_modify_code(ip, old, new);
12370  
12371 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long 
12372  {
12373         unsigned char code[MCOUNT_INSN_SIZE];
12374  
12375 +       ip = ktla_ktva(ip);
12376 +
12377         if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12378                 return -EFAULT;
12379  
12380 diff -urNp linux-3.0.4/arch/x86/kernel/head32.c linux-3.0.4/arch/x86/kernel/head32.c
12381 --- linux-3.0.4/arch/x86/kernel/head32.c        2011-07-21 22:17:23.000000000 -0400
12382 +++ linux-3.0.4/arch/x86/kernel/head32.c        2011-08-23 21:47:55.000000000 -0400
12383 @@ -19,6 +19,7 @@
12384  #include <asm/io_apic.h>
12385  #include <asm/bios_ebda.h>
12386  #include <asm/tlbflush.h>
12387 +#include <asm/boot.h>
12388  
12389  static void __init i386_default_early_setup(void)
12390  {
12391 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12392  {
12393         memblock_init();
12394  
12395 -       memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12396 +       memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12397  
12398  #ifdef CONFIG_BLK_DEV_INITRD
12399         /* Reserve INITRD */
12400 diff -urNp linux-3.0.4/arch/x86/kernel/head_32.S linux-3.0.4/arch/x86/kernel/head_32.S
12401 --- linux-3.0.4/arch/x86/kernel/head_32.S       2011-07-21 22:17:23.000000000 -0400
12402 +++ linux-3.0.4/arch/x86/kernel/head_32.S       2011-08-23 21:47:55.000000000 -0400
12403 @@ -25,6 +25,12 @@
12404  /* Physical address */
12405  #define pa(X) ((X) - __PAGE_OFFSET)
12406  
12407 +#ifdef CONFIG_PAX_KERNEXEC
12408 +#define ta(X) (X)
12409 +#else
12410 +#define ta(X) ((X) - __PAGE_OFFSET)
12411 +#endif
12412 +
12413  /*
12414   * References to members of the new_cpu_data structure.
12415   */
12416 @@ -54,11 +60,7 @@
12417   * and small than max_low_pfn, otherwise will waste some page table entries
12418   */
12419  
12420 -#if PTRS_PER_PMD > 1
12421 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12422 -#else
12423 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12424 -#endif
12425 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12426  
12427  /* Number of possible pages in the lowmem region */
12428  LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12429 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12430  RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12431  
12432  /*
12433 + * Real beginning of normal "text" segment
12434 + */
12435 +ENTRY(stext)
12436 +ENTRY(_stext)
12437 +
12438 +/*
12439   * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
12440   * %esi points to the real-mode code as a 32-bit pointer.
12441   * CS and DS must be 4 GB flat segments, but we don't depend on
12442 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12443   * can.
12444   */
12445  __HEAD
12446 +
12447 +#ifdef CONFIG_PAX_KERNEXEC
12448 +       jmp startup_32
12449 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12450 +.fill PAGE_SIZE-5,1,0xcc
12451 +#endif
12452 +
12453  ENTRY(startup_32)
12454         movl pa(stack_start),%ecx
12455         
12456 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12457  2:
12458         leal -__PAGE_OFFSET(%ecx),%esp
12459  
12460 +#ifdef CONFIG_SMP
12461 +       movl $pa(cpu_gdt_table),%edi
12462 +       movl $__per_cpu_load,%eax
12463 +       movw %ax,__KERNEL_PERCPU + 2(%edi)
12464 +       rorl $16,%eax
12465 +       movb %al,__KERNEL_PERCPU + 4(%edi)
12466 +       movb %ah,__KERNEL_PERCPU + 7(%edi)
12467 +       movl $__per_cpu_end - 1,%eax
12468 +       subl $__per_cpu_start,%eax
12469 +       movw %ax,__KERNEL_PERCPU + 0(%edi)
12470 +#endif
12471 +
12472 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12473 +       movl $NR_CPUS,%ecx
12474 +       movl $pa(cpu_gdt_table),%edi
12475 +1:
12476 +       movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12477 +       movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12478 +       movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12479 +       addl $PAGE_SIZE_asm,%edi
12480 +       loop 1b
12481 +#endif
12482 +
12483 +#ifdef CONFIG_PAX_KERNEXEC
12484 +       movl $pa(boot_gdt),%edi
12485 +       movl $__LOAD_PHYSICAL_ADDR,%eax
12486 +       movw %ax,__BOOT_CS + 2(%edi)
12487 +       rorl $16,%eax
12488 +       movb %al,__BOOT_CS + 4(%edi)
12489 +       movb %ah,__BOOT_CS + 7(%edi)
12490 +       rorl $16,%eax
12491 +
12492 +       ljmp $(__BOOT_CS),$1f
12493 +1:
12494 +
12495 +       movl $NR_CPUS,%ecx
12496 +       movl $pa(cpu_gdt_table),%edi
12497 +       addl $__PAGE_OFFSET,%eax
12498 +1:
12499 +       movw %ax,__KERNEL_CS + 2(%edi)
12500 +       movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12501 +       rorl $16,%eax
12502 +       movb %al,__KERNEL_CS + 4(%edi)
12503 +       movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12504 +       movb %ah,__KERNEL_CS + 7(%edi)
12505 +       movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12506 +       rorl $16,%eax
12507 +       addl $PAGE_SIZE_asm,%edi
12508 +       loop 1b
12509 +#endif
12510 +
12511  /*
12512   * Clear BSS first so that there are no surprises...
12513   */
12514 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12515         movl %eax, pa(max_pfn_mapped)
12516  
12517         /* Do early initialization of the fixmap area */
12518 -       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12519 -       movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12520 +#ifdef CONFIG_COMPAT_VDSO
12521 +       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12522 +#else
12523 +       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12524 +#endif
12525  #else  /* Not PAE */
12526  
12527  page_pde_offset = (__PAGE_OFFSET >> 20);
12528 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12529         movl %eax, pa(max_pfn_mapped)
12530  
12531         /* Do early initialization of the fixmap area */
12532 -       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12533 -       movl %eax,pa(initial_page_table+0xffc)
12534 +#ifdef CONFIG_COMPAT_VDSO
12535 +       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12536 +#else
12537 +       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12538 +#endif
12539  #endif
12540  
12541  #ifdef CONFIG_PARAVIRT
12542 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12543         cmpl $num_subarch_entries, %eax
12544         jae bad_subarch
12545  
12546 -       movl pa(subarch_entries)(,%eax,4), %eax
12547 -       subl $__PAGE_OFFSET, %eax
12548 -       jmp *%eax
12549 +       jmp *pa(subarch_entries)(,%eax,4)
12550  
12551  bad_subarch:
12552  WEAK(lguest_entry)
12553 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12554         __INITDATA
12555  
12556  subarch_entries:
12557 -       .long default_entry             /* normal x86/PC */
12558 -       .long lguest_entry              /* lguest hypervisor */
12559 -       .long xen_entry                 /* Xen hypervisor */
12560 -       .long default_entry             /* Moorestown MID */
12561 +       .long ta(default_entry)         /* normal x86/PC */
12562 +       .long ta(lguest_entry)          /* lguest hypervisor */
12563 +       .long ta(xen_entry)             /* Xen hypervisor */
12564 +       .long ta(default_entry)         /* Moorestown MID */
12565  num_subarch_entries = (. - subarch_entries) / 4
12566  .previous
12567  #else
12568 @@ -312,6 +382,7 @@ default_entry:
12569         orl %edx,%eax
12570         movl %eax,%cr4
12571  
12572 +#ifdef CONFIG_X86_PAE
12573         testb $X86_CR4_PAE, %al         # check if PAE is enabled
12574         jz 6f
12575  
12576 @@ -340,6 +411,9 @@ default_entry:
12577         /* Make changes effective */
12578         wrmsr
12579  
12580 +       btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12581 +#endif
12582 +
12583  6:
12584  
12585  /*
12586 @@ -443,7 +517,7 @@ is386:      movl $2,%ecx            # set MP
12587  1:     movl $(__KERNEL_DS),%eax        # reload all the segment registers
12588         movl %eax,%ss                   # after changing gdt.
12589  
12590 -       movl $(__USER_DS),%eax          # DS/ES contains default USER segment
12591 +#      movl $(__KERNEL_DS),%eax        # DS/ES contains default KERNEL segment
12592         movl %eax,%ds
12593         movl %eax,%es
12594  
12595 @@ -457,15 +531,22 @@ is386:    movl $2,%ecx            # set MP
12596          */
12597         cmpb $0,ready
12598         jne 1f
12599 -       movl $gdt_page,%eax
12600 +       movl $cpu_gdt_table,%eax
12601         movl $stack_canary,%ecx
12602 +#ifdef CONFIG_SMP
12603 +       addl $__per_cpu_load,%ecx
12604 +#endif
12605         movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12606         shrl $16, %ecx
12607         movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12608         movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12609  1:
12610 -#endif
12611         movl $(__KERNEL_STACK_CANARY),%eax
12612 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12613 +       movl $(__USER_DS),%eax
12614 +#else
12615 +       xorl %eax,%eax
12616 +#endif
12617         movl %eax,%gs
12618  
12619         xorl %eax,%eax                  # Clear LDT
12620 @@ -558,22 +639,22 @@ early_page_fault:
12621         jmp early_fault
12622  
12623  early_fault:
12624 -       cld
12625  #ifdef CONFIG_PRINTK
12626 +       cmpl $1,%ss:early_recursion_flag
12627 +       je hlt_loop
12628 +       incl %ss:early_recursion_flag
12629 +       cld
12630         pusha
12631         movl $(__KERNEL_DS),%eax
12632         movl %eax,%ds
12633         movl %eax,%es
12634 -       cmpl $2,early_recursion_flag
12635 -       je hlt_loop
12636 -       incl early_recursion_flag
12637         movl %cr2,%eax
12638         pushl %eax
12639         pushl %edx              /* trapno */
12640         pushl $fault_msg
12641         call printk
12642 +;      call dump_stack
12643  #endif
12644 -       call dump_stack
12645  hlt_loop:
12646         hlt
12647         jmp hlt_loop
12648 @@ -581,8 +662,11 @@ hlt_loop:
12649  /* This is the default interrupt "handler" :-) */
12650         ALIGN
12651  ignore_int:
12652 -       cld
12653  #ifdef CONFIG_PRINTK
12654 +       cmpl $2,%ss:early_recursion_flag
12655 +       je hlt_loop
12656 +       incl %ss:early_recursion_flag
12657 +       cld
12658         pushl %eax
12659         pushl %ecx
12660         pushl %edx
12661 @@ -591,9 +675,6 @@ ignore_int:
12662         movl $(__KERNEL_DS),%eax
12663         movl %eax,%ds
12664         movl %eax,%es
12665 -       cmpl $2,early_recursion_flag
12666 -       je hlt_loop
12667 -       incl early_recursion_flag
12668         pushl 16(%esp)
12669         pushl 24(%esp)
12670         pushl 32(%esp)
12671 @@ -622,29 +703,43 @@ ENTRY(initial_code)
12672  /*
12673   * BSS section
12674   */
12675 -__PAGE_ALIGNED_BSS
12676 -       .align PAGE_SIZE
12677  #ifdef CONFIG_X86_PAE
12678 +.section .initial_pg_pmd,"a",@progbits
12679  initial_pg_pmd:
12680         .fill 1024*KPMDS,4,0
12681  #else
12682 +.section .initial_page_table,"a",@progbits
12683  ENTRY(initial_page_table)
12684         .fill 1024,4,0
12685  #endif
12686 +.section .initial_pg_fixmap,"a",@progbits
12687  initial_pg_fixmap:
12688         .fill 1024,4,0
12689 +.section .empty_zero_page,"a",@progbits
12690  ENTRY(empty_zero_page)
12691         .fill 4096,1,0
12692 +.section .swapper_pg_dir,"a",@progbits
12693  ENTRY(swapper_pg_dir)
12694 +#ifdef CONFIG_X86_PAE
12695 +       .fill 4,8,0
12696 +#else
12697         .fill 1024,4,0
12698 +#endif
12699 +
12700 +/*
12701 + * The IDT has to be page-aligned to simplify the Pentium
12702 + * F0 0F bug workaround.. We have a special link segment
12703 + * for this.
12704 + */
12705 +.section .idt,"a",@progbits
12706 +ENTRY(idt_table)
12707 +       .fill 256,8,0
12708  
12709  /*
12710   * This starts the data section.
12711   */
12712  #ifdef CONFIG_X86_PAE
12713 -__PAGE_ALIGNED_DATA
12714 -       /* Page-aligned for the benefit of paravirt? */
12715 -       .align PAGE_SIZE
12716 +.section .initial_page_table,"a",@progbits
12717  ENTRY(initial_page_table)
12718         .long   pa(initial_pg_pmd+PGD_IDENT_ATTR),0     /* low identity map */
12719  # if KPMDS == 3
12720 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12721  #  error "Kernel PMDs should be 1, 2 or 3"
12722  # endif
12723         .align PAGE_SIZE                /* needs to be page-sized too */
12724 +
12725 +#ifdef CONFIG_PAX_PER_CPU_PGD
12726 +ENTRY(cpu_pgd)
12727 +       .rept NR_CPUS
12728 +       .fill   4,8,0
12729 +       .endr
12730 +#endif
12731 +
12732  #endif
12733  
12734  .data
12735  .balign 4
12736  ENTRY(stack_start)
12737 -       .long init_thread_union+THREAD_SIZE
12738 +       .long init_thread_union+THREAD_SIZE-8
12739 +
12740 +ready: .byte 0
12741  
12742 +.section .rodata,"a",@progbits
12743  early_recursion_flag:
12744         .long 0
12745  
12746 -ready: .byte 0
12747 -
12748  int_msg:
12749         .asciz "Unknown interrupt or fault at: %p %p %p\n"
12750  
12751 @@ -707,7 +811,7 @@ fault_msg:
12752         .word 0                         # 32 bit align gdt_desc.address
12753  boot_gdt_descr:
12754         .word __BOOT_DS+7
12755 -       .long boot_gdt - __PAGE_OFFSET
12756 +       .long pa(boot_gdt)
12757  
12758         .word 0                         # 32-bit align idt_desc.address
12759  idt_descr:
12760 @@ -718,7 +822,7 @@ idt_descr:
12761         .word 0                         # 32 bit align gdt_desc.address
12762  ENTRY(early_gdt_descr)
12763         .word GDT_ENTRIES*8-1
12764 -       .long gdt_page                  /* Overwritten for secondary CPUs */
12765 +       .long cpu_gdt_table             /* Overwritten for secondary CPUs */
12766  
12767  /*
12768   * The boot_gdt must mirror the equivalent in setup.S and is
12769 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12770         .align L1_CACHE_BYTES
12771  ENTRY(boot_gdt)
12772         .fill GDT_ENTRY_BOOT_CS,8,0
12773 -       .quad 0x00cf9a000000ffff        /* kernel 4GB code at 0x00000000 */
12774 -       .quad 0x00cf92000000ffff        /* kernel 4GB data at 0x00000000 */
12775 +       .quad 0x00cf9b000000ffff        /* kernel 4GB code at 0x00000000 */
12776 +       .quad 0x00cf93000000ffff        /* kernel 4GB data at 0x00000000 */
12777 +
12778 +       .align PAGE_SIZE_asm
12779 +ENTRY(cpu_gdt_table)
12780 +       .rept NR_CPUS
12781 +       .quad 0x0000000000000000        /* NULL descriptor */
12782 +       .quad 0x0000000000000000        /* 0x0b reserved */
12783 +       .quad 0x0000000000000000        /* 0x13 reserved */
12784 +       .quad 0x0000000000000000        /* 0x1b reserved */
12785 +
12786 +#ifdef CONFIG_PAX_KERNEXEC
12787 +       .quad 0x00cf9b000000ffff        /* 0x20 alternate kernel 4GB code at 0x00000000 */
12788 +#else
12789 +       .quad 0x0000000000000000        /* 0x20 unused */
12790 +#endif
12791 +
12792 +       .quad 0x0000000000000000        /* 0x28 unused */
12793 +       .quad 0x0000000000000000        /* 0x33 TLS entry 1 */
12794 +       .quad 0x0000000000000000        /* 0x3b TLS entry 2 */
12795 +       .quad 0x0000000000000000        /* 0x43 TLS entry 3 */
12796 +       .quad 0x0000000000000000        /* 0x4b reserved */
12797 +       .quad 0x0000000000000000        /* 0x53 reserved */
12798 +       .quad 0x0000000000000000        /* 0x5b reserved */
12799 +
12800 +       .quad 0x00cf9b000000ffff        /* 0x60 kernel 4GB code at 0x00000000 */
12801 +       .quad 0x00cf93000000ffff        /* 0x68 kernel 4GB data at 0x00000000 */
12802 +       .quad 0x00cffb000000ffff        /* 0x73 user 4GB code at 0x00000000 */
12803 +       .quad 0x00cff3000000ffff        /* 0x7b user 4GB data at 0x00000000 */
12804 +
12805 +       .quad 0x0000000000000000        /* 0x80 TSS descriptor */
12806 +       .quad 0x0000000000000000        /* 0x88 LDT descriptor */
12807 +
12808 +       /*
12809 +        * Segments used for calling PnP BIOS have byte granularity.
12810 +        * The code segments and data segments have fixed 64k limits,
12811 +        * the transfer segment sizes are set at run time.
12812 +        */
12813 +       .quad 0x00409b000000ffff        /* 0x90 32-bit code */
12814 +       .quad 0x00009b000000ffff        /* 0x98 16-bit code */
12815 +       .quad 0x000093000000ffff        /* 0xa0 16-bit data */
12816 +       .quad 0x0000930000000000        /* 0xa8 16-bit data */
12817 +       .quad 0x0000930000000000        /* 0xb0 16-bit data */
12818 +
12819 +       /*
12820 +        * The APM segments have byte granularity and their bases
12821 +        * are set at run time.  All have 64k limits.
12822 +        */
12823 +       .quad 0x00409b000000ffff        /* 0xb8 APM CS    code */
12824 +       .quad 0x00009b000000ffff        /* 0xc0 APM CS 16 code (16 bit) */
12825 +       .quad 0x004093000000ffff        /* 0xc8 APM DS    data */
12826 +
12827 +       .quad 0x00c0930000000000        /* 0xd0 - ESPFIX SS */
12828 +       .quad 0x0040930000000000        /* 0xd8 - PERCPU */
12829 +       .quad 0x0040910000000017        /* 0xe0 - STACK_CANARY */
12830 +       .quad 0x0000000000000000        /* 0xe8 - PCIBIOS_CS */
12831 +       .quad 0x0000000000000000        /* 0xf0 - PCIBIOS_DS */
12832 +       .quad 0x0000000000000000        /* 0xf8 - GDT entry 31: double-fault TSS */
12833 +
12834 +       /* Be sure this is zeroed to avoid false validations in Xen */
12835 +       .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12836 +       .endr
12837 diff -urNp linux-3.0.4/arch/x86/kernel/head_64.S linux-3.0.4/arch/x86/kernel/head_64.S
12838 --- linux-3.0.4/arch/x86/kernel/head_64.S       2011-07-21 22:17:23.000000000 -0400
12839 +++ linux-3.0.4/arch/x86/kernel/head_64.S       2011-08-23 21:47:55.000000000 -0400
12840 @@ -19,6 +19,7 @@
12841  #include <asm/cache.h>
12842  #include <asm/processor-flags.h>
12843  #include <asm/percpu.h>
12844 +#include <asm/cpufeature.h>
12845  
12846  #ifdef CONFIG_PARAVIRT
12847  #include <asm/asm-offsets.h>
12848 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12849  L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12850  L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12851  L3_START_KERNEL = pud_index(__START_KERNEL_map)
12852 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12853 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12854 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12855 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12856  
12857         .text
12858         __HEAD
12859 @@ -85,35 +90,22 @@ startup_64:
12860          */
12861         addq    %rbp, init_level4_pgt + 0(%rip)
12862         addq    %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12863 +       addq    %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12864 +       addq    %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12865         addq    %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12866  
12867         addq    %rbp, level3_ident_pgt + 0(%rip)
12868 +#ifndef CONFIG_XEN
12869 +       addq    %rbp, level3_ident_pgt + 8(%rip)
12870 +#endif
12871  
12872 -       addq    %rbp, level3_kernel_pgt + (510*8)(%rip)
12873 -       addq    %rbp, level3_kernel_pgt + (511*8)(%rip)
12874 +       addq    %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12875  
12876 -       addq    %rbp, level2_fixmap_pgt + (506*8)(%rip)
12877 +       addq    %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12878 +       addq    %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12879  
12880 -       /* Add an Identity mapping if I am above 1G */
12881 -       leaq    _text(%rip), %rdi
12882 -       andq    $PMD_PAGE_MASK, %rdi
12883 -
12884 -       movq    %rdi, %rax
12885 -       shrq    $PUD_SHIFT, %rax
12886 -       andq    $(PTRS_PER_PUD - 1), %rax
12887 -       jz      ident_complete
12888 -
12889 -       leaq    (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12890 -       leaq    level3_ident_pgt(%rip), %rbx
12891 -       movq    %rdx, 0(%rbx, %rax, 8)
12892 -
12893 -       movq    %rdi, %rax
12894 -       shrq    $PMD_SHIFT, %rax
12895 -       andq    $(PTRS_PER_PMD - 1), %rax
12896 -       leaq    __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12897 -       leaq    level2_spare_pgt(%rip), %rbx
12898 -       movq    %rdx, 0(%rbx, %rax, 8)
12899 -ident_complete:
12900 +       addq    %rbp, level2_fixmap_pgt + (506*8)(%rip)
12901 +       addq    %rbp, level2_fixmap_pgt + (507*8)(%rip)
12902  
12903         /*
12904          * Fixup the kernel text+data virtual addresses. Note that
12905 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12906          * after the boot processor executes this code.
12907          */
12908  
12909 -       /* Enable PAE mode and PGE */
12910 -       movl    $(X86_CR4_PAE | X86_CR4_PGE), %eax
12911 +       /* Enable PAE mode and PSE/PGE */
12912 +       movl    $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12913         movq    %rax, %cr4
12914  
12915         /* Setup early boot stage 4 level pagetables. */
12916 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12917         movl    $MSR_EFER, %ecx
12918         rdmsr
12919         btsl    $_EFER_SCE, %eax        /* Enable System Call */
12920 -       btl     $20,%edi                /* No Execute supported? */
12921 +       btl     $(X86_FEATURE_NX & 31),%edi     /* No Execute supported? */
12922         jnc     1f
12923         btsl    $_EFER_NX, %eax
12924 +       leaq    init_level4_pgt(%rip), %rdi
12925 +       btsq    $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12926 +       btsq    $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12927 +       btsq    $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12928 +       btsq    $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12929  1:     wrmsr                           /* Make changes effective */
12930  
12931         /* Setup cr0 */
12932 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12933  bad_address:
12934         jmp bad_address
12935  
12936 -       .section ".init.text","ax"
12937 +       __INIT
12938  #ifdef CONFIG_EARLY_PRINTK
12939         .globl early_idt_handlers
12940  early_idt_handlers:
12941 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12942  #endif /* EARLY_PRINTK */
12943  1:     hlt
12944         jmp 1b
12945 +       .previous
12946  
12947  #ifdef CONFIG_EARLY_PRINTK
12948 +       __INITDATA
12949  early_recursion_flag:
12950         .long 0
12951 +       .previous
12952  
12953 +       .section .rodata,"a",@progbits
12954  early_idt_msg:
12955         .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12956  early_idt_ripmsg:
12957         .asciz "RIP %s\n"
12958 -#endif /* CONFIG_EARLY_PRINTK */
12959         .previous
12960 +#endif /* CONFIG_EARLY_PRINTK */
12961  
12962 +       .section .rodata,"a",@progbits
12963  #define NEXT_PAGE(name) \
12964         .balign PAGE_SIZE; \
12965  ENTRY(name)
12966 @@ -338,7 +340,6 @@ ENTRY(name)
12967         i = i + 1 ;                                     \
12968         .endr
12969  
12970 -       .data
12971         /*
12972          * This default setting generates an ident mapping at address 0x100000
12973          * and a mapping for the kernel that precisely maps virtual address
12974 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12975         .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12976         .org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
12977         .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12978 +       .org    init_level4_pgt + L4_VMALLOC_START*8, 0
12979 +       .quad   level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
12980 +       .org    init_level4_pgt + L4_VMEMMAP_START*8, 0
12981 +       .quad   level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12982         .org    init_level4_pgt + L4_START_KERNEL*8, 0
12983         /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
12984         .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
12985  
12986 +#ifdef CONFIG_PAX_PER_CPU_PGD
12987 +NEXT_PAGE(cpu_pgd)
12988 +       .rept NR_CPUS
12989 +       .fill   512,8,0
12990 +       .endr
12991 +#endif
12992 +
12993  NEXT_PAGE(level3_ident_pgt)
12994         .quad   level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12995 +#ifdef CONFIG_XEN
12996         .fill   511,8,0
12997 +#else
12998 +       .quad   level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
12999 +       .fill   510,8,0
13000 +#endif
13001 +
13002 +NEXT_PAGE(level3_vmalloc_pgt)
13003 +       .fill   512,8,0
13004 +
13005 +NEXT_PAGE(level3_vmemmap_pgt)
13006 +       .fill   L3_VMEMMAP_START,8,0
13007 +       .quad   level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13008  
13009  NEXT_PAGE(level3_kernel_pgt)
13010         .fill   L3_START_KERNEL,8,0
13011 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13012         .quad   level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13013         .quad   level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13014  
13015 +NEXT_PAGE(level2_vmemmap_pgt)
13016 +       .fill   512,8,0
13017 +
13018  NEXT_PAGE(level2_fixmap_pgt)
13019 -       .fill   506,8,0
13020 -       .quad   level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13021 -       /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13022 -       .fill   5,8,0
13023 +       .fill   507,8,0
13024 +       .quad   level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13025 +       /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13026 +       .fill   4,8,0
13027  
13028 -NEXT_PAGE(level1_fixmap_pgt)
13029 +NEXT_PAGE(level1_vsyscall_pgt)
13030         .fill   512,8,0
13031  
13032 -NEXT_PAGE(level2_ident_pgt)
13033 -       /* Since I easily can, map the first 1G.
13034 +       /* Since I easily can, map the first 2G.
13035          * Don't set NX because code runs from these pages.
13036          */
13037 -       PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13038 +NEXT_PAGE(level2_ident_pgt)
13039 +       PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13040  
13041  NEXT_PAGE(level2_kernel_pgt)
13042         /*
13043 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13044          *  If you want to increase this then increase MODULES_VADDR
13045          *  too.)
13046          */
13047 -       PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13048 -               KERNEL_IMAGE_SIZE/PMD_SIZE)
13049 -
13050 -NEXT_PAGE(level2_spare_pgt)
13051 -       .fill   512, 8, 0
13052 +       PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13053  
13054  #undef PMDS
13055  #undef NEXT_PAGE
13056  
13057 -       .data
13058 +       .align PAGE_SIZE
13059 +ENTRY(cpu_gdt_table)
13060 +       .rept NR_CPUS
13061 +       .quad   0x0000000000000000      /* NULL descriptor */
13062 +       .quad   0x00cf9b000000ffff      /* __KERNEL32_CS */
13063 +       .quad   0x00af9b000000ffff      /* __KERNEL_CS */
13064 +       .quad   0x00cf93000000ffff      /* __KERNEL_DS */
13065 +       .quad   0x00cffb000000ffff      /* __USER32_CS */
13066 +       .quad   0x00cff3000000ffff      /* __USER_DS, __USER32_DS  */
13067 +       .quad   0x00affb000000ffff      /* __USER_CS */
13068 +
13069 +#ifdef CONFIG_PAX_KERNEXEC
13070 +       .quad   0x00af9b000000ffff      /* __KERNEXEC_KERNEL_CS */
13071 +#else
13072 +       .quad   0x0                     /* unused */
13073 +#endif
13074 +
13075 +       .quad   0,0                     /* TSS */
13076 +       .quad   0,0                     /* LDT */
13077 +       .quad   0,0,0                   /* three TLS descriptors */
13078 +       .quad   0x0000f40000000000      /* node/CPU stored in limit */
13079 +       /* asm/segment.h:GDT_ENTRIES must match this */
13080 +
13081 +       /* zero the remaining page */
13082 +       .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13083 +       .endr
13084 +
13085         .align 16
13086         .globl early_gdt_descr
13087  early_gdt_descr:
13088         .word   GDT_ENTRIES*8-1
13089  early_gdt_descr_base:
13090 -       .quad   INIT_PER_CPU_VAR(gdt_page)
13091 +       .quad   cpu_gdt_table
13092  
13093  ENTRY(phys_base)
13094         /* This must match the first entry in level2_kernel_pgt */
13095         .quad   0x0000000000000000
13096  
13097  #include "../../x86/xen/xen-head.S"
13098 -       
13099 -       .section .bss, "aw", @nobits
13100 +
13101 +       .section .rodata,"a",@progbits
13102         .align L1_CACHE_BYTES
13103  ENTRY(idt_table)
13104 -       .skip IDT_ENTRIES * 16
13105 +       .fill 512,8,0
13106  
13107         __PAGE_ALIGNED_BSS
13108         .align PAGE_SIZE
13109 diff -urNp linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c
13110 --- linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13111 +++ linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13112 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13113  EXPORT_SYMBOL(cmpxchg8b_emu);
13114  #endif
13115  
13116 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13117 +
13118  /* Networking helper routines. */
13119  EXPORT_SYMBOL(csum_partial_copy_generic);
13120 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13121 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13122  
13123  EXPORT_SYMBOL(__get_user_1);
13124  EXPORT_SYMBOL(__get_user_2);
13125 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13126  
13127  EXPORT_SYMBOL(csum_partial);
13128  EXPORT_SYMBOL(empty_zero_page);
13129 +
13130 +#ifdef CONFIG_PAX_KERNEXEC
13131 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13132 +#endif
13133 diff -urNp linux-3.0.4/arch/x86/kernel/i8259.c linux-3.0.4/arch/x86/kernel/i8259.c
13134 --- linux-3.0.4/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13135 +++ linux-3.0.4/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13136 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13137                                "spurious 8259A interrupt: IRQ%d.\n", irq);
13138                         spurious_irq_mask |= irqmask;
13139                 }
13140 -               atomic_inc(&irq_err_count);
13141 +               atomic_inc_unchecked(&irq_err_count);
13142                 /*
13143                  * Theoretically we do not have to handle this IRQ,
13144                  * but in Linux this does not cause problems and is
13145 diff -urNp linux-3.0.4/arch/x86/kernel/init_task.c linux-3.0.4/arch/x86/kernel/init_task.c
13146 --- linux-3.0.4/arch/x86/kernel/init_task.c     2011-07-21 22:17:23.000000000 -0400
13147 +++ linux-3.0.4/arch/x86/kernel/init_task.c     2011-08-23 21:47:55.000000000 -0400
13148 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13149   * way process stacks are handled. This is done by having a special
13150   * "init_task" linker map entry..
13151   */
13152 -union thread_union init_thread_union __init_task_data =
13153 -       { INIT_THREAD_INFO(init_task) };
13154 +union thread_union init_thread_union __init_task_data;
13155  
13156  /*
13157   * Initial task structure.
13158 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13159   * section. Since TSS's are completely CPU-local, we want them
13160   * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13161   */
13162 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13163 -
13164 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13165 +EXPORT_SYMBOL(init_tss);
13166 diff -urNp linux-3.0.4/arch/x86/kernel/ioport.c linux-3.0.4/arch/x86/kernel/ioport.c
13167 --- linux-3.0.4/arch/x86/kernel/ioport.c        2011-07-21 22:17:23.000000000 -0400
13168 +++ linux-3.0.4/arch/x86/kernel/ioport.c        2011-08-23 21:48:14.000000000 -0400
13169 @@ -6,6 +6,7 @@
13170  #include <linux/sched.h>
13171  #include <linux/kernel.h>
13172  #include <linux/capability.h>
13173 +#include <linux/security.h>
13174  #include <linux/errno.h>
13175  #include <linux/types.h>
13176  #include <linux/ioport.h>
13177 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13178  
13179         if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13180                 return -EINVAL;
13181 +#ifdef CONFIG_GRKERNSEC_IO
13182 +       if (turn_on && grsec_disable_privio) {
13183 +               gr_handle_ioperm();
13184 +               return -EPERM;
13185 +       }
13186 +#endif
13187         if (turn_on && !capable(CAP_SYS_RAWIO))
13188                 return -EPERM;
13189  
13190 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13191          * because the ->io_bitmap_max value must match the bitmap
13192          * contents:
13193          */
13194 -       tss = &per_cpu(init_tss, get_cpu());
13195 +       tss = init_tss + get_cpu();
13196  
13197         if (turn_on)
13198                 bitmap_clear(t->io_bitmap_ptr, from, num);
13199 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13200                 return -EINVAL;
13201         /* Trying to gain more privileges? */
13202         if (level > old) {
13203 +#ifdef CONFIG_GRKERNSEC_IO
13204 +               if (grsec_disable_privio) {
13205 +                       gr_handle_iopl();
13206 +                       return -EPERM;
13207 +               }
13208 +#endif
13209                 if (!capable(CAP_SYS_RAWIO))
13210                         return -EPERM;
13211         }
13212 diff -urNp linux-3.0.4/arch/x86/kernel/irq_32.c linux-3.0.4/arch/x86/kernel/irq_32.c
13213 --- linux-3.0.4/arch/x86/kernel/irq_32.c        2011-07-21 22:17:23.000000000 -0400
13214 +++ linux-3.0.4/arch/x86/kernel/irq_32.c        2011-08-23 21:47:55.000000000 -0400
13215 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13216         __asm__ __volatile__("andl %%esp,%0" :
13217                              "=r" (sp) : "0" (THREAD_SIZE - 1));
13218  
13219 -       return sp < (sizeof(struct thread_info) + STACK_WARN);
13220 +       return sp < STACK_WARN;
13221  }
13222  
13223  static void print_stack_overflow(void)
13224 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13225   * per-CPU IRQ handling contexts (thread information and stack)
13226   */
13227  union irq_ctx {
13228 -       struct thread_info      tinfo;
13229 -       u32                     stack[THREAD_SIZE/sizeof(u32)];
13230 +       unsigned long           previous_esp;
13231 +       u32                     stack[THREAD_SIZE/sizeof(u32)];
13232  } __attribute__((aligned(THREAD_SIZE)));
13233  
13234  static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13235 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13236  static inline int
13237  execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13238  {
13239 -       union irq_ctx *curctx, *irqctx;
13240 +       union irq_ctx *irqctx;
13241         u32 *isp, arg1, arg2;
13242  
13243 -       curctx = (union irq_ctx *) current_thread_info();
13244         irqctx = __this_cpu_read(hardirq_ctx);
13245  
13246         /*
13247 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13248          * handler) we can't do that and just have to keep using the
13249          * current stack (which is the irq stack already after all)
13250          */
13251 -       if (unlikely(curctx == irqctx))
13252 +       if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13253                 return 0;
13254  
13255         /* build the stack frame on the IRQ stack */
13256 -       isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13257 -       irqctx->tinfo.task = curctx->tinfo.task;
13258 -       irqctx->tinfo.previous_esp = current_stack_pointer;
13259 +       isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13260 +       irqctx->previous_esp = current_stack_pointer;
13261  
13262 -       /*
13263 -        * Copy the softirq bits in preempt_count so that the
13264 -        * softirq checks work in the hardirq context.
13265 -        */
13266 -       irqctx->tinfo.preempt_count =
13267 -               (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13268 -               (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13269 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13270 +       __set_fs(MAKE_MM_SEG(0));
13271 +#endif
13272  
13273         if (unlikely(overflow))
13274                 call_on_stack(print_stack_overflow, isp);
13275 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13276                      :  "0" (irq),   "1" (desc),  "2" (isp),
13277                         "D" (desc->handle_irq)
13278                      : "memory", "cc", "ecx");
13279 +
13280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13281 +       __set_fs(current_thread_info()->addr_limit);
13282 +#endif
13283 +
13284         return 1;
13285  }
13286  
13287 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13288   */
13289  void __cpuinit irq_ctx_init(int cpu)
13290  {
13291 -       union irq_ctx *irqctx;
13292 -
13293         if (per_cpu(hardirq_ctx, cpu))
13294                 return;
13295  
13296 -       irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13297 -                                              THREAD_FLAGS,
13298 -                                              THREAD_ORDER));
13299 -       memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13300 -       irqctx->tinfo.cpu               = cpu;
13301 -       irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
13302 -       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
13303 -
13304 -       per_cpu(hardirq_ctx, cpu) = irqctx;
13305 -
13306 -       irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13307 -                                              THREAD_FLAGS,
13308 -                                              THREAD_ORDER));
13309 -       memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13310 -       irqctx->tinfo.cpu               = cpu;
13311 -       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
13312 -
13313 -       per_cpu(softirq_ctx, cpu) = irqctx;
13314 +       per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13315 +       per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13316  
13317         printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13318                cpu, per_cpu(hardirq_ctx, cpu),  per_cpu(softirq_ctx, cpu));
13319 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13320  asmlinkage void do_softirq(void)
13321  {
13322         unsigned long flags;
13323 -       struct thread_info *curctx;
13324         union irq_ctx *irqctx;
13325         u32 *isp;
13326  
13327 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13328         local_irq_save(flags);
13329  
13330         if (local_softirq_pending()) {
13331 -               curctx = current_thread_info();
13332                 irqctx = __this_cpu_read(softirq_ctx);
13333 -               irqctx->tinfo.task = curctx->task;
13334 -               irqctx->tinfo.previous_esp = current_stack_pointer;
13335 +               irqctx->previous_esp = current_stack_pointer;
13336  
13337                 /* build the stack frame on the softirq stack */
13338 -               isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13339 +               isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13340 +
13341 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13342 +               __set_fs(MAKE_MM_SEG(0));
13343 +#endif
13344  
13345                 call_on_stack(__do_softirq, isp);
13346 +
13347 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13348 +               __set_fs(current_thread_info()->addr_limit);
13349 +#endif
13350 +
13351                 /*
13352                  * Shouldn't happen, we returned above if in_interrupt():
13353                  */
13354 diff -urNp linux-3.0.4/arch/x86/kernel/irq.c linux-3.0.4/arch/x86/kernel/irq.c
13355 --- linux-3.0.4/arch/x86/kernel/irq.c   2011-07-21 22:17:23.000000000 -0400
13356 +++ linux-3.0.4/arch/x86/kernel/irq.c   2011-08-23 21:47:55.000000000 -0400
13357 @@ -17,7 +17,7 @@
13358  #include <asm/mce.h>
13359  #include <asm/hw_irq.h>
13360  
13361 -atomic_t irq_err_count;
13362 +atomic_unchecked_t irq_err_count;
13363  
13364  /* Function pointer for generic interrupt vector handling */
13365  void (*x86_platform_ipi_callback)(void) = NULL;
13366 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13367                 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13368         seq_printf(p, "  Machine check polls\n");
13369  #endif
13370 -       seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13371 +       seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13372  #if defined(CONFIG_X86_IO_APIC)
13373 -       seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13374 +       seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13375  #endif
13376         return 0;
13377  }
13378 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13379  
13380  u64 arch_irq_stat(void)
13381  {
13382 -       u64 sum = atomic_read(&irq_err_count);
13383 +       u64 sum = atomic_read_unchecked(&irq_err_count);
13384  
13385  #ifdef CONFIG_X86_IO_APIC
13386 -       sum += atomic_read(&irq_mis_count);
13387 +       sum += atomic_read_unchecked(&irq_mis_count);
13388  #endif
13389         return sum;
13390  }
13391 diff -urNp linux-3.0.4/arch/x86/kernel/kgdb.c linux-3.0.4/arch/x86/kernel/kgdb.c
13392 --- linux-3.0.4/arch/x86/kernel/kgdb.c  2011-07-21 22:17:23.000000000 -0400
13393 +++ linux-3.0.4/arch/x86/kernel/kgdb.c  2011-08-23 21:47:55.000000000 -0400
13394 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, 
13395  #ifdef CONFIG_X86_32
13396         switch (regno) {
13397         case GDB_SS:
13398 -               if (!user_mode_vm(regs))
13399 +               if (!user_mode(regs))
13400                         *(unsigned long *)mem = __KERNEL_DS;
13401                 break;
13402         case GDB_SP:
13403 -               if (!user_mode_vm(regs))
13404 +               if (!user_mode(regs))
13405                         *(unsigned long *)mem = kernel_stack_pointer(regs);
13406                 break;
13407         case GDB_GS:
13408 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13409         case 'k':
13410                 /* clear the trace bit */
13411                 linux_regs->flags &= ~X86_EFLAGS_TF;
13412 -               atomic_set(&kgdb_cpu_doing_single_step, -1);
13413 +               atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13414  
13415                 /* set the trace bit if we're stepping */
13416                 if (remcomInBuffer[0] == 's') {
13417                         linux_regs->flags |= X86_EFLAGS_TF;
13418 -                       atomic_set(&kgdb_cpu_doing_single_step,
13419 +                       atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13420                                    raw_smp_processor_id());
13421                 }
13422  
13423 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13424                 return NOTIFY_DONE;
13425  
13426         case DIE_DEBUG:
13427 -               if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13428 +               if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13429                         if (user_mode(regs))
13430                                 return single_step_cont(regs, args);
13431                         break;
13432 diff -urNp linux-3.0.4/arch/x86/kernel/kprobes.c linux-3.0.4/arch/x86/kernel/kprobes.c
13433 --- linux-3.0.4/arch/x86/kernel/kprobes.c       2011-07-21 22:17:23.000000000 -0400
13434 +++ linux-3.0.4/arch/x86/kernel/kprobes.c       2011-08-23 21:47:55.000000000 -0400
13435 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13436         } __attribute__((packed)) *insn;
13437  
13438         insn = (struct __arch_relative_insn *)from;
13439 +
13440 +       pax_open_kernel();
13441         insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13442         insn->op = op;
13443 +       pax_close_kernel();
13444  }
13445  
13446  /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13447 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13448         kprobe_opcode_t opcode;
13449         kprobe_opcode_t *orig_opcodes = opcodes;
13450  
13451 -       if (search_exception_tables((unsigned long)opcodes))
13452 +       if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13453                 return 0;       /* Page fault may occur on this address. */
13454  
13455  retry:
13456 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13457                 }
13458         }
13459         insn_get_length(&insn);
13460 +       pax_open_kernel();
13461         memcpy(dest, insn.kaddr, insn.length);
13462 +       pax_close_kernel();
13463  
13464  #ifdef CONFIG_X86_64
13465         if (insn_rip_relative(&insn)) {
13466 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13467                           (u8 *) dest;
13468                 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check.  */
13469                 disp = (u8 *) dest + insn_offset_displacement(&insn);
13470 +               pax_open_kernel();
13471                 *(s32 *) disp = (s32) newdisp;
13472 +               pax_close_kernel();
13473         }
13474  #endif
13475         return insn.length;
13476 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13477          */
13478         __copy_instruction(p->ainsn.insn, p->addr, 0);
13479  
13480 -       if (can_boost(p->addr))
13481 +       if (can_boost(ktla_ktva(p->addr)))
13482                 p->ainsn.boostable = 0;
13483         else
13484                 p->ainsn.boostable = -1;
13485  
13486 -       p->opcode = *p->addr;
13487 +       p->opcode = *(ktla_ktva(p->addr));
13488  }
13489  
13490  int __kprobes arch_prepare_kprobe(struct kprobe *p)
13491 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13492                  * nor set current_kprobe, because it doesn't use single
13493                  * stepping.
13494                  */
13495 -               regs->ip = (unsigned long)p->ainsn.insn;
13496 +               regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13497                 preempt_enable_no_resched();
13498                 return;
13499         }
13500 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13501         if (p->opcode == BREAKPOINT_INSTRUCTION)
13502                 regs->ip = (unsigned long)p->addr;
13503         else
13504 -               regs->ip = (unsigned long)p->ainsn.insn;
13505 +               regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13506  }
13507  
13508  /*
13509 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13510                                 setup_singlestep(p, regs, kcb, 0);
13511                         return 1;
13512                 }
13513 -       } else if (*addr != BREAKPOINT_INSTRUCTION) {
13514 +       } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13515                 /*
13516                  * The breakpoint instruction was removed right
13517                  * after we hit it.  Another cpu has removed
13518 @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13519                 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13520  {
13521         unsigned long *tos = stack_addr(regs);
13522 -       unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13523 +       unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13524         unsigned long orig_ip = (unsigned long)p->addr;
13525         kprobe_opcode_t *insn = p->ainsn.insn;
13526  
13527 @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13528         struct die_args *args = data;
13529         int ret = NOTIFY_DONE;
13530  
13531 -       if (args->regs && user_mode_vm(args->regs))
13532 +       if (args->regs && user_mode(args->regs))
13533                 return ret;
13534  
13535         switch (val) {
13536 @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13537          * Verify if the address gap is in 2GB range, because this uses
13538          * a relative jump.
13539          */
13540 -       rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13541 +       rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13542         if (abs(rel) > 0x7fffffff)
13543                 return -ERANGE;
13544  
13545 @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13546         synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13547  
13548         /* Set probe function call */
13549 -       synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13550 +       synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13551  
13552         /* Set returning jmp instruction at the tail of out-of-line buffer */
13553         synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13554 -                          (u8 *)op->kp.addr + op->optinsn.size);
13555 +                          (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13556  
13557         flush_icache_range((unsigned long) buf,
13558                            (unsigned long) buf + TMPL_END_IDX +
13559 @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13560                         ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13561  
13562         /* Backup instructions which will be replaced by jump address */
13563 -       memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13564 +       memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13565                RELATIVE_ADDR_SIZE);
13566  
13567         insn_buf[0] = RELATIVEJUMP_OPCODE;
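
The kprobes.c changes route every read or patch of probed instructions through ktla_ktva()/ktva_ktla(). Under the KERNEXEC model these helpers appear to be constant-offset conversions between the executable view of kernel text and a separate alias used for bookkeeping and patching. The userspace sketch below builds two mappings of the same page, one executable and one writable, and converts between them the same way; the names and the memfd setup are illustrative, need a reasonably recent glibc, and may be refused under strict W^X policies (including PaX's own MPROTECT):

    #define _GNU_SOURCE
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = sysconf(_SC_PAGESIZE);
            int fd = memfd_create("text", 0);
            if (fd < 0 || ftruncate(fd, len) < 0)
                    return 1;

            /* Two views of the same page: executable ("virtual") and writable ("linear"). */
            uint8_t *exec_view = mmap(NULL, len, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
            uint8_t *rw_view   = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (exec_view == MAP_FAILED || rw_view == MAP_FAILED)
                    return 1;

            intptr_t delta = (intptr_t)rw_view - (intptr_t)exec_view;
    #define KTVA_KTLA(p) ((uint8_t *)((uintptr_t)(p) + delta))  /* exec address -> writable alias */
    #define KTLA_KTVA(p) ((uint8_t *)((uintptr_t)(p) - delta))  /* writable alias -> exec address */

            /* "Patch" a byte at an executable address via its writable alias. */
            uint8_t *insn = exec_view + 0x10;
            *KTVA_KTLA(insn) = 0xcc;  /* int3, as a stand-in opcode */

            printf("byte at %p reads back as %#x\n", (void *)insn, insn[0]);
            return 0;
    }
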
13568 diff -urNp linux-3.0.4/arch/x86/kernel/kvm.c linux-3.0.4/arch/x86/kernel/kvm.c
13569 --- linux-3.0.4/arch/x86/kernel/kvm.c   2011-07-21 22:17:23.000000000 -0400
13570 +++ linux-3.0.4/arch/x86/kernel/kvm.c   2011-08-24 18:10:12.000000000 -0400
13571 @@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13572                 pv_mmu_ops.set_pud = kvm_set_pud;
13573  #if PAGETABLE_LEVELS == 4
13574                 pv_mmu_ops.set_pgd = kvm_set_pgd;
13575 +               pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13576  #endif
13577  #endif
13578                 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13579 diff -urNp linux-3.0.4/arch/x86/kernel/ldt.c linux-3.0.4/arch/x86/kernel/ldt.c
13580 --- linux-3.0.4/arch/x86/kernel/ldt.c   2011-07-21 22:17:23.000000000 -0400
13581 +++ linux-3.0.4/arch/x86/kernel/ldt.c   2011-08-23 21:47:55.000000000 -0400
13582 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13583         if (reload) {
13584  #ifdef CONFIG_SMP
13585                 preempt_disable();
13586 -               load_LDT(pc);
13587 +               load_LDT_nolock(pc);
13588                 if (!cpumask_equal(mm_cpumask(current->mm),
13589                                    cpumask_of(smp_processor_id())))
13590                         smp_call_function(flush_ldt, current->mm, 1);
13591                 preempt_enable();
13592  #else
13593 -               load_LDT(pc);
13594 +               load_LDT_nolock(pc);
13595  #endif
13596         }
13597         if (oldsize) {
13598 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t 
13599                 return err;
13600  
13601         for (i = 0; i < old->size; i++)
13602 -               write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13603 +               write_ldt_entry(new->ldt, i, old->ldt + i);
13604         return 0;
13605  }
13606  
13607 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct 
13608                 retval = copy_ldt(&mm->context, &old_mm->context);
13609                 mutex_unlock(&old_mm->context.lock);
13610         }
13611 +
13612 +       if (tsk == current) {
13613 +               mm->context.vdso = 0;
13614 +
13615 +#ifdef CONFIG_X86_32
13616 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13617 +               mm->context.user_cs_base = 0UL;
13618 +               mm->context.user_cs_limit = ~0UL;
13619 +
13620 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13621 +               cpus_clear(mm->context.cpu_user_cs_mask);
13622 +#endif
13623 +
13624 +#endif
13625 +#endif
13626 +
13627 +       }
13628 +
13629         return retval;
13630  }
13631  
13632 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13633                 }
13634         }
13635  
13636 +#ifdef CONFIG_PAX_SEGMEXEC
13637 +       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13638 +               error = -EINVAL;
13639 +               goto out_unlock;
13640 +       }
13641 +#endif
13642 +
13643         fill_ldt(&ldt, &ldt_info);
13644         if (oldmode)
13645                 ldt.avl = 0;
13646 diff -urNp linux-3.0.4/arch/x86/kernel/machine_kexec_32.c linux-3.0.4/arch/x86/kernel/machine_kexec_32.c
13647 --- linux-3.0.4/arch/x86/kernel/machine_kexec_32.c      2011-07-21 22:17:23.000000000 -0400
13648 +++ linux-3.0.4/arch/x86/kernel/machine_kexec_32.c      2011-08-23 21:47:55.000000000 -0400
13649 @@ -27,7 +27,7 @@
13650  #include <asm/cacheflush.h>
13651  #include <asm/debugreg.h>
13652  
13653 -static void set_idt(void *newidt, __u16 limit)
13654 +static void set_idt(struct desc_struct *newidt, __u16 limit)
13655  {
13656         struct desc_ptr curidt;
13657  
13658 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 
13659  }
13660  
13661  
13662 -static void set_gdt(void *newgdt, __u16 limit)
13663 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13664  {
13665         struct desc_ptr curgdt;
13666  
13667 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13668         }
13669  
13670         control_page = page_address(image->control_code_page);
13671 -       memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13672 +       memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13673  
13674         relocate_kernel_ptr = control_page;
13675         page_list[PA_CONTROL_PAGE] = __pa(control_page);
13676 diff -urNp linux-3.0.4/arch/x86/kernel/microcode_intel.c linux-3.0.4/arch/x86/kernel/microcode_intel.c
13677 --- linux-3.0.4/arch/x86/kernel/microcode_intel.c       2011-07-21 22:17:23.000000000 -0400
13678 +++ linux-3.0.4/arch/x86/kernel/microcode_intel.c       2011-08-23 21:47:55.000000000 -0400
13679 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13680  
13681  static int get_ucode_user(void *to, const void *from, size_t n)
13682  {
13683 -       return copy_from_user(to, from, n);
13684 +       return copy_from_user(to, (__force const void __user *)from, n);
13685  }
13686  
13687  static enum ucode_state
13688  request_microcode_user(int cpu, const void __user *buf, size_t size)
13689  {
13690 -       return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13691 +       return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13692  }
13693  
13694  static void microcode_fini_cpu(int cpu)
13695 diff -urNp linux-3.0.4/arch/x86/kernel/module.c linux-3.0.4/arch/x86/kernel/module.c
13696 --- linux-3.0.4/arch/x86/kernel/module.c        2011-07-21 22:17:23.000000000 -0400
13697 +++ linux-3.0.4/arch/x86/kernel/module.c        2011-08-23 21:47:55.000000000 -0400
13698 @@ -36,21 +36,66 @@
13699  #define DEBUGP(fmt...)
13700  #endif
13701  
13702 -void *module_alloc(unsigned long size)
13703 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13704  {
13705         if (PAGE_ALIGN(size) > MODULES_LEN)
13706                 return NULL;
13707         return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13708 -                               GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13709 +                               GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13710                                 -1, __builtin_return_address(0));
13711  }
13712  
13713 +void *module_alloc(unsigned long size)
13714 +{
13715 +
13716 +#ifdef CONFIG_PAX_KERNEXEC
13717 +       return __module_alloc(size, PAGE_KERNEL);
13718 +#else
13719 +       return __module_alloc(size, PAGE_KERNEL_EXEC);
13720 +#endif
13721 +
13722 +}
13723 +
13724  /* Free memory returned from module_alloc */
13725  void module_free(struct module *mod, void *module_region)
13726  {
13727         vfree(module_region);
13728  }
13729  
13730 +#ifdef CONFIG_PAX_KERNEXEC
13731 +#ifdef CONFIG_X86_32
13732 +void *module_alloc_exec(unsigned long size)
13733 +{
13734 +       struct vm_struct *area;
13735 +
13736 +       if (size == 0)
13737 +               return NULL;
13738 +
13739 +       area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13740 +       return area ? area->addr : NULL;
13741 +}
13742 +EXPORT_SYMBOL(module_alloc_exec);
13743 +
13744 +void module_free_exec(struct module *mod, void *module_region)
13745 +{
13746 +       vunmap(module_region);
13747 +}
13748 +EXPORT_SYMBOL(module_free_exec);
13749 +#else
13750 +void module_free_exec(struct module *mod, void *module_region)
13751 +{
13752 +       module_free(mod, module_region);
13753 +}
13754 +EXPORT_SYMBOL(module_free_exec);
13755 +
13756 +void *module_alloc_exec(unsigned long size)
13757 +{
13758 +       return __module_alloc(size, PAGE_KERNEL_RX);
13759 +}
13760 +EXPORT_SYMBOL(module_alloc_exec);
13761 +#endif
13762 +#endif
13763 +
13764  /* We don't need anything special. */
13765  int module_frob_arch_sections(Elf_Ehdr *hdr,
13766                               Elf_Shdr *sechdrs,
13767 @@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13768         unsigned int i;
13769         Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13770         Elf32_Sym *sym;
13771 -       uint32_t *location;
13772 +       uint32_t *plocation, location;
13773  
13774         DEBUGP("Applying relocate section %u to %u\n", relsec,
13775                sechdrs[relsec].sh_info);
13776         for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13777                 /* This is where to make the change */
13778 -               location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13779 -                       + rel[i].r_offset;
13780 +               plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13781 +               location = (uint32_t)plocation;
13782 +               if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13783 +                       plocation = ktla_ktva((void *)plocation);
13784                 /* This is the symbol it is referring to.  Note that all
13785                    undefined symbols have been resolved.  */
13786                 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13787 @@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13788                 switch (ELF32_R_TYPE(rel[i].r_info)) {
13789                 case R_386_32:
13790                         /* We add the value into the location given */
13791 -                       *location += sym->st_value;
13792 +                       pax_open_kernel();
13793 +                       *plocation += sym->st_value;
13794 +                       pax_close_kernel();
13795                         break;
13796                 case R_386_PC32:
13797                         /* Add the value, subtract its postition */
13798 -                       *location += sym->st_value - (uint32_t)location;
13799 +                       pax_open_kernel();
13800 +                       *plocation += sym->st_value - location;
13801 +                       pax_close_kernel();
13802                         break;
13803                 default:
13804                         printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13805 @@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13806                 case R_X86_64_NONE:
13807                         break;
13808                 case R_X86_64_64:
13809 +                       pax_open_kernel();
13810                         *(u64 *)loc = val;
13811 +                       pax_close_kernel();
13812                         break;
13813                 case R_X86_64_32:
13814 +                       pax_open_kernel();
13815                         *(u32 *)loc = val;
13816 +                       pax_close_kernel();
13817                         if (val != *(u32 *)loc)
13818                                 goto overflow;
13819                         break;
13820                 case R_X86_64_32S:
13821 +                       pax_open_kernel();
13822                         *(s32 *)loc = val;
13823 +                       pax_close_kernel();
13824                         if ((s64)val != *(s32 *)loc)
13825                                 goto overflow;
13826                         break;
13827                 case R_X86_64_PC32:
13828                         val -= (u64)loc;
13829 +                       pax_open_kernel();
13830                         *(u32 *)loc = val;
13831 +                       pax_close_kernel();
13832 +
13833  #if 0
13834                         if ((s64)val != *(s32 *)loc)
13835                                 goto overflow;
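
The module.c hunks above wrap every relocation store in pax_open_kernel()/pax_close_kernel(), and module_alloc() stops handing out writable-and-executable memory when KERNEXEC is enabled. The sketch below reproduces the shape of that discipline in userspace with mprotect(): the "module" region stays read-only, and a short write window is opened only around the relocation store. It is an analogy under stated assumptions, not the kernel mechanism, which toggles write protection on kernel mappings rather than calling mprotect:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Open a brief write window around a single relocation, then seal again. */
    static void apply_reloc(uint32_t *loc, uint32_t sym_value, size_t pagesize)
    {
            void *page = (void *)((uintptr_t)loc & ~(uintptr_t)(pagesize - 1));

            mprotect(page, pagesize, PROT_READ | PROT_WRITE);  /* "pax_open_kernel" */
            *loc += sym_value;                                 /* R_386_32-style add */
            mprotect(page, pagesize, PROT_READ);               /* "pax_close_kernel" */
    }

    int main(void)
    {
            size_t pagesize = sysconf(_SC_PAGESIZE);
            uint32_t *section = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (section == MAP_FAILED)
                    return 1;

            section[0] = 0x1000;                    /* addend already in place */
            mprotect(section, pagesize, PROT_READ); /* seal it, like loaded module memory */

            apply_reloc(&section[0], 0x400000, pagesize);
            printf("relocated value: %#x\n", (unsigned)section[0]);
            return 0;
    }
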
13836 diff -urNp linux-3.0.4/arch/x86/kernel/paravirt.c linux-3.0.4/arch/x86/kernel/paravirt.c
13837 --- linux-3.0.4/arch/x86/kernel/paravirt.c      2011-07-21 22:17:23.000000000 -0400
13838 +++ linux-3.0.4/arch/x86/kernel/paravirt.c      2011-08-23 21:48:14.000000000 -0400
13839 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13840  {
13841         return x;
13842  }
13843 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13844 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13845 +#endif
13846  
13847  void __init default_banner(void)
13848  {
13849 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13850   * corresponding structure. */
13851  static void *get_call_destination(u8 type)
13852  {
13853 -       struct paravirt_patch_template tmpl = {
13854 +       const struct paravirt_patch_template tmpl = {
13855                 .pv_init_ops = pv_init_ops,
13856                 .pv_time_ops = pv_time_ops,
13857                 .pv_cpu_ops = pv_cpu_ops,
13858 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13859                 .pv_lock_ops = pv_lock_ops,
13860  #endif
13861         };
13862 +
13863 +       pax_track_stack();
13864 +
13865         return *((void **)&tmpl + type);
13866  }
13867  
13868 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13869         if (opfunc == NULL)
13870                 /* If there's no function, patch it with a ud2a (BUG) */
13871                 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13872 -       else if (opfunc == _paravirt_nop)
13873 +       else if (opfunc == (void *)_paravirt_nop)
13874                 /* If the operation is a nop, then nop the callsite */
13875                 ret = paravirt_patch_nop();
13876  
13877         /* identity functions just return their single argument */
13878 -       else if (opfunc == _paravirt_ident_32)
13879 +       else if (opfunc == (void *)_paravirt_ident_32)
13880                 ret = paravirt_patch_ident_32(insnbuf, len);
13881 -       else if (opfunc == _paravirt_ident_64)
13882 +       else if (opfunc == (void *)_paravirt_ident_64)
13883                 ret = paravirt_patch_ident_64(insnbuf, len);
13884 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13885 +       else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13886 +               ret = paravirt_patch_ident_64(insnbuf, len);
13887 +#endif
13888  
13889         else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13890                  type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13891 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13892         if (insn_len > len || start == NULL)
13893                 insn_len = len;
13894         else
13895 -               memcpy(insnbuf, start, insn_len);
13896 +               memcpy(insnbuf, ktla_ktva(start), insn_len);
13897  
13898         return insn_len;
13899  }
13900 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13901         preempt_enable();
13902  }
13903  
13904 -struct pv_info pv_info = {
13905 +struct pv_info pv_info __read_only = {
13906         .name = "bare hardware",
13907         .paravirt_enabled = 0,
13908         .kernel_rpl = 0,
13909         .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13910  };
13911  
13912 -struct pv_init_ops pv_init_ops = {
13913 +struct pv_init_ops pv_init_ops __read_only = {
13914         .patch = native_patch,
13915  };
13916  
13917 -struct pv_time_ops pv_time_ops = {
13918 +struct pv_time_ops pv_time_ops __read_only = {
13919         .sched_clock = native_sched_clock,
13920  };
13921  
13922 -struct pv_irq_ops pv_irq_ops = {
13923 +struct pv_irq_ops pv_irq_ops __read_only = {
13924         .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13925         .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13926         .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13927 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13928  #endif
13929  };
13930  
13931 -struct pv_cpu_ops pv_cpu_ops = {
13932 +struct pv_cpu_ops pv_cpu_ops __read_only = {
13933         .cpuid = native_cpuid,
13934         .get_debugreg = native_get_debugreg,
13935         .set_debugreg = native_set_debugreg,
13936 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13937         .end_context_switch = paravirt_nop,
13938  };
13939  
13940 -struct pv_apic_ops pv_apic_ops = {
13941 +struct pv_apic_ops pv_apic_ops __read_only = {
13942  #ifdef CONFIG_X86_LOCAL_APIC
13943         .startup_ipi_hook = paravirt_nop,
13944  #endif
13945  };
13946  
13947 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13948 +#ifdef CONFIG_X86_32
13949 +#ifdef CONFIG_X86_PAE
13950 +/* 64-bit pagetable entries */
13951 +#define PTE_IDENT      PV_CALLEE_SAVE(_paravirt_ident_64)
13952 +#else
13953  /* 32-bit pagetable entries */
13954  #define PTE_IDENT      __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13955 +#endif
13956  #else
13957  /* 64-bit pagetable entries */
13958  #define PTE_IDENT      __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13959  #endif
13960  
13961 -struct pv_mmu_ops pv_mmu_ops = {
13962 +struct pv_mmu_ops pv_mmu_ops __read_only = {
13963  
13964         .read_cr2 = native_read_cr2,
13965         .write_cr2 = native_write_cr2,
13966 @@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
13967         .make_pud = PTE_IDENT,
13968  
13969         .set_pgd = native_set_pgd,
13970 +       .set_pgd_batched = native_set_pgd_batched,
13971  #endif
13972  #endif /* PAGETABLE_LEVELS >= 3 */
13973  
13974 @@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13975         },
13976  
13977         .set_fixmap = native_set_fixmap,
13978 +
13979 +#ifdef CONFIG_PAX_KERNEXEC
13980 +       .pax_open_kernel = native_pax_open_kernel,
13981 +       .pax_close_kernel = native_pax_close_kernel,
13982 +#endif
13983 +
13984  };
13985  
13986  EXPORT_SYMBOL_GPL(pv_time_ops);
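
paravirt.c above tags every pv_*_ops table with __read_only, taking these function-pointer dispatch tables out of writable data so they are harder to hijack at run time. The kernel mechanism is section placement enforced by KERNEXEC rather than the C type system, but the intent can be shown with an ordinary const table that the compiler places in .rodata; the names below are illustrative, not the kernel's:

    #include <stdio.h>

    struct clock_ops {
            unsigned long long (*sched_clock)(void);
    };

    static unsigned long long native_clock(void)
    {
            return 42;
    }

    /* const puts the table in .rodata: assignments to its members are rejected
     * at compile time, and writes forced through a cast fault at run time. */
    static const struct clock_ops clock_ops = {
            .sched_clock = native_clock,
    };

    int main(void)
    {
            printf("clock: %llu\n", clock_ops.sched_clock());
            /* clock_ops.sched_clock = NULL;  -- error: assignment of read-only member */
            return 0;
    }
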
13987 diff -urNp linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c
13988 --- linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c    2011-07-21 22:17:23.000000000 -0400
13989 +++ linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c    2011-08-23 21:47:55.000000000 -0400
13990 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t 
13991         arch_spin_lock(lock);
13992  }
13993  
13994 -struct pv_lock_ops pv_lock_ops = {
13995 +struct pv_lock_ops pv_lock_ops __read_only = {
13996  #ifdef CONFIG_SMP
13997         .spin_is_locked = __ticket_spin_is_locked,
13998         .spin_is_contended = __ticket_spin_is_contended,
13999 diff -urNp linux-3.0.4/arch/x86/kernel/pci-iommu_table.c linux-3.0.4/arch/x86/kernel/pci-iommu_table.c
14000 --- linux-3.0.4/arch/x86/kernel/pci-iommu_table.c       2011-07-21 22:17:23.000000000 -0400
14001 +++ linux-3.0.4/arch/x86/kernel/pci-iommu_table.c       2011-08-23 21:48:14.000000000 -0400
14002 @@ -2,7 +2,7 @@
14003  #include <asm/iommu_table.h>
14004  #include <linux/string.h>
14005  #include <linux/kallsyms.h>
14006 -
14007 +#include <linux/sched.h>
14008  
14009  #define DEBUG 1
14010  
14011 @@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14012  {
14013         struct iommu_table_entry *p, *q, *x;
14014  
14015 +       pax_track_stack();
14016 +
14017         /* Simple cyclic dependency checker. */
14018         for (p = start; p < finish; p++) {
14019                 q = find_dependents_of(start, finish, p);
14020 diff -urNp linux-3.0.4/arch/x86/kernel/process_32.c linux-3.0.4/arch/x86/kernel/process_32.c
14021 --- linux-3.0.4/arch/x86/kernel/process_32.c    2011-07-21 22:17:23.000000000 -0400
14022 +++ linux-3.0.4/arch/x86/kernel/process_32.c    2011-08-23 21:47:55.000000000 -0400
14023 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14024  unsigned long thread_saved_pc(struct task_struct *tsk)
14025  {
14026         return ((unsigned long *)tsk->thread.sp)[3];
14027 +//XXX  return tsk->thread.eip;
14028  }
14029  
14030  #ifndef CONFIG_SMP
14031 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14032         unsigned long sp;
14033         unsigned short ss, gs;
14034  
14035 -       if (user_mode_vm(regs)) {
14036 +       if (user_mode(regs)) {
14037                 sp = regs->sp;
14038                 ss = regs->ss & 0xffff;
14039 -               gs = get_user_gs(regs);
14040         } else {
14041                 sp = kernel_stack_pointer(regs);
14042                 savesegment(ss, ss);
14043 -               savesegment(gs, gs);
14044         }
14045 +       gs = get_user_gs(regs);
14046  
14047         show_regs_common();
14048  
14049 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14050         struct task_struct *tsk;
14051         int err;
14052  
14053 -       childregs = task_pt_regs(p);
14054 +       childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14055         *childregs = *regs;
14056         childregs->ax = 0;
14057         childregs->sp = sp;
14058  
14059         p->thread.sp = (unsigned long) childregs;
14060         p->thread.sp0 = (unsigned long) (childregs+1);
14061 +       p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14062  
14063         p->thread.ip = (unsigned long) ret_from_fork;
14064  
14065 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p, 
14066         struct thread_struct *prev = &prev_p->thread,
14067                                  *next = &next_p->thread;
14068         int cpu = smp_processor_id();
14069 -       struct tss_struct *tss = &per_cpu(init_tss, cpu);
14070 +       struct tss_struct *tss = init_tss + cpu;
14071         bool preload_fpu;
14072  
14073         /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14074 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p, 
14075          */
14076         lazy_save_gs(prev->gs);
14077  
14078 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14079 +       __set_fs(task_thread_info(next_p)->addr_limit);
14080 +#endif
14081 +
14082         /*
14083          * Load the per-thread Thread-Local Storage descriptor.
14084          */
14085 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p, 
14086          */
14087         arch_end_context_switch(next_p);
14088  
14089 +       percpu_write(current_task, next_p);
14090 +       percpu_write(current_tinfo, &next_p->tinfo);
14091 +
14092         if (preload_fpu)
14093                 __math_state_restore();
14094  
14095 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p, 
14096         if (prev->gs | next->gs)
14097                 lazy_load_gs(next->gs);
14098  
14099 -       percpu_write(current_task, next_p);
14100 -
14101         return prev_p;
14102  }
14103  
14104 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14105         } while (count++ < 16);
14106         return 0;
14107  }
14108 -
14109 diff -urNp linux-3.0.4/arch/x86/kernel/process_64.c linux-3.0.4/arch/x86/kernel/process_64.c
14110 --- linux-3.0.4/arch/x86/kernel/process_64.c    2011-07-21 22:17:23.000000000 -0400
14111 +++ linux-3.0.4/arch/x86/kernel/process_64.c    2011-08-23 21:47:55.000000000 -0400
14112 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14113  void exit_idle(void)
14114  {
14115         /* idle loop has pid 0 */
14116 -       if (current->pid)
14117 +       if (task_pid_nr(current))
14118                 return;
14119         __exit_idle();
14120  }
14121 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14122         struct pt_regs *childregs;
14123         struct task_struct *me = current;
14124  
14125 -       childregs = ((struct pt_regs *)
14126 -                       (THREAD_SIZE + task_stack_page(p))) - 1;
14127 +       childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14128         *childregs = *regs;
14129  
14130         childregs->ax = 0;
14131 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14132         p->thread.sp = (unsigned long) childregs;
14133         p->thread.sp0 = (unsigned long) (childregs+1);
14134         p->thread.usersp = me->thread.usersp;
14135 +       p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14136  
14137         set_tsk_thread_flag(p, TIF_FORK);
14138  
14139 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p, 
14140         struct thread_struct *prev = &prev_p->thread;
14141         struct thread_struct *next = &next_p->thread;
14142         int cpu = smp_processor_id();
14143 -       struct tss_struct *tss = &per_cpu(init_tss, cpu);
14144 +       struct tss_struct *tss = init_tss + cpu;
14145         unsigned fsindex, gsindex;
14146         bool preload_fpu;
14147  
14148 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p, 
14149         prev->usersp = percpu_read(old_rsp);
14150         percpu_write(old_rsp, next->usersp);
14151         percpu_write(current_task, next_p);
14152 +       percpu_write(current_tinfo, &next_p->tinfo);
14153  
14154 -       percpu_write(kernel_stack,
14155 -                 (unsigned long)task_stack_page(next_p) +
14156 -                 THREAD_SIZE - KERNEL_STACK_OFFSET);
14157 +       percpu_write(kernel_stack, next->sp0);
14158  
14159         /*
14160          * Now maybe reload the debug registers and handle I/O bitmaps
14161 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14162         if (!p || p == current || p->state == TASK_RUNNING)
14163                 return 0;
14164         stack = (unsigned long)task_stack_page(p);
14165 -       if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14166 +       if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14167                 return 0;
14168         fp = *(u64 *)(p->thread.sp);
14169         do {
14170 -               if (fp < (unsigned long)stack ||
14171 -                   fp >= (unsigned long)stack+THREAD_SIZE)
14172 +               if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14173                         return 0;
14174                 ip = *(u64 *)(fp+8);
14175                 if (!in_sched_functions(ip))
14176 diff -urNp linux-3.0.4/arch/x86/kernel/process.c linux-3.0.4/arch/x86/kernel/process.c
14177 --- linux-3.0.4/arch/x86/kernel/process.c       2011-07-21 22:17:23.000000000 -0400
14178 +++ linux-3.0.4/arch/x86/kernel/process.c       2011-08-23 21:47:55.000000000 -0400
14179 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14180  
14181  void free_thread_info(struct thread_info *ti)
14182  {
14183 -       free_thread_xstate(ti->task);
14184         free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14185  }
14186  
14187 +static struct kmem_cache *task_struct_cachep;
14188 +
14189  void arch_task_cache_init(void)
14190  {
14191 -        task_xstate_cachep =
14192 -               kmem_cache_create("task_xstate", xstate_size,
14193 +       /* create a slab on which task_structs can be allocated */
14194 +       task_struct_cachep =
14195 +               kmem_cache_create("task_struct", sizeof(struct task_struct),
14196 +                       ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14197 +
14198 +       task_xstate_cachep =
14199 +               kmem_cache_create("task_xstate", xstate_size,
14200                                   __alignof__(union thread_xstate),
14201 -                                 SLAB_PANIC | SLAB_NOTRACK, NULL);
14202 +                                 SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14203 +}
14204 +
14205 +struct task_struct *alloc_task_struct_node(int node)
14206 +{
14207 +       return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14208 +}
14209 +
14210 +void free_task_struct(struct task_struct *task)
14211 +{
14212 +       free_thread_xstate(task);
14213 +       kmem_cache_free(task_struct_cachep, task);
14214  }
14215  
14216  /*
14217 @@ -70,7 +87,7 @@ void exit_thread(void)
14218         unsigned long *bp = t->io_bitmap_ptr;
14219  
14220         if (bp) {
14221 -               struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14222 +               struct tss_struct *tss = init_tss + get_cpu();
14223  
14224                 t->io_bitmap_ptr = NULL;
14225                 clear_thread_flag(TIF_IO_BITMAP);
14226 @@ -106,7 +123,7 @@ void show_regs_common(void)
14227  
14228         printk(KERN_CONT "\n");
14229         printk(KERN_DEFAULT "Pid: %d, xid: #%u, comm: %.20s %s %s %.*s",
14230 -               current->pid, current->xid, current->comm, print_tainted(),
14231 +               task_pid_nr(current), current->xid, current->comm, print_tainted(),
14232                 init_utsname()->release,
14233                 (int)strcspn(init_utsname()->version, " "),
14234                 init_utsname()->version);
14235 @@ -120,6 +137,9 @@ void flush_thread(void)
14236  {
14237         struct task_struct *tsk = current;
14238  
14239 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14240 +       loadsegment(gs, 0);
14241 +#endif
14242         flush_ptrace_hw_breakpoint(tsk);
14243         memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14244         /*
14245 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14246         regs.di = (unsigned long) arg;
14247  
14248  #ifdef CONFIG_X86_32
14249 -       regs.ds = __USER_DS;
14250 -       regs.es = __USER_DS;
14251 +       regs.ds = __KERNEL_DS;
14252 +       regs.es = __KERNEL_DS;
14253         regs.fs = __KERNEL_PERCPU;
14254 -       regs.gs = __KERNEL_STACK_CANARY;
14255 +       savesegment(gs, regs.gs);
14256  #else
14257         regs.ss = __KERNEL_DS;
14258  #endif
14259 @@ -403,7 +423,7 @@ void default_idle(void)
14260  EXPORT_SYMBOL(default_idle);
14261  #endif
14262  
14263 -void stop_this_cpu(void *dummy)
14264 +__noreturn void stop_this_cpu(void *dummy)
14265  {
14266         local_irq_disable();
14267         /*
14268 @@ -668,16 +688,34 @@ static int __init idle_setup(char *str)
14269  }
14270  early_param("idle", idle_setup);
14271  
14272 -unsigned long arch_align_stack(unsigned long sp)
14273 +#ifdef CONFIG_PAX_RANDKSTACK
14274 +asmlinkage void pax_randomize_kstack(void)
14275  {
14276 -       if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14277 -               sp -= get_random_int() % 8192;
14278 -       return sp & ~0xf;
14279 -}
14280 +       struct thread_struct *thread = &current->thread;
14281 +       unsigned long time;
14282  
14283 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14284 -{
14285 -       unsigned long range_end = mm->brk + 0x02000000;
14286 -       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14287 -}
14288 +       if (!randomize_va_space)
14289 +               return;
14290 +
14291 +       rdtscl(time);
14292 +
14293 +       /* P4 seems to return a 0 LSB, ignore it */
14294 +#ifdef CONFIG_MPENTIUM4
14295 +       time &= 0x3EUL;
14296 +       time <<= 2;
14297 +#elif defined(CONFIG_X86_64)
14298 +       time &= 0xFUL;
14299 +       time <<= 4;
14300 +#else
14301 +       time &= 0x1FUL;
14302 +       time <<= 3;
14303 +#endif
14304 +
14305 +       thread->sp0 ^= time;
14306 +       load_sp0(init_tss + smp_processor_id(), thread);
14307  
14308 +#ifdef CONFIG_X86_64
14309 +       percpu_write(kernel_stack, thread->sp0);
14310 +#endif
14311 +}
14312 +#endif
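
The pax_randomize_kstack() addition above mixes a few low timestamp bits into the kernel stack base (thread->sp0), with a per-configuration mask and shift; the P4 branch skips the timestamp's LSB, which its comment notes tends to read as zero. The small program below just replays that mask/shift arithmetic to show how much jitter each branch applies; the sample value stands in for rdtscl():

    #include <stdio.h>

    int main(void)
    {
            struct { const char *config; unsigned long mask; int shift; } cases[] = {
                    { "CONFIG_MPENTIUM4", 0x3EUL, 2 },
                    { "CONFIG_X86_64",    0xFUL,  4 },
                    { "32-bit default",   0x1FUL, 3 },
            };

            for (int i = 0; i < 3; i++) {
                    unsigned long sample = 0xdeadbeefUL;  /* stand-in for rdtscl() */
                    unsigned long jitter = (sample & cases[i].mask) << cases[i].shift;

                    printf("%-18s jitter=0x%03lx (range 0..0x%03lx, %lu-byte steps)\n",
                           cases[i].config, jitter,
                           cases[i].mask << cases[i].shift,
                           1UL << cases[i].shift);
            }
            return 0;
    }
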
14313 diff -urNp linux-3.0.4/arch/x86/kernel/ptrace.c linux-3.0.4/arch/x86/kernel/ptrace.c
14314 --- linux-3.0.4/arch/x86/kernel/ptrace.c        2011-07-21 22:17:23.000000000 -0400
14315 +++ linux-3.0.4/arch/x86/kernel/ptrace.c        2011-08-23 21:47:55.000000000 -0400
14316 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14317                  unsigned long addr, unsigned long data)
14318  {
14319         int ret;
14320 -       unsigned long __user *datap = (unsigned long __user *)data;
14321 +       unsigned long __user *datap = (__force unsigned long __user *)data;
14322  
14323         switch (request) {
14324         /* read the word at location addr in the USER area. */
14325 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14326                 if ((int) addr < 0)
14327                         return -EIO;
14328                 ret = do_get_thread_area(child, addr,
14329 -                                       (struct user_desc __user *)data);
14330 +                                       (__force struct user_desc __user *) data);
14331                 break;
14332  
14333         case PTRACE_SET_THREAD_AREA:
14334                 if ((int) addr < 0)
14335                         return -EIO;
14336                 ret = do_set_thread_area(child, addr,
14337 -                                       (struct user_desc __user *)data, 0);
14338 +                                       (__force struct user_desc __user *) data, 0);
14339                 break;
14340  #endif
14341  
14342 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14343         memset(info, 0, sizeof(*info));
14344         info->si_signo = SIGTRAP;
14345         info->si_code = si_code;
14346 -       info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14347 +       info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14348  }
14349  
14350  void user_single_step_siginfo(struct task_struct *tsk,
14351 diff -urNp linux-3.0.4/arch/x86/kernel/pvclock.c linux-3.0.4/arch/x86/kernel/pvclock.c
14352 --- linux-3.0.4/arch/x86/kernel/pvclock.c       2011-07-21 22:17:23.000000000 -0400
14353 +++ linux-3.0.4/arch/x86/kernel/pvclock.c       2011-08-23 21:47:55.000000000 -0400
14354 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14355         return pv_tsc_khz;
14356  }
14357  
14358 -static atomic64_t last_value = ATOMIC64_INIT(0);
14359 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14360  
14361  void pvclock_resume(void)
14362  {
14363 -       atomic64_set(&last_value, 0);
14364 +       atomic64_set_unchecked(&last_value, 0);
14365  }
14366  
14367  cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14368 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct 
14369          * updating at the same time, and one of them could be slightly behind,
14370          * making the assumption that last_value always go forward fail to hold.
14371          */
14372 -       last = atomic64_read(&last_value);
14373 +       last = atomic64_read_unchecked(&last_value);
14374         do {
14375                 if (ret < last)
14376                         return last;
14377 -               last = atomic64_cmpxchg(&last_value, last, ret);
14378 +               last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14379         } while (unlikely(last != ret));
14380  
14381         return ret;
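
The pvclock.c hunk only switches last_value to the unchecked atomic64 helpers; the compare-exchange loop it preserves is the interesting part, implementing a lock-free "monotonic max" so concurrent readers never return a timestamp smaller than one already handed out. A userspace rendering of the same pattern with C11 atomics:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t last_value;

    static uint64_t monotonic_read(uint64_t raw)
    {
            uint64_t last = atomic_load(&last_value);

            do {
                    if (raw < last)
                            return last;  /* another reader already advanced it */
            } while (!atomic_compare_exchange_weak(&last_value, &last, raw));
            /* on failure, compare_exchange reloads 'last', mirroring the kernel loop */

            return raw;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)monotonic_read(100));
            printf("%llu\n", (unsigned long long)monotonic_read(90));   /* clamped to 100 */
            printf("%llu\n", (unsigned long long)monotonic_read(120));
            return 0;
    }
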
14382 diff -urNp linux-3.0.4/arch/x86/kernel/reboot.c linux-3.0.4/arch/x86/kernel/reboot.c
14383 --- linux-3.0.4/arch/x86/kernel/reboot.c        2011-07-21 22:17:23.000000000 -0400
14384 +++ linux-3.0.4/arch/x86/kernel/reboot.c        2011-08-23 21:47:55.000000000 -0400
14385 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14386  EXPORT_SYMBOL(pm_power_off);
14387  
14388  static const struct desc_ptr no_idt = {};
14389 -static int reboot_mode;
14390 +static unsigned short reboot_mode;
14391  enum reboot_type reboot_type = BOOT_ACPI;
14392  int reboot_force;
14393  
14394 @@ -315,13 +315,17 @@ core_initcall(reboot_init);
14395  extern const unsigned char machine_real_restart_asm[];
14396  extern const u64 machine_real_restart_gdt[3];
14397  
14398 -void machine_real_restart(unsigned int type)
14399 +__noreturn void machine_real_restart(unsigned int type)
14400  {
14401         void *restart_va;
14402         unsigned long restart_pa;
14403 -       void (*restart_lowmem)(unsigned int);
14404 +       void (* __noreturn restart_lowmem)(unsigned int);
14405         u64 *lowmem_gdt;
14406  
14407 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14408 +       struct desc_struct *gdt;
14409 +#endif
14410 +
14411         local_irq_disable();
14412  
14413         /* Write zero to CMOS register number 0x0f, which the BIOS POST
14414 @@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14415            boot)".  This seems like a fairly standard thing that gets set by
14416            REBOOT.COM programs, and the previous reset routine did this
14417            too. */
14418 -       *((unsigned short *)0x472) = reboot_mode;
14419 +       *(unsigned short *)(__va(0x472)) = reboot_mode;
14420  
14421         /* Patch the GDT in the low memory trampoline */
14422         lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14423  
14424         restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14425         restart_pa = virt_to_phys(restart_va);
14426 -       restart_lowmem = (void (*)(unsigned int))restart_pa;
14427 +       restart_lowmem = (void *)restart_pa;
14428  
14429         /* GDT[0]: GDT self-pointer */
14430         lowmem_gdt[0] =
14431 @@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14432                 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14433  
14434         /* Jump to the identity-mapped low memory code */
14435 +
14436 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14437 +       gdt = get_cpu_gdt_table(smp_processor_id());
14438 +       pax_open_kernel();
14439 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14440 +       gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14441 +       gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14442 +       asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14443 +#endif
14444 +#ifdef CONFIG_PAX_KERNEXEC
14445 +       gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14446 +       gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14447 +       gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14448 +       gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14449 +       gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14450 +       gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14451 +#endif
14452 +       pax_close_kernel();
14453 +#endif
14454 +
14455 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14456 +       asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14457 +       unreachable();
14458 +#else
14459         restart_lowmem(type);
14460 +#endif
14461 +
14462  }
14463  #ifdef CONFIG_APM_MODULE
14464  EXPORT_SYMBOL(machine_real_restart);
14465 @@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14466   * try to force a triple fault and then cycle between hitting the keyboard
14467   * controller and doing that
14468   */
14469 -static void native_machine_emergency_restart(void)
14470 +__noreturn static void native_machine_emergency_restart(void)
14471  {
14472         int i;
14473         int attempt = 0;
14474 @@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14475  #endif
14476  }
14477  
14478 -static void __machine_emergency_restart(int emergency)
14479 +static __noreturn void __machine_emergency_restart(int emergency)
14480  {
14481         reboot_emergency = emergency;
14482         machine_ops.emergency_restart();
14483  }
14484  
14485 -static void native_machine_restart(char *__unused)
14486 +static __noreturn void native_machine_restart(char *__unused)
14487  {
14488         printk("machine restart\n");
14489  
14490 @@ -662,7 +692,7 @@ static void native_machine_restart(char 
14491         __machine_emergency_restart(0);
14492  }
14493  
14494 -static void native_machine_halt(void)
14495 +static __noreturn void native_machine_halt(void)
14496  {
14497         /* stop other cpus and apics */
14498         machine_shutdown();
14499 @@ -673,7 +703,7 @@ static void native_machine_halt(void)
14500         stop_this_cpu(NULL);
14501  }
14502  
14503 -static void native_machine_power_off(void)
14504 +__noreturn static void native_machine_power_off(void)
14505  {
14506         if (pm_power_off) {
14507                 if (!reboot_force)
14508 @@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14509         }
14510         /* a fallback in case there is no PM info available */
14511         tboot_shutdown(TB_SHUTDOWN_HALT);
14512 +       unreachable();
14513  }
14514  
14515  struct machine_ops machine_ops = {
14516 diff -urNp linux-3.0.4/arch/x86/kernel/setup.c linux-3.0.4/arch/x86/kernel/setup.c
14517 --- linux-3.0.4/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14518 +++ linux-3.0.4/arch/x86/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
14519 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14520          * area (640->1Mb) as ram even though it is not.
14521          * take them out.
14522          */
14523 -       e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14524 +       e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14525         sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14526  }
14527  
14528 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14529  
14530         if (!boot_params.hdr.root_flags)
14531                 root_mountflags &= ~MS_RDONLY;
14532 -       init_mm.start_code = (unsigned long) _text;
14533 -       init_mm.end_code = (unsigned long) _etext;
14534 +       init_mm.start_code = ktla_ktva((unsigned long) _text);
14535 +       init_mm.end_code = ktla_ktva((unsigned long) _etext);
14536         init_mm.end_data = (unsigned long) _edata;
14537         init_mm.brk = _brk_end;
14538  
14539 -       code_resource.start = virt_to_phys(_text);
14540 -       code_resource.end = virt_to_phys(_etext)-1;
14541 -       data_resource.start = virt_to_phys(_etext);
14542 +       code_resource.start = virt_to_phys(ktla_ktva(_text));
14543 +       code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14544 +       data_resource.start = virt_to_phys(_sdata);
14545         data_resource.end = virt_to_phys(_edata)-1;
14546         bss_resource.start = virt_to_phys(&__bss_start);
14547         bss_resource.end = virt_to_phys(&__bss_stop)-1;
14548 diff -urNp linux-3.0.4/arch/x86/kernel/setup_percpu.c linux-3.0.4/arch/x86/kernel/setup_percpu.c
14549 --- linux-3.0.4/arch/x86/kernel/setup_percpu.c  2011-07-21 22:17:23.000000000 -0400
14550 +++ linux-3.0.4/arch/x86/kernel/setup_percpu.c  2011-08-23 21:47:55.000000000 -0400
14551 @@ -21,19 +21,17 @@
14552  #include <asm/cpu.h>
14553  #include <asm/stackprotector.h>
14554  
14555 -DEFINE_PER_CPU(int, cpu_number);
14556 +#ifdef CONFIG_SMP
14557 +DEFINE_PER_CPU(unsigned int, cpu_number);
14558  EXPORT_PER_CPU_SYMBOL(cpu_number);
14559 +#endif
14560  
14561 -#ifdef CONFIG_X86_64
14562  #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14563 -#else
14564 -#define BOOT_PERCPU_OFFSET 0
14565 -#endif
14566  
14567  DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14568  EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14569  
14570 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14571 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14572         [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14573  };
14574  EXPORT_SYMBOL(__per_cpu_offset);
14575 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14576  {
14577  #ifdef CONFIG_X86_32
14578         struct desc_struct gdt;
14579 +       unsigned long base = per_cpu_offset(cpu);
14580  
14581 -       pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14582 -                       0x2 | DESCTYPE_S, 0x8);
14583 -       gdt.s = 1;
14584 +       pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14585 +                       0x83 | DESCTYPE_S, 0xC);
14586         write_gdt_entry(get_cpu_gdt_table(cpu),
14587                         GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14588  #endif
14589 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14590         /* alrighty, percpu areas up and running */
14591         delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14592         for_each_possible_cpu(cpu) {
14593 +#ifdef CONFIG_CC_STACKPROTECTOR
14594 +#ifdef CONFIG_X86_32
14595 +               unsigned long canary = per_cpu(stack_canary.canary, cpu);
14596 +#endif
14597 +#endif
14598                 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14599                 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14600                 per_cpu(cpu_number, cpu) = cpu;
14601 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14602                  */
14603                 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14604  #endif
14605 +#ifdef CONFIG_CC_STACKPROTECTOR
14606 +#ifdef CONFIG_X86_32
14607 +               if (!cpu)
14608 +                       per_cpu(stack_canary.canary, cpu) = canary;
14609 +#endif
14610 +#endif
14611                 /*
14612                  * Up to this point, the boot CPU has been using .init.data
14613                  * area.  Reload any changed state for the boot CPU.
14614 diff -urNp linux-3.0.4/arch/x86/kernel/signal.c linux-3.0.4/arch/x86/kernel/signal.c
14615 --- linux-3.0.4/arch/x86/kernel/signal.c        2011-07-21 22:17:23.000000000 -0400
14616 +++ linux-3.0.4/arch/x86/kernel/signal.c        2011-08-23 21:48:14.000000000 -0400
14617 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14618          * Align the stack pointer according to the i386 ABI,
14619          * i.e. so that on function entry ((sp + 4) & 15) == 0.
14620          */
14621 -       sp = ((sp + 4) & -16ul) - 4;
14622 +       sp = ((sp - 12) & -16ul) - 4;
14623  #else /* !CONFIG_X86_32 */
14624         sp = round_down(sp, 16) - 8;
14625  #endif
14626 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14627          * Return an always-bogus address instead so we will die with SIGSEGV.
14628          */
14629         if (onsigstack && !likely(on_sig_stack(sp)))
14630 -               return (void __user *)-1L;
14631 +               return (__force void __user *)-1L;
14632  
14633         /* save i387 state */
14634         if (used_math() && save_i387_xstate(*fpstate) < 0)
14635 -               return (void __user *)-1L;
14636 +               return (__force void __user *)-1L;
14637  
14638         return (void __user *)sp;
14639  }
14640 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14641         }
14642  
14643         if (current->mm->context.vdso)
14644 -               restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14645 +               restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14646         else
14647 -               restorer = &frame->retcode;
14648 +               restorer = (void __user *)&frame->retcode;
14649         if (ka->sa.sa_flags & SA_RESTORER)
14650                 restorer = ka->sa.sa_restorer;
14651  
14652 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14653          * reasons and because gdb uses it as a signature to notice
14654          * signal handler stack frames.
14655          */
14656 -       err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14657 +       err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14658  
14659         if (err)
14660                 return -EFAULT;
14661 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14662                 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14663  
14664                 /* Set up to return from userspace.  */
14665 -               restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14666 +               if (current->mm->context.vdso)
14667 +                       restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14668 +               else
14669 +                       restorer = (void __user *)&frame->retcode;
14670                 if (ka->sa.sa_flags & SA_RESTORER)
14671                         restorer = ka->sa.sa_restorer;
14672                 put_user_ex(restorer, &frame->pretcode);
14673 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14674                  * reasons and because gdb uses it as a signature to notice
14675                  * signal handler stack frames.
14676                  */
14677 -               put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14678 +               put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14679         } put_user_catch(err);
14680  
14681         if (err)
14682 @@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14683         int signr;
14684         sigset_t *oldset;
14685  
14686 +       pax_track_stack();
14687 +
14688         /*
14689          * We want the common case to go fast, which is why we may in certain
14690          * cases get here from kernel mode. Just return without doing anything
14691 @@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14692          * X86_32: vm86 regs switched out by assembly code before reaching
14693          * here, so testing against kernel CS suffices.
14694          */
14695 -       if (!user_mode(regs))
14696 +       if (!user_mode_novm(regs))
14697                 return;
14698  
14699         if (current_thread_info()->status & TS_RESTORE_SIGMASK)
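
The casts added in the signal.c hunk above only matter to sparse; for gcc proper the annotations expand to nothing. Per linux/compiler.h, __user tags a pointer as belonging to the user address space and __force acknowledges an intentional address-space-crossing cast such as the (__force void __user *)-1L sentinel. A minimal standalone sketch of those definitions and of the sentinel cast (illustrative, not part of the patch):

#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __force        __attribute__((force))
#else
# define __user                         /* compiles away outside sparse */
# define __force
#endif

/* example: the always-bogus frame pointer returned by get_sigframe() above */
static void __user *bogus_frame(void)
{
        return (__force void __user *)-1L;
}

int main(void)
{
        return bogus_frame() == (__force void __user *)-1L ? 0 : 1;
}
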
14700 diff -urNp linux-3.0.4/arch/x86/kernel/smpboot.c linux-3.0.4/arch/x86/kernel/smpboot.c
14701 --- linux-3.0.4/arch/x86/kernel/smpboot.c       2011-07-21 22:17:23.000000000 -0400
14702 +++ linux-3.0.4/arch/x86/kernel/smpboot.c       2011-08-23 21:47:55.000000000 -0400
14703 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14704         set_idle_for_cpu(cpu, c_idle.idle);
14705  do_rest:
14706         per_cpu(current_task, cpu) = c_idle.idle;
14707 +       per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14708  #ifdef CONFIG_X86_32
14709         /* Stack for startup_32 can be just as for start_secondary onwards */
14710         irq_ctx_init(cpu);
14711  #else
14712         clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14713         initial_gs = per_cpu_offset(cpu);
14714 -       per_cpu(kernel_stack, cpu) =
14715 -               (unsigned long)task_stack_page(c_idle.idle) -
14716 -               KERNEL_STACK_OFFSET + THREAD_SIZE;
14717 +       per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14718  #endif
14719 +
14720 +       pax_open_kernel();
14721         early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14722 +       pax_close_kernel();
14723 +
14724         initial_code = (unsigned long)start_secondary;
14725         stack_start  = c_idle.idle->thread.sp;
14726  
14727 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14728  
14729         per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14730  
14731 +#ifdef CONFIG_PAX_PER_CPU_PGD
14732 +       clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14733 +                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14734 +                       KERNEL_PGD_PTRS);
14735 +#endif
14736 +
14737         err = do_boot_cpu(apicid, cpu);
14738         if (err) {
14739                 pr_debug("do_boot_cpu failed %d\n", err);
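
The CONFIG_PAX_PER_CPU_PGD block added above gives each CPU its own top-level page directory and copies the kernel half of swapper_pg_dir into it before the secondary CPU is booted. clone_pgd_range() itself amounts to a memcpy of top-level entries (cf. asm/pgtable.h); a stand-in sketch, with pgd_stub_t as a hypothetical placeholder for pgd_t:

#include <string.h>

typedef unsigned long pgd_stub_t;       /* stand-in for pgd_t */

/* copy 'count' top-level entries so the destination PGD maps the same
 * kernel space as the source (swapper_pg_dir in the hunk above) */
static void clone_pgd_range_stub(pgd_stub_t *dst,
                                 const pgd_stub_t *src, int count)
{
        memcpy(dst, src, count * sizeof(*dst));
}

int main(void)
{
        pgd_stub_t kernel_half[4] = { 1, 2, 3, 4 }, percpu_pgd[4] = { 0 };

        clone_pgd_range_stub(percpu_pgd, kernel_half, 4);
        return percpu_pgd[3] == 4 ? 0 : 1;
}
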
14740 diff -urNp linux-3.0.4/arch/x86/kernel/step.c linux-3.0.4/arch/x86/kernel/step.c
14741 --- linux-3.0.4/arch/x86/kernel/step.c  2011-07-21 22:17:23.000000000 -0400
14742 +++ linux-3.0.4/arch/x86/kernel/step.c  2011-08-23 21:47:55.000000000 -0400
14743 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14744                 struct desc_struct *desc;
14745                 unsigned long base;
14746  
14747 -               seg &= ~7UL;
14748 +               seg >>= 3;
14749  
14750                 mutex_lock(&child->mm->context.lock);
14751 -               if (unlikely((seg >> 3) >= child->mm->context.size))
14752 +               if (unlikely(seg >= child->mm->context.size))
14753                         addr = -1L; /* bogus selector, access would fault */
14754                 else {
14755                         desc = child->mm->context.ldt + seg;
14756 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14757                         addr += base;
14758                 }
14759                 mutex_unlock(&child->mm->context.lock);
14760 -       }
14761 +       } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14762 +               addr = ktla_ktva(addr);
14763  
14764         return addr;
14765  }
14766 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14767         unsigned char opcode[15];
14768         unsigned long addr = convert_ip_to_linear(child, regs);
14769  
14770 +       if (addr == -EINVAL)
14771 +               return 0;
14772 +
14773         copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14774         for (i = 0; i < copied; i++) {
14775                 switch (opcode[i]) {
14776 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14777  
14778  #ifdef CONFIG_X86_64
14779                 case 0x40 ... 0x4f:
14780 -                       if (regs->cs != __USER_CS)
14781 +                       if ((regs->cs & 0xffff) != __USER_CS)
14782                                 /* 32-bit mode: register increment */
14783                                 return 0;
14784                         /* 64-bit mode: REX prefix */
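
The step.c change above keeps the LDT index (seg >>= 3) instead of the byte offset (seg &= ~7UL), so the bounds check against context.size and the ldt[] indexing use the same unit; the companion change masks regs->cs to 16 bits before comparing it with __USER_CS. For reference, an x86 segment selector packs RPL, table indicator, and index as in this small userspace sketch (illustrative only):

#include <stdio.h>

int main(void)
{
        unsigned long selector = 0x73;             /* e.g. a 32-bit user %cs */
        unsigned long rpl   = selector & 3;        /* requested privilege level */
        unsigned long ti    = (selector >> 2) & 1; /* 0 = GDT, 1 = LDT */
        unsigned long index = selector >> 3;       /* descriptor table index */

        printf("selector=%#lx index=%lu ti=%lu rpl=%lu\n",
               selector, index, ti, rpl);
        return 0;
}
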
14785 diff -urNp linux-3.0.4/arch/x86/kernel/syscall_table_32.S linux-3.0.4/arch/x86/kernel/syscall_table_32.S
14786 --- linux-3.0.4/arch/x86/kernel/syscall_table_32.S      2011-07-21 22:17:23.000000000 -0400
14787 +++ linux-3.0.4/arch/x86/kernel/syscall_table_32.S      2011-08-23 21:47:55.000000000 -0400
14788 @@ -1,3 +1,4 @@
14789 +.section .rodata,"a",@progbits
14790  ENTRY(sys_call_table)
14791         .long sys_restart_syscall       /* 0 - old "setup()" system call, used for restarting */
14792         .long sys_exit
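
Placing sys_call_table in .rodata (the one-line addition above) makes the dispatch table write-protected once rodata is marked read-only, so overwriting an entry to hook a system call faults instead of silently rerouting it. A C-level analogue with stand-in handler names (not part of the patch):

static long stub_restart(void) { return 0; }
static long stub_exit(void)    { return 0; }

/* 'const' lets the toolchain place the table in a read-only section */
static long (* const syscall_table_stub[])(void) = {
        stub_restart,   /* 0 */
        stub_exit,      /* 1 */
};

int main(void)
{
        return (int)syscall_table_stub[1]();    /* dispatch through the const table */
}
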
14793 diff -urNp linux-3.0.4/arch/x86/kernel/sys_i386_32.c linux-3.0.4/arch/x86/kernel/sys_i386_32.c
14794 --- linux-3.0.4/arch/x86/kernel/sys_i386_32.c   2011-07-21 22:17:23.000000000 -0400
14795 +++ linux-3.0.4/arch/x86/kernel/sys_i386_32.c   2011-08-23 21:47:55.000000000 -0400
14796 @@ -24,17 +24,224 @@
14797  
14798  #include <asm/syscalls.h>
14799  
14800 -/*
14801 - * Do a system call from kernel instead of calling sys_execve so we
14802 - * end up with proper pt_regs.
14803 - */
14804 -int kernel_execve(const char *filename,
14805 -                 const char *const argv[],
14806 -                 const char *const envp[])
14807 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14808  {
14809 -       long __res;
14810 -       asm volatile ("int $0x80"
14811 -       : "=a" (__res)
14812 -       : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14813 -       return __res;
14814 +       unsigned long pax_task_size = TASK_SIZE;
14815 +
14816 +#ifdef CONFIG_PAX_SEGMEXEC
14817 +       if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14818 +               pax_task_size = SEGMEXEC_TASK_SIZE;
14819 +#endif
14820 +
14821 +       if (len > pax_task_size || addr > pax_task_size - len)
14822 +               return -EINVAL;
14823 +
14824 +       return 0;
14825 +}
14826 +
14827 +unsigned long
14828 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
14829 +               unsigned long len, unsigned long pgoff, unsigned long flags)
14830 +{
14831 +       struct mm_struct *mm = current->mm;
14832 +       struct vm_area_struct *vma;
14833 +       unsigned long start_addr, pax_task_size = TASK_SIZE;
14834 +
14835 +#ifdef CONFIG_PAX_SEGMEXEC
14836 +       if (mm->pax_flags & MF_PAX_SEGMEXEC)
14837 +               pax_task_size = SEGMEXEC_TASK_SIZE;
14838 +#endif
14839 +
14840 +       pax_task_size -= PAGE_SIZE;
14841 +
14842 +       if (len > pax_task_size)
14843 +               return -ENOMEM;
14844 +
14845 +       if (flags & MAP_FIXED)
14846 +               return addr;
14847 +
14848 +#ifdef CONFIG_PAX_RANDMMAP
14849 +       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14850 +#endif
14851 +
14852 +       if (addr) {
14853 +               addr = PAGE_ALIGN(addr);
14854 +               if (pax_task_size - len >= addr) {
14855 +                       vma = find_vma(mm, addr);
14856 +                       if (check_heap_stack_gap(vma, addr, len))
14857 +                               return addr;
14858 +               }
14859 +       }
14860 +       if (len > mm->cached_hole_size) {
14861 +               start_addr = addr = mm->free_area_cache;
14862 +       } else {
14863 +               start_addr = addr = mm->mmap_base;
14864 +               mm->cached_hole_size = 0;
14865 +       }
14866 +
14867 +#ifdef CONFIG_PAX_PAGEEXEC
14868 +       if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14869 +               start_addr = 0x00110000UL;
14870 +
14871 +#ifdef CONFIG_PAX_RANDMMAP
14872 +               if (mm->pax_flags & MF_PAX_RANDMMAP)
14873 +                       start_addr += mm->delta_mmap & 0x03FFF000UL;
14874 +#endif
14875 +
14876 +               if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14877 +                       start_addr = addr = mm->mmap_base;
14878 +               else
14879 +                       addr = start_addr;
14880 +       }
14881 +#endif
14882 +
14883 +full_search:
14884 +       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14885 +               /* At this point:  (!vma || addr < vma->vm_end). */
14886 +               if (pax_task_size - len < addr) {
14887 +                       /*
14888 +                        * Start a new search - just in case we missed
14889 +                        * some holes.
14890 +                        */
14891 +                       if (start_addr != mm->mmap_base) {
14892 +                               start_addr = addr = mm->mmap_base;
14893 +                               mm->cached_hole_size = 0;
14894 +                               goto full_search;
14895 +                       }
14896 +                       return -ENOMEM;
14897 +               }
14898 +               if (check_heap_stack_gap(vma, addr, len))
14899 +                       break;
14900 +               if (addr + mm->cached_hole_size < vma->vm_start)
14901 +                       mm->cached_hole_size = vma->vm_start - addr;
14902 +               addr = vma->vm_end;
14903 +               if (mm->start_brk <= addr && addr < mm->mmap_base) {
14904 +                       start_addr = addr = mm->mmap_base;
14905 +                       mm->cached_hole_size = 0;
14906 +                       goto full_search;
14907 +               }
14908 +       }
14909 +
14910 +       /*
14911 +        * Remember the place where we stopped the search:
14912 +        */
14913 +       mm->free_area_cache = addr + len;
14914 +       return addr;
14915 +}
14916 +
14917 +unsigned long
14918 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14919 +                         const unsigned long len, const unsigned long pgoff,
14920 +                         const unsigned long flags)
14921 +{
14922 +       struct vm_area_struct *vma;
14923 +       struct mm_struct *mm = current->mm;
14924 +       unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14925 +
14926 +#ifdef CONFIG_PAX_SEGMEXEC
14927 +       if (mm->pax_flags & MF_PAX_SEGMEXEC)
14928 +               pax_task_size = SEGMEXEC_TASK_SIZE;
14929 +#endif
14930 +
14931 +       pax_task_size -= PAGE_SIZE;
14932 +
14933 +       /* requested length too big for entire address space */
14934 +       if (len > pax_task_size)
14935 +               return -ENOMEM;
14936 +
14937 +       if (flags & MAP_FIXED)
14938 +               return addr;
14939 +
14940 +#ifdef CONFIG_PAX_PAGEEXEC
14941 +       if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14942 +               goto bottomup;
14943 +#endif
14944 +
14945 +#ifdef CONFIG_PAX_RANDMMAP
14946 +       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14947 +#endif
14948 +
14949 +       /* requesting a specific address */
14950 +       if (addr) {
14951 +               addr = PAGE_ALIGN(addr);
14952 +               if (pax_task_size - len >= addr) {
14953 +                       vma = find_vma(mm, addr);
14954 +                       if (check_heap_stack_gap(vma, addr, len))
14955 +                               return addr;
14956 +               }
14957 +       }
14958 +
14959 +       /* check if free_area_cache is useful for us */
14960 +       if (len <= mm->cached_hole_size) {
14961 +               mm->cached_hole_size = 0;
14962 +               mm->free_area_cache = mm->mmap_base;
14963 +       }
14964 +
14965 +       /* either no address requested or can't fit in requested address hole */
14966 +       addr = mm->free_area_cache;
14967 +
14968 +       /* make sure it can fit in the remaining address space */
14969 +       if (addr > len) {
14970 +               vma = find_vma(mm, addr-len);
14971 +               if (check_heap_stack_gap(vma, addr - len, len))
14972 +                       /* remember the address as a hint for next time */
14973 +                       return (mm->free_area_cache = addr-len);
14974 +       }
14975 +
14976 +       if (mm->mmap_base < len)
14977 +               goto bottomup;
14978 +
14979 +       addr = mm->mmap_base-len;
14980 +
14981 +       do {
14982 +               /*
14983 +                * Lookup failure means no vma is above this address,
14984 +                * else if new region fits below vma->vm_start,
14985 +                * return with success:
14986 +                */
14987 +               vma = find_vma(mm, addr);
14988 +               if (check_heap_stack_gap(vma, addr, len))
14989 +                       /* remember the address as a hint for next time */
14990 +                       return (mm->free_area_cache = addr);
14991 +
14992 +               /* remember the largest hole we saw so far */
14993 +               if (addr + mm->cached_hole_size < vma->vm_start)
14994 +                       mm->cached_hole_size = vma->vm_start - addr;
14995 +
14996 +               /* try just below the current vma->vm_start */
14997 +               addr = skip_heap_stack_gap(vma, len);
14998 +       } while (!IS_ERR_VALUE(addr));
14999 +
15000 +bottomup:
15001 +       /*
15002 +        * A failed mmap() very likely causes application failure,
15003 +        * so fall back to the bottom-up function here. This scenario
15004 +        * can happen with large stack limits and large mmap()
15005 +        * allocations.
15006 +        */
15007 +
15008 +#ifdef CONFIG_PAX_SEGMEXEC
15009 +       if (mm->pax_flags & MF_PAX_SEGMEXEC)
15010 +               mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15011 +       else
15012 +#endif
15013 +
15014 +       mm->mmap_base = TASK_UNMAPPED_BASE;
15015 +
15016 +#ifdef CONFIG_PAX_RANDMMAP
15017 +       if (mm->pax_flags & MF_PAX_RANDMMAP)
15018 +               mm->mmap_base += mm->delta_mmap;
15019 +#endif
15020 +
15021 +       mm->free_area_cache = mm->mmap_base;
15022 +       mm->cached_hole_size = ~0UL;
15023 +       addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15024 +       /*
15025 +        * Restore the topdown base:
15026 +        */
15027 +       mm->mmap_base = base;
15028 +       mm->free_area_cache = base;
15029 +       mm->cached_hole_size = ~0UL;
15030 +
15031 +       return addr;
15032  }
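
The two allocators added above differ from the generic versions mainly in two ways: the usable range shrinks to pax_task_size under SEGMEXEC, and the plain "!vma || addr + len <= vma->vm_start" fit test becomes check_heap_stack_gap(), which (as defined elsewhere in this patch) also refuses to place a mapping flush against a downward-growing stack. A simplified userspace-style sketch of that distinction, with vma_stub as a stand-in type:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vma_stub {
        unsigned long vm_start;
        unsigned long vm_end;
        bool grows_down;                /* stand-in for VM_GROWSDOWN */
};

/* vanilla fit test: the hole before 'vma' is simply big enough */
static bool fits_plain(const struct vma_stub *vma,
                       unsigned long addr, unsigned long len)
{
        return vma == NULL || addr + len <= vma->vm_start;
}

/* gap-aware test: additionally keep 'gap' bytes below a growing stack */
static bool fits_with_gap(const struct vma_stub *vma,
                          unsigned long addr, unsigned long len,
                          unsigned long gap)
{
        if (vma == NULL)
                return true;
        if (vma->grows_down)
                return addr + len + gap <= vma->vm_start;
        return addr + len <= vma->vm_start;
}

int main(void)
{
        struct vma_stub stack = { 0x20000, 0x30000, true };

        printf("plain=%d gap-aware=%d\n",
               fits_plain(&stack, 0x10000, 0x10000),
               fits_with_gap(&stack, 0x10000, 0x10000, 0x1000));
        return 0;
}
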
15033 diff -urNp linux-3.0.4/arch/x86/kernel/sys_x86_64.c linux-3.0.4/arch/x86/kernel/sys_x86_64.c
15034 --- linux-3.0.4/arch/x86/kernel/sys_x86_64.c    2011-07-21 22:17:23.000000000 -0400
15035 +++ linux-3.0.4/arch/x86/kernel/sys_x86_64.c    2011-08-23 21:47:55.000000000 -0400
15036 @@ -32,8 +32,8 @@ out:
15037         return error;
15038  }
15039  
15040 -static void find_start_end(unsigned long flags, unsigned long *begin,
15041 -                          unsigned long *end)
15042 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15043 +                          unsigned long *begin, unsigned long *end)
15044  {
15045         if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15046                 unsigned long new_begin;
15047 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15048                                 *begin = new_begin;
15049                 }
15050         } else {
15051 -               *begin = TASK_UNMAPPED_BASE;
15052 +               *begin = mm->mmap_base;
15053                 *end = TASK_SIZE;
15054         }
15055  }
15056 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15057         if (flags & MAP_FIXED)
15058                 return addr;
15059  
15060 -       find_start_end(flags, &begin, &end);
15061 +       find_start_end(mm, flags, &begin, &end);
15062  
15063         if (len > end)
15064                 return -ENOMEM;
15065  
15066 +#ifdef CONFIG_PAX_RANDMMAP
15067 +       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15068 +#endif
15069 +
15070         if (addr) {
15071                 addr = PAGE_ALIGN(addr);
15072                 vma = find_vma(mm, addr);
15073 -               if (end - len >= addr &&
15074 -                   (!vma || addr + len <= vma->vm_start))
15075 +               if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15076                         return addr;
15077         }
15078         if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15079 @@ -106,7 +109,7 @@ full_search:
15080                         }
15081                         return -ENOMEM;
15082                 }
15083 -               if (!vma || addr + len <= vma->vm_start) {
15084 +               if (check_heap_stack_gap(vma, addr, len)) {
15085                         /*
15086                          * Remember the place where we stopped the search:
15087                          */
15088 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15089  {
15090         struct vm_area_struct *vma;
15091         struct mm_struct *mm = current->mm;
15092 -       unsigned long addr = addr0;
15093 +       unsigned long base = mm->mmap_base, addr = addr0;
15094  
15095         /* requested length too big for entire address space */
15096         if (len > TASK_SIZE)
15097 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15098         if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15099                 goto bottomup;
15100  
15101 +#ifdef CONFIG_PAX_RANDMMAP
15102 +       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15103 +#endif
15104 +
15105         /* requesting a specific address */
15106         if (addr) {
15107                 addr = PAGE_ALIGN(addr);
15108 -               vma = find_vma(mm, addr);
15109 -               if (TASK_SIZE - len >= addr &&
15110 -                               (!vma || addr + len <= vma->vm_start))
15111 -                       return addr;
15112 +               if (TASK_SIZE - len >= addr) {
15113 +                       vma = find_vma(mm, addr);
15114 +                       if (check_heap_stack_gap(vma, addr, len))
15115 +                               return addr;
15116 +               }
15117         }
15118  
15119         /* check if free_area_cache is useful for us */
15120 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15121         /* make sure it can fit in the remaining address space */
15122         if (addr > len) {
15123                 vma = find_vma(mm, addr-len);
15124 -               if (!vma || addr <= vma->vm_start)
15125 +               if (check_heap_stack_gap(vma, addr - len, len))
15126                         /* remember the address as a hint for next time */
15127                         return mm->free_area_cache = addr-len;
15128         }
15129 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15130                  * return with success:
15131                  */
15132                 vma = find_vma(mm, addr);
15133 -               if (!vma || addr+len <= vma->vm_start)
15134 +               if (check_heap_stack_gap(vma, addr, len))
15135                         /* remember the address as a hint for next time */
15136                         return mm->free_area_cache = addr;
15137  
15138 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15139                         mm->cached_hole_size = vma->vm_start - addr;
15140  
15141                 /* try just below the current vma->vm_start */
15142 -               addr = vma->vm_start-len;
15143 -       } while (len < vma->vm_start);
15144 +               addr = skip_heap_stack_gap(vma, len);
15145 +       } while (!IS_ERR_VALUE(addr));
15146  
15147  bottomup:
15148         /*
15149 @@ -198,13 +206,21 @@ bottomup:
15150          * can happen with large stack limits and large mmap()
15151          * allocations.
15152          */
15153 +       mm->mmap_base = TASK_UNMAPPED_BASE;
15154 +
15155 +#ifdef CONFIG_PAX_RANDMMAP
15156 +       if (mm->pax_flags & MF_PAX_RANDMMAP)
15157 +               mm->mmap_base += mm->delta_mmap;
15158 +#endif
15159 +
15160 +       mm->free_area_cache = mm->mmap_base;
15161         mm->cached_hole_size = ~0UL;
15162 -       mm->free_area_cache = TASK_UNMAPPED_BASE;
15163         addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15164         /*
15165          * Restore the topdown base:
15166          */
15167 -       mm->free_area_cache = mm->mmap_base;
15168 +       mm->mmap_base = base;
15169 +       mm->free_area_cache = base;
15170         mm->cached_hole_size = ~0UL;
15171  
15172         return addr;
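
The rewritten topdown loop above terminates on IS_ERR_VALUE(addr) rather than on a length comparison: skip_heap_stack_gap() (defined elsewhere in this patch) returns either the next candidate address below the vma or an errno encoded as an address in the top 4095 bytes, the usual err.h convention. A small sketch of that convention; next_candidate_below() is a hypothetical stand-in:

#include <stdio.h>

#define MAX_ERRNO               4095UL
#define IS_ERR_VALUE_STUB(x)    ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

/* hypothetical stand-in for skip_heap_stack_gap(): next address below the
 * vma that still leaves 'gap' bytes, or -ENOMEM when nothing fits */
static unsigned long next_candidate_below(unsigned long vm_start,
                                          unsigned long len, unsigned long gap)
{
        if (vm_start < len + gap)
                return (unsigned long)-12;      /* -ENOMEM */
        return vm_start - len - gap;
}

int main(void)
{
        unsigned long addr = next_candidate_below(0x1000, 0x2000, 0x1000);

        printf("%s\n", IS_ERR_VALUE_STUB(addr) ? "no room" : "candidate found");
        return 0;
}
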
15173 diff -urNp linux-3.0.4/arch/x86/kernel/tboot.c linux-3.0.4/arch/x86/kernel/tboot.c
15174 --- linux-3.0.4/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15175 +++ linux-3.0.4/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15176 @@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15177  
15178  void tboot_shutdown(u32 shutdown_type)
15179  {
15180 -       void (*shutdown)(void);
15181 +       void (* __noreturn shutdown)(void);
15182  
15183         if (!tboot_enabled())
15184                 return;
15185 @@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15186  
15187         switch_to_tboot_pt();
15188  
15189 -       shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15190 +       shutdown = (void *)tboot->shutdown_entry;
15191         shutdown();
15192  
15193         /* should not reach here */
15194 @@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15195         tboot_shutdown(acpi_shutdown_map[sleep_state]);
15196  }
15197  
15198 -static atomic_t ap_wfs_count;
15199 +static atomic_unchecked_t ap_wfs_count;
15200  
15201  static int tboot_wait_for_aps(int num_aps)
15202  {
15203 @@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15204  {
15205         switch (action) {
15206         case CPU_DYING:
15207 -               atomic_inc(&ap_wfs_count);
15208 +               atomic_inc_unchecked(&ap_wfs_count);
15209                 if (num_online_cpus() == 1)
15210 -                       if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15211 +                       if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15212                                 return NOTIFY_BAD;
15213                 break;
15214         }
15215 @@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15216  
15217         tboot_create_trampoline();
15218  
15219 -       atomic_set(&ap_wfs_count, 0);
15220 +       atomic_set_unchecked(&ap_wfs_count, 0);
15221         register_hotcpu_notifier(&tboot_cpu_notifier);
15222         return 0;
15223  }
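
ap_wfs_count above is one of the counters this patch converts to atomic_unchecked_t: under PAX_REFCOUNT the normal atomic_inc() detects signed wrap-around to catch reference-count overflows, so counters whose wrapping is harmless keep the historical behaviour through the *_unchecked accessors. The real definitions live in the asm/atomic.h hunks of this patch; a conceptual stand-in only:

typedef struct { volatile int counter; } atomic_unchecked_stub_t;

/* plain increment: no overflow detection, may wrap like a classic atomic_t */
static void atomic_inc_unchecked_stub(atomic_unchecked_stub_t *v)
{
        __sync_fetch_and_add(&v->counter, 1);
}

static int atomic_read_unchecked_stub(const atomic_unchecked_stub_t *v)
{
        return v->counter;
}

int main(void)
{
        atomic_unchecked_stub_t ap_wfs = { 0 };

        atomic_inc_unchecked_stub(&ap_wfs);
        return atomic_read_unchecked_stub(&ap_wfs) == 1 ? 0 : 1;
}
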
15224 diff -urNp linux-3.0.4/arch/x86/kernel/time.c linux-3.0.4/arch/x86/kernel/time.c
15225 --- linux-3.0.4/arch/x86/kernel/time.c  2011-07-21 22:17:23.000000000 -0400
15226 +++ linux-3.0.4/arch/x86/kernel/time.c  2011-08-23 21:47:55.000000000 -0400
15227 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs 
15228  {
15229         unsigned long pc = instruction_pointer(regs);
15230  
15231 -       if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15232 +       if (!user_mode(regs) && in_lock_functions(pc)) {
15233  #ifdef CONFIG_FRAME_POINTER
15234 -               return *(unsigned long *)(regs->bp + sizeof(long));
15235 +               return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15236  #else
15237                 unsigned long *sp =
15238                         (unsigned long *)kernel_stack_pointer(regs);
15239 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs 
15240                  * or above a saved flags. Eflags has bits 22-31 zero,
15241                  * kernel addresses don't.
15242                  */
15243 +
15244 +#ifdef CONFIG_PAX_KERNEXEC
15245 +               return ktla_ktva(sp[0]);
15246 +#else
15247                 if (sp[0] >> 22)
15248                         return sp[0];
15249                 if (sp[1] >> 22)
15250                         return sp[1];
15251  #endif
15252 +
15253 +#endif
15254         }
15255         return pc;
15256  }
15257 diff -urNp linux-3.0.4/arch/x86/kernel/tls.c linux-3.0.4/arch/x86/kernel/tls.c
15258 --- linux-3.0.4/arch/x86/kernel/tls.c   2011-07-21 22:17:23.000000000 -0400
15259 +++ linux-3.0.4/arch/x86/kernel/tls.c   2011-08-23 21:47:55.000000000 -0400
15260 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15261         if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15262                 return -EINVAL;
15263  
15264 +#ifdef CONFIG_PAX_SEGMEXEC
15265 +       if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15266 +               return -EINVAL;
15267 +#endif
15268 +
15269         set_tls_desc(p, idx, &info, 1);
15270  
15271         return 0;
15272 diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_32.S linux-3.0.4/arch/x86/kernel/trampoline_32.S
15273 --- linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15274 +++ linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15275 @@ -32,6 +32,12 @@
15276  #include <asm/segment.h>
15277  #include <asm/page_types.h>
15278  
15279 +#ifdef CONFIG_PAX_KERNEXEC
15280 +#define ta(X) (X)
15281 +#else
15282 +#define ta(X) ((X) - __PAGE_OFFSET)
15283 +#endif
15284 +
15285  #ifdef CONFIG_SMP
15286  
15287         .section ".x86_trampoline","a"
15288 @@ -62,7 +68,7 @@ r_base = .
15289         inc     %ax             # protected mode (PE) bit
15290         lmsw    %ax             # into protected mode
15291         # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15292 -       ljmpl   $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15293 +       ljmpl   $__BOOT_CS, $ta(startup_32_smp)
15294  
15295         # These need to be in the same 64K segment as the above;
15296         # hence we don't use the boot_gdt_descr defined in head.S
15297 diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_64.S linux-3.0.4/arch/x86/kernel/trampoline_64.S
15298 --- linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15299 +++ linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15300 @@ -90,7 +90,7 @@ startup_32:
15301         movl    $__KERNEL_DS, %eax      # Initialize the %ds segment register
15302         movl    %eax, %ds
15303  
15304 -       movl    $X86_CR4_PAE, %eax
15305 +       movl    $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15306         movl    %eax, %cr4              # Enable PAE mode
15307  
15308                                         # Setup trampoline 4 level pagetables
15309 @@ -138,7 +138,7 @@ tidt:
15310         # so the kernel can live anywhere
15311         .balign 4
15312  tgdt:
15313 -       .short  tgdt_end - tgdt         # gdt limit
15314 +       .short  tgdt_end - tgdt - 1     # gdt limit
15315         .long   tgdt - r_base
15316         .short 0
15317         .quad   0x00cf9b000000ffff      # __KERNEL32_CS
15318 diff -urNp linux-3.0.4/arch/x86/kernel/traps.c linux-3.0.4/arch/x86/kernel/traps.c
15319 --- linux-3.0.4/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15320 +++ linux-3.0.4/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15321 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15322  
15323  /* Do we ignore FPU interrupts ? */
15324  char ignore_fpu_irq;
15325 -
15326 -/*
15327 - * The IDT has to be page-aligned to simplify the Pentium
15328 - * F0 0F bug workaround.
15329 - */
15330 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15331  #endif
15332  
15333  DECLARE_BITMAP(used_vectors, NR_VECTORS);
15334 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15335  }
15336  
15337  static void __kprobes
15338 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15339 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15340         long error_code, siginfo_t *info)
15341  {
15342         struct task_struct *tsk = current;
15343  
15344  #ifdef CONFIG_X86_32
15345 -       if (regs->flags & X86_VM_MASK) {
15346 +       if (v8086_mode(regs)) {
15347                 /*
15348                  * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15349                  * On nmi (interrupt 2), do_trap should not be called.
15350 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15351         }
15352  #endif
15353  
15354 -       if (!user_mode(regs))
15355 +       if (!user_mode_novm(regs))
15356                 goto kernel_trap;
15357  
15358  #ifdef CONFIG_X86_32
15359 @@ -157,7 +151,7 @@ trap_signal:
15360             printk_ratelimit()) {
15361                 printk(KERN_INFO
15362                        "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15363 -                      tsk->comm, tsk->pid, str,
15364 +                      tsk->comm, task_pid_nr(tsk), str,
15365                        regs->ip, regs->sp, error_code);
15366                 print_vma_addr(" in ", regs->ip);
15367                 printk("\n");
15368 @@ -174,8 +168,20 @@ kernel_trap:
15369         if (!fixup_exception(regs)) {
15370                 tsk->thread.error_code = error_code;
15371                 tsk->thread.trap_no = trapnr;
15372 +
15373 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15374 +               if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15375 +                       str = "PAX: suspicious stack segment fault";
15376 +#endif
15377 +
15378                 die(str, regs, error_code);
15379         }
15380 +
15381 +#ifdef CONFIG_PAX_REFCOUNT
15382 +       if (trapnr == 4)
15383 +               pax_report_refcount_overflow(regs);
15384 +#endif
15385 +
15386         return;
15387  
15388  #ifdef CONFIG_X86_32
15389 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15390         conditional_sti(regs);
15391  
15392  #ifdef CONFIG_X86_32
15393 -       if (regs->flags & X86_VM_MASK)
15394 +       if (v8086_mode(regs))
15395                 goto gp_in_vm86;
15396  #endif
15397  
15398         tsk = current;
15399 -       if (!user_mode(regs))
15400 +       if (!user_mode_novm(regs))
15401                 goto gp_in_kernel;
15402  
15403 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15404 +       if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15405 +               struct mm_struct *mm = tsk->mm;
15406 +               unsigned long limit;
15407 +
15408 +               down_write(&mm->mmap_sem);
15409 +               limit = mm->context.user_cs_limit;
15410 +               if (limit < TASK_SIZE) {
15411 +                       track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15412 +                       up_write(&mm->mmap_sem);
15413 +                       return;
15414 +               }
15415 +               up_write(&mm->mmap_sem);
15416 +       }
15417 +#endif
15418 +
15419         tsk->thread.error_code = error_code;
15420         tsk->thread.trap_no = 13;
15421  
15422 @@ -304,6 +326,13 @@ gp_in_kernel:
15423         if (notify_die(DIE_GPF, "general protection fault", regs,
15424                                 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15425                 return;
15426 +
15427 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15428 +       if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15429 +               die("PAX: suspicious general protection fault", regs, error_code);
15430 +       else
15431 +#endif
15432 +
15433         die("general protection fault", regs, error_code);
15434  }
15435  
15436 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15437  dotraplinkage notrace __kprobes void
15438  do_nmi(struct pt_regs *regs, long error_code)
15439  {
15440 +
15441 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15442 +       if (!user_mode(regs)) {
15443 +               unsigned long cs = regs->cs & 0xFFFF;
15444 +               unsigned long ip = ktva_ktla(regs->ip);
15445 +
15446 +               if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15447 +                       regs->ip = ip;
15448 +       }
15449 +#endif
15450 +
15451         nmi_enter();
15452  
15453         inc_irq_stat(__nmi_count);
15454 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15455         /* It's safe to allow irq's after DR6 has been saved */
15456         preempt_conditional_sti(regs);
15457  
15458 -       if (regs->flags & X86_VM_MASK) {
15459 +       if (v8086_mode(regs)) {
15460                 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15461                                 error_code, 1);
15462                 preempt_conditional_cli(regs);
15463 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15464          * We already checked v86 mode above, so we can check for kernel mode
15465          * by just checking the CPL of CS.
15466          */
15467 -       if ((dr6 & DR_STEP) && !user_mode(regs)) {
15468 +       if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15469                 tsk->thread.debugreg6 &= ~DR_STEP;
15470                 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15471                 regs->flags &= ~X86_EFLAGS_TF;
15472 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15473                 return;
15474         conditional_sti(regs);
15475  
15476 -       if (!user_mode_vm(regs))
15477 +       if (!user_mode(regs))
15478         {
15479                 if (!fixup_exception(regs)) {
15480                         task->thread.error_code = error_code;
15481 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15482  void __math_state_restore(void)
15483  {
15484         struct thread_info *thread = current_thread_info();
15485 -       struct task_struct *tsk = thread->task;
15486 +       struct task_struct *tsk = current;
15487  
15488         /*
15489          * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15490 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15491   */
15492  asmlinkage void math_state_restore(void)
15493  {
15494 -       struct thread_info *thread = current_thread_info();
15495 -       struct task_struct *tsk = thread->task;
15496 +       struct task_struct *tsk = current;
15497  
15498         if (!tsk_used_math(tsk)) {
15499                 local_irq_enable();
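
The traps.c hunks above follow a renaming this patch applies throughout: explicit "regs->flags & X86_VM_MASK" tests become v8086_mode(regs), the strict CS-based check is spelled user_mode_novm(), and user_mode() becomes the vm86-aware test (the reverse of the vanilla user_mode()/user_mode_vm() pairing, as the time.c hunk below also shows). A sketch of the two predicates being distinguished, written against plain cs/flags values rather than pt_regs:

#include <stdio.h>

#define X86_EFLAGS_VM   0x00020000UL    /* EFLAGS.VM: virtual-8086 mode */

/* strict check, user_mode_novm()-style: look only at the CPL in CS */
static int strict_user_mode(unsigned long cs)
{
        return (cs & 3) == 3;
}

/* vm86-aware check, user_mode()-style after this patch's renaming */
static int vm86_aware_user_mode(unsigned long cs, unsigned long flags)
{
        return (cs & 3) == 3 || (flags & X86_EFLAGS_VM);
}

int main(void)
{
        unsigned long cs = 0x0000, flags = X86_EFLAGS_VM;   /* vm86-style frame */

        printf("strict=%d vm86-aware=%d\n",
               strict_user_mode(cs), vm86_aware_user_mode(cs, flags));
        return 0;
}
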
15500 diff -urNp linux-3.0.4/arch/x86/kernel/verify_cpu.S linux-3.0.4/arch/x86/kernel/verify_cpu.S
15501 --- linux-3.0.4/arch/x86/kernel/verify_cpu.S    2011-07-21 22:17:23.000000000 -0400
15502 +++ linux-3.0.4/arch/x86/kernel/verify_cpu.S    2011-08-23 21:48:14.000000000 -0400
15503 @@ -20,6 +20,7 @@
15504   *     arch/x86/boot/compressed/head_64.S: Boot cpu verification
15505   *     arch/x86/kernel/trampoline_64.S: secondary processor verification
15506   *     arch/x86/kernel/head_32.S: processor startup
15507 + *     arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15508   *
15509   *     verify_cpu, returns the status of longmode and SSE in register %eax.
15510   *             0: Success    1: Failure
15511 diff -urNp linux-3.0.4/arch/x86/kernel/vm86_32.c linux-3.0.4/arch/x86/kernel/vm86_32.c
15512 --- linux-3.0.4/arch/x86/kernel/vm86_32.c       2011-07-21 22:17:23.000000000 -0400
15513 +++ linux-3.0.4/arch/x86/kernel/vm86_32.c       2011-08-23 21:48:14.000000000 -0400
15514 @@ -41,6 +41,7 @@
15515  #include <linux/ptrace.h>
15516  #include <linux/audit.h>
15517  #include <linux/stddef.h>
15518 +#include <linux/grsecurity.h>
15519  
15520  #include <asm/uaccess.h>
15521  #include <asm/io.h>
15522 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15523                 do_exit(SIGSEGV);
15524         }
15525  
15526 -       tss = &per_cpu(init_tss, get_cpu());
15527 +       tss = init_tss + get_cpu();
15528         current->thread.sp0 = current->thread.saved_sp0;
15529         current->thread.sysenter_cs = __KERNEL_CS;
15530         load_sp0(tss, &current->thread);
15531 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15532         struct task_struct *tsk;
15533         int tmp, ret = -EPERM;
15534  
15535 +#ifdef CONFIG_GRKERNSEC_VM86
15536 +       if (!capable(CAP_SYS_RAWIO)) {
15537 +               gr_handle_vm86();
15538 +               goto out;
15539 +       }
15540 +#endif
15541 +
15542         tsk = current;
15543         if (tsk->thread.saved_sp0)
15544                 goto out;
15545 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15546         int tmp, ret;
15547         struct vm86plus_struct __user *v86;
15548  
15549 +#ifdef CONFIG_GRKERNSEC_VM86
15550 +       if (!capable(CAP_SYS_RAWIO)) {
15551 +               gr_handle_vm86();
15552 +               ret = -EPERM;
15553 +               goto out;
15554 +       }
15555 +#endif
15556 +
15557         tsk = current;
15558         switch (cmd) {
15559         case VM86_REQUEST_IRQ:
15560 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15561         tsk->thread.saved_fs = info->regs32->fs;
15562         tsk->thread.saved_gs = get_user_gs(info->regs32);
15563  
15564 -       tss = &per_cpu(init_tss, get_cpu());
15565 +       tss = init_tss + get_cpu();
15566         tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15567         if (cpu_has_sep)
15568                 tsk->thread.sysenter_cs = 0;
15569 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15570                 goto cannot_handle;
15571         if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15572                 goto cannot_handle;
15573 -       intr_ptr = (unsigned long __user *) (i << 2);
15574 +       intr_ptr = (__force unsigned long __user *) (i << 2);
15575         if (get_user(segoffs, intr_ptr))
15576                 goto cannot_handle;
15577         if ((segoffs >> 16) == BIOSSEG)
15578 diff -urNp linux-3.0.4/arch/x86/kernel/vmlinux.lds.S linux-3.0.4/arch/x86/kernel/vmlinux.lds.S
15579 --- linux-3.0.4/arch/x86/kernel/vmlinux.lds.S   2011-07-21 22:17:23.000000000 -0400
15580 +++ linux-3.0.4/arch/x86/kernel/vmlinux.lds.S   2011-08-23 21:47:55.000000000 -0400
15581 @@ -26,6 +26,13 @@
15582  #include <asm/page_types.h>
15583  #include <asm/cache.h>
15584  #include <asm/boot.h>
15585 +#include <asm/segment.h>
15586 +
15587 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15588 +#define __KERNEL_TEXT_OFFSET   (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15589 +#else
15590 +#define __KERNEL_TEXT_OFFSET   0
15591 +#endif
15592  
15593  #undef i386     /* in case the preprocessor is a 32bit one */
15594  
15595 @@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15596  
15597  PHDRS {
15598         text PT_LOAD FLAGS(5);          /* R_E */
15599 +#ifdef CONFIG_X86_32
15600 +       module PT_LOAD FLAGS(5);        /* R_E */
15601 +#endif
15602 +#ifdef CONFIG_XEN
15603 +       rodata PT_LOAD FLAGS(5);        /* R_E */
15604 +#else
15605 +       rodata PT_LOAD FLAGS(4);        /* R__ */
15606 +#endif
15607         data PT_LOAD FLAGS(6);          /* RW_ */
15608  #ifdef CONFIG_X86_64
15609         user PT_LOAD FLAGS(5);          /* R_E */
15610 +#endif
15611 +       init.begin PT_LOAD FLAGS(6);    /* RW_ */
15612  #ifdef CONFIG_SMP
15613         percpu PT_LOAD FLAGS(6);        /* RW_ */
15614  #endif
15615 +       text.init PT_LOAD FLAGS(5);     /* R_E */
15616 +       text.exit PT_LOAD FLAGS(5);     /* R_E */
15617         init PT_LOAD FLAGS(7);          /* RWE */
15618 -#endif
15619         note PT_NOTE FLAGS(0);          /* ___ */
15620  }
15621  
15622  SECTIONS
15623  {
15624  #ifdef CONFIG_X86_32
15625 -        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15626 -        phys_startup_32 = startup_32 - LOAD_OFFSET;
15627 +       . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15628  #else
15629 -        . = __START_KERNEL;
15630 -        phys_startup_64 = startup_64 - LOAD_OFFSET;
15631 +       . = __START_KERNEL;
15632  #endif
15633  
15634         /* Text and read-only data */
15635 -       .text :  AT(ADDR(.text) - LOAD_OFFSET) {
15636 -               _text = .;
15637 +       .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15638                 /* bootstrapping code */
15639 +#ifdef CONFIG_X86_32
15640 +               phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15641 +#else
15642 +               phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15643 +#endif
15644 +               __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15645 +               _text = .;
15646                 HEAD_TEXT
15647  #ifdef CONFIG_X86_32
15648                 . = ALIGN(PAGE_SIZE);
15649 @@ -109,13 +131,47 @@ SECTIONS
15650                 IRQENTRY_TEXT
15651                 *(.fixup)
15652                 *(.gnu.warning)
15653 -               /* End of text section */
15654 -               _etext = .;
15655         } :text = 0x9090
15656  
15657 -       NOTES :text :note
15658 +       . += __KERNEL_TEXT_OFFSET;
15659 +
15660 +#ifdef CONFIG_X86_32
15661 +       . = ALIGN(PAGE_SIZE);
15662 +       .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15663 +
15664 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15665 +               MODULES_EXEC_VADDR = .;
15666 +               BYTE(0)
15667 +               . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15668 +               . = ALIGN(HPAGE_SIZE);
15669 +               MODULES_EXEC_END = . - 1;
15670 +#endif
15671 +
15672 +       } :module
15673 +#endif
15674 +
15675 +       .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15676 +               /* End of text section */
15677 +               _etext = . - __KERNEL_TEXT_OFFSET;
15678 +       }
15679 +
15680 +#ifdef CONFIG_X86_32
15681 +       . = ALIGN(PAGE_SIZE);
15682 +       .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15683 +               *(.idt)
15684 +               . = ALIGN(PAGE_SIZE);
15685 +               *(.empty_zero_page)
15686 +               *(.initial_pg_fixmap)
15687 +               *(.initial_pg_pmd)
15688 +               *(.initial_page_table)
15689 +               *(.swapper_pg_dir)
15690 +       } :rodata
15691 +#endif
15692 +
15693 +       . = ALIGN(PAGE_SIZE);
15694 +       NOTES :rodata :note
15695  
15696 -       EXCEPTION_TABLE(16) :text = 0x9090
15697 +       EXCEPTION_TABLE(16) :rodata
15698  
15699  #if defined(CONFIG_DEBUG_RODATA)
15700         /* .text should occupy whole number of pages */
15701 @@ -127,16 +183,20 @@ SECTIONS
15702  
15703         /* Data */
15704         .data : AT(ADDR(.data) - LOAD_OFFSET) {
15705 +
15706 +#ifdef CONFIG_PAX_KERNEXEC
15707 +               . = ALIGN(HPAGE_SIZE);
15708 +#else
15709 +               . = ALIGN(PAGE_SIZE);
15710 +#endif
15711 +
15712                 /* Start of data section */
15713                 _sdata = .;
15714  
15715                 /* init_task */
15716                 INIT_TASK_DATA(THREAD_SIZE)
15717  
15718 -#ifdef CONFIG_X86_32
15719 -               /* 32 bit has nosave before _edata */
15720                 NOSAVE_DATA
15721 -#endif
15722  
15723                 PAGE_ALIGNED_DATA(PAGE_SIZE)
15724  
15725 @@ -208,12 +268,19 @@ SECTIONS
15726  #endif /* CONFIG_X86_64 */
15727  
15728         /* Init code and data - will be freed after init */
15729 -       . = ALIGN(PAGE_SIZE);
15730         .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15731 +               BYTE(0)
15732 +
15733 +#ifdef CONFIG_PAX_KERNEXEC
15734 +               . = ALIGN(HPAGE_SIZE);
15735 +#else
15736 +               . = ALIGN(PAGE_SIZE);
15737 +#endif
15738 +
15739                 __init_begin = .; /* paired with __init_end */
15740 -       }
15741 +       } :init.begin
15742  
15743 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15744 +#ifdef CONFIG_SMP
15745         /*
15746          * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
15747          * output PHDR, so the next output section - .init.text - should
15748 @@ -222,12 +289,27 @@ SECTIONS
15749         PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15750  #endif
15751  
15752 -       INIT_TEXT_SECTION(PAGE_SIZE)
15753 -#ifdef CONFIG_X86_64
15754 -       :init
15755 -#endif
15756 +       . = ALIGN(PAGE_SIZE);
15757 +       init_begin = .;
15758 +       .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15759 +               VMLINUX_SYMBOL(_sinittext) = .;
15760 +               INIT_TEXT
15761 +               VMLINUX_SYMBOL(_einittext) = .;
15762 +               . = ALIGN(PAGE_SIZE);
15763 +       } :text.init
15764  
15765 -       INIT_DATA_SECTION(16)
15766 +       /*
15767 +        * .exit.text is discarded at runtime, not link time, to deal with
15768 +        *  references from .altinstructions and .eh_frame
15769 +        */
15770 +       .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15771 +               EXIT_TEXT
15772 +               . = ALIGN(16);
15773 +       } :text.exit
15774 +       . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15775 +
15776 +       . = ALIGN(PAGE_SIZE);
15777 +       INIT_DATA_SECTION(16) :init
15778  
15779         /*
15780          * Code and data for a variety of lowlevel trampolines, to be
15781 @@ -301,19 +383,12 @@ SECTIONS
15782         }
15783  
15784         . = ALIGN(8);
15785 -       /*
15786 -        * .exit.text is discard at runtime, not link time, to deal with
15787 -        *  references from .altinstructions and .eh_frame
15788 -        */
15789 -       .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15790 -               EXIT_TEXT
15791 -       }
15792  
15793         .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15794                 EXIT_DATA
15795         }
15796  
15797 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15798 +#ifndef CONFIG_SMP
15799         PERCPU_SECTION(INTERNODE_CACHE_BYTES)
15800  #endif
15801  
15802 @@ -332,16 +407,10 @@ SECTIONS
15803         .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15804                 __smp_locks = .;
15805                 *(.smp_locks)
15806 -               . = ALIGN(PAGE_SIZE);
15807                 __smp_locks_end = .;
15808 +               . = ALIGN(PAGE_SIZE);
15809         }
15810  
15811 -#ifdef CONFIG_X86_64
15812 -       .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15813 -               NOSAVE_DATA
15814 -       }
15815 -#endif
15816 -
15817         /* BSS */
15818         . = ALIGN(PAGE_SIZE);
15819         .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15820 @@ -357,6 +426,7 @@ SECTIONS
15821                 __brk_base = .;
15822                 . += 64 * 1024;         /* 64k alignment slop space */
15823                 *(.brk_reservation)     /* areas brk users have reserved */
15824 +               . = ALIGN(HPAGE_SIZE);
15825                 __brk_limit = .;
15826         }
15827  
15828 @@ -383,13 +453,12 @@ SECTIONS
15829   * for the boot processor.
15830   */
15831  #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15832 -INIT_PER_CPU(gdt_page);
15833  INIT_PER_CPU(irq_stack_union);
15834  
15835  /*
15836   * Build-time check on the image size:
15837   */
15838 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15839 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15840            "kernel image bigger than KERNEL_IMAGE_SIZE");
15841  
15842  #ifdef CONFIG_SMP
15843 diff -urNp linux-3.0.4/arch/x86/kernel/vsyscall_64.c linux-3.0.4/arch/x86/kernel/vsyscall_64.c
15844 --- linux-3.0.4/arch/x86/kernel/vsyscall_64.c   2011-07-21 22:17:23.000000000 -0400
15845 +++ linux-3.0.4/arch/x86/kernel/vsyscall_64.c   2011-08-23 21:47:55.000000000 -0400
15846 @@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
15847  DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15848  {
15849         .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
15850 -       .sysctl_enabled = 1,
15851 +       .sysctl_enabled = 0,
15852  };
15853  
15854  void update_vsyscall_tz(void)
15855 @@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
15856  static ctl_table kernel_table2[] = {
15857         { .procname = "vsyscall64",
15858           .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
15859 -         .mode = 0644,
15860 +         .mode = 0444,
15861           .proc_handler = proc_dointvec },
15862         {}
15863  };
15864 diff -urNp linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c
15865 --- linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c        2011-07-21 22:17:23.000000000 -0400
15866 +++ linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c        2011-08-23 21:47:55.000000000 -0400
15867 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15868  EXPORT_SYMBOL(copy_user_generic_string);
15869  EXPORT_SYMBOL(copy_user_generic_unrolled);
15870  EXPORT_SYMBOL(__copy_user_nocache);
15871 -EXPORT_SYMBOL(_copy_from_user);
15872 -EXPORT_SYMBOL(_copy_to_user);
15873  
15874  EXPORT_SYMBOL(copy_page);
15875  EXPORT_SYMBOL(clear_page);
15876 diff -urNp linux-3.0.4/arch/x86/kernel/xsave.c linux-3.0.4/arch/x86/kernel/xsave.c
15877 --- linux-3.0.4/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
15878 +++ linux-3.0.4/arch/x86/kernel/xsave.c 2011-08-23 21:47:55.000000000 -0400
15879 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15880             fx_sw_user->xstate_size > fx_sw_user->extended_size)
15881                 return -EINVAL;
15882  
15883 -       err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15884 +       err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15885                                             fx_sw_user->extended_size -
15886                                             FP_XSTATE_MAGIC2_SIZE));
15887         if (err)
15888 @@ -267,7 +267,7 @@ fx_only:
15889          * the other extended state.
15890          */
15891         xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15892 -       return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15893 +       return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15894  }
15895  
15896  /*
15897 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15898         if (use_xsave())
15899                 err = restore_user_xstate(buf);
15900         else
15901 -               err = fxrstor_checking((__force struct i387_fxsave_struct *)
15902 +               err = fxrstor_checking((struct i387_fxsave_struct __user *)
15903                                        buf);
15904         if (unlikely(err)) {
15905                 /*
15906 diff -urNp linux-3.0.4/arch/x86/kvm/emulate.c linux-3.0.4/arch/x86/kvm/emulate.c
15907 --- linux-3.0.4/arch/x86/kvm/emulate.c  2011-07-21 22:17:23.000000000 -0400
15908 +++ linux-3.0.4/arch/x86/kvm/emulate.c  2011-08-23 21:47:55.000000000 -0400
15909 @@ -96,7 +96,7 @@
15910  #define Src2ImmByte (2<<29)
15911  #define Src2One     (3<<29)
15912  #define Src2Imm     (4<<29)
15913 -#define Src2Mask    (7<<29)
15914 +#define Src2Mask    (7U<<29)
15915  
15916  #define X2(x...) x, x
15917  #define X3(x...) X2(x), x
15918 @@ -207,6 +207,7 @@ struct gprefix {
15919  
15920  #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15921         do {                                                            \
15922 +               unsigned long _tmp;                                     \
15923                 __asm__ __volatile__ (                                  \
15924                         _PRE_EFLAGS("0", "4", "2")                      \
15925                         _op _suffix " %"_x"3,%1; "                      \
15926 @@ -220,8 +221,6 @@ struct gprefix {
15927  /* Raw emulation: instruction has two explicit operands. */
15928  #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15929         do {                                                            \
15930 -               unsigned long _tmp;                                     \
15931 -                                                                       \
15932                 switch ((_dst).bytes) {                                 \
15933                 case 2:                                                 \
15934                         ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15935 @@ -237,7 +236,6 @@ struct gprefix {
15936  
15937  #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
15938         do {                                                                 \
15939 -               unsigned long _tmp;                                          \
15940                 switch ((_dst).bytes) {                                      \
15941                 case 1:                                                      \
15942                         ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
15943 diff -urNp linux-3.0.4/arch/x86/kvm/lapic.c linux-3.0.4/arch/x86/kvm/lapic.c
15944 --- linux-3.0.4/arch/x86/kvm/lapic.c    2011-07-21 22:17:23.000000000 -0400
15945 +++ linux-3.0.4/arch/x86/kvm/lapic.c    2011-08-23 21:47:55.000000000 -0400
15946 @@ -53,7 +53,7 @@
15947  #define APIC_BUS_CYCLE_NS 1
15948  
15949  /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
15950 -#define apic_debug(fmt, arg...)
15951 +#define apic_debug(fmt, arg...) do {} while (0)
15952  
15953  #define APIC_LVT_NUM                   6
15954  /* 14 is the version for Xeon and Pentium 8.4.8*/
15955 diff -urNp linux-3.0.4/arch/x86/kvm/mmu.c linux-3.0.4/arch/x86/kvm/mmu.c
15956 --- linux-3.0.4/arch/x86/kvm/mmu.c      2011-07-21 22:17:23.000000000 -0400
15957 +++ linux-3.0.4/arch/x86/kvm/mmu.c      2011-08-23 21:47:55.000000000 -0400
15958 @@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15959  
15960         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
15961  
15962 -       invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
15963 +       invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
15964  
15965         /*
15966          * Assume that the pte write on a page table of the same type
15967 @@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15968         }
15969  
15970         spin_lock(&vcpu->kvm->mmu_lock);
15971 -       if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15972 +       if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15973                 gentry = 0;
15974         kvm_mmu_free_some_pages(vcpu);
15975         ++vcpu->kvm->stat.mmu_pte_write;
15976 diff -urNp linux-3.0.4/arch/x86/kvm/paging_tmpl.h linux-3.0.4/arch/x86/kvm/paging_tmpl.h
15977 --- linux-3.0.4/arch/x86/kvm/paging_tmpl.h      2011-07-21 22:17:23.000000000 -0400
15978 +++ linux-3.0.4/arch/x86/kvm/paging_tmpl.h      2011-08-23 21:48:14.000000000 -0400
15979 @@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
15980         unsigned long mmu_seq;
15981         bool map_writable;
15982  
15983 +       pax_track_stack();
15984 +
15985         pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
15986  
15987         r = mmu_topup_memory_caches(vcpu);
15988 @@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
15989         if (need_flush)
15990                 kvm_flush_remote_tlbs(vcpu->kvm);
15991  
15992 -       atomic_inc(&vcpu->kvm->arch.invlpg_counter);
15993 +       atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
15994  
15995         spin_unlock(&vcpu->kvm->mmu_lock);
15996  
15997 diff -urNp linux-3.0.4/arch/x86/kvm/svm.c linux-3.0.4/arch/x86/kvm/svm.c
15998 --- linux-3.0.4/arch/x86/kvm/svm.c      2011-07-21 22:17:23.000000000 -0400
15999 +++ linux-3.0.4/arch/x86/kvm/svm.c      2011-08-23 21:47:55.000000000 -0400
16000 @@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16001         int cpu = raw_smp_processor_id();
16002  
16003         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16004 +
16005 +       pax_open_kernel();
16006         sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16007 +       pax_close_kernel();
16008 +
16009         load_TR_desc();
16010  }
16011  
16012 @@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16013  #endif
16014  #endif
16015  
16016 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16017 +       __set_fs(current_thread_info()->addr_limit);
16018 +#endif
16019 +
16020         reload_tss(vcpu);
16021  
16022         local_irq_disable();
16023 diff -urNp linux-3.0.4/arch/x86/kvm/vmx.c linux-3.0.4/arch/x86/kvm/vmx.c
16024 --- linux-3.0.4/arch/x86/kvm/vmx.c      2011-07-21 22:17:23.000000000 -0400
16025 +++ linux-3.0.4/arch/x86/kvm/vmx.c      2011-08-23 21:47:55.000000000 -0400
16026 @@ -797,7 +797,11 @@ static void reload_tss(void)
16027         struct desc_struct *descs;
16028  
16029         descs = (void *)gdt->address;
16030 +
16031 +       pax_open_kernel();
16032         descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16033 +       pax_close_kernel();
16034 +
16035         load_TR_desc();
16036  }
16037  
16038 @@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16039         if (!cpu_has_vmx_flexpriority())
16040                 flexpriority_enabled = 0;
16041  
16042 -       if (!cpu_has_vmx_tpr_shadow())
16043 -               kvm_x86_ops->update_cr8_intercept = NULL;
16044 +       if (!cpu_has_vmx_tpr_shadow()) {
16045 +               pax_open_kernel();
16046 +               *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16047 +               pax_close_kernel();
16048 +       }
16049  
16050         if (enable_ept && !cpu_has_vmx_ept_2m_page())
16051                 kvm_disable_largepages();
16052 @@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16053         vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
16054  
16055         asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16056 -       vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16057 +       vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16058         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16059         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16060         vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16061 @@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16062                 "jmp .Lkvm_vmx_return \n\t"
16063                 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16064                 ".Lkvm_vmx_return: "
16065 +
16066 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16067 +               "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16068 +               ".Lkvm_vmx_return2: "
16069 +#endif
16070 +
16071                 /* Save guest registers, load host registers, keep flags */
16072                 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16073                 "pop %0 \n\t"
16074 @@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16075  #endif
16076                 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16077                 [wordsize]"i"(sizeof(ulong))
16078 +
16079 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16080 +               ,[cs]"i"(__KERNEL_CS)
16081 +#endif
16082 +
16083               : "cc", "memory"
16084                 , R"ax", R"bx", R"di", R"si"
16085  #ifdef CONFIG_X86_64
16086 @@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16087  
16088         vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16089  
16090 -       asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16091 +       asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16092 +
16093 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16094 +       loadsegment(fs, __KERNEL_PERCPU);
16095 +#endif
16096 +
16097 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16098 +       __set_fs(current_thread_info()->addr_limit);
16099 +#endif
16100 +
16101         vmx->launched = 1;
16102  
16103         vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16104 diff -urNp linux-3.0.4/arch/x86/kvm/x86.c linux-3.0.4/arch/x86/kvm/x86.c
16105 --- linux-3.0.4/arch/x86/kvm/x86.c      2011-07-21 22:17:23.000000000 -0400
16106 +++ linux-3.0.4/arch/x86/kvm/x86.c      2011-08-23 21:47:55.000000000 -0400
16107 @@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16108                 if (n < msr_list.nmsrs)
16109                         goto out;
16110                 r = -EFAULT;
16111 +               if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16112 +                       goto out;
16113                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16114                                  num_msrs_to_save * sizeof(u32)))
16115                         goto out;
16116 @@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16117                                      struct kvm_cpuid2 *cpuid,
16118                                      struct kvm_cpuid_entry2 __user *entries)
16119  {
16120 -       int r;
16121 +       int r, i;
16122  
16123         r = -E2BIG;
16124         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16125                 goto out;
16126         r = -EFAULT;
16127 -       if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16128 -                          cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16129 +       if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16130                 goto out;
16131 +       for (i = 0; i < cpuid->nent; ++i) {
16132 +               struct kvm_cpuid_entry2 cpuid_entry;
16133 +               if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16134 +                       goto out;
16135 +               vcpu->arch.cpuid_entries[i] = cpuid_entry;
16136 +       }
16137         vcpu->arch.cpuid_nent = cpuid->nent;
16138         kvm_apic_set_version(vcpu);
16139         kvm_x86_ops->cpuid_update(vcpu);
16140 @@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16141                                      struct kvm_cpuid2 *cpuid,
16142                                      struct kvm_cpuid_entry2 __user *entries)
16143  {
16144 -       int r;
16145 +       int r, i;
16146  
16147         r = -E2BIG;
16148         if (cpuid->nent < vcpu->arch.cpuid_nent)
16149                 goto out;
16150         r = -EFAULT;
16151 -       if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16152 -                        vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16153 +       if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16154                 goto out;
16155 +       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16156 +               struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16157 +               if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16158 +                       goto out;
16159 +       }
16160         return 0;
16161  
16162  out:
16163 @@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16164  static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16165                                     struct kvm_interrupt *irq)
16166  {
16167 -       if (irq->irq < 0 || irq->irq >= 256)
16168 +       if (irq->irq >= 256)
16169                 return -EINVAL;
16170         if (irqchip_in_kernel(vcpu->kvm))
16171                 return -ENXIO;
16172 @@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16173  }
16174  EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16175  
16176 -int kvm_arch_init(void *opaque)
16177 +int kvm_arch_init(const void *opaque)
16178  {
16179         int r;
16180         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16181 diff -urNp linux-3.0.4/arch/x86/lguest/boot.c linux-3.0.4/arch/x86/lguest/boot.c
16182 --- linux-3.0.4/arch/x86/lguest/boot.c  2011-07-21 22:17:23.000000000 -0400
16183 +++ linux-3.0.4/arch/x86/lguest/boot.c  2011-08-23 21:47:55.000000000 -0400
16184 @@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16185   * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16186   * Launcher to reboot us.
16187   */
16188 -static void lguest_restart(char *reason)
16189 +static __noreturn void lguest_restart(char *reason)
16190  {
16191         hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16192 +       BUG();
16193  }
16194  
16195  /*G:050
16196 diff -urNp linux-3.0.4/arch/x86/lib/atomic64_32.c linux-3.0.4/arch/x86/lib/atomic64_32.c
16197 --- linux-3.0.4/arch/x86/lib/atomic64_32.c      2011-07-21 22:17:23.000000000 -0400
16198 +++ linux-3.0.4/arch/x86/lib/atomic64_32.c      2011-08-23 21:47:55.000000000 -0400
16199 @@ -8,18 +8,30 @@
16200  
16201  long long atomic64_read_cx8(long long, const atomic64_t *v);
16202  EXPORT_SYMBOL(atomic64_read_cx8);
16203 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16204 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16205  long long atomic64_set_cx8(long long, const atomic64_t *v);
16206  EXPORT_SYMBOL(atomic64_set_cx8);
16207 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16208 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16209  long long atomic64_xchg_cx8(long long, unsigned high);
16210  EXPORT_SYMBOL(atomic64_xchg_cx8);
16211  long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16212  EXPORT_SYMBOL(atomic64_add_return_cx8);
16213 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16214 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16215  long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16216  EXPORT_SYMBOL(atomic64_sub_return_cx8);
16217 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16218 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16219  long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16220  EXPORT_SYMBOL(atomic64_inc_return_cx8);
16221 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16222 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16223  long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16224  EXPORT_SYMBOL(atomic64_dec_return_cx8);
16225 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16226 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16227  long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16228  EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16229  int atomic64_inc_not_zero_cx8(atomic64_t *v);
16230 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16231  #ifndef CONFIG_X86_CMPXCHG64
16232  long long atomic64_read_386(long long, const atomic64_t *v);
16233  EXPORT_SYMBOL(atomic64_read_386);
16234 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16235 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16236  long long atomic64_set_386(long long, const atomic64_t *v);
16237  EXPORT_SYMBOL(atomic64_set_386);
16238 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16239 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16240  long long atomic64_xchg_386(long long, unsigned high);
16241  EXPORT_SYMBOL(atomic64_xchg_386);
16242  long long atomic64_add_return_386(long long a, atomic64_t *v);
16243  EXPORT_SYMBOL(atomic64_add_return_386);
16244 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16245 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16246  long long atomic64_sub_return_386(long long a, atomic64_t *v);
16247  EXPORT_SYMBOL(atomic64_sub_return_386);
16248 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16249 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16250  long long atomic64_inc_return_386(long long a, atomic64_t *v);
16251  EXPORT_SYMBOL(atomic64_inc_return_386);
16252 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16253 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16254  long long atomic64_dec_return_386(long long a, atomic64_t *v);
16255  EXPORT_SYMBOL(atomic64_dec_return_386);
16256 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16257 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16258  long long atomic64_add_386(long long a, atomic64_t *v);
16259  EXPORT_SYMBOL(atomic64_add_386);
16260 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16261 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16262  long long atomic64_sub_386(long long a, atomic64_t *v);
16263  EXPORT_SYMBOL(atomic64_sub_386);
16264 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16265 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16266  long long atomic64_inc_386(long long a, atomic64_t *v);
16267  EXPORT_SYMBOL(atomic64_inc_386);
16268 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16269 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16270  long long atomic64_dec_386(long long a, atomic64_t *v);
16271  EXPORT_SYMBOL(atomic64_dec_386);
16272 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16273 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16274  long long atomic64_dec_if_positive_386(atomic64_t *v);
16275  EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16276  int atomic64_inc_not_zero_386(atomic64_t *v);
16277 diff -urNp linux-3.0.4/arch/x86/lib/atomic64_386_32.S linux-3.0.4/arch/x86/lib/atomic64_386_32.S
16278 --- linux-3.0.4/arch/x86/lib/atomic64_386_32.S  2011-07-21 22:17:23.000000000 -0400
16279 +++ linux-3.0.4/arch/x86/lib/atomic64_386_32.S  2011-08-23 21:47:55.000000000 -0400
16280 @@ -48,6 +48,10 @@ BEGIN(read)
16281         movl  (v), %eax
16282         movl 4(v), %edx
16283  RET_ENDP
16284 +BEGIN(read_unchecked)
16285 +       movl  (v), %eax
16286 +       movl 4(v), %edx
16287 +RET_ENDP
16288  #undef v
16289  
16290  #define v %esi
16291 @@ -55,6 +59,10 @@ BEGIN(set)
16292         movl %ebx,  (v)
16293         movl %ecx, 4(v)
16294  RET_ENDP
16295 +BEGIN(set_unchecked)
16296 +       movl %ebx,  (v)
16297 +       movl %ecx, 4(v)
16298 +RET_ENDP
16299  #undef v
16300  
16301  #define v  %esi
16302 @@ -70,6 +78,20 @@ RET_ENDP
16303  BEGIN(add)
16304         addl %eax,  (v)
16305         adcl %edx, 4(v)
16306 +
16307 +#ifdef CONFIG_PAX_REFCOUNT
16308 +       jno 0f
16309 +       subl %eax,  (v)
16310 +       sbbl %edx, 4(v)
16311 +       int $4
16312 +0:
16313 +       _ASM_EXTABLE(0b, 0b)
16314 +#endif
16315 +
16316 +RET_ENDP
16317 +BEGIN(add_unchecked)
16318 +       addl %eax,  (v)
16319 +       adcl %edx, 4(v)
16320  RET_ENDP
16321  #undef v
16322  
16323 @@ -77,6 +99,24 @@ RET_ENDP
16324  BEGIN(add_return)
16325         addl  (v), %eax
16326         adcl 4(v), %edx
16327 +
16328 +#ifdef CONFIG_PAX_REFCOUNT
16329 +       into
16330 +1234:
16331 +       _ASM_EXTABLE(1234b, 2f)
16332 +#endif
16333 +
16334 +       movl %eax,  (v)
16335 +       movl %edx, 4(v)
16336 +
16337 +#ifdef CONFIG_PAX_REFCOUNT
16338 +2:
16339 +#endif
16340 +
16341 +RET_ENDP
16342 +BEGIN(add_return_unchecked)
16343 +       addl  (v), %eax
16344 +       adcl 4(v), %edx
16345         movl %eax,  (v)
16346         movl %edx, 4(v)
16347  RET_ENDP
16348 @@ -86,6 +126,20 @@ RET_ENDP
16349  BEGIN(sub)
16350         subl %eax,  (v)
16351         sbbl %edx, 4(v)
16352 +
16353 +#ifdef CONFIG_PAX_REFCOUNT
16354 +       jno 0f
16355 +       addl %eax,  (v)
16356 +       adcl %edx, 4(v)
16357 +       int $4
16358 +0:
16359 +       _ASM_EXTABLE(0b, 0b)
16360 +#endif
16361 +
16362 +RET_ENDP
16363 +BEGIN(sub_unchecked)
16364 +       subl %eax,  (v)
16365 +       sbbl %edx, 4(v)
16366  RET_ENDP
16367  #undef v
16368  
16369 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16370         sbbl $0, %edx
16371         addl  (v), %eax
16372         adcl 4(v), %edx
16373 +
16374 +#ifdef CONFIG_PAX_REFCOUNT
16375 +       into
16376 +1234:
16377 +       _ASM_EXTABLE(1234b, 2f)
16378 +#endif
16379 +
16380 +       movl %eax,  (v)
16381 +       movl %edx, 4(v)
16382 +
16383 +#ifdef CONFIG_PAX_REFCOUNT
16384 +2:
16385 +#endif
16386 +
16387 +RET_ENDP
16388 +BEGIN(sub_return_unchecked)
16389 +       negl %edx
16390 +       negl %eax
16391 +       sbbl $0, %edx
16392 +       addl  (v), %eax
16393 +       adcl 4(v), %edx
16394         movl %eax,  (v)
16395         movl %edx, 4(v)
16396  RET_ENDP
16397 @@ -105,6 +180,20 @@ RET_ENDP
16398  BEGIN(inc)
16399         addl $1,  (v)
16400         adcl $0, 4(v)
16401 +
16402 +#ifdef CONFIG_PAX_REFCOUNT
16403 +       jno 0f
16404 +       subl $1,  (v)
16405 +       sbbl $0, 4(v)
16406 +       int $4
16407 +0:
16408 +       _ASM_EXTABLE(0b, 0b)
16409 +#endif
16410 +
16411 +RET_ENDP
16412 +BEGIN(inc_unchecked)
16413 +       addl $1,  (v)
16414 +       adcl $0, 4(v)
16415  RET_ENDP
16416  #undef v
16417  
16418 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16419         movl 4(v), %edx
16420         addl $1, %eax
16421         adcl $0, %edx
16422 +
16423 +#ifdef CONFIG_PAX_REFCOUNT
16424 +       into
16425 +1234:
16426 +       _ASM_EXTABLE(1234b, 2f)
16427 +#endif
16428 +
16429 +       movl %eax,  (v)
16430 +       movl %edx, 4(v)
16431 +
16432 +#ifdef CONFIG_PAX_REFCOUNT
16433 +2:
16434 +#endif
16435 +
16436 +RET_ENDP
16437 +BEGIN(inc_return_unchecked)
16438 +       movl  (v), %eax
16439 +       movl 4(v), %edx
16440 +       addl $1, %eax
16441 +       adcl $0, %edx
16442         movl %eax,  (v)
16443         movl %edx, 4(v)
16444  RET_ENDP
16445 @@ -123,6 +232,20 @@ RET_ENDP
16446  BEGIN(dec)
16447         subl $1,  (v)
16448         sbbl $0, 4(v)
16449 +
16450 +#ifdef CONFIG_PAX_REFCOUNT
16451 +       jno 0f
16452 +       addl $1,  (v)
16453 +       adcl $0, 4(v)
16454 +       int $4
16455 +0:
16456 +       _ASM_EXTABLE(0b, 0b)
16457 +#endif
16458 +
16459 +RET_ENDP
16460 +BEGIN(dec_unchecked)
16461 +       subl $1,  (v)
16462 +       sbbl $0, 4(v)
16463  RET_ENDP
16464  #undef v
16465  
16466 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16467         movl 4(v), %edx
16468         subl $1, %eax
16469         sbbl $0, %edx
16470 +
16471 +#ifdef CONFIG_PAX_REFCOUNT
16472 +       into
16473 +1234:
16474 +       _ASM_EXTABLE(1234b, 2f)
16475 +#endif
16476 +
16477 +       movl %eax,  (v)
16478 +       movl %edx, 4(v)
16479 +
16480 +#ifdef CONFIG_PAX_REFCOUNT
16481 +2:
16482 +#endif
16483 +
16484 +RET_ENDP
16485 +BEGIN(dec_return_unchecked)
16486 +       movl  (v), %eax
16487 +       movl 4(v), %edx
16488 +       subl $1, %eax
16489 +       sbbl $0, %edx
16490         movl %eax,  (v)
16491         movl %edx, 4(v)
16492  RET_ENDP
16493 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16494         adcl %edx, %edi
16495         addl  (v), %eax
16496         adcl 4(v), %edx
16497 +
16498 +#ifdef CONFIG_PAX_REFCOUNT
16499 +       into
16500 +1234:
16501 +       _ASM_EXTABLE(1234b, 2f)
16502 +#endif
16503 +
16504         cmpl %eax, %esi
16505         je 3f
16506  1:
16507 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16508  1:
16509         addl $1, %eax
16510         adcl $0, %edx
16511 +
16512 +#ifdef CONFIG_PAX_REFCOUNT
16513 +       into
16514 +1234:
16515 +       _ASM_EXTABLE(1234b, 2f)
16516 +#endif
16517 +
16518         movl %eax,  (v)
16519         movl %edx, 4(v)
16520         movl $1, %eax
16521 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16522         movl 4(v), %edx
16523         subl $1, %eax
16524         sbbl $0, %edx
16525 +
16526 +#ifdef CONFIG_PAX_REFCOUNT
16527 +       into
16528 +1234:
16529 +       _ASM_EXTABLE(1234b, 1f)
16530 +#endif
16531 +
16532         js 1f
16533         movl %eax,  (v)
16534         movl %edx, 4(v)
16535 diff -urNp linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S
16536 --- linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S  2011-07-21 22:17:23.000000000 -0400
16537 +++ linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S  2011-08-23 21:47:55.000000000 -0400
16538 @@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16539         CFI_ENDPROC
16540  ENDPROC(atomic64_read_cx8)
16541  
16542 +ENTRY(atomic64_read_unchecked_cx8)
16543 +       CFI_STARTPROC
16544 +
16545 +       read64 %ecx
16546 +       ret
16547 +       CFI_ENDPROC
16548 +ENDPROC(atomic64_read_unchecked_cx8)
16549 +
16550  ENTRY(atomic64_set_cx8)
16551         CFI_STARTPROC
16552  
16553 @@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16554         CFI_ENDPROC
16555  ENDPROC(atomic64_set_cx8)
16556  
16557 +ENTRY(atomic64_set_unchecked_cx8)
16558 +       CFI_STARTPROC
16559 +
16560 +1:
16561 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16562 + * are atomic on 586 and newer */
16563 +       cmpxchg8b (%esi)
16564 +       jne 1b
16565 +
16566 +       ret
16567 +       CFI_ENDPROC
16568 +ENDPROC(atomic64_set_unchecked_cx8)
16569 +
16570  ENTRY(atomic64_xchg_cx8)
16571         CFI_STARTPROC
16572  
16573 @@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16574         CFI_ENDPROC
16575  ENDPROC(atomic64_xchg_cx8)
16576  
16577 -.macro addsub_return func ins insc
16578 -ENTRY(atomic64_\func\()_return_cx8)
16579 +.macro addsub_return func ins insc unchecked=""
16580 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16581         CFI_STARTPROC
16582         SAVE ebp
16583         SAVE ebx
16584 @@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16585         movl %edx, %ecx
16586         \ins\()l %esi, %ebx
16587         \insc\()l %edi, %ecx
16588 +
16589 +.ifb \unchecked
16590 +#ifdef CONFIG_PAX_REFCOUNT
16591 +       into
16592 +2:
16593 +       _ASM_EXTABLE(2b, 3f)
16594 +#endif
16595 +.endif
16596 +
16597         LOCK_PREFIX
16598         cmpxchg8b (%ebp)
16599         jne 1b
16600 -
16601 -10:
16602         movl %ebx, %eax
16603         movl %ecx, %edx
16604 +
16605 +.ifb \unchecked
16606 +#ifdef CONFIG_PAX_REFCOUNT
16607 +3:
16608 +#endif
16609 +.endif
16610 +
16611         RESTORE edi
16612         RESTORE esi
16613         RESTORE ebx
16614         RESTORE ebp
16615         ret
16616         CFI_ENDPROC
16617 -ENDPROC(atomic64_\func\()_return_cx8)
16618 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16619  .endm
16620  
16621  addsub_return add add adc
16622  addsub_return sub sub sbb
16623 +addsub_return add add adc _unchecked
16624 +addsub_return sub sub sbb _unchecked
16625  
16626 -.macro incdec_return func ins insc
16627 -ENTRY(atomic64_\func\()_return_cx8)
16628 +.macro incdec_return func ins insc unchecked
16629 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16630         CFI_STARTPROC
16631         SAVE ebx
16632  
16633 @@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16634         movl %edx, %ecx
16635         \ins\()l $1, %ebx
16636         \insc\()l $0, %ecx
16637 +
16638 +.ifb \unchecked
16639 +#ifdef CONFIG_PAX_REFCOUNT
16640 +       into
16641 +2:
16642 +       _ASM_EXTABLE(2b, 3f)
16643 +#endif
16644 +.endif
16645 +
16646         LOCK_PREFIX
16647         cmpxchg8b (%esi)
16648         jne 1b
16649  
16650 -10:
16651         movl %ebx, %eax
16652         movl %ecx, %edx
16653 +
16654 +.ifb \unchecked
16655 +#ifdef CONFIG_PAX_REFCOUNT
16656 +3:
16657 +#endif
16658 +.endif
16659 +
16660         RESTORE ebx
16661         ret
16662         CFI_ENDPROC
16663 -ENDPROC(atomic64_\func\()_return_cx8)
16664 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16665  .endm
16666  
16667  incdec_return inc add adc
16668  incdec_return dec sub sbb
16669 +incdec_return inc add adc _unchecked
16670 +incdec_return dec sub sbb _unchecked
16671  
16672  ENTRY(atomic64_dec_if_positive_cx8)
16673         CFI_STARTPROC
16674 @@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16675         movl %edx, %ecx
16676         subl $1, %ebx
16677         sbb $0, %ecx
16678 +
16679 +#ifdef CONFIG_PAX_REFCOUNT
16680 +       into
16681 +1234:
16682 +       _ASM_EXTABLE(1234b, 2f)
16683 +#endif
16684 +
16685         js 2f
16686         LOCK_PREFIX
16687         cmpxchg8b (%esi)
16688 @@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16689         movl %edx, %ecx
16690         addl %esi, %ebx
16691         adcl %edi, %ecx
16692 +
16693 +#ifdef CONFIG_PAX_REFCOUNT
16694 +       into
16695 +1234:
16696 +       _ASM_EXTABLE(1234b, 3f)
16697 +#endif
16698 +
16699         LOCK_PREFIX
16700         cmpxchg8b (%ebp)
16701         jne 1b
16702 @@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16703         movl %edx, %ecx
16704         addl $1, %ebx
16705         adcl $0, %ecx
16706 +
16707 +#ifdef CONFIG_PAX_REFCOUNT
16708 +       into
16709 +1234:
16710 +       _ASM_EXTABLE(1234b, 3f)
16711 +#endif
16712 +
16713         LOCK_PREFIX
16714         cmpxchg8b (%esi)
16715         jne 1b
16716 diff -urNp linux-3.0.4/arch/x86/lib/checksum_32.S linux-3.0.4/arch/x86/lib/checksum_32.S
16717 --- linux-3.0.4/arch/x86/lib/checksum_32.S      2011-07-21 22:17:23.000000000 -0400
16718 +++ linux-3.0.4/arch/x86/lib/checksum_32.S      2011-08-23 21:47:55.000000000 -0400
16719 @@ -28,7 +28,8 @@
16720  #include <linux/linkage.h>
16721  #include <asm/dwarf2.h>
16722  #include <asm/errno.h>
16723 -                               
16724 +#include <asm/segment.h>
16725 +
16726  /*
16727   * computes a partial checksum, e.g. for TCP/UDP fragments
16728   */
16729 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16730  
16731  #define ARGBASE 16             
16732  #define FP             12
16733 -               
16734 -ENTRY(csum_partial_copy_generic)
16735 +
16736 +ENTRY(csum_partial_copy_generic_to_user)
16737         CFI_STARTPROC
16738 +
16739 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16740 +       pushl_cfi %gs
16741 +       popl_cfi %es
16742 +       jmp csum_partial_copy_generic
16743 +#endif
16744 +
16745 +ENTRY(csum_partial_copy_generic_from_user)
16746 +
16747 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16748 +       pushl_cfi %gs
16749 +       popl_cfi %ds
16750 +#endif
16751 +
16752 +ENTRY(csum_partial_copy_generic)
16753         subl  $4,%esp   
16754         CFI_ADJUST_CFA_OFFSET 4
16755         pushl_cfi %edi
16756 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16757         jmp 4f
16758  SRC(1: movw (%esi), %bx        )
16759         addl $2, %esi
16760 -DST(   movw %bx, (%edi)        )
16761 +DST(   movw %bx, %es:(%edi)    )
16762         addl $2, %edi
16763         addw %bx, %ax   
16764         adcl $0, %eax
16765 @@ -332,30 +348,30 @@ DST(      movw %bx, (%edi)        )
16766  SRC(1: movl (%esi), %ebx       )
16767  SRC(   movl 4(%esi), %edx      )
16768         adcl %ebx, %eax
16769 -DST(   movl %ebx, (%edi)       )
16770 +DST(   movl %ebx, %es:(%edi)   )
16771         adcl %edx, %eax
16772 -DST(   movl %edx, 4(%edi)      )
16773 +DST(   movl %edx, %es:4(%edi)  )
16774  
16775  SRC(   movl 8(%esi), %ebx      )
16776  SRC(   movl 12(%esi), %edx     )
16777         adcl %ebx, %eax
16778 -DST(   movl %ebx, 8(%edi)      )
16779 +DST(   movl %ebx, %es:8(%edi)  )
16780         adcl %edx, %eax
16781 -DST(   movl %edx, 12(%edi)     )
16782 +DST(   movl %edx, %es:12(%edi) )
16783  
16784  SRC(   movl 16(%esi), %ebx     )
16785  SRC(   movl 20(%esi), %edx     )
16786         adcl %ebx, %eax
16787 -DST(   movl %ebx, 16(%edi)     )
16788 +DST(   movl %ebx, %es:16(%edi) )
16789         adcl %edx, %eax
16790 -DST(   movl %edx, 20(%edi)     )
16791 +DST(   movl %edx, %es:20(%edi) )
16792  
16793  SRC(   movl 24(%esi), %ebx     )
16794  SRC(   movl 28(%esi), %edx     )
16795         adcl %ebx, %eax
16796 -DST(   movl %ebx, 24(%edi)     )
16797 +DST(   movl %ebx, %es:24(%edi) )
16798         adcl %edx, %eax
16799 -DST(   movl %edx, 28(%edi)     )
16800 +DST(   movl %edx, %es:28(%edi) )
16801  
16802         lea 32(%esi), %esi
16803         lea 32(%edi), %edi
16804 @@ -369,7 +385,7 @@ DST(        movl %edx, 28(%edi)     )
16805         shrl $2, %edx                   # This clears CF
16806  SRC(3: movl (%esi), %ebx       )
16807         adcl %ebx, %eax
16808 -DST(   movl %ebx, (%edi)       )
16809 +DST(   movl %ebx, %es:(%edi)   )
16810         lea 4(%esi), %esi
16811         lea 4(%edi), %edi
16812         dec %edx
16813 @@ -381,12 +397,12 @@ DST(      movl %ebx, (%edi)       )
16814         jb 5f
16815  SRC(   movw (%esi), %cx        )
16816         leal 2(%esi), %esi
16817 -DST(   movw %cx, (%edi)        )
16818 +DST(   movw %cx, %es:(%edi)    )
16819         leal 2(%edi), %edi
16820         je 6f
16821         shll $16,%ecx
16822  SRC(5: movb (%esi), %cl        )
16823 -DST(   movb %cl, (%edi)        )
16824 +DST(   movb %cl, %es:(%edi)    )
16825  6:     addl %ecx, %eax
16826         adcl $0, %eax
16827  7:
16828 @@ -397,7 +413,7 @@ DST(        movb %cl, (%edi)        )
16829  
16830  6001:
16831         movl ARGBASE+20(%esp), %ebx     # src_err_ptr
16832 -       movl $-EFAULT, (%ebx)
16833 +       movl $-EFAULT, %ss:(%ebx)
16834  
16835         # zero the complete destination - computing the rest
16836         # is too much work 
16837 @@ -410,11 +426,15 @@ DST(      movb %cl, (%edi)        )
16838  
16839  6002:
16840         movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
16841 -       movl $-EFAULT,(%ebx)
16842 +       movl $-EFAULT,%ss:(%ebx)
16843         jmp 5000b
16844  
16845  .previous
16846  
16847 +       pushl_cfi %ss
16848 +       popl_cfi %ds
16849 +       pushl_cfi %ss
16850 +       popl_cfi %es
16851         popl_cfi %ebx
16852         CFI_RESTORE ebx
16853         popl_cfi %esi
16854 @@ -424,26 +444,43 @@ DST(      movb %cl, (%edi)        )
16855         popl_cfi %ecx                   # equivalent to addl $4,%esp
16856         ret     
16857         CFI_ENDPROC
16858 -ENDPROC(csum_partial_copy_generic)
16859 +ENDPROC(csum_partial_copy_generic_to_user)
16860  
16861  #else
16862  
16863  /* Version for PentiumII/PPro */
16864  
16865  #define ROUND1(x) \
16866 +       nop; nop; nop;                          \
16867         SRC(movl x(%esi), %ebx  )       ;       \
16868         addl %ebx, %eax                 ;       \
16869 -       DST(movl %ebx, x(%edi)  )       ; 
16870 +       DST(movl %ebx, %es:x(%edi))     ;
16871  
16872  #define ROUND(x) \
16873 +       nop; nop; nop;                          \
16874         SRC(movl x(%esi), %ebx  )       ;       \
16875         adcl %ebx, %eax                 ;       \
16876 -       DST(movl %ebx, x(%edi)  )       ;
16877 +       DST(movl %ebx, %es:x(%edi))     ;
16878  
16879  #define ARGBASE 12
16880 -               
16881 -ENTRY(csum_partial_copy_generic)
16882 +
16883 +ENTRY(csum_partial_copy_generic_to_user)
16884         CFI_STARTPROC
16885 +
16886 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16887 +       pushl_cfi %gs
16888 +       popl_cfi %es
16889 +       jmp csum_partial_copy_generic
16890 +#endif
16891 +
16892 +ENTRY(csum_partial_copy_generic_from_user)
16893 +
16894 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16895 +       pushl_cfi %gs
16896 +       popl_cfi %ds
16897 +#endif
16898 +
16899 +ENTRY(csum_partial_copy_generic)
16900         pushl_cfi %ebx
16901         CFI_REL_OFFSET ebx, 0
16902         pushl_cfi %edi
16903 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16904         subl %ebx, %edi  
16905         lea  -1(%esi),%edx
16906         andl $-32,%edx
16907 -       lea 3f(%ebx,%ebx), %ebx
16908 +       lea 3f(%ebx,%ebx,2), %ebx
16909         testl %esi, %esi 
16910         jmp *%ebx
16911  1:     addl $64,%esi
16912 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16913         jb 5f
16914  SRC(   movw (%esi), %dx         )
16915         leal 2(%esi), %esi
16916 -DST(   movw %dx, (%edi)         )
16917 +DST(   movw %dx, %es:(%edi)     )
16918         leal 2(%edi), %edi
16919         je 6f
16920         shll $16,%edx
16921  5:
16922  SRC(   movb (%esi), %dl         )
16923 -DST(   movb %dl, (%edi)         )
16924 +DST(   movb %dl, %es:(%edi)     )
16925  6:     addl %edx, %eax
16926         adcl $0, %eax
16927  7:
16928  .section .fixup, "ax"
16929  6001:  movl    ARGBASE+20(%esp), %ebx  # src_err_ptr   
16930 -       movl $-EFAULT, (%ebx)
16931 +       movl $-EFAULT, %ss:(%ebx)
16932         # zero the complete destination (computing the rest is too much work)
16933         movl ARGBASE+8(%esp),%edi       # dst
16934         movl ARGBASE+12(%esp),%ecx      # len
16935 @@ -505,10 +542,17 @@ DST(      movb %dl, (%edi)         )
16936         rep; stosb
16937         jmp 7b
16938  6002:  movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
16939 -       movl $-EFAULT, (%ebx)
16940 +       movl $-EFAULT, %ss:(%ebx)
16941         jmp  7b                 
16942  .previous                              
16943  
16944 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16945 +       pushl_cfi %ss
16946 +       popl_cfi %ds
16947 +       pushl_cfi %ss
16948 +       popl_cfi %es
16949 +#endif
16950 +
16951         popl_cfi %esi
16952         CFI_RESTORE esi
16953         popl_cfi %edi
16954 @@ -517,7 +561,7 @@ DST(        movb %dl, (%edi)         )
16955         CFI_RESTORE ebx
16956         ret
16957         CFI_ENDPROC
16958 -ENDPROC(csum_partial_copy_generic)
16959 +ENDPROC(csum_partial_copy_generic_to_user)
16960                                 
16961  #undef ROUND
16962  #undef ROUND1          
16963 diff -urNp linux-3.0.4/arch/x86/lib/clear_page_64.S linux-3.0.4/arch/x86/lib/clear_page_64.S
16964 --- linux-3.0.4/arch/x86/lib/clear_page_64.S    2011-07-21 22:17:23.000000000 -0400
16965 +++ linux-3.0.4/arch/x86/lib/clear_page_64.S    2011-08-23 21:47:55.000000000 -0400
16966 @@ -58,7 +58,7 @@ ENDPROC(clear_page)
16967  
16968  #include <asm/cpufeature.h>
16969  
16970 -       .section .altinstr_replacement,"ax"
16971 +       .section .altinstr_replacement,"a"
16972  1:     .byte 0xeb                                      /* jmp <disp8> */
16973         .byte (clear_page_c - clear_page) - (2f - 1b)   /* offset */
16974  2:     .byte 0xeb                                      /* jmp <disp8> */
16975 diff -urNp linux-3.0.4/arch/x86/lib/copy_page_64.S linux-3.0.4/arch/x86/lib/copy_page_64.S
16976 --- linux-3.0.4/arch/x86/lib/copy_page_64.S     2011-07-21 22:17:23.000000000 -0400
16977 +++ linux-3.0.4/arch/x86/lib/copy_page_64.S     2011-08-23 21:47:55.000000000 -0400
16978 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
16979  
16980  #include <asm/cpufeature.h>
16981  
16982 -       .section .altinstr_replacement,"ax"
16983 +       .section .altinstr_replacement,"a"
16984  1:     .byte 0xeb                                      /* jmp <disp8> */
16985         .byte (copy_page_c - copy_page) - (2f - 1b)     /* offset */
16986  2:
16987 diff -urNp linux-3.0.4/arch/x86/lib/copy_user_64.S linux-3.0.4/arch/x86/lib/copy_user_64.S
16988 --- linux-3.0.4/arch/x86/lib/copy_user_64.S     2011-07-21 22:17:23.000000000 -0400
16989 +++ linux-3.0.4/arch/x86/lib/copy_user_64.S     2011-08-23 21:47:55.000000000 -0400
16990 @@ -16,6 +16,7 @@
16991  #include <asm/thread_info.h>
16992  #include <asm/cpufeature.h>
16993  #include <asm/alternative-asm.h>
16994 +#include <asm/pgtable.h>
16995  
16996  /*
16997   * By placing feature2 after feature1 in altinstructions section, we logically
16998 @@ -29,7 +30,7 @@
16999         .byte 0xe9      /* 32bit jump */
17000         .long \orig-1f  /* by default jump to orig */
17001  1:
17002 -       .section .altinstr_replacement,"ax"
17003 +       .section .altinstr_replacement,"a"
17004  2:     .byte 0xe9                      /* near jump with 32bit immediate */
17005         .long \alt1-1b /* offset */   /* or alternatively to alt1 */
17006  3:     .byte 0xe9                      /* near jump with 32bit immediate */
17007 @@ -71,41 +72,13 @@
17008  #endif
17009         .endm
17010  
17011 -/* Standard copy_to_user with segment limit checking */
17012 -ENTRY(_copy_to_user)
17013 -       CFI_STARTPROC
17014 -       GET_THREAD_INFO(%rax)
17015 -       movq %rdi,%rcx
17016 -       addq %rdx,%rcx
17017 -       jc bad_to_user
17018 -       cmpq TI_addr_limit(%rax),%rcx
17019 -       ja bad_to_user
17020 -       ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17021 -               copy_user_generic_unrolled,copy_user_generic_string,    \
17022 -               copy_user_enhanced_fast_string
17023 -       CFI_ENDPROC
17024 -ENDPROC(_copy_to_user)
17025 -
17026 -/* Standard copy_from_user with segment limit checking */
17027 -ENTRY(_copy_from_user)
17028 -       CFI_STARTPROC
17029 -       GET_THREAD_INFO(%rax)
17030 -       movq %rsi,%rcx
17031 -       addq %rdx,%rcx
17032 -       jc bad_from_user
17033 -       cmpq TI_addr_limit(%rax),%rcx
17034 -       ja bad_from_user
17035 -       ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17036 -               copy_user_generic_unrolled,copy_user_generic_string,    \
17037 -               copy_user_enhanced_fast_string
17038 -       CFI_ENDPROC
17039 -ENDPROC(_copy_from_user)
17040 -
17041         .section .fixup,"ax"
17042         /* must zero dest */
17043  ENTRY(bad_from_user)
17044  bad_from_user:
17045         CFI_STARTPROC
17046 +       testl %edx,%edx
17047 +       js bad_to_user
17048         movl %edx,%ecx
17049         xorl %eax,%eax
17050         rep
17051 diff -urNp linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S
17052 --- linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S     2011-07-21 22:17:23.000000000 -0400
17053 +++ linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S     2011-08-23 21:47:55.000000000 -0400
17054 @@ -14,6 +14,7 @@
17055  #include <asm/current.h>
17056  #include <asm/asm-offsets.h>
17057  #include <asm/thread_info.h>
17058 +#include <asm/pgtable.h>
17059  
17060         .macro ALIGN_DESTINATION
17061  #ifdef FIX_ALIGNMENT
17062 @@ -50,6 +51,15 @@
17063   */
17064  ENTRY(__copy_user_nocache)
17065         CFI_STARTPROC
17066 +
17067 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17068 +       mov $PAX_USER_SHADOW_BASE,%rcx
17069 +       cmp %rcx,%rsi
17070 +       jae 1f
17071 +       add %rcx,%rsi
17072 +1:
17073 +#endif
17074 +
17075         cmpl $8,%edx
17076         jb 20f          /* less then 8 bytes, go to byte copy loop */
17077         ALIGN_DESTINATION
17078 diff -urNp linux-3.0.4/arch/x86/lib/csum-wrappers_64.c linux-3.0.4/arch/x86/lib/csum-wrappers_64.c
17079 --- linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17080 +++ linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-08-23 21:47:55.000000000 -0400
17081 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17082                         len -= 2;
17083                 }
17084         }
17085 +
17086 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17087 +       if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17088 +               src += PAX_USER_SHADOW_BASE;
17089 +#endif
17090 +
17091         isum = csum_partial_copy_generic((__force const void *)src,
17092                                 dst, len, isum, errp, NULL);
17093         if (unlikely(*errp))
17094 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17095         }
17096  
17097         *errp = 0;
17098 +
17099 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17100 +       if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17101 +               dst += PAX_USER_SHADOW_BASE;
17102 +#endif
17103 +
17104         return csum_partial_copy_generic(src, (void __force *)dst,
17105                                          len, isum, NULL, errp);
17106  }
17107 diff -urNp linux-3.0.4/arch/x86/lib/getuser.S linux-3.0.4/arch/x86/lib/getuser.S
17108 --- linux-3.0.4/arch/x86/lib/getuser.S  2011-07-21 22:17:23.000000000 -0400
17109 +++ linux-3.0.4/arch/x86/lib/getuser.S  2011-08-23 21:47:55.000000000 -0400
17110 @@ -33,14 +33,35 @@
17111  #include <asm/asm-offsets.h>
17112  #include <asm/thread_info.h>
17113  #include <asm/asm.h>
17114 +#include <asm/segment.h>
17115 +#include <asm/pgtable.h>
17116 +
17117 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17118 +#define __copyuser_seg gs;
17119 +#else
17120 +#define __copyuser_seg
17121 +#endif
17122  
17123         .text
17124  ENTRY(__get_user_1)
17125         CFI_STARTPROC
17126 +
17127 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17128         GET_THREAD_INFO(%_ASM_DX)
17129         cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17130         jae bad_get_user
17131 -1:     movzb (%_ASM_AX),%edx
17132 +
17133 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17134 +       mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17135 +       cmp %_ASM_DX,%_ASM_AX
17136 +       jae 1234f
17137 +       add %_ASM_DX,%_ASM_AX
17138 +1234:
17139 +#endif
17140 +
17141 +#endif
17142 +
17143 +1:     __copyuser_seg movzb (%_ASM_AX),%edx
17144         xor %eax,%eax
17145         ret
17146         CFI_ENDPROC
17147 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17148  ENTRY(__get_user_2)
17149         CFI_STARTPROC
17150         add $1,%_ASM_AX
17151 +
17152 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17153         jc bad_get_user
17154         GET_THREAD_INFO(%_ASM_DX)
17155         cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17156         jae bad_get_user
17157 -2:     movzwl -1(%_ASM_AX),%edx
17158 +
17159 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17160 +       mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17161 +       cmp %_ASM_DX,%_ASM_AX
17162 +       jae 1234f
17163 +       add %_ASM_DX,%_ASM_AX
17164 +1234:
17165 +#endif
17166 +
17167 +#endif
17168 +
17169 +2:     __copyuser_seg movzwl -1(%_ASM_AX),%edx
17170         xor %eax,%eax
17171         ret
17172         CFI_ENDPROC
17173 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17174  ENTRY(__get_user_4)
17175         CFI_STARTPROC
17176         add $3,%_ASM_AX
17177 +
17178 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17179         jc bad_get_user
17180         GET_THREAD_INFO(%_ASM_DX)
17181         cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17182         jae bad_get_user
17183 -3:     mov -3(%_ASM_AX),%edx
17184 +
17185 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17186 +       mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17187 +       cmp %_ASM_DX,%_ASM_AX
17188 +       jae 1234f
17189 +       add %_ASM_DX,%_ASM_AX
17190 +1234:
17191 +#endif
17192 +
17193 +#endif
17194 +
17195 +3:     __copyuser_seg mov -3(%_ASM_AX),%edx
17196         xor %eax,%eax
17197         ret
17198         CFI_ENDPROC
17199 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17200         GET_THREAD_INFO(%_ASM_DX)
17201         cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17202         jae     bad_get_user
17203 +
17204 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17205 +       mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17206 +       cmp %_ASM_DX,%_ASM_AX
17207 +       jae 1234f
17208 +       add %_ASM_DX,%_ASM_AX
17209 +1234:
17210 +#endif
17211 +
17212  4:     movq -7(%_ASM_AX),%_ASM_DX
17213         xor %eax,%eax
17214         ret
17215 diff -urNp linux-3.0.4/arch/x86/lib/insn.c linux-3.0.4/arch/x86/lib/insn.c
17216 --- linux-3.0.4/arch/x86/lib/insn.c     2011-07-21 22:17:23.000000000 -0400
17217 +++ linux-3.0.4/arch/x86/lib/insn.c     2011-08-23 21:47:55.000000000 -0400
17218 @@ -21,6 +21,11 @@
17219  #include <linux/string.h>
17220  #include <asm/inat.h>
17221  #include <asm/insn.h>
17222 +#ifdef __KERNEL__
17223 +#include <asm/pgtable_types.h>
17224 +#else
17225 +#define ktla_ktva(addr) addr
17226 +#endif
17227  
17228  #define get_next(t, insn)      \
17229         ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17230 @@ -40,8 +45,8 @@
17231  void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17232  {
17233         memset(insn, 0, sizeof(*insn));
17234 -       insn->kaddr = kaddr;
17235 -       insn->next_byte = kaddr;
17236 +       insn->kaddr = ktla_ktva(kaddr);
17237 +       insn->next_byte = ktla_ktva(kaddr);
17238         insn->x86_64 = x86_64 ? 1 : 0;
17239         insn->opnd_bytes = 4;
17240         if (x86_64)
17241 diff -urNp linux-3.0.4/arch/x86/lib/mmx_32.c linux-3.0.4/arch/x86/lib/mmx_32.c
17242 --- linux-3.0.4/arch/x86/lib/mmx_32.c   2011-07-21 22:17:23.000000000 -0400
17243 +++ linux-3.0.4/arch/x86/lib/mmx_32.c   2011-08-23 21:47:55.000000000 -0400
17244 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17245  {
17246         void *p;
17247         int i;
17248 +       unsigned long cr0;
17249  
17250         if (unlikely(in_interrupt()))
17251                 return __memcpy(to, from, len);
17252 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17253         kernel_fpu_begin();
17254  
17255         __asm__ __volatile__ (
17256 -               "1: prefetch (%0)\n"            /* This set is 28 bytes */
17257 -               "   prefetch 64(%0)\n"
17258 -               "   prefetch 128(%0)\n"
17259 -               "   prefetch 192(%0)\n"
17260 -               "   prefetch 256(%0)\n"
17261 +               "1: prefetch (%1)\n"            /* This set is 28 bytes */
17262 +               "   prefetch 64(%1)\n"
17263 +               "   prefetch 128(%1)\n"
17264 +               "   prefetch 192(%1)\n"
17265 +               "   prefetch 256(%1)\n"
17266                 "2:  \n"
17267                 ".section .fixup, \"ax\"\n"
17268 -               "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17269 +               "3:  \n"
17270 +
17271 +#ifdef CONFIG_PAX_KERNEXEC
17272 +               "   movl %%cr0, %0\n"
17273 +               "   movl %0, %%eax\n"
17274 +               "   andl $0xFFFEFFFF, %%eax\n"
17275 +               "   movl %%eax, %%cr0\n"
17276 +#endif
17277 +
17278 +               "   movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17279 +
17280 +#ifdef CONFIG_PAX_KERNEXEC
17281 +               "   movl %0, %%cr0\n"
17282 +#endif
17283 +
17284                 "   jmp 2b\n"
17285                 ".previous\n"
17286                         _ASM_EXTABLE(1b, 3b)
17287 -                       : : "r" (from));
17288 +                       : "=&r" (cr0) : "r" (from) : "ax");
17289  
17290         for ( ; i > 5; i--) {
17291                 __asm__ __volatile__ (
17292 -               "1:  prefetch 320(%0)\n"
17293 -               "2:  movq (%0), %%mm0\n"
17294 -               "  movq 8(%0), %%mm1\n"
17295 -               "  movq 16(%0), %%mm2\n"
17296 -               "  movq 24(%0), %%mm3\n"
17297 -               "  movq %%mm0, (%1)\n"
17298 -               "  movq %%mm1, 8(%1)\n"
17299 -               "  movq %%mm2, 16(%1)\n"
17300 -               "  movq %%mm3, 24(%1)\n"
17301 -               "  movq 32(%0), %%mm0\n"
17302 -               "  movq 40(%0), %%mm1\n"
17303 -               "  movq 48(%0), %%mm2\n"
17304 -               "  movq 56(%0), %%mm3\n"
17305 -               "  movq %%mm0, 32(%1)\n"
17306 -               "  movq %%mm1, 40(%1)\n"
17307 -               "  movq %%mm2, 48(%1)\n"
17308 -               "  movq %%mm3, 56(%1)\n"
17309 +               "1:  prefetch 320(%1)\n"
17310 +               "2:  movq (%1), %%mm0\n"
17311 +               "  movq 8(%1), %%mm1\n"
17312 +               "  movq 16(%1), %%mm2\n"
17313 +               "  movq 24(%1), %%mm3\n"
17314 +               "  movq %%mm0, (%2)\n"
17315 +               "  movq %%mm1, 8(%2)\n"
17316 +               "  movq %%mm2, 16(%2)\n"
17317 +               "  movq %%mm3, 24(%2)\n"
17318 +               "  movq 32(%1), %%mm0\n"
17319 +               "  movq 40(%1), %%mm1\n"
17320 +               "  movq 48(%1), %%mm2\n"
17321 +               "  movq 56(%1), %%mm3\n"
17322 +               "  movq %%mm0, 32(%2)\n"
17323 +               "  movq %%mm1, 40(%2)\n"
17324 +               "  movq %%mm2, 48(%2)\n"
17325 +               "  movq %%mm3, 56(%2)\n"
17326                 ".section .fixup, \"ax\"\n"
17327 -               "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17328 +               "3:\n"
17329 +
17330 +#ifdef CONFIG_PAX_KERNEXEC
17331 +               "   movl %%cr0, %0\n"
17332 +               "   movl %0, %%eax\n"
17333 +               "   andl $0xFFFEFFFF, %%eax\n"
17334 +               "   movl %%eax, %%cr0\n"
17335 +#endif
17336 +
17337 +               "   movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17338 +
17339 +#ifdef CONFIG_PAX_KERNEXEC
17340 +               "   movl %0, %%cr0\n"
17341 +#endif
17342 +
17343                 "   jmp 2b\n"
17344                 ".previous\n"
17345                         _ASM_EXTABLE(1b, 3b)
17346 -                       : : "r" (from), "r" (to) : "memory");
17347 +                       : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17348  
17349                 from += 64;
17350                 to += 64;
17351 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17352  static void fast_copy_page(void *to, void *from)
17353  {
17354         int i;
17355 +       unsigned long cr0;
17356  
17357         kernel_fpu_begin();
17358  
17359 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17360          * but that is for later. -AV
17361          */
17362         __asm__ __volatile__(
17363 -               "1: prefetch (%0)\n"
17364 -               "   prefetch 64(%0)\n"
17365 -               "   prefetch 128(%0)\n"
17366 -               "   prefetch 192(%0)\n"
17367 -               "   prefetch 256(%0)\n"
17368 +               "1: prefetch (%1)\n"
17369 +               "   prefetch 64(%1)\n"
17370 +               "   prefetch 128(%1)\n"
17371 +               "   prefetch 192(%1)\n"
17372 +               "   prefetch 256(%1)\n"
17373                 "2:  \n"
17374                 ".section .fixup, \"ax\"\n"
17375 -               "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17376 +               "3:  \n"
17377 +
17378 +#ifdef CONFIG_PAX_KERNEXEC
17379 +               "   movl %%cr0, %0\n"
17380 +               "   movl %0, %%eax\n"
17381 +               "   andl $0xFFFEFFFF, %%eax\n"
17382 +               "   movl %%eax, %%cr0\n"
17383 +#endif
17384 +
17385 +               "   movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17386 +
17387 +#ifdef CONFIG_PAX_KERNEXEC
17388 +               "   movl %0, %%cr0\n"
17389 +#endif
17390 +
17391                 "   jmp 2b\n"
17392                 ".previous\n"
17393 -                       _ASM_EXTABLE(1b, 3b) : : "r" (from));
17394 +                       _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17395  
17396         for (i = 0; i < (4096-320)/64; i++) {
17397                 __asm__ __volatile__ (
17398 -               "1: prefetch 320(%0)\n"
17399 -               "2: movq (%0), %%mm0\n"
17400 -               "   movntq %%mm0, (%1)\n"
17401 -               "   movq 8(%0), %%mm1\n"
17402 -               "   movntq %%mm1, 8(%1)\n"
17403 -               "   movq 16(%0), %%mm2\n"
17404 -               "   movntq %%mm2, 16(%1)\n"
17405 -               "   movq 24(%0), %%mm3\n"
17406 -               "   movntq %%mm3, 24(%1)\n"
17407 -               "   movq 32(%0), %%mm4\n"
17408 -               "   movntq %%mm4, 32(%1)\n"
17409 -               "   movq 40(%0), %%mm5\n"
17410 -               "   movntq %%mm5, 40(%1)\n"
17411 -               "   movq 48(%0), %%mm6\n"
17412 -               "   movntq %%mm6, 48(%1)\n"
17413 -               "   movq 56(%0), %%mm7\n"
17414 -               "   movntq %%mm7, 56(%1)\n"
17415 +               "1: prefetch 320(%1)\n"
17416 +               "2: movq (%1), %%mm0\n"
17417 +               "   movntq %%mm0, (%2)\n"
17418 +               "   movq 8(%1), %%mm1\n"
17419 +               "   movntq %%mm1, 8(%2)\n"
17420 +               "   movq 16(%1), %%mm2\n"
17421 +               "   movntq %%mm2, 16(%2)\n"
17422 +               "   movq 24(%1), %%mm3\n"
17423 +               "   movntq %%mm3, 24(%2)\n"
17424 +               "   movq 32(%1), %%mm4\n"
17425 +               "   movntq %%mm4, 32(%2)\n"
17426 +               "   movq 40(%1), %%mm5\n"
17427 +               "   movntq %%mm5, 40(%2)\n"
17428 +               "   movq 48(%1), %%mm6\n"
17429 +               "   movntq %%mm6, 48(%2)\n"
17430 +               "   movq 56(%1), %%mm7\n"
17431 +               "   movntq %%mm7, 56(%2)\n"
17432                 ".section .fixup, \"ax\"\n"
17433 -               "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17434 +               "3:\n"
17435 +
17436 +#ifdef CONFIG_PAX_KERNEXEC
17437 +               "   movl %%cr0, %0\n"
17438 +               "   movl %0, %%eax\n"
17439 +               "   andl $0xFFFEFFFF, %%eax\n"
17440 +               "   movl %%eax, %%cr0\n"
17441 +#endif
17442 +
17443 +               "   movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17444 +
17445 +#ifdef CONFIG_PAX_KERNEXEC
17446 +               "   movl %0, %%cr0\n"
17447 +#endif
17448 +
17449                 "   jmp 2b\n"
17450                 ".previous\n"
17451 -               _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17452 +               _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17453  
17454                 from += 64;
17455                 to += 64;
17456 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17457  static void fast_copy_page(void *to, void *from)
17458  {
17459         int i;
17460 +       unsigned long cr0;
17461  
17462         kernel_fpu_begin();
17463  
17464         __asm__ __volatile__ (
17465 -               "1: prefetch (%0)\n"
17466 -               "   prefetch 64(%0)\n"
17467 -               "   prefetch 128(%0)\n"
17468 -               "   prefetch 192(%0)\n"
17469 -               "   prefetch 256(%0)\n"
17470 +               "1: prefetch (%1)\n"
17471 +               "   prefetch 64(%1)\n"
17472 +               "   prefetch 128(%1)\n"
17473 +               "   prefetch 192(%1)\n"
17474 +               "   prefetch 256(%1)\n"
17475                 "2:  \n"
17476                 ".section .fixup, \"ax\"\n"
17477 -               "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17478 +               "3:  \n"
17479 +
17480 +#ifdef CONFIG_PAX_KERNEXEC
17481 +               "   movl %%cr0, %0\n"
17482 +               "   movl %0, %%eax\n"
17483 +               "   andl $0xFFFEFFFF, %%eax\n"
17484 +               "   movl %%eax, %%cr0\n"
17485 +#endif
17486 +
17487 +               "   movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17488 +
17489 +#ifdef CONFIG_PAX_KERNEXEC
17490 +               "   movl %0, %%cr0\n"
17491 +#endif
17492 +
17493                 "   jmp 2b\n"
17494                 ".previous\n"
17495 -                       _ASM_EXTABLE(1b, 3b) : : "r" (from));
17496 +                       _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17497  
17498         for (i = 0; i < 4096/64; i++) {
17499                 __asm__ __volatile__ (
17500 -               "1: prefetch 320(%0)\n"
17501 -               "2: movq (%0), %%mm0\n"
17502 -               "   movq 8(%0), %%mm1\n"
17503 -               "   movq 16(%0), %%mm2\n"
17504 -               "   movq 24(%0), %%mm3\n"
17505 -               "   movq %%mm0, (%1)\n"
17506 -               "   movq %%mm1, 8(%1)\n"
17507 -               "   movq %%mm2, 16(%1)\n"
17508 -               "   movq %%mm3, 24(%1)\n"
17509 -               "   movq 32(%0), %%mm0\n"
17510 -               "   movq 40(%0), %%mm1\n"
17511 -               "   movq 48(%0), %%mm2\n"
17512 -               "   movq 56(%0), %%mm3\n"
17513 -               "   movq %%mm0, 32(%1)\n"
17514 -               "   movq %%mm1, 40(%1)\n"
17515 -               "   movq %%mm2, 48(%1)\n"
17516 -               "   movq %%mm3, 56(%1)\n"
17517 +               "1: prefetch 320(%1)\n"
17518 +               "2: movq (%1), %%mm0\n"
17519 +               "   movq 8(%1), %%mm1\n"
17520 +               "   movq 16(%1), %%mm2\n"
17521 +               "   movq 24(%1), %%mm3\n"
17522 +               "   movq %%mm0, (%2)\n"
17523 +               "   movq %%mm1, 8(%2)\n"
17524 +               "   movq %%mm2, 16(%2)\n"
17525 +               "   movq %%mm3, 24(%2)\n"
17526 +               "   movq 32(%1), %%mm0\n"
17527 +               "   movq 40(%1), %%mm1\n"
17528 +               "   movq 48(%1), %%mm2\n"
17529 +               "   movq 56(%1), %%mm3\n"
17530 +               "   movq %%mm0, 32(%2)\n"
17531 +               "   movq %%mm1, 40(%2)\n"
17532 +               "   movq %%mm2, 48(%2)\n"
17533 +               "   movq %%mm3, 56(%2)\n"
17534                 ".section .fixup, \"ax\"\n"
17535 -               "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17536 +               "3:\n"
17537 +
17538 +#ifdef CONFIG_PAX_KERNEXEC
17539 +               "   movl %%cr0, %0\n"
17540 +               "   movl %0, %%eax\n"
17541 +               "   andl $0xFFFEFFFF, %%eax\n"
17542 +               "   movl %%eax, %%cr0\n"
17543 +#endif
17544 +
17545 +               "   movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17546 +
17547 +#ifdef CONFIG_PAX_KERNEXEC
17548 +               "   movl %0, %%cr0\n"
17549 +#endif
17550 +
17551                 "   jmp 2b\n"
17552                 ".previous\n"
17553                         _ASM_EXTABLE(1b, 3b)
17554 -                       : : "r" (from), "r" (to) : "memory");
17555 +                       : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17556  
17557                 from += 64;
17558                 to += 64;
17559 diff -urNp linux-3.0.4/arch/x86/lib/putuser.S linux-3.0.4/arch/x86/lib/putuser.S
17560 --- linux-3.0.4/arch/x86/lib/putuser.S  2011-07-21 22:17:23.000000000 -0400
17561 +++ linux-3.0.4/arch/x86/lib/putuser.S  2011-08-23 21:47:55.000000000 -0400
17562 @@ -15,7 +15,8 @@
17563  #include <asm/thread_info.h>
17564  #include <asm/errno.h>
17565  #include <asm/asm.h>
17566 -
17567 +#include <asm/segment.h>
17568 +#include <asm/pgtable.h>
17569  
17570  /*
17571   * __put_user_X
17572 @@ -29,52 +30,119 @@
17573   * as they get called from within inline assembly.
17574   */
17575  
17576 -#define ENTER  CFI_STARTPROC ; \
17577 -               GET_THREAD_INFO(%_ASM_BX)
17578 +#define ENTER  CFI_STARTPROC
17579  #define EXIT   ret ; \
17580                 CFI_ENDPROC
17581  
17582 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17583 +#define _DEST %_ASM_CX,%_ASM_BX
17584 +#else
17585 +#define _DEST %_ASM_CX
17586 +#endif
17587 +
17588 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17589 +#define __copyuser_seg gs;
17590 +#else
17591 +#define __copyuser_seg
17592 +#endif
17593 +
17594  .text
17595  ENTRY(__put_user_1)
17596         ENTER
17597 +
17598 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17599 +       GET_THREAD_INFO(%_ASM_BX)
17600         cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17601         jae bad_put_user
17602 -1:     movb %al,(%_ASM_CX)
17603 +
17604 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17605 +       mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17606 +       cmp %_ASM_BX,%_ASM_CX
17607 +       jb 1234f
17608 +       xor %ebx,%ebx
17609 +1234:
17610 +#endif
17611 +
17612 +#endif
17613 +
17614 +1:     __copyuser_seg movb %al,(_DEST)
17615         xor %eax,%eax
17616         EXIT
17617  ENDPROC(__put_user_1)
17618  
17619  ENTRY(__put_user_2)
17620         ENTER
17621 +
17622 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17623 +       GET_THREAD_INFO(%_ASM_BX)
17624         mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17625         sub $1,%_ASM_BX
17626         cmp %_ASM_BX,%_ASM_CX
17627         jae bad_put_user
17628 -2:     movw %ax,(%_ASM_CX)
17629 +
17630 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17631 +       mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17632 +       cmp %_ASM_BX,%_ASM_CX
17633 +       jb 1234f
17634 +       xor %ebx,%ebx
17635 +1234:
17636 +#endif
17637 +
17638 +#endif
17639 +
17640 +2:     __copyuser_seg movw %ax,(_DEST)
17641         xor %eax,%eax
17642         EXIT
17643  ENDPROC(__put_user_2)
17644  
17645  ENTRY(__put_user_4)
17646         ENTER
17647 +
17648 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17649 +       GET_THREAD_INFO(%_ASM_BX)
17650         mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17651         sub $3,%_ASM_BX
17652         cmp %_ASM_BX,%_ASM_CX
17653         jae bad_put_user
17654 -3:     movl %eax,(%_ASM_CX)
17655 +
17656 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17657 +       mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17658 +       cmp %_ASM_BX,%_ASM_CX
17659 +       jb 1234f
17660 +       xor %ebx,%ebx
17661 +1234:
17662 +#endif
17663 +
17664 +#endif
17665 +
17666 +3:     __copyuser_seg movl %eax,(_DEST)
17667         xor %eax,%eax
17668         EXIT
17669  ENDPROC(__put_user_4)
17670  
17671  ENTRY(__put_user_8)
17672         ENTER
17673 +
17674 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17675 +       GET_THREAD_INFO(%_ASM_BX)
17676         mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17677         sub $7,%_ASM_BX
17678         cmp %_ASM_BX,%_ASM_CX
17679         jae bad_put_user
17680 -4:     mov %_ASM_AX,(%_ASM_CX)
17681 +
17682 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17683 +       mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17684 +       cmp %_ASM_BX,%_ASM_CX
17685 +       jb 1234f
17686 +       xor %ebx,%ebx
17687 +1234:
17688 +#endif
17689 +
17690 +#endif
17691 +
17692 +4:     __copyuser_seg mov %_ASM_AX,(_DEST)
17693  #ifdef CONFIG_X86_32
17694 -5:     movl %edx,4(%_ASM_CX)
17695 +5:     __copyuser_seg movl %edx,4(_DEST)
17696  #endif
17697         xor %eax,%eax
17698         EXIT
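
[Editor's note] On the UDEREF changes to putuser.S above: on amd64, _DEST expands to (%_ASM_CX,%_ASM_BX), and the inserted compare loads %_ASM_BX with either PAX_USER_SHADOW_BASE or zero depending on whether the target pointer already lies in the shadowed range; on i386 the store instead gets a %gs segment override via __copyuser_seg. A small C sketch of the 64-bit offset selection (the shadow-base constant here is a placeholder, not the kernel's definition):

#include <stdint.h>
#include <stdio.h>

#define PAX_USER_SHADOW_BASE_DEMO 0x1000000000000000ULL  /* placeholder value */

/* Mirrors the added asm: mov $PAX_USER_SHADOW_BASE,%rbx; cmp; jb 1234f; xor %ebx,%ebx.
 * The chosen offset is added to the destination through the (%rcx,%rbx) operand. */
static uint64_t uderef_offset(uint64_t addr)
{
    return addr < PAX_USER_SHADOW_BASE_DEMO ? PAX_USER_SHADOW_BASE_DEMO : 0;
}

int main(void)
{
    uint64_t user_ptr = 0x00007f0000001000ULL;
    printf("store goes to %#llx\n",
           (unsigned long long)(user_ptr + uderef_offset(user_ptr)));
    return 0;
}
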
17699 diff -urNp linux-3.0.4/arch/x86/lib/usercopy_32.c linux-3.0.4/arch/x86/lib/usercopy_32.c
17700 --- linux-3.0.4/arch/x86/lib/usercopy_32.c      2011-07-21 22:17:23.000000000 -0400
17701 +++ linux-3.0.4/arch/x86/lib/usercopy_32.c      2011-08-23 21:47:55.000000000 -0400
17702 @@ -43,7 +43,7 @@ do {                                                                     \
17703         __asm__ __volatile__(                                              \
17704                 "       testl %1,%1\n"                                     \
17705                 "       jz 2f\n"                                           \
17706 -               "0:     lodsb\n"                                           \
17707 +               "0:     "__copyuser_seg"lodsb\n"                           \
17708                 "       stosb\n"                                           \
17709                 "       testb %%al,%%al\n"                                 \
17710                 "       jz 1f\n"                                           \
17711 @@ -128,10 +128,12 @@ do {                                                                      \
17712         int __d0;                                                       \
17713         might_fault();                                                  \
17714         __asm__ __volatile__(                                           \
17715 +               __COPYUSER_SET_ES                                       \
17716                 "0:     rep; stosl\n"                                   \
17717                 "       movl %2,%0\n"                                   \
17718                 "1:     rep; stosb\n"                                   \
17719                 "2:\n"                                                  \
17720 +               __COPYUSER_RESTORE_ES                                   \
17721                 ".section .fixup,\"ax\"\n"                              \
17722                 "3:     lea 0(%2,%0,4),%0\n"                            \
17723                 "       jmp 2b\n"                                       \
17724 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, 
17725         might_fault();
17726  
17727         __asm__ __volatile__(
17728 +               __COPYUSER_SET_ES
17729                 "       testl %0, %0\n"
17730                 "       jz 3f\n"
17731                 "       andl %0,%%ecx\n"
17732 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, 
17733                 "       subl %%ecx,%0\n"
17734                 "       addl %0,%%eax\n"
17735                 "1:\n"
17736 +               __COPYUSER_RESTORE_ES
17737                 ".section .fixup,\"ax\"\n"
17738                 "2:     xorl %%eax,%%eax\n"
17739                 "       jmp 1b\n"
17740 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17741  
17742  #ifdef CONFIG_X86_INTEL_USERCOPY
17743  static unsigned long
17744 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
17745 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17746  {
17747         int d0, d1;
17748         __asm__ __volatile__(
17749 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17750                        "       .align 2,0x90\n"
17751                        "3:     movl 0(%4), %%eax\n"
17752                        "4:     movl 4(%4), %%edx\n"
17753 -                      "5:     movl %%eax, 0(%3)\n"
17754 -                      "6:     movl %%edx, 4(%3)\n"
17755 +                      "5:     "__copyuser_seg" movl %%eax, 0(%3)\n"
17756 +                      "6:     "__copyuser_seg" movl %%edx, 4(%3)\n"
17757                        "7:     movl 8(%4), %%eax\n"
17758                        "8:     movl 12(%4),%%edx\n"
17759 -                      "9:     movl %%eax, 8(%3)\n"
17760 -                      "10:    movl %%edx, 12(%3)\n"
17761 +                      "9:     "__copyuser_seg" movl %%eax, 8(%3)\n"
17762 +                      "10:    "__copyuser_seg" movl %%edx, 12(%3)\n"
17763                        "11:    movl 16(%4), %%eax\n"
17764                        "12:    movl 20(%4), %%edx\n"
17765 -                      "13:    movl %%eax, 16(%3)\n"
17766 -                      "14:    movl %%edx, 20(%3)\n"
17767 +                      "13:    "__copyuser_seg" movl %%eax, 16(%3)\n"
17768 +                      "14:    "__copyuser_seg" movl %%edx, 20(%3)\n"
17769                        "15:    movl 24(%4), %%eax\n"
17770                        "16:    movl 28(%4), %%edx\n"
17771 -                      "17:    movl %%eax, 24(%3)\n"
17772 -                      "18:    movl %%edx, 28(%3)\n"
17773 +                      "17:    "__copyuser_seg" movl %%eax, 24(%3)\n"
17774 +                      "18:    "__copyuser_seg" movl %%edx, 28(%3)\n"
17775                        "19:    movl 32(%4), %%eax\n"
17776                        "20:    movl 36(%4), %%edx\n"
17777 -                      "21:    movl %%eax, 32(%3)\n"
17778 -                      "22:    movl %%edx, 36(%3)\n"
17779 +                      "21:    "__copyuser_seg" movl %%eax, 32(%3)\n"
17780 +                      "22:    "__copyuser_seg" movl %%edx, 36(%3)\n"
17781                        "23:    movl 40(%4), %%eax\n"
17782                        "24:    movl 44(%4), %%edx\n"
17783 -                      "25:    movl %%eax, 40(%3)\n"
17784 -                      "26:    movl %%edx, 44(%3)\n"
17785 +                      "25:    "__copyuser_seg" movl %%eax, 40(%3)\n"
17786 +                      "26:    "__copyuser_seg" movl %%edx, 44(%3)\n"
17787                        "27:    movl 48(%4), %%eax\n"
17788                        "28:    movl 52(%4), %%edx\n"
17789 -                      "29:    movl %%eax, 48(%3)\n"
17790 -                      "30:    movl %%edx, 52(%3)\n"
17791 +                      "29:    "__copyuser_seg" movl %%eax, 48(%3)\n"
17792 +                      "30:    "__copyuser_seg" movl %%edx, 52(%3)\n"
17793                        "31:    movl 56(%4), %%eax\n"
17794                        "32:    movl 60(%4), %%edx\n"
17795 -                      "33:    movl %%eax, 56(%3)\n"
17796 -                      "34:    movl %%edx, 60(%3)\n"
17797 +                      "33:    "__copyuser_seg" movl %%eax, 56(%3)\n"
17798 +                      "34:    "__copyuser_seg" movl %%edx, 60(%3)\n"
17799                        "       addl $-64, %0\n"
17800                        "       addl $64, %4\n"
17801                        "       addl $64, %3\n"
17802 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17803                        "       shrl  $2, %0\n"
17804                        "       andl  $3, %%eax\n"
17805                        "       cld\n"
17806 +                      __COPYUSER_SET_ES
17807                        "99:    rep; movsl\n"
17808                        "36:    movl %%eax, %0\n"
17809                        "37:    rep; movsb\n"
17810                        "100:\n"
17811 +                      __COPYUSER_RESTORE_ES
17812 +                      ".section .fixup,\"ax\"\n"
17813 +                      "101:   lea 0(%%eax,%0,4),%0\n"
17814 +                      "       jmp 100b\n"
17815 +                      ".previous\n"
17816 +                      ".section __ex_table,\"a\"\n"
17817 +                      "       .align 4\n"
17818 +                      "       .long 1b,100b\n"
17819 +                      "       .long 2b,100b\n"
17820 +                      "       .long 3b,100b\n"
17821 +                      "       .long 4b,100b\n"
17822 +                      "       .long 5b,100b\n"
17823 +                      "       .long 6b,100b\n"
17824 +                      "       .long 7b,100b\n"
17825 +                      "       .long 8b,100b\n"
17826 +                      "       .long 9b,100b\n"
17827 +                      "       .long 10b,100b\n"
17828 +                      "       .long 11b,100b\n"
17829 +                      "       .long 12b,100b\n"
17830 +                      "       .long 13b,100b\n"
17831 +                      "       .long 14b,100b\n"
17832 +                      "       .long 15b,100b\n"
17833 +                      "       .long 16b,100b\n"
17834 +                      "       .long 17b,100b\n"
17835 +                      "       .long 18b,100b\n"
17836 +                      "       .long 19b,100b\n"
17837 +                      "       .long 20b,100b\n"
17838 +                      "       .long 21b,100b\n"
17839 +                      "       .long 22b,100b\n"
17840 +                      "       .long 23b,100b\n"
17841 +                      "       .long 24b,100b\n"
17842 +                      "       .long 25b,100b\n"
17843 +                      "       .long 26b,100b\n"
17844 +                      "       .long 27b,100b\n"
17845 +                      "       .long 28b,100b\n"
17846 +                      "       .long 29b,100b\n"
17847 +                      "       .long 30b,100b\n"
17848 +                      "       .long 31b,100b\n"
17849 +                      "       .long 32b,100b\n"
17850 +                      "       .long 33b,100b\n"
17851 +                      "       .long 34b,100b\n"
17852 +                      "       .long 35b,100b\n"
17853 +                      "       .long 36b,100b\n"
17854 +                      "       .long 37b,100b\n"
17855 +                      "       .long 99b,101b\n"
17856 +                      ".previous"
17857 +                      : "=&c"(size), "=&D" (d0), "=&S" (d1)
17858 +                      :  "1"(to), "2"(from), "0"(size)
17859 +                      : "eax", "edx", "memory");
17860 +       return size;
17861 +}
17862 +
17863 +static unsigned long
17864 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17865 +{
17866 +       int d0, d1;
17867 +       __asm__ __volatile__(
17868 +                      "       .align 2,0x90\n"
17869 +                      "1:     "__copyuser_seg" movl 32(%4), %%eax\n"
17870 +                      "       cmpl $67, %0\n"
17871 +                      "       jbe 3f\n"
17872 +                      "2:     "__copyuser_seg" movl 64(%4), %%eax\n"
17873 +                      "       .align 2,0x90\n"
17874 +                      "3:     "__copyuser_seg" movl 0(%4), %%eax\n"
17875 +                      "4:     "__copyuser_seg" movl 4(%4), %%edx\n"
17876 +                      "5:     movl %%eax, 0(%3)\n"
17877 +                      "6:     movl %%edx, 4(%3)\n"
17878 +                      "7:     "__copyuser_seg" movl 8(%4), %%eax\n"
17879 +                      "8:     "__copyuser_seg" movl 12(%4),%%edx\n"
17880 +                      "9:     movl %%eax, 8(%3)\n"
17881 +                      "10:    movl %%edx, 12(%3)\n"
17882 +                      "11:    "__copyuser_seg" movl 16(%4), %%eax\n"
17883 +                      "12:    "__copyuser_seg" movl 20(%4), %%edx\n"
17884 +                      "13:    movl %%eax, 16(%3)\n"
17885 +                      "14:    movl %%edx, 20(%3)\n"
17886 +                      "15:    "__copyuser_seg" movl 24(%4), %%eax\n"
17887 +                      "16:    "__copyuser_seg" movl 28(%4), %%edx\n"
17888 +                      "17:    movl %%eax, 24(%3)\n"
17889 +                      "18:    movl %%edx, 28(%3)\n"
17890 +                      "19:    "__copyuser_seg" movl 32(%4), %%eax\n"
17891 +                      "20:    "__copyuser_seg" movl 36(%4), %%edx\n"
17892 +                      "21:    movl %%eax, 32(%3)\n"
17893 +                      "22:    movl %%edx, 36(%3)\n"
17894 +                      "23:    "__copyuser_seg" movl 40(%4), %%eax\n"
17895 +                      "24:    "__copyuser_seg" movl 44(%4), %%edx\n"
17896 +                      "25:    movl %%eax, 40(%3)\n"
17897 +                      "26:    movl %%edx, 44(%3)\n"
17898 +                      "27:    "__copyuser_seg" movl 48(%4), %%eax\n"
17899 +                      "28:    "__copyuser_seg" movl 52(%4), %%edx\n"
17900 +                      "29:    movl %%eax, 48(%3)\n"
17901 +                      "30:    movl %%edx, 52(%3)\n"
17902 +                      "31:    "__copyuser_seg" movl 56(%4), %%eax\n"
17903 +                      "32:    "__copyuser_seg" movl 60(%4), %%edx\n"
17904 +                      "33:    movl %%eax, 56(%3)\n"
17905 +                      "34:    movl %%edx, 60(%3)\n"
17906 +                      "       addl $-64, %0\n"
17907 +                      "       addl $64, %4\n"
17908 +                      "       addl $64, %3\n"
17909 +                      "       cmpl $63, %0\n"
17910 +                      "       ja  1b\n"
17911 +                      "35:    movl  %0, %%eax\n"
17912 +                      "       shrl  $2, %0\n"
17913 +                      "       andl  $3, %%eax\n"
17914 +                      "       cld\n"
17915 +                      "99:    rep; "__copyuser_seg" movsl\n"
17916 +                      "36:    movl %%eax, %0\n"
17917 +                      "37:    rep; "__copyuser_seg" movsb\n"
17918 +                      "100:\n"
17919                        ".section .fixup,\"ax\"\n"
17920                        "101:   lea 0(%%eax,%0,4),%0\n"
17921                        "       jmp 100b\n"
17922 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17923         int d0, d1;
17924         __asm__ __volatile__(
17925                        "        .align 2,0x90\n"
17926 -                      "0:      movl 32(%4), %%eax\n"
17927 +                      "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
17928                        "        cmpl $67, %0\n"
17929                        "        jbe 2f\n"
17930 -                      "1:      movl 64(%4), %%eax\n"
17931 +                      "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
17932                        "        .align 2,0x90\n"
17933 -                      "2:      movl 0(%4), %%eax\n"
17934 -                      "21:     movl 4(%4), %%edx\n"
17935 +                      "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
17936 +                      "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
17937                        "        movl %%eax, 0(%3)\n"
17938                        "        movl %%edx, 4(%3)\n"
17939 -                      "3:      movl 8(%4), %%eax\n"
17940 -                      "31:     movl 12(%4),%%edx\n"
17941 +                      "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
17942 +                      "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
17943                        "        movl %%eax, 8(%3)\n"
17944                        "        movl %%edx, 12(%3)\n"
17945 -                      "4:      movl 16(%4), %%eax\n"
17946 -                      "41:     movl 20(%4), %%edx\n"
17947 +                      "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
17948 +                      "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
17949                        "        movl %%eax, 16(%3)\n"
17950                        "        movl %%edx, 20(%3)\n"
17951 -                      "10:     movl 24(%4), %%eax\n"
17952 -                      "51:     movl 28(%4), %%edx\n"
17953 +                      "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
17954 +                      "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
17955                        "        movl %%eax, 24(%3)\n"
17956                        "        movl %%edx, 28(%3)\n"
17957 -                      "11:     movl 32(%4), %%eax\n"
17958 -                      "61:     movl 36(%4), %%edx\n"
17959 +                      "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
17960 +                      "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
17961                        "        movl %%eax, 32(%3)\n"
17962                        "        movl %%edx, 36(%3)\n"
17963 -                      "12:     movl 40(%4), %%eax\n"
17964 -                      "71:     movl 44(%4), %%edx\n"
17965 +                      "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
17966 +                      "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
17967                        "        movl %%eax, 40(%3)\n"
17968                        "        movl %%edx, 44(%3)\n"
17969 -                      "13:     movl 48(%4), %%eax\n"
17970 -                      "81:     movl 52(%4), %%edx\n"
17971 +                      "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
17972 +                      "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
17973                        "        movl %%eax, 48(%3)\n"
17974                        "        movl %%edx, 52(%3)\n"
17975 -                      "14:     movl 56(%4), %%eax\n"
17976 -                      "91:     movl 60(%4), %%edx\n"
17977 +                      "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
17978 +                      "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
17979                        "        movl %%eax, 56(%3)\n"
17980                        "        movl %%edx, 60(%3)\n"
17981                        "        addl $-64, %0\n"
17982 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
17983                        "        shrl  $2, %0\n"
17984                        "        andl $3, %%eax\n"
17985                        "        cld\n"
17986 -                      "6:      rep; movsl\n"
17987 +                      "6:      rep; "__copyuser_seg" movsl\n"
17988                        "        movl %%eax,%0\n"
17989 -                      "7:      rep; movsb\n"
17990 +                      "7:      rep; "__copyuser_seg" movsb\n"
17991                        "8:\n"
17992                        ".section .fixup,\"ax\"\n"
17993                        "9:      lea 0(%%eax,%0,4),%0\n"
17994 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
17995  
17996         __asm__ __volatile__(
17997                "        .align 2,0x90\n"
17998 -              "0:      movl 32(%4), %%eax\n"
17999 +              "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
18000                "        cmpl $67, %0\n"
18001                "        jbe 2f\n"
18002 -              "1:      movl 64(%4), %%eax\n"
18003 +              "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
18004                "        .align 2,0x90\n"
18005 -              "2:      movl 0(%4), %%eax\n"
18006 -              "21:     movl 4(%4), %%edx\n"
18007 +              "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
18008 +              "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
18009                "        movnti %%eax, 0(%3)\n"
18010                "        movnti %%edx, 4(%3)\n"
18011 -              "3:      movl 8(%4), %%eax\n"
18012 -              "31:     movl 12(%4),%%edx\n"
18013 +              "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
18014 +              "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
18015                "        movnti %%eax, 8(%3)\n"
18016                "        movnti %%edx, 12(%3)\n"
18017 -              "4:      movl 16(%4), %%eax\n"
18018 -              "41:     movl 20(%4), %%edx\n"
18019 +              "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
18020 +              "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
18021                "        movnti %%eax, 16(%3)\n"
18022                "        movnti %%edx, 20(%3)\n"
18023 -              "10:     movl 24(%4), %%eax\n"
18024 -              "51:     movl 28(%4), %%edx\n"
18025 +              "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
18026 +              "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
18027                "        movnti %%eax, 24(%3)\n"
18028                "        movnti %%edx, 28(%3)\n"
18029 -              "11:     movl 32(%4), %%eax\n"
18030 -              "61:     movl 36(%4), %%edx\n"
18031 +              "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
18032 +              "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
18033                "        movnti %%eax, 32(%3)\n"
18034                "        movnti %%edx, 36(%3)\n"
18035 -              "12:     movl 40(%4), %%eax\n"
18036 -              "71:     movl 44(%4), %%edx\n"
18037 +              "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
18038 +              "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
18039                "        movnti %%eax, 40(%3)\n"
18040                "        movnti %%edx, 44(%3)\n"
18041 -              "13:     movl 48(%4), %%eax\n"
18042 -              "81:     movl 52(%4), %%edx\n"
18043 +              "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
18044 +              "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
18045                "        movnti %%eax, 48(%3)\n"
18046                "        movnti %%edx, 52(%3)\n"
18047 -              "14:     movl 56(%4), %%eax\n"
18048 -              "91:     movl 60(%4), %%edx\n"
18049 +              "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
18050 +              "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
18051                "        movnti %%eax, 56(%3)\n"
18052                "        movnti %%edx, 60(%3)\n"
18053                "        addl $-64, %0\n"
18054 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18055                "        shrl  $2, %0\n"
18056                "        andl $3, %%eax\n"
18057                "        cld\n"
18058 -              "6:      rep; movsl\n"
18059 +              "6:      rep; "__copyuser_seg" movsl\n"
18060                "        movl %%eax,%0\n"
18061 -              "7:      rep; movsb\n"
18062 +              "7:      rep; "__copyuser_seg" movsb\n"
18063                "8:\n"
18064                ".section .fixup,\"ax\"\n"
18065                "9:      lea 0(%%eax,%0,4),%0\n"
18066 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18067  
18068         __asm__ __volatile__(
18069                "        .align 2,0x90\n"
18070 -              "0:      movl 32(%4), %%eax\n"
18071 +              "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
18072                "        cmpl $67, %0\n"
18073                "        jbe 2f\n"
18074 -              "1:      movl 64(%4), %%eax\n"
18075 +              "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
18076                "        .align 2,0x90\n"
18077 -              "2:      movl 0(%4), %%eax\n"
18078 -              "21:     movl 4(%4), %%edx\n"
18079 +              "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
18080 +              "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
18081                "        movnti %%eax, 0(%3)\n"
18082                "        movnti %%edx, 4(%3)\n"
18083 -              "3:      movl 8(%4), %%eax\n"
18084 -              "31:     movl 12(%4),%%edx\n"
18085 +              "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
18086 +              "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
18087                "        movnti %%eax, 8(%3)\n"
18088                "        movnti %%edx, 12(%3)\n"
18089 -              "4:      movl 16(%4), %%eax\n"
18090 -              "41:     movl 20(%4), %%edx\n"
18091 +              "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
18092 +              "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
18093                "        movnti %%eax, 16(%3)\n"
18094                "        movnti %%edx, 20(%3)\n"
18095 -              "10:     movl 24(%4), %%eax\n"
18096 -              "51:     movl 28(%4), %%edx\n"
18097 +              "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
18098 +              "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
18099                "        movnti %%eax, 24(%3)\n"
18100                "        movnti %%edx, 28(%3)\n"
18101 -              "11:     movl 32(%4), %%eax\n"
18102 -              "61:     movl 36(%4), %%edx\n"
18103 +              "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
18104 +              "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
18105                "        movnti %%eax, 32(%3)\n"
18106                "        movnti %%edx, 36(%3)\n"
18107 -              "12:     movl 40(%4), %%eax\n"
18108 -              "71:     movl 44(%4), %%edx\n"
18109 +              "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
18110 +              "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
18111                "        movnti %%eax, 40(%3)\n"
18112                "        movnti %%edx, 44(%3)\n"
18113 -              "13:     movl 48(%4), %%eax\n"
18114 -              "81:     movl 52(%4), %%edx\n"
18115 +              "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
18116 +              "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
18117                "        movnti %%eax, 48(%3)\n"
18118                "        movnti %%edx, 52(%3)\n"
18119 -              "14:     movl 56(%4), %%eax\n"
18120 -              "91:     movl 60(%4), %%edx\n"
18121 +              "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
18122 +              "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
18123                "        movnti %%eax, 56(%3)\n"
18124                "        movnti %%edx, 60(%3)\n"
18125                "        addl $-64, %0\n"
18126 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18127                "        shrl  $2, %0\n"
18128                "        andl $3, %%eax\n"
18129                "        cld\n"
18130 -              "6:      rep; movsl\n"
18131 +              "6:      rep; "__copyuser_seg" movsl\n"
18132                "        movl %%eax,%0\n"
18133 -              "7:      rep; movsb\n"
18134 +              "7:      rep; "__copyuser_seg" movsb\n"
18135                "8:\n"
18136                ".section .fixup,\"ax\"\n"
18137                "9:      lea 0(%%eax,%0,4),%0\n"
18138 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18139   */
18140  unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18141                                         unsigned long size);
18142 -unsigned long __copy_user_intel(void __user *to, const void *from,
18143 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18144 +                                       unsigned long size);
18145 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18146                                         unsigned long size);
18147  unsigned long __copy_user_zeroing_intel_nocache(void *to,
18148                                 const void __user *from, unsigned long size);
18149  #endif /* CONFIG_X86_INTEL_USERCOPY */
18150  
18151  /* Generic arbitrary sized copy.  */
18152 -#define __copy_user(to, from, size)                                    \
18153 +#define __copy_user(to, from, size, prefix, set, restore)              \
18154  do {                                                                   \
18155         int __d0, __d1, __d2;                                           \
18156         __asm__ __volatile__(                                           \
18157 +               set                                                     \
18158                 "       cmp  $7,%0\n"                                   \
18159                 "       jbe  1f\n"                                      \
18160                 "       movl %1,%0\n"                                   \
18161                 "       negl %0\n"                                      \
18162                 "       andl $7,%0\n"                                   \
18163                 "       subl %0,%3\n"                                   \
18164 -               "4:     rep; movsb\n"                                   \
18165 +               "4:     rep; "prefix"movsb\n"                           \
18166                 "       movl %3,%0\n"                                   \
18167                 "       shrl $2,%0\n"                                   \
18168                 "       andl $3,%3\n"                                   \
18169                 "       .align 2,0x90\n"                                \
18170 -               "0:     rep; movsl\n"                                   \
18171 +               "0:     rep; "prefix"movsl\n"                           \
18172                 "       movl %3,%0\n"                                   \
18173 -               "1:     rep; movsb\n"                                   \
18174 +               "1:     rep; "prefix"movsb\n"                           \
18175                 "2:\n"                                                  \
18176 +               restore                                                 \
18177                 ".section .fixup,\"ax\"\n"                              \
18178                 "5:     addl %3,%0\n"                                   \
18179                 "       jmp 2b\n"                                       \
18180 @@ -682,14 +799,14 @@ do {                                                                      \
18181                 "       negl %0\n"                                      \
18182                 "       andl $7,%0\n"                                   \
18183                 "       subl %0,%3\n"                                   \
18184 -               "4:     rep; movsb\n"                                   \
18185 +               "4:     rep; "__copyuser_seg"movsb\n"                   \
18186                 "       movl %3,%0\n"                                   \
18187                 "       shrl $2,%0\n"                                   \
18188                 "       andl $3,%3\n"                                   \
18189                 "       .align 2,0x90\n"                                \
18190 -               "0:     rep; movsl\n"                                   \
18191 +               "0:     rep; "__copyuser_seg"movsl\n"                   \
18192                 "       movl %3,%0\n"                                   \
18193 -               "1:     rep; movsb\n"                                   \
18194 +               "1:     rep; "__copyuser_seg"movsb\n"                   \
18195                 "2:\n"                                                  \
18196                 ".section .fixup,\"ax\"\n"                              \
18197                 "5:     addl %3,%0\n"                                   \
18198 @@ -775,9 +892,9 @@ survive:
18199         }
18200  #endif
18201         if (movsl_is_ok(to, from, n))
18202 -               __copy_user(to, from, n);
18203 +               __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18204         else
18205 -               n = __copy_user_intel(to, from, n);
18206 +               n = __generic_copy_to_user_intel(to, from, n);
18207         return n;
18208  }
18209  EXPORT_SYMBOL(__copy_to_user_ll);
18210 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18211                                          unsigned long n)
18212  {
18213         if (movsl_is_ok(to, from, n))
18214 -               __copy_user(to, from, n);
18215 +               __copy_user(to, from, n, __copyuser_seg, "", "");
18216         else
18217 -               n = __copy_user_intel((void __user *)to,
18218 -                                     (const void *)from, n);
18219 +               n = __generic_copy_from_user_intel(to, from, n);
18220         return n;
18221  }
18222  EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18223 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18224         if (n > 64 && cpu_has_xmm2)
18225                 n = __copy_user_intel_nocache(to, from, n);
18226         else
18227 -               __copy_user(to, from, n);
18228 +               __copy_user(to, from, n, __copyuser_seg, "", "");
18229  #else
18230 -       __copy_user(to, from, n);
18231 +       __copy_user(to, from, n, __copyuser_seg, "", "");
18232  #endif
18233         return n;
18234  }
18235  EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18236  
18237 -/**
18238 - * copy_to_user: - Copy a block of data into user space.
18239 - * @to:   Destination address, in user space.
18240 - * @from: Source address, in kernel space.
18241 - * @n:    Number of bytes to copy.
18242 - *
18243 - * Context: User context only.  This function may sleep.
18244 - *
18245 - * Copy data from kernel space to user space.
18246 - *
18247 - * Returns number of bytes that could not be copied.
18248 - * On success, this will be zero.
18249 - */
18250 -unsigned long
18251 -copy_to_user(void __user *to, const void *from, unsigned long n)
18252 +void copy_from_user_overflow(void)
18253  {
18254 -       if (access_ok(VERIFY_WRITE, to, n))
18255 -               n = __copy_to_user(to, from, n);
18256 -       return n;
18257 +       WARN(1, "Buffer overflow detected!\n");
18258  }
18259 -EXPORT_SYMBOL(copy_to_user);
18260 +EXPORT_SYMBOL(copy_from_user_overflow);
18261  
18262 -/**
18263 - * copy_from_user: - Copy a block of data from user space.
18264 - * @to:   Destination address, in kernel space.
18265 - * @from: Source address, in user space.
18266 - * @n:    Number of bytes to copy.
18267 - *
18268 - * Context: User context only.  This function may sleep.
18269 - *
18270 - * Copy data from user space to kernel space.
18271 - *
18272 - * Returns number of bytes that could not be copied.
18273 - * On success, this will be zero.
18274 - *
18275 - * If some data could not be copied, this function will pad the copied
18276 - * data to the requested size using zero bytes.
18277 - */
18278 -unsigned long
18279 -_copy_from_user(void *to, const void __user *from, unsigned long n)
18280 +void copy_to_user_overflow(void)
18281  {
18282 -       if (access_ok(VERIFY_READ, from, n))
18283 -               n = __copy_from_user(to, from, n);
18284 -       else
18285 -               memset(to, 0, n);
18286 -       return n;
18287 +       WARN(1, "Buffer overflow detected!\n");
18288  }
18289 -EXPORT_SYMBOL(_copy_from_user);
18290 +EXPORT_SYMBOL(copy_to_user_overflow);
18291  
18292 -void copy_from_user_overflow(void)
18293 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18294 +void __set_fs(mm_segment_t x)
18295  {
18296 -       WARN(1, "Buffer overflow detected!\n");
18297 +       switch (x.seg) {
18298 +       case 0:
18299 +               loadsegment(gs, 0);
18300 +               break;
18301 +       case TASK_SIZE_MAX:
18302 +               loadsegment(gs, __USER_DS);
18303 +               break;
18304 +       case -1UL:
18305 +               loadsegment(gs, __KERNEL_DS);
18306 +               break;
18307 +       default:
18308 +               BUG();
18309 +       }
18310 +       return;
18311  }
18312 -EXPORT_SYMBOL(copy_from_user_overflow);
18313 +EXPORT_SYMBOL(__set_fs);
18314 +
18315 +void set_fs(mm_segment_t x)
18316 +{
18317 +       current_thread_info()->addr_limit = x;
18318 +       __set_fs(x);
18319 +}
18320 +EXPORT_SYMBOL(set_fs);
18321 +#endif
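
[Editor's note] Summarizing the usercopy_32.c rework above: __copy_user() now takes three extra string parameters (an instruction prefix plus set/restore sequences), so the copy-to-user path can temporarily switch %es via __COPYUSER_SET_ES/__COPYUSER_RESTORE_ES while the copy-from-user path pastes the __copyuser_seg (%gs) prefix in front of each string-move. A trivial user-space illustration of the string-pasting pattern only (no real segment switching happens here):

#include <stdio.h>

/* The per-direction prefix string is pasted into the asm template at build
 * time, exactly like "rep; "prefix"movsb" in the reworked __copy_user(). */
#define BUILD_INSN(prefix) "rep; " prefix "movsb"

int main(void)
{
    puts(BUILD_INSN(""));      /* copy-to-user flavour (uses %es switching) */
    puts(BUILD_INSN("gs; "));  /* copy-from-user flavour (%gs override)     */
    return 0;
}
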
18322 diff -urNp linux-3.0.4/arch/x86/lib/usercopy_64.c linux-3.0.4/arch/x86/lib/usercopy_64.c
18323 --- linux-3.0.4/arch/x86/lib/usercopy_64.c      2011-07-21 22:17:23.000000000 -0400
18324 +++ linux-3.0.4/arch/x86/lib/usercopy_64.c      2011-08-23 21:47:55.000000000 -0400
18325 @@ -42,6 +42,12 @@ long
18326  __strncpy_from_user(char *dst, const char __user *src, long count)
18327  {
18328         long res;
18329 +
18330 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18331 +       if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18332 +               src += PAX_USER_SHADOW_BASE;
18333 +#endif
18334 +
18335         __do_strncpy_from_user(dst, src, count, res);
18336         return res;
18337  }
18338 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18339  {
18340         long __d0;
18341         might_fault();
18342 +
18343 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18344 +       if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18345 +               addr += PAX_USER_SHADOW_BASE;
18346 +#endif
18347 +
18348         /* no memory constraint because it doesn't change any memory gcc knows
18349            about */
18350         asm volatile(
18351 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18352  
18353  unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18354  {
18355 -       if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { 
18356 +       if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18357 +
18358 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18359 +               if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18360 +                       to += PAX_USER_SHADOW_BASE;
18361 +               if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18362 +                       from += PAX_USER_SHADOW_BASE;
18363 +#endif
18364 +
18365                 return copy_user_generic((__force void *)to, (__force void *)from, len);
18366 -       } 
18367 -       return len;             
18368 +       }
18369 +       return len;
18370  }
18371  EXPORT_SYMBOL(copy_in_user);
18372  
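
[Editor's note] The usercopy_64.c hunks above all follow the same UDEREF pattern: a user pointer that still lies below PAX_USER_SHADOW_BASE is rebased into the shadow mapping before the raw copy routine runs; copy_in_user() applies the check to both pointers. A compact sketch of that rebasing (the shadow-base constant is again a placeholder):

#include <stdint.h>
#include <stdio.h>

#define SHADOW_BASE_DEMO 0x1000000000000000ULL  /* placeholder value */

/* Rebase a user pointer into the shadow area if it is not there yet,
 * mirroring the "if (... < PAX_USER_SHADOW_BASE) ptr += ..." checks. */
static uint64_t shadow(uint64_t ptr)
{
    return ptr < SHADOW_BASE_DEMO ? ptr + SHADOW_BASE_DEMO : ptr;
}

int main(void)
{
    uint64_t to = 0x00007f0000001000ULL, from = 0x00007f0000002000ULL;
    printf("to=%#llx from=%#llx\n",
           (unsigned long long)shadow(to), (unsigned long long)shadow(from));
    return 0;
}
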
18373 diff -urNp linux-3.0.4/arch/x86/Makefile linux-3.0.4/arch/x86/Makefile
18374 --- linux-3.0.4/arch/x86/Makefile       2011-07-21 22:17:23.000000000 -0400
18375 +++ linux-3.0.4/arch/x86/Makefile       2011-08-23 21:48:14.000000000 -0400
18376 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18377  else
18378          BITS := 64
18379          UTS_MACHINE := x86_64
18380 +        biarch := $(call cc-option,-m64)
18381          CHECKFLAGS += -D__x86_64__ -m64
18382  
18383          KBUILD_AFLAGS += -m64
18384 @@ -195,3 +196,12 @@ define archhelp
18385    echo  '                  FDARGS="..."  arguments for the booted kernel'
18386    echo  '                  FDINITRD=file initrd for the booted kernel'
18387  endef
18388 +
18389 +define OLD_LD
18390 +
18391 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18392 +*** Please upgrade your binutils to 2.18 or newer
18393 +endef
18394 +
18395 +archprepare:
18396 +       $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18397 diff -urNp linux-3.0.4/arch/x86/mm/extable.c linux-3.0.4/arch/x86/mm/extable.c
18398 --- linux-3.0.4/arch/x86/mm/extable.c   2011-07-21 22:17:23.000000000 -0400
18399 +++ linux-3.0.4/arch/x86/mm/extable.c   2011-08-23 21:47:55.000000000 -0400
18400 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
18401         const struct exception_table_entry *fixup;
18402  
18403  #ifdef CONFIG_PNPBIOS
18404 -       if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18405 +       if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18406                 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18407                 extern u32 pnp_bios_is_utter_crap;
18408                 pnp_bios_is_utter_crap = 1;
18409 diff -urNp linux-3.0.4/arch/x86/mm/fault.c linux-3.0.4/arch/x86/mm/fault.c
18410 --- linux-3.0.4/arch/x86/mm/fault.c     2011-07-21 22:17:23.000000000 -0400
18411 +++ linux-3.0.4/arch/x86/mm/fault.c     2011-08-23 21:48:14.000000000 -0400
18412 @@ -13,10 +13,18 @@
18413  #include <linux/perf_event.h>          /* perf_sw_event                */
18414  #include <linux/hugetlb.h>             /* hstate_index_to_shift        */
18415  #include <linux/prefetch.h>            /* prefetchw                    */
18416 +#include <linux/unistd.h>
18417 +#include <linux/compiler.h>
18418  
18419  #include <asm/traps.h>                 /* dotraplinkage, ...           */
18420  #include <asm/pgalloc.h>               /* pgd_*(), ...                 */
18421  #include <asm/kmemcheck.h>             /* kmemcheck_*(), ...           */
18422 +#include <asm/vsyscall.h>
18423 +#include <asm/tlbflush.h>
18424 +
18425 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18426 +#include <asm/stacktrace.h>
18427 +#endif
18428  
18429  /*
18430   * Page fault error code bits:
18431 @@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
18432         int ret = 0;
18433  
18434         /* kprobe_running() needs smp_processor_id() */
18435 -       if (kprobes_built_in() && !user_mode_vm(regs)) {
18436 +       if (kprobes_built_in() && !user_mode(regs)) {
18437                 preempt_disable();
18438                 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18439                         ret = 1;
18440 @@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
18441                 return !instr_lo || (instr_lo>>1) == 1;
18442         case 0x00:
18443                 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18444 -               if (probe_kernel_address(instr, opcode))
18445 +               if (user_mode(regs)) {
18446 +                       if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18447 +                               return 0;
18448 +               } else if (probe_kernel_address(instr, opcode))
18449                         return 0;
18450  
18451                 *prefetch = (instr_lo == 0xF) &&
18452 @@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
18453         while (instr < max_instr) {
18454                 unsigned char opcode;
18455  
18456 -               if (probe_kernel_address(instr, opcode))
18457 +               if (user_mode(regs)) {
18458 +                       if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18459 +                               break;
18460 +               } else if (probe_kernel_address(instr, opcode))
18461                         break;
18462  
18463                 instr++;
18464 @@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
18465         force_sig_info(si_signo, &info, tsk);
18466  }
18467  
18468 +#ifdef CONFIG_PAX_EMUTRAMP
18469 +static int pax_handle_fetch_fault(struct pt_regs *regs);
18470 +#endif
18471 +
18472 +#ifdef CONFIG_PAX_PAGEEXEC
18473 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18474 +{
18475 +       pgd_t *pgd;
18476 +       pud_t *pud;
18477 +       pmd_t *pmd;
18478 +
18479 +       pgd = pgd_offset(mm, address);
18480 +       if (!pgd_present(*pgd))
18481 +               return NULL;
18482 +       pud = pud_offset(pgd, address);
18483 +       if (!pud_present(*pud))
18484 +               return NULL;
18485 +       pmd = pmd_offset(pud, address);
18486 +       if (!pmd_present(*pmd))
18487 +               return NULL;
18488 +       return pmd;
18489 +}
18490 +#endif
18491 +
18492  DEFINE_SPINLOCK(pgd_lock);
18493  LIST_HEAD(pgd_list);
18494  
18495 @@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
18496         for (address = VMALLOC_START & PMD_MASK;
18497              address >= TASK_SIZE && address < FIXADDR_TOP;
18498              address += PMD_SIZE) {
18499 +
18500 +#ifdef CONFIG_PAX_PER_CPU_PGD
18501 +               unsigned long cpu;
18502 +#else
18503                 struct page *page;
18504 +#endif
18505  
18506                 spin_lock(&pgd_lock);
18507 +
18508 +#ifdef CONFIG_PAX_PER_CPU_PGD
18509 +               for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18510 +                       pgd_t *pgd = get_cpu_pgd(cpu);
18511 +                       pmd_t *ret;
18512 +#else
18513                 list_for_each_entry(page, &pgd_list, lru) {
18514 +                       pgd_t *pgd = page_address(page);
18515                         spinlock_t *pgt_lock;
18516                         pmd_t *ret;
18517  
18518 @@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
18519                         pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18520  
18521                         spin_lock(pgt_lock);
18522 -                       ret = vmalloc_sync_one(page_address(page), address);
18523 +#endif
18524 +
18525 +                       ret = vmalloc_sync_one(pgd, address);
18526 +
18527 +#ifndef CONFIG_PAX_PER_CPU_PGD
18528                         spin_unlock(pgt_lock);
18529 +#endif
18530  
18531                         if (!ret)
18532                                 break;
18533 @@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
18534          * an interrupt in the middle of a task switch..
18535          */
18536         pgd_paddr = read_cr3();
18537 +
18538 +#ifdef CONFIG_PAX_PER_CPU_PGD
18539 +       BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18540 +#endif
18541 +
18542         pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18543         if (!pmd_k)
18544                 return -1;
18545 @@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
18546          * happen within a race in page table update. In the later
18547          * case just flush:
18548          */
18549 +
18550 +#ifdef CONFIG_PAX_PER_CPU_PGD
18551 +       BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18552 +       pgd = pgd_offset_cpu(smp_processor_id(), address);
18553 +#else
18554         pgd = pgd_offset(current->active_mm, address);
18555 +#endif
18556 +
18557         pgd_ref = pgd_offset_k(address);
18558         if (pgd_none(*pgd_ref))
18559                 return -1;
18560 @@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
18561  static int is_errata100(struct pt_regs *regs, unsigned long address)
18562  {
18563  #ifdef CONFIG_X86_64
18564 -       if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18565 +       if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18566                 return 1;
18567  #endif
18568         return 0;
18569 @@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
18570  }
18571  
18572  static const char nx_warning[] = KERN_CRIT
18573 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18574 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18575  
18576  static void
18577  show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18578 @@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
18579         if (!oops_may_print())
18580                 return;
18581  
18582 -       if (error_code & PF_INSTR) {
18583 +       if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18584                 unsigned int level;
18585  
18586                 pte_t *pte = lookup_address(address, &level);
18587  
18588                 if (pte && pte_present(*pte) && !pte_exec(*pte))
18589 -                       printk(nx_warning, current_uid());
18590 +                       printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18591 +       }
18592 +
18593 +#ifdef CONFIG_PAX_KERNEXEC
18594 +       if (init_mm.start_code <= address && address < init_mm.end_code) {
18595 +               if (current->signal->curr_ip)
18596 +                       printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18597 +                                        &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18598 +               else
18599 +                       printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18600 +                                        current->comm, task_pid_nr(current), current_uid(), current_euid());
18601         }
18602 +#endif
18603  
18604         printk(KERN_ALERT "BUG: unable to handle kernel ");
18605         if (address < PAGE_SIZE)
18606 @@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
18607                        unsigned long address, int si_code)
18608  {
18609         struct task_struct *tsk = current;
18610 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18611 +       struct mm_struct *mm = tsk->mm;
18612 +#endif
18613 +
18614 +#ifdef CONFIG_X86_64
18615 +       if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18616 +               if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
18617 +                   regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
18618 +                   regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
18619 +                       regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
18620 +                       return;
18621 +               }
18622 +       }
18623 +#endif
18624 +
18625 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18626 +       if (mm && (error_code & PF_USER)) {
18627 +               unsigned long ip = regs->ip;
18628 +
18629 +               if (v8086_mode(regs))
18630 +                       ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18631 +
18632 +               /*
18633 +                * It's possible to have interrupts off here:
18634 +                */
18635 +               local_irq_enable();
18636 +
18637 +#ifdef CONFIG_PAX_PAGEEXEC
18638 +               if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18639 +                   (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18640 +
18641 +#ifdef CONFIG_PAX_EMUTRAMP
18642 +                       switch (pax_handle_fetch_fault(regs)) {
18643 +                       case 2:
18644 +                               return;
18645 +                       }
18646 +#endif
18647 +
18648 +                       pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18649 +                       do_group_exit(SIGKILL);
18650 +               }
18651 +#endif
18652 +
18653 +#ifdef CONFIG_PAX_SEGMEXEC
18654 +               if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18655 +
18656 +#ifdef CONFIG_PAX_EMUTRAMP
18657 +                       switch (pax_handle_fetch_fault(regs)) {
18658 +                       case 2:
18659 +                               return;
18660 +                       }
18661 +#endif
18662 +
18663 +                       pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18664 +                       do_group_exit(SIGKILL);
18665 +               }
18666 +#endif
18667 +
18668 +       }
18669 +#endif
18670  
18671         /* User mode accesses just cause a SIGSEGV */
18672         if (error_code & PF_USER) {
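
[Editor's note] On the SEGMEXEC branch added above: under SEGMEXEC the i386 user address space is split in half and executable mappings are mirrored at +SEGMEXEC_TASK_SIZE, so an instruction fetch from a non-executable page faults on the mirrored address, which is why the handler tests ip + SEGMEXEC_TASK_SIZE == address. A user-space sketch of that test (SEGMEXEC_TASK_SIZE_DEMO is a stand-in for the value defined elsewhere in the patch):

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE_DEMO 0x60000000UL  /* assumed: half of the 3 GB user space */

static int looks_like_segmexec_fetch(unsigned long ip, unsigned long address)
{
    return ip + SEGMEXEC_TASK_SIZE_DEMO == address;
}

int main(void)
{
    printf("%d\n", looks_like_segmexec_fetch(0x08048000UL, 0x68048000UL)); /* 1 */
    printf("%d\n", looks_like_segmexec_fetch(0x08048000UL, 0x08048000UL)); /* 0 */
    return 0;
}
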
18673 @@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
18674         return 1;
18675  }
18676  
18677 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18678 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18679 +{
18680 +       pte_t *pte;
18681 +       pmd_t *pmd;
18682 +       spinlock_t *ptl;
18683 +       unsigned char pte_mask;
18684 +
18685 +       if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18686 +           !(mm->pax_flags & MF_PAX_PAGEEXEC))
18687 +               return 0;
18688 +
18689 +       /* PaX: it's our fault, let's handle it if we can */
18690 +
18691 +       /* PaX: take a look at read faults before acquiring any locks */
18692 +       if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18693 +               /* instruction fetch attempt from a protected page in user mode */
18694 +               up_read(&mm->mmap_sem);
18695 +
18696 +#ifdef CONFIG_PAX_EMUTRAMP
18697 +               switch (pax_handle_fetch_fault(regs)) {
18698 +               case 2:
18699 +                       return 1;
18700 +               }
18701 +#endif
18702 +
18703 +               pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18704 +               do_group_exit(SIGKILL);
18705 +       }
18706 +
18707 +       pmd = pax_get_pmd(mm, address);
18708 +       if (unlikely(!pmd))
18709 +               return 0;
18710 +
18711 +       pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18712 +       if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18713 +               pte_unmap_unlock(pte, ptl);
18714 +               return 0;
18715 +       }
18716 +
18717 +       if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18718 +               /* write attempt to a protected page in user mode */
18719 +               pte_unmap_unlock(pte, ptl);
18720 +               return 0;
18721 +       }
18722 +
18723 +#ifdef CONFIG_SMP
18724 +       if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18725 +#else
18726 +       if (likely(address > get_limit(regs->cs)))
18727 +#endif
18728 +       {
18729 +               set_pte(pte, pte_mkread(*pte));
18730 +               __flush_tlb_one(address);
18731 +               pte_unmap_unlock(pte, ptl);
18732 +               up_read(&mm->mmap_sem);
18733 +               return 1;
18734 +       }
18735 +
18736 +       pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18737 +
18738 +       /*
18739 +        * PaX: fill DTLB with user rights and retry
18740 +        */
18741 +       __asm__ __volatile__ (
18742 +               "orb %2,(%1)\n"
18743 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18744 +/*
18745 + * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
18746 + * (and AMD's) TLBs: they do not cache PTEs that would raise *any* page
18747 + * fault when examined during a TLB load attempt. This is true not only for
18748 + * PTEs holding a non-present entry but also for present entries that will
18749 + * raise a page fault (such as those set up by PaX, or by the copy-on-write
18750 + * mechanism). In effect it means that we do *not* need to flush the TLBs
18751 + * for our target pages, since their PTEs are simply not in the TLBs at all.
18752 + *
18753 + * The best thing about omitting the flush is that we gain around 15-20%
18754 + * speed in the fast path of the page fault handler, and we can get rid of
18755 + * tracing since we can no longer flush unintended entries.
18756 + */
18757 +               "invlpg (%0)\n"
18758 +#endif
18759 +               __copyuser_seg"testb $0,(%0)\n"
18760 +               "xorb %3,(%1)\n"
18761 +               :
18762 +               : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18763 +               : "memory", "cc");
18764 +       pte_unmap_unlock(pte, ptl);
18765 +       up_read(&mm->mmap_sem);
18766 +       return 1;
18767 +}
18768 +#endif
18769 +
18770  /*
18771   * Handle a spurious fault caused by a stale TLB entry.
18772   *
18773 @@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
18774  static inline int
18775  access_error(unsigned long error_code, struct vm_area_struct *vma)
18776  {
18777 +       if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18778 +               return 1;
18779 +
18780         if (error_code & PF_WRITE) {
18781                 /* write, present and write, not present: */
18782                 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18783 @@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
18784  {
18785         struct vm_area_struct *vma;
18786         struct task_struct *tsk;
18787 -       unsigned long address;
18788         struct mm_struct *mm;
18789         int fault;
18790         int write = error_code & PF_WRITE;
18791         unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
18792                                         (write ? FAULT_FLAG_WRITE : 0);
18793  
18794 +       /* Get the faulting address: */
18795 +       unsigned long address = read_cr2();
18796 +
18797 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18798 +       if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18799 +               if (!search_exception_tables(regs->ip)) {
18800 +                       bad_area_nosemaphore(regs, error_code, address);
18801 +                       return;
18802 +               }
18803 +               if (address < PAX_USER_SHADOW_BASE) {
18804 +                       printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18805 +                       printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18806 +                       show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18807 +               } else
18808 +                       address -= PAX_USER_SHADOW_BASE;
18809 +       }
18810 +#endif
18811 +
18812         tsk = current;
18813         mm = tsk->mm;
18814  
18815 -       /* Get the faulting address: */
18816 -       address = read_cr2();
18817 -
18818         /*
18819          * Detect and handle instructions that would cause a page fault for
18820          * both a tracked kernel page and a userspace page.
18821 @@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
18822          * User-mode registers count as a user access even for any
18823          * potential system fault or CPU buglet:
18824          */
18825 -       if (user_mode_vm(regs)) {
18826 +       if (user_mode(regs)) {
18827                 local_irq_enable();
18828                 error_code |= PF_USER;
18829         } else {
18830 @@ -1103,6 +1351,11 @@ retry:
18831                 might_sleep();
18832         }
18833  
18834 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18835 +       if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18836 +               return;
18837 +#endif
18838 +
18839         vma = find_vma(mm, address);
18840         if (unlikely(!vma)) {
18841                 bad_area(regs, error_code, address);
18842 @@ -1114,18 +1367,24 @@ retry:
18843                 bad_area(regs, error_code, address);
18844                 return;
18845         }
18846 -       if (error_code & PF_USER) {
18847 -               /*
18848 -                * Accessing the stack below %sp is always a bug.
18849 -                * The large cushion allows instructions like enter
18850 -                * and pusha to work. ("enter $65535, $31" pushes
18851 -                * 32 pointers and then decrements %sp by 65535.)
18852 -                */
18853 -               if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18854 -                       bad_area(regs, error_code, address);
18855 -                       return;
18856 -               }
18857 +       /*
18858 +        * Accessing the stack below %sp is always a bug.
18859 +        * The large cushion allows instructions like enter
18860 +        * and pusha to work. ("enter $65535, $31" pushes
18861 +        * 32 pointers and then decrements %sp by 65535.)
18862 +        */
18863 +       if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18864 +               bad_area(regs, error_code, address);
18865 +               return;
18866         }
18867 +
18868 +#ifdef CONFIG_PAX_SEGMEXEC
18869 +       if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18870 +               bad_area(regs, error_code, address);
18871 +               return;
18872 +       }
18873 +#endif
18874 +
18875         if (unlikely(expand_stack(vma, address))) {
18876                 bad_area(regs, error_code, address);
18877                 return;
18878 @@ -1180,3 +1439,199 @@ good_area:
18879  
18880         up_read(&mm->mmap_sem);
18881  }
18882 +
18883 +#ifdef CONFIG_PAX_EMUTRAMP
18884 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
18885 +{
18886 +       int err;
18887 +
18888 +       do { /* PaX: gcc trampoline emulation #1 */
18889 +               unsigned char mov1, mov2;
18890 +               unsigned short jmp;
18891 +               unsigned int addr1, addr2;
18892 +
18893 +#ifdef CONFIG_X86_64
18894 +               if ((regs->ip + 11) >> 32)
18895 +                       break;
18896 +#endif
18897 +
18898 +               err = get_user(mov1, (unsigned char __user *)regs->ip);
18899 +               err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18900 +               err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
18901 +               err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18902 +               err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
18903 +
18904 +               if (err)
18905 +                       break;
18906 +
18907 +               if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
18908 +                       regs->cx = addr1;
18909 +                       regs->ax = addr2;
18910 +                       regs->ip = addr2;
18911 +                       return 2;
18912 +               }
18913 +       } while (0);
18914 +
18915 +       do { /* PaX: gcc trampoline emulation #2 */
18916 +               unsigned char mov, jmp;
18917 +               unsigned int addr1, addr2;
18918 +
18919 +#ifdef CONFIG_X86_64
18920 +               if ((regs->ip + 9) >> 32)
18921 +                       break;
18922 +#endif
18923 +
18924 +               err = get_user(mov, (unsigned char __user *)regs->ip);
18925 +               err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18926 +               err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
18927 +               err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18928 +
18929 +               if (err)
18930 +                       break;
18931 +
18932 +               if (mov == 0xB9 && jmp == 0xE9) {
18933 +                       regs->cx = addr1;
18934 +                       regs->ip = (unsigned int)(regs->ip + addr2 + 10);
18935 +                       return 2;
18936 +               }
18937 +       } while (0);
18938 +
18939 +       return 1; /* PaX in action */
18940 +}
18941 +
18942 +#ifdef CONFIG_X86_64
18943 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
18944 +{
18945 +       int err;
18946 +
18947 +       do { /* PaX: gcc trampoline emulation #1 */
18948 +               unsigned short mov1, mov2, jmp1;
18949 +               unsigned char jmp2;
18950 +               unsigned int addr1;
18951 +               unsigned long addr2;
18952 +
18953 +               err = get_user(mov1, (unsigned short __user *)regs->ip);
18954 +               err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
18955 +               err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
18956 +               err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
18957 +               err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
18958 +               err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
18959 +
18960 +               if (err)
18961 +                       break;
18962 +
18963 +               if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18964 +                       regs->r11 = addr1;
18965 +                       regs->r10 = addr2;
18966 +                       regs->ip = addr1;
18967 +                       return 2;
18968 +               }
18969 +       } while (0);
18970 +
18971 +       do { /* PaX: gcc trampoline emulation #2 */
18972 +               unsigned short mov1, mov2, jmp1;
18973 +               unsigned char jmp2;
18974 +               unsigned long addr1, addr2;
18975 +
18976 +               err = get_user(mov1, (unsigned short __user *)regs->ip);
18977 +               err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
18978 +               err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
18979 +               err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
18980 +               err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
18981 +               err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
18982 +
18983 +               if (err)
18984 +                       break;
18985 +
18986 +               if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18987 +                       regs->r11 = addr1;
18988 +                       regs->r10 = addr2;
18989 +                       regs->ip = addr1;
18990 +                       return 2;
18991 +               }
18992 +       } while (0);
18993 +
18994 +       return 1; /* PaX in action */
18995 +}
18996 +#endif
18997 +
18998 +/*
18999 + * PaX: decide what to do with offenders (regs->ip = fault address)
19000 + *
19001 + * returns 1 when task should be killed
19002 + *         2 when gcc trampoline was detected
19003 + */
19004 +static int pax_handle_fetch_fault(struct pt_regs *regs)
19005 +{
19006 +       if (v8086_mode(regs))
19007 +               return 1;
19008 +
19009 +       if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19010 +               return 1;
19011 +
19012 +#ifdef CONFIG_X86_32
19013 +       return pax_handle_fetch_fault_32(regs);
19014 +#else
19015 +       if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19016 +               return pax_handle_fetch_fault_32(regs);
19017 +       else
19018 +               return pax_handle_fetch_fault_64(regs);
19019 +#endif
19020 +}
19021 +#endif
19022 +
19023 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19024 +void pax_report_insns(void *pc, void *sp)
19025 +{
19026 +       long i;
19027 +
19028 +       printk(KERN_ERR "PAX: bytes at PC: ");
19029 +       for (i = 0; i < 20; i++) {
19030 +               unsigned char c;
19031 +               if (get_user(c, (__force unsigned char __user *)pc+i))
19032 +                       printk(KERN_CONT "?? ");
19033 +               else
19034 +                       printk(KERN_CONT "%02x ", c);
19035 +       }
19036 +       printk("\n");
19037 +
19038 +       printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19039 +       for (i = -1; i < 80 / (long)sizeof(long); i++) {
19040 +               unsigned long c;
19041 +               if (get_user(c, (__force unsigned long __user *)sp+i))
19042 +#ifdef CONFIG_X86_32
19043 +                       printk(KERN_CONT "???????? ");
19044 +#else
19045 +                       printk(KERN_CONT "???????????????? ");
19046 +#endif
19047 +               else
19048 +                       printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19049 +       }
19050 +       printk("\n");
19051 +}
19052 +#endif
19053 +
19054 +/**
19055 + * probe_kernel_write(): safely attempt to write to a location
19056 + * @dst: address to write to
19057 + * @src: pointer to the data that shall be written
19058 + * @size: size of the data chunk
19059 + *
19060 + * Safely write to address @dst from the buffer at @src.  If a kernel fault
19061 + * happens, handle that and return -EFAULT.
19062 + */
19063 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19064 +{
19065 +       long ret;
19066 +       mm_segment_t old_fs = get_fs();
19067 +
19068 +       set_fs(KERNEL_DS);
19069 +       pagefault_disable();
19070 +       pax_open_kernel();
19071 +       ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19072 +       pax_close_kernel();
19073 +       pagefault_enable();
19074 +       set_fs(old_fs);
19075 +
19076 +       return ret ? -EFAULT : 0;
19077 +}
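
The EMUTRAMP paths above recognise the fixed byte sequences gcc emits for nested-function trampolines and emulate them instead of killing the task. As a user-space sketch (an illustration only, not part of the patch), the following lays out the 12-byte i386 pattern matched by emulation #1 -- mov $chain,%ecx; mov $func,%eax; jmp *%eax -- and decodes it with the same little-endian reads the handler performs; the chain/func values are made up:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint32_t chain = 0x11223344u;   /* example static-chain value */
		uint32_t func  = 0x08048500u;   /* example jump target */
		uint8_t tramp[12];

		tramp[0] = 0xB9;                /* mov $imm32, %ecx */
		memcpy(&tramp[1], &chain, 4);
		tramp[5] = 0xB8;                /* mov $imm32, %eax */
		memcpy(&tramp[6], &func, 4);
		tramp[10] = 0xFF;               /* jmp *%eax */
		tramp[11] = 0xE0;

		/* Decode with the same reads the handler does: byte, dword, byte, dword, word. */
		uint32_t addr1, addr2;
		uint16_t jmp;
		memcpy(&addr1, &tramp[1], 4);
		memcpy(&addr2, &tramp[6], 4);
		memcpy(&jmp, &tramp[10], 2);    /* reads as 0xE0FF on little-endian x86 */

		if (tramp[0] == 0xB9 && tramp[5] == 0xB8 && jmp == 0xE0FF)
			printf("emulated: ecx=%#010x, eax=ip=%#010x\n",
			       (unsigned)addr1, (unsigned)addr2);
		return 0;
	}
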
19078 diff -urNp linux-3.0.4/arch/x86/mm/gup.c linux-3.0.4/arch/x86/mm/gup.c
19079 --- linux-3.0.4/arch/x86/mm/gup.c       2011-07-21 22:17:23.000000000 -0400
19080 +++ linux-3.0.4/arch/x86/mm/gup.c       2011-08-23 21:47:55.000000000 -0400
19081 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long 
19082         addr = start;
19083         len = (unsigned long) nr_pages << PAGE_SHIFT;
19084         end = start + len;
19085 -       if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19086 +       if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19087                                         (void __user *)start, len)))
19088                 return 0;
19089  
19090 diff -urNp linux-3.0.4/arch/x86/mm/highmem_32.c linux-3.0.4/arch/x86/mm/highmem_32.c
19091 --- linux-3.0.4/arch/x86/mm/highmem_32.c        2011-07-21 22:17:23.000000000 -0400
19092 +++ linux-3.0.4/arch/x86/mm/highmem_32.c        2011-08-23 21:47:55.000000000 -0400
19093 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19094         idx = type + KM_TYPE_NR*smp_processor_id();
19095         vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19096         BUG_ON(!pte_none(*(kmap_pte-idx)));
19097 +
19098 +       pax_open_kernel();
19099         set_pte(kmap_pte-idx, mk_pte(page, prot));
19100 +       pax_close_kernel();
19101  
19102         return (void *)vaddr;
19103  }
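
The pax_open_kernel()/pax_close_kernel() pair wrapped around set_pte() here (and in several hunks below) briefly permits writes to kernel mappings that KERNEXEC otherwise keeps read-only. A rough user-space analogy, assuming mprotect() as a stand-in for the real mechanism (this is not the kernel API, just the shape of the open/write/close pattern):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long pagesz = sysconf(_SC_PAGESIZE);
		/* a page that is normally kept read-only */
		char *page = mmap(NULL, pagesz, PROT_READ,
		                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (page == MAP_FAILED)
			return 1;

		mprotect(page, pagesz, PROT_READ | PROT_WRITE);  /* "open": allow the write */
		strcpy(page, "updated while the window is open");
		mprotect(page, pagesz, PROT_READ);               /* "close": read-only again */

		puts(page);
		return 0;
	}
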
19104 diff -urNp linux-3.0.4/arch/x86/mm/hugetlbpage.c linux-3.0.4/arch/x86/mm/hugetlbpage.c
19105 --- linux-3.0.4/arch/x86/mm/hugetlbpage.c       2011-07-21 22:17:23.000000000 -0400
19106 +++ linux-3.0.4/arch/x86/mm/hugetlbpage.c       2011-08-23 21:47:55.000000000 -0400
19107 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19108         struct hstate *h = hstate_file(file);
19109         struct mm_struct *mm = current->mm;
19110         struct vm_area_struct *vma;
19111 -       unsigned long start_addr;
19112 +       unsigned long start_addr, pax_task_size = TASK_SIZE;
19113 +
19114 +#ifdef CONFIG_PAX_SEGMEXEC
19115 +       if (mm->pax_flags & MF_PAX_SEGMEXEC)
19116 +               pax_task_size = SEGMEXEC_TASK_SIZE;
19117 +#endif
19118 +
19119 +       pax_task_size -= PAGE_SIZE;
19120  
19121         if (len > mm->cached_hole_size) {
19122 -               start_addr = mm->free_area_cache;
19123 +               start_addr = mm->free_area_cache;
19124         } else {
19125 -               start_addr = TASK_UNMAPPED_BASE;
19126 -               mm->cached_hole_size = 0;
19127 +               start_addr = mm->mmap_base;
19128 +               mm->cached_hole_size = 0;
19129         }
19130  
19131  full_search:
19132 @@ -280,26 +287,27 @@ full_search:
19133  
19134         for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19135                 /* At this point:  (!vma || addr < vma->vm_end). */
19136 -               if (TASK_SIZE - len < addr) {
19137 +               if (pax_task_size - len < addr) {
19138                         /*
19139                          * Start a new search - just in case we missed
19140                          * some holes.
19141                          */
19142 -                       if (start_addr != TASK_UNMAPPED_BASE) {
19143 -                               start_addr = TASK_UNMAPPED_BASE;
19144 +                       if (start_addr != mm->mmap_base) {
19145 +                               start_addr = mm->mmap_base;
19146                                 mm->cached_hole_size = 0;
19147                                 goto full_search;
19148                         }
19149                         return -ENOMEM;
19150                 }
19151 -               if (!vma || addr + len <= vma->vm_start) {
19152 -                       mm->free_area_cache = addr + len;
19153 -                       return addr;
19154 -               }
19155 +               if (check_heap_stack_gap(vma, addr, len))
19156 +                       break;
19157                 if (addr + mm->cached_hole_size < vma->vm_start)
19158                         mm->cached_hole_size = vma->vm_start - addr;
19159                 addr = ALIGN(vma->vm_end, huge_page_size(h));
19160         }
19161 +
19162 +       mm->free_area_cache = addr + len;
19163 +       return addr;
19164  }
19165  
19166  static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19167 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19168  {
19169         struct hstate *h = hstate_file(file);
19170         struct mm_struct *mm = current->mm;
19171 -       struct vm_area_struct *vma, *prev_vma;
19172 -       unsigned long base = mm->mmap_base, addr = addr0;
19173 +       struct vm_area_struct *vma;
19174 +       unsigned long base = mm->mmap_base, addr;
19175         unsigned long largest_hole = mm->cached_hole_size;
19176 -       int first_time = 1;
19177  
19178         /* don't allow allocations above current base */
19179         if (mm->free_area_cache > base)
19180 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19181                 largest_hole = 0;
19182                 mm->free_area_cache  = base;
19183         }
19184 -try_again:
19185 +
19186         /* make sure it can fit in the remaining address space */
19187         if (mm->free_area_cache < len)
19188                 goto fail;
19189  
19190         /* either no address requested or can't fit in requested address hole */
19191 -       addr = (mm->free_area_cache - len) & huge_page_mask(h);
19192 +       addr = (mm->free_area_cache - len);
19193         do {
19194 +               addr &= huge_page_mask(h);
19195 +               vma = find_vma(mm, addr);
19196                 /*
19197                  * Lookup failure means no vma is above this address,
19198                  * i.e. return with success:
19199 -                */
19200 -               if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19201 -                       return addr;
19202 -
19203 -               /*
19204                  * new region fits between prev_vma->vm_end and
19205                  * vma->vm_start, use it:
19206                  */
19207 -               if (addr + len <= vma->vm_start &&
19208 -                           (!prev_vma || (addr >= prev_vma->vm_end))) {
19209 +               if (check_heap_stack_gap(vma, addr, len)) {
19210                         /* remember the address as a hint for next time */
19211 -                       mm->cached_hole_size = largest_hole;
19212 -                       return (mm->free_area_cache = addr);
19213 -               } else {
19214 -                       /* pull free_area_cache down to the first hole */
19215 -                       if (mm->free_area_cache == vma->vm_end) {
19216 -                               mm->free_area_cache = vma->vm_start;
19217 -                               mm->cached_hole_size = largest_hole;
19218 -                       }
19219 +                       mm->cached_hole_size = largest_hole;
19220 +                       return (mm->free_area_cache = addr);
19221 +               }
19222 +               /* pull free_area_cache down to the first hole */
19223 +               if (mm->free_area_cache == vma->vm_end) {
19224 +                       mm->free_area_cache = vma->vm_start;
19225 +                       mm->cached_hole_size = largest_hole;
19226                 }
19227  
19228                 /* remember the largest hole we saw so far */
19229                 if (addr + largest_hole < vma->vm_start)
19230 -                       largest_hole = vma->vm_start - addr;
19231 +                       largest_hole = vma->vm_start - addr;
19232  
19233                 /* try just below the current vma->vm_start */
19234 -               addr = (vma->vm_start - len) & huge_page_mask(h);
19235 -       } while (len <= vma->vm_start);
19236 +               addr = skip_heap_stack_gap(vma, len);
19237 +       } while (!IS_ERR_VALUE(addr));
19238  
19239  fail:
19240         /*
19241 -        * if hint left us with no space for the requested
19242 -        * mapping then try again:
19243 -        */
19244 -       if (first_time) {
19245 -               mm->free_area_cache = base;
19246 -               largest_hole = 0;
19247 -               first_time = 0;
19248 -               goto try_again;
19249 -       }
19250 -       /*
19251          * A failed mmap() very likely causes application failure,
19252          * so fall back to the bottom-up function here. This scenario
19253          * can happen with large stack limits and large mmap()
19254          * allocations.
19255          */
19256 -       mm->free_area_cache = TASK_UNMAPPED_BASE;
19257 +
19258 +#ifdef CONFIG_PAX_SEGMEXEC
19259 +       if (mm->pax_flags & MF_PAX_SEGMEXEC)
19260 +               mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19261 +       else
19262 +#endif
19263 +
19264 +       mm->mmap_base = TASK_UNMAPPED_BASE;
19265 +
19266 +#ifdef CONFIG_PAX_RANDMMAP
19267 +       if (mm->pax_flags & MF_PAX_RANDMMAP)
19268 +               mm->mmap_base += mm->delta_mmap;
19269 +#endif
19270 +
19271 +       mm->free_area_cache = mm->mmap_base;
19272         mm->cached_hole_size = ~0UL;
19273         addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19274                         len, pgoff, flags);
19275 @@ -386,6 +392,7 @@ fail:
19276         /*
19277          * Restore the topdown base:
19278          */
19279 +       mm->mmap_base = base;
19280         mm->free_area_cache = base;
19281         mm->cached_hole_size = ~0UL;
19282  
19283 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19284         struct hstate *h = hstate_file(file);
19285         struct mm_struct *mm = current->mm;
19286         struct vm_area_struct *vma;
19287 +       unsigned long pax_task_size = TASK_SIZE;
19288  
19289         if (len & ~huge_page_mask(h))
19290                 return -EINVAL;
19291 -       if (len > TASK_SIZE)
19292 +
19293 +#ifdef CONFIG_PAX_SEGMEXEC
19294 +       if (mm->pax_flags & MF_PAX_SEGMEXEC)
19295 +               pax_task_size = SEGMEXEC_TASK_SIZE;
19296 +#endif
19297 +
19298 +       pax_task_size -= PAGE_SIZE;
19299 +
19300 +       if (len > pax_task_size)
19301                 return -ENOMEM;
19302  
19303         if (flags & MAP_FIXED) {
19304 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19305         if (addr) {
19306                 addr = ALIGN(addr, huge_page_size(h));
19307                 vma = find_vma(mm, addr);
19308 -               if (TASK_SIZE - len >= addr &&
19309 -                   (!vma || addr + len <= vma->vm_start))
19310 +               if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19311                         return addr;
19312         }
19313         if (mm->get_unmapped_area == arch_get_unmapped_area)
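
check_heap_stack_gap() and skip_heap_stack_gap() are defined elsewhere in this patch; the hugetlb hunks above only swap them in for the bare "addr + len <= vma->vm_start" test. Purely as a hypothetical simplification (names, gap size and semantics here are illustrative guesses, not the patch's actual implementation), the idea is to also demand a guard gap when the neighbouring mapping can grow towards the candidate range:

	#include <stdbool.h>

	#define GUARD_GAP (64UL * 1024)    /* assumed gap size, purely illustrative */

	struct vma_stub {
		unsigned long vm_start;
		unsigned long vm_end;
		bool grows_down;           /* e.g. a stack VMA */
	};

	/* Hypothetical stand-in: is [addr, addr+len) usable given the next mapping above it? */
	bool gap_ok(const struct vma_stub *next, unsigned long addr, unsigned long len)
	{
		if (!next)                                    /* nothing above: room enough */
			return true;
		if (next->grows_down)                         /* leave room for stack growth */
			return addr + len + GUARD_GAP <= next->vm_start;
		return addr + len <= next->vm_start;
	}
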
19314 diff -urNp linux-3.0.4/arch/x86/mm/init_32.c linux-3.0.4/arch/x86/mm/init_32.c
19315 --- linux-3.0.4/arch/x86/mm/init_32.c   2011-07-21 22:17:23.000000000 -0400
19316 +++ linux-3.0.4/arch/x86/mm/init_32.c   2011-08-23 21:47:55.000000000 -0400
19317 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19318  }
19319  
19320  /*
19321 - * Creates a middle page table and puts a pointer to it in the
19322 - * given global directory entry. This only returns the gd entry
19323 - * in non-PAE compilation mode, since the middle layer is folded.
19324 - */
19325 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
19326 -{
19327 -       pud_t *pud;
19328 -       pmd_t *pmd_table;
19329 -
19330 -#ifdef CONFIG_X86_PAE
19331 -       if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19332 -               if (after_bootmem)
19333 -                       pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19334 -               else
19335 -                       pmd_table = (pmd_t *)alloc_low_page();
19336 -               paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19337 -               set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19338 -               pud = pud_offset(pgd, 0);
19339 -               BUG_ON(pmd_table != pmd_offset(pud, 0));
19340 -
19341 -               return pmd_table;
19342 -       }
19343 -#endif
19344 -       pud = pud_offset(pgd, 0);
19345 -       pmd_table = pmd_offset(pud, 0);
19346 -
19347 -       return pmd_table;
19348 -}
19349 -
19350 -/*
19351   * Create a page table and place a pointer to it in a middle page
19352   * directory entry:
19353   */
19354 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19355                         page_table = (pte_t *)alloc_low_page();
19356  
19357                 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19358 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19359 +               set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19360 +#else
19361                 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19362 +#endif
19363                 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19364         }
19365  
19366         return pte_offset_kernel(pmd, 0);
19367  }
19368  
19369 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
19370 +{
19371 +       pud_t *pud;
19372 +       pmd_t *pmd_table;
19373 +
19374 +       pud = pud_offset(pgd, 0);
19375 +       pmd_table = pmd_offset(pud, 0);
19376 +
19377 +       return pmd_table;
19378 +}
19379 +
19380  pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19381  {
19382         int pgd_idx = pgd_index(vaddr);
19383 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19384         int pgd_idx, pmd_idx;
19385         unsigned long vaddr;
19386         pgd_t *pgd;
19387 +       pud_t *pud;
19388         pmd_t *pmd;
19389         pte_t *pte = NULL;
19390  
19391 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19392         pgd = pgd_base + pgd_idx;
19393  
19394         for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19395 -               pmd = one_md_table_init(pgd);
19396 -               pmd = pmd + pmd_index(vaddr);
19397 +               pud = pud_offset(pgd, vaddr);
19398 +               pmd = pmd_offset(pud, vaddr);
19399 +
19400 +#ifdef CONFIG_X86_PAE
19401 +               paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19402 +#endif
19403 +
19404                 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19405                                                         pmd++, pmd_idx++) {
19406                         pte = page_table_kmap_check(one_page_table_init(pmd),
19407 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19408         }
19409  }
19410  
19411 -static inline int is_kernel_text(unsigned long addr)
19412 +static inline int is_kernel_text(unsigned long start, unsigned long end)
19413  {
19414 -       if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19415 -               return 1;
19416 -       return 0;
19417 +       if ((start > ktla_ktva((unsigned long)_etext) ||
19418 +            end <= ktla_ktva((unsigned long)_stext)) &&
19419 +           (start > ktla_ktva((unsigned long)_einittext) ||
19420 +            end <= ktla_ktva((unsigned long)_sinittext)) &&
19421 +
19422 +#ifdef CONFIG_ACPI_SLEEP
19423 +           (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19424 +#endif
19425 +
19426 +           (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19427 +               return 0;
19428 +       return 1;
19429  }
19430  
19431  /*
19432 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19433         unsigned long last_map_addr = end;
19434         unsigned long start_pfn, end_pfn;
19435         pgd_t *pgd_base = swapper_pg_dir;
19436 -       int pgd_idx, pmd_idx, pte_ofs;
19437 +       unsigned int pgd_idx, pmd_idx, pte_ofs;
19438         unsigned long pfn;
19439         pgd_t *pgd;
19440 +       pud_t *pud;
19441         pmd_t *pmd;
19442         pte_t *pte;
19443         unsigned pages_2m, pages_4k;
19444 @@ -281,8 +282,13 @@ repeat:
19445         pfn = start_pfn;
19446         pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19447         pgd = pgd_base + pgd_idx;
19448 -       for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19449 -               pmd = one_md_table_init(pgd);
19450 +       for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19451 +               pud = pud_offset(pgd, 0);
19452 +               pmd = pmd_offset(pud, 0);
19453 +
19454 +#ifdef CONFIG_X86_PAE
19455 +               paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19456 +#endif
19457  
19458                 if (pfn >= end_pfn)
19459                         continue;
19460 @@ -294,14 +300,13 @@ repeat:
19461  #endif
19462                 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19463                      pmd++, pmd_idx++) {
19464 -                       unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19465 +                       unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19466  
19467                         /*
19468                          * Map with big pages if possible, otherwise
19469                          * create normal page tables:
19470                          */
19471                         if (use_pse) {
19472 -                               unsigned int addr2;
19473                                 pgprot_t prot = PAGE_KERNEL_LARGE;
19474                                 /*
19475                                  * first pass will use the same initial
19476 @@ -311,11 +316,7 @@ repeat:
19477                                         __pgprot(PTE_IDENT_ATTR |
19478                                                  _PAGE_PSE);
19479  
19480 -                               addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19481 -                                       PAGE_OFFSET + PAGE_SIZE-1;
19482 -
19483 -                               if (is_kernel_text(addr) ||
19484 -                                   is_kernel_text(addr2))
19485 +                               if (is_kernel_text(address, address + PMD_SIZE))
19486                                         prot = PAGE_KERNEL_LARGE_EXEC;
19487  
19488                                 pages_2m++;
19489 @@ -332,7 +333,7 @@ repeat:
19490                         pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19491                         pte += pte_ofs;
19492                         for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19493 -                            pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19494 +                            pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19495                                 pgprot_t prot = PAGE_KERNEL;
19496                                 /*
19497                                  * first pass will use the same initial
19498 @@ -340,7 +341,7 @@ repeat:
19499                                  */
19500                                 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19501  
19502 -                               if (is_kernel_text(addr))
19503 +                               if (is_kernel_text(address, address + PAGE_SIZE))
19504                                         prot = PAGE_KERNEL_EXEC;
19505  
19506                                 pages_4k++;
19507 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19508  
19509                 pud = pud_offset(pgd, va);
19510                 pmd = pmd_offset(pud, va);
19511 -               if (!pmd_present(*pmd))
19512 +               if (!pmd_present(*pmd) || pmd_huge(*pmd))
19513                         break;
19514  
19515                 pte = pte_offset_kernel(pmd, va);
19516 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19517  
19518  static void __init pagetable_init(void)
19519  {
19520 -       pgd_t *pgd_base = swapper_pg_dir;
19521 -
19522 -       permanent_kmaps_init(pgd_base);
19523 +       permanent_kmaps_init(swapper_pg_dir);
19524  }
19525  
19526 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19527 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19528  EXPORT_SYMBOL_GPL(__supported_pte_mask);
19529  
19530  /* user-defined highmem size */
19531 @@ -757,6 +756,12 @@ void __init mem_init(void)
19532  
19533         pci_iommu_alloc();
19534  
19535 +#ifdef CONFIG_PAX_PER_CPU_PGD
19536 +       clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19537 +                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19538 +                       KERNEL_PGD_PTRS);
19539 +#endif
19540 +
19541  #ifdef CONFIG_FLATMEM
19542         BUG_ON(!mem_map);
19543  #endif
19544 @@ -774,7 +779,7 @@ void __init mem_init(void)
19545         set_highmem_pages_init();
19546  
19547         codesize =  (unsigned long) &_etext - (unsigned long) &_text;
19548 -       datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
19549 +       datasize =  (unsigned long) &_edata - (unsigned long) &_sdata;
19550         initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
19551  
19552         printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19553 @@ -815,10 +820,10 @@ void __init mem_init(void)
19554                 ((unsigned long)&__init_end -
19555                  (unsigned long)&__init_begin) >> 10,
19556  
19557 -               (unsigned long)&_etext, (unsigned long)&_edata,
19558 -               ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19559 +               (unsigned long)&_sdata, (unsigned long)&_edata,
19560 +               ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19561  
19562 -               (unsigned long)&_text, (unsigned long)&_etext,
19563 +               ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19564                 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19565  
19566         /*
19567 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
19568         if (!kernel_set_to_readonly)
19569                 return;
19570  
19571 +       start = ktla_ktva(start);
19572         pr_debug("Set kernel text: %lx - %lx for read write\n",
19573                  start, start+size);
19574  
19575 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
19576         if (!kernel_set_to_readonly)
19577                 return;
19578  
19579 +       start = ktla_ktva(start);
19580         pr_debug("Set kernel text: %lx - %lx for read only\n",
19581                  start, start+size);
19582  
19583 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
19584         unsigned long start = PFN_ALIGN(_text);
19585         unsigned long size = PFN_ALIGN(_etext) - start;
19586  
19587 +       start = ktla_ktva(start);
19588         set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19589         printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19590                 size >> 10);
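
The rewritten is_kernel_text() above now takes a [start, end) range and returns 1 as soon as that range overlaps any of the kernel text, init text, ACPI wakeup or low-BIOS windows. Each comparison is the standard half-open interval overlap test, shown here in isolation (the patch's boundary handling differs slightly in its use of inclusive limits; the example addresses are made up):

	#include <stdbool.h>
	#include <stdio.h>

	/* Half-open ranges [start, end) and [lo, hi) overlap when each begins
	 * before the other ends. */
	bool ranges_overlap(unsigned long start, unsigned long end,
	                    unsigned long lo, unsigned long hi)
	{
		return start < hi && lo < end;
	}

	int main(void)
	{
		/* a 2 MB mapping at 0xc0200000 against text at [0xc0100000, 0xc0400000) */
		printf("%d\n", ranges_overlap(0xc0200000UL, 0xc0400000UL,
		                              0xc0100000UL, 0xc0400000UL));   /* 1 */
		return 0;
	}
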
19591 diff -urNp linux-3.0.4/arch/x86/mm/init_64.c linux-3.0.4/arch/x86/mm/init_64.c
19592 --- linux-3.0.4/arch/x86/mm/init_64.c   2011-07-21 22:17:23.000000000 -0400
19593 +++ linux-3.0.4/arch/x86/mm/init_64.c   2011-08-23 21:47:55.000000000 -0400
19594 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
19595   * around without checking the pgd every time.
19596   */
19597  
19598 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19599 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19600  EXPORT_SYMBOL_GPL(__supported_pte_mask);
19601  
19602  int force_personality32;
19603 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
19604  
19605         for (address = start; address <= end; address += PGDIR_SIZE) {
19606                 const pgd_t *pgd_ref = pgd_offset_k(address);
19607 +
19608 +#ifdef CONFIG_PAX_PER_CPU_PGD
19609 +               unsigned long cpu;
19610 +#else
19611                 struct page *page;
19612 +#endif
19613  
19614                 if (pgd_none(*pgd_ref))
19615                         continue;
19616  
19617                 spin_lock(&pgd_lock);
19618 +
19619 +#ifdef CONFIG_PAX_PER_CPU_PGD
19620 +               for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19621 +                       pgd_t *pgd = pgd_offset_cpu(cpu, address);
19622 +#else
19623                 list_for_each_entry(page, &pgd_list, lru) {
19624                         pgd_t *pgd;
19625                         spinlock_t *pgt_lock;
19626 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
19627                         /* the pgt_lock only for Xen */
19628                         pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19629                         spin_lock(pgt_lock);
19630 +#endif
19631  
19632                         if (pgd_none(*pgd))
19633                                 set_pgd(pgd, *pgd_ref);
19634 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
19635                                 BUG_ON(pgd_page_vaddr(*pgd)
19636                                        != pgd_page_vaddr(*pgd_ref));
19637  
19638 +#ifndef CONFIG_PAX_PER_CPU_PGD
19639                         spin_unlock(pgt_lock);
19640 +#endif
19641 +
19642                 }
19643                 spin_unlock(&pgd_lock);
19644         }
19645 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, 
19646         pmd = fill_pmd(pud, vaddr);
19647         pte = fill_pte(pmd, vaddr);
19648  
19649 +       pax_open_kernel();
19650         set_pte(pte, new_pte);
19651 +       pax_close_kernel();
19652  
19653         /*
19654          * It's enough to flush this one mapping.
19655 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
19656                 pgd = pgd_offset_k((unsigned long)__va(phys));
19657                 if (pgd_none(*pgd)) {
19658                         pud = (pud_t *) spp_getpage();
19659 -                       set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19660 -                                               _PAGE_USER));
19661 +                       set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19662                 }
19663                 pud = pud_offset(pgd, (unsigned long)__va(phys));
19664                 if (pud_none(*pud)) {
19665                         pmd = (pmd_t *) spp_getpage();
19666 -                       set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19667 -                                               _PAGE_USER));
19668 +                       set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19669                 }
19670                 pmd = pmd_offset(pud, phys);
19671                 BUG_ON(!pmd_none(*pmd));
19672 @@ -693,6 +707,12 @@ void __init mem_init(void)
19673  
19674         pci_iommu_alloc();
19675  
19676 +#ifdef CONFIG_PAX_PER_CPU_PGD
19677 +       clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19678 +                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19679 +                       KERNEL_PGD_PTRS);
19680 +#endif
19681 +
19682         /* clear_bss() already clear the empty_zero_page */
19683  
19684         reservedpages = 0;
19685 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
19686  static struct vm_area_struct gate_vma = {
19687         .vm_start       = VSYSCALL_START,
19688         .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19689 -       .vm_page_prot   = PAGE_READONLY_EXEC,
19690 -       .vm_flags       = VM_READ | VM_EXEC
19691 +       .vm_page_prot   = PAGE_READONLY,
19692 +       .vm_flags       = VM_READ
19693  };
19694  
19695  struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19696 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
19697  
19698  const char *arch_vma_name(struct vm_area_struct *vma)
19699  {
19700 -       if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19701 +       if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19702                 return "[vdso]";
19703         if (vma == &gate_vma)
19704                 return "[vsyscall]";
19705 diff -urNp linux-3.0.4/arch/x86/mm/init.c linux-3.0.4/arch/x86/mm/init.c
19706 --- linux-3.0.4/arch/x86/mm/init.c      2011-07-21 22:17:23.000000000 -0400
19707 +++ linux-3.0.4/arch/x86/mm/init.c      2011-08-23 21:48:14.000000000 -0400
19708 @@ -31,7 +31,7 @@ int direct_gbpages
19709  static void __init find_early_table_space(unsigned long end, int use_pse,
19710                                           int use_gbpages)
19711  {
19712 -       unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19713 +       unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19714         phys_addr_t base;
19715  
19716         puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19717 @@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
19718   */
19719  int devmem_is_allowed(unsigned long pagenr)
19720  {
19721 -       if (pagenr <= 256)
19722 +#ifdef CONFIG_GRKERNSEC_KMEM
19723 +       /* allow BDA */
19724 +       if (!pagenr)
19725 +               return 1;
19726 +       /* allow EBDA */
19727 +       if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19728 +               return 1;
19729 +#else
19730 +       if (!pagenr)
19731 +               return 1;
19732 +#ifdef CONFIG_VM86
19733 +       if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19734 +               return 1;
19735 +#endif
19736 +#endif
19737 +
19738 +       if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19739                 return 1;
19740 +#ifdef CONFIG_GRKERNSEC_KMEM
19741 +       /* throw out everything else below 1MB */
19742 +       if (pagenr <= 256)
19743 +               return 0;
19744 +#endif
19745         if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19746                 return 0;
19747         if (!page_is_ram(pagenr))
19748                 return 1;
19749 +
19750         return 0;
19751  }
19752  
19753 @@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
19754  
19755  void free_initmem(void)
19756  {
19757 +
19758 +#ifdef CONFIG_PAX_KERNEXEC
19759 +#ifdef CONFIG_X86_32
19760 +       /* PaX: limit KERNEL_CS to actual size */
19761 +       unsigned long addr, limit;
19762 +       struct desc_struct d;
19763 +       int cpu;
19764 +
19765 +       limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19766 +       limit = (limit - 1UL) >> PAGE_SHIFT;
19767 +
19768 +       memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19769 +       for (cpu = 0; cpu < NR_CPUS; cpu++) {
19770 +               pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19771 +               write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19772 +       }
19773 +
19774 +       /* PaX: make KERNEL_CS read-only */
19775 +       addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19776 +       if (!paravirt_enabled())
19777 +               set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19778 +/*
19779 +               for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19780 +                       pgd = pgd_offset_k(addr);
19781 +                       pud = pud_offset(pgd, addr);
19782 +                       pmd = pmd_offset(pud, addr);
19783 +                       set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19784 +               }
19785 +*/
19786 +#ifdef CONFIG_X86_PAE
19787 +       set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19788 +/*
19789 +       for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19790 +               pgd = pgd_offset_k(addr);
19791 +               pud = pud_offset(pgd, addr);
19792 +               pmd = pmd_offset(pud, addr);
19793 +               set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19794 +       }
19795 +*/
19796 +#endif
19797 +
19798 +#ifdef CONFIG_MODULES
19799 +       set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19800 +#endif
19801 +
19802 +#else
19803 +       pgd_t *pgd;
19804 +       pud_t *pud;
19805 +       pmd_t *pmd;
19806 +       unsigned long addr, end;
19807 +
19808 +       /* PaX: make kernel code/rodata read-only, rest non-executable */
19809 +       for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19810 +               pgd = pgd_offset_k(addr);
19811 +               pud = pud_offset(pgd, addr);
19812 +               pmd = pmd_offset(pud, addr);
19813 +               if (!pmd_present(*pmd))
19814 +                       continue;
19815 +               if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19816 +                       set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19817 +               else
19818 +                       set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19819 +       }
19820 +
19821 +       addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19822 +       end = addr + KERNEL_IMAGE_SIZE;
19823 +       for (; addr < end; addr += PMD_SIZE) {
19824 +               pgd = pgd_offset_k(addr);
19825 +               pud = pud_offset(pgd, addr);
19826 +               pmd = pmd_offset(pud, addr);
19827 +               if (!pmd_present(*pmd))
19828 +                       continue;
19829 +               if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19830 +                       set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19831 +       }
19832 +#endif
19833 +
19834 +       flush_tlb_all();
19835 +#endif
19836 +
19837         free_init_pages("unused kernel memory",
19838                         (unsigned long)(&__init_begin),
19839                         (unsigned long)(&__init_end));
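
For reference, the page numbers the new devmem_is_allowed() compares against fall out of PAGE_SHIFT == 12 on x86; a quick computation, assuming the usual ISA_START_ADDRESS/ISA_END_ADDRESS values of 0xA0000 and 0x100000:

	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		printf("EBDA page:      %#lx\n", 0x9f000UL  >> PAGE_SHIFT);  /* 0x9f  */
		printf("ISA start page: %#lx\n", 0xA0000UL  >> PAGE_SHIFT);  /* 0xa0  */
		printf("ISA end page:   %#lx\n", 0x100000UL >> PAGE_SHIFT);  /* 0x100 == 256 */
		return 0;
	}
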
19840 diff -urNp linux-3.0.4/arch/x86/mm/iomap_32.c linux-3.0.4/arch/x86/mm/iomap_32.c
19841 --- linux-3.0.4/arch/x86/mm/iomap_32.c  2011-07-21 22:17:23.000000000 -0400
19842 +++ linux-3.0.4/arch/x86/mm/iomap_32.c  2011-08-23 21:47:55.000000000 -0400
19843 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19844         type = kmap_atomic_idx_push();
19845         idx = type + KM_TYPE_NR * smp_processor_id();
19846         vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19847 +
19848 +       pax_open_kernel();
19849         set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19850 +       pax_close_kernel();
19851 +
19852         arch_flush_lazy_mmu_mode();
19853  
19854         return (void *)vaddr;
19855 diff -urNp linux-3.0.4/arch/x86/mm/ioremap.c linux-3.0.4/arch/x86/mm/ioremap.c
19856 --- linux-3.0.4/arch/x86/mm/ioremap.c   2011-07-21 22:17:23.000000000 -0400
19857 +++ linux-3.0.4/arch/x86/mm/ioremap.c   2011-08-23 21:47:55.000000000 -0400
19858 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
19859         for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19860                 int is_ram = page_is_ram(pfn);
19861  
19862 -               if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19863 +               if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19864                         return NULL;
19865                 WARN_ON_ONCE(is_ram);
19866         }
19867 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19868  early_param("early_ioremap_debug", early_ioremap_debug_setup);
19869  
19870  static __initdata int after_paging_init;
19871 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19872 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19873  
19874  static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
19875  {
19876 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
19877                 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
19878  
19879         pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
19880 -       memset(bm_pte, 0, sizeof(bm_pte));
19881 -       pmd_populate_kernel(&init_mm, pmd, bm_pte);
19882 +       pmd_populate_user(&init_mm, pmd, bm_pte);
19883  
19884         /*
19885          * The boot-ioremap range spans multiple pmds, for which
19886 diff -urNp linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c
19887 --- linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c       2011-07-21 22:17:23.000000000 -0400
19888 +++ linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c       2011-08-23 21:47:55.000000000 -0400
19889 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
19890          * memory (e.g. tracked pages)? For now, we need this to avoid
19891          * invoking kmemcheck for PnP BIOS calls.
19892          */
19893 -       if (regs->flags & X86_VM_MASK)
19894 +       if (v8086_mode(regs))
19895                 return false;
19896 -       if (regs->cs != __KERNEL_CS)
19897 +       if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
19898                 return false;
19899  
19900         pte = kmemcheck_pte_lookup(address);
19901 diff -urNp linux-3.0.4/arch/x86/mm/mmap.c linux-3.0.4/arch/x86/mm/mmap.c
19902 --- linux-3.0.4/arch/x86/mm/mmap.c      2011-07-21 22:17:23.000000000 -0400
19903 +++ linux-3.0.4/arch/x86/mm/mmap.c      2011-08-23 21:47:55.000000000 -0400
19904 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
19905   * Leave an at least ~128 MB hole with possible stack randomization.
19906   */
19907  #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
19908 -#define MAX_GAP (TASK_SIZE/6*5)
19909 +#define MAX_GAP (pax_task_size/6*5)
19910  
19911  /*
19912   * True on X86_32 or when emulating IA32 on X86_64
19913 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
19914         return rnd << PAGE_SHIFT;
19915  }
19916  
19917 -static unsigned long mmap_base(void)
19918 +static unsigned long mmap_base(struct mm_struct *mm)
19919  {
19920         unsigned long gap = rlimit(RLIMIT_STACK);
19921 +       unsigned long pax_task_size = TASK_SIZE;
19922 +
19923 +#ifdef CONFIG_PAX_SEGMEXEC
19924 +       if (mm->pax_flags & MF_PAX_SEGMEXEC)
19925 +               pax_task_size = SEGMEXEC_TASK_SIZE;
19926 +#endif
19927  
19928         if (gap < MIN_GAP)
19929                 gap = MIN_GAP;
19930         else if (gap > MAX_GAP)
19931                 gap = MAX_GAP;
19932  
19933 -       return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
19934 +       return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
19935  }
19936  
19937  /*
19938   * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
19939   * does, but not when emulating X86_32
19940   */
19941 -static unsigned long mmap_legacy_base(void)
19942 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
19943  {
19944 -       if (mmap_is_ia32())
19945 +       if (mmap_is_ia32()) {
19946 +
19947 +#ifdef CONFIG_PAX_SEGMEXEC
19948 +               if (mm->pax_flags & MF_PAX_SEGMEXEC)
19949 +                       return SEGMEXEC_TASK_UNMAPPED_BASE;
19950 +               else
19951 +#endif
19952 +
19953                 return TASK_UNMAPPED_BASE;
19954 -       else
19955 +       } else
19956                 return TASK_UNMAPPED_BASE + mmap_rnd();
19957  }
19958  
19959 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
19960  void arch_pick_mmap_layout(struct mm_struct *mm)
19961  {
19962         if (mmap_is_legacy()) {
19963 -               mm->mmap_base = mmap_legacy_base();
19964 +               mm->mmap_base = mmap_legacy_base(mm);
19965 +
19966 +#ifdef CONFIG_PAX_RANDMMAP
19967 +               if (mm->pax_flags & MF_PAX_RANDMMAP)
19968 +                       mm->mmap_base += mm->delta_mmap;
19969 +#endif
19970 +
19971                 mm->get_unmapped_area = arch_get_unmapped_area;
19972                 mm->unmap_area = arch_unmap_area;
19973         } else {
19974 -               mm->mmap_base = mmap_base();
19975 +               mm->mmap_base = mmap_base(mm);
19976 +
19977 +#ifdef CONFIG_PAX_RANDMMAP
19978 +               if (mm->pax_flags & MF_PAX_RANDMMAP)
19979 +                       mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
19980 +#endif
19981 +
19982                 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
19983                 mm->unmap_area = arch_unmap_area_topdown;
19984         }
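
The mmap_base() change above mainly threads the mm through so a SEGMEXEC task size can replace TASK_SIZE; the computation itself stays "task size minus the clamped stack gap minus the random slide, page-aligned". A standalone sketch with illustrative constants (the real MIN_GAP also adds stack_maxrandom_size(), omitted here):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	unsigned long mmap_base_sketch(unsigned long task_size, unsigned long stack_limit,
	                               unsigned long min_gap, unsigned long max_gap,
	                               unsigned long rnd)
	{
		unsigned long gap = stack_limit;

		if (gap < min_gap)
			gap = min_gap;
		else if (gap > max_gap)
			gap = max_gap;
		return PAGE_ALIGN(task_size - gap - rnd);
	}

	int main(void)
	{
		/* 3 GB task size, 8 MB stack limit, 128 MB minimum gap, no randomisation */
		printf("%#lx\n", mmap_base_sketch(0xC0000000UL, 8UL << 20,
		                                  128UL << 20, 0xC0000000UL / 6 * 5, 0));
		return 0;
	}
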
19985 diff -urNp linux-3.0.4/arch/x86/mm/mmio-mod.c linux-3.0.4/arch/x86/mm/mmio-mod.c
19986 --- linux-3.0.4/arch/x86/mm/mmio-mod.c  2011-07-21 22:17:23.000000000 -0400
19987 +++ linux-3.0.4/arch/x86/mm/mmio-mod.c  2011-08-23 21:47:55.000000000 -0400
19988 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
19989                 break;
19990         default:
19991                 {
19992 -                       unsigned char *ip = (unsigned char *)instptr;
19993 +                       unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
19994                         my_trace->opcode = MMIO_UNKNOWN_OP;
19995                         my_trace->width = 0;
19996                         my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
19997 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p, 
19998  static void ioremap_trace_core(resource_size_t offset, unsigned long size,
19999                                                         void __iomem *addr)
20000  {
20001 -       static atomic_t next_id;
20002 +       static atomic_unchecked_t next_id;
20003         struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20004         /* These are page-unaligned. */
20005         struct mmiotrace_map map = {
20006 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20007                         .private = trace
20008                 },
20009                 .phys = offset,
20010 -               .id = atomic_inc_return(&next_id)
20011 +               .id = atomic_inc_return_unchecked(&next_id)
20012         };
20013         map.map_id = trace->id;
20014  
20015 diff -urNp linux-3.0.4/arch/x86/mm/pageattr.c linux-3.0.4/arch/x86/mm/pageattr.c
20016 --- linux-3.0.4/arch/x86/mm/pageattr.c  2011-07-21 22:17:23.000000000 -0400
20017 +++ linux-3.0.4/arch/x86/mm/pageattr.c  2011-08-23 21:47:55.000000000 -0400
20018 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20019          */
20020  #ifdef CONFIG_PCI_BIOS
20021         if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20022 -               pgprot_val(forbidden) |= _PAGE_NX;
20023 +               pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20024  #endif
20025  
20026         /*
20027 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20028          * Does not cover __inittext since that is gone later on. On
20029          * 64bit we do not enforce !NX on the low mapping
20030          */
20031 -       if (within(address, (unsigned long)_text, (unsigned long)_etext))
20032 -               pgprot_val(forbidden) |= _PAGE_NX;
20033 +       if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20034 +               pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20035  
20036 +#ifdef CONFIG_DEBUG_RODATA
20037         /*
20038          * The .rodata section needs to be read-only. Using the pfn
20039          * catches all aliases.
20040 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20041         if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20042                    __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20043                 pgprot_val(forbidden) |= _PAGE_RW;
20044 +#endif
20045  
20046  #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20047         /*
20048 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20049         }
20050  #endif
20051  
20052 +#ifdef CONFIG_PAX_KERNEXEC
20053 +       if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20054 +               pgprot_val(forbidden) |= _PAGE_RW;
20055 +               pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20056 +       }
20057 +#endif
20058 +
20059         prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20060  
20061         return prot;
20062 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20063  static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20064  {
20065         /* change init_mm */
20066 +       pax_open_kernel();
20067         set_pte_atomic(kpte, pte);
20068 +
20069  #ifdef CONFIG_X86_32
20070         if (!SHARED_KERNEL_PMD) {
20071 +
20072 +#ifdef CONFIG_PAX_PER_CPU_PGD
20073 +               unsigned long cpu;
20074 +#else
20075                 struct page *page;
20076 +#endif
20077  
20078 +#ifdef CONFIG_PAX_PER_CPU_PGD
20079 +               for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20080 +                       pgd_t *pgd = get_cpu_pgd(cpu);
20081 +#else
20082                 list_for_each_entry(page, &pgd_list, lru) {
20083 -                       pgd_t *pgd;
20084 +                       pgd_t *pgd = (pgd_t *)page_address(page);
20085 +#endif
20086 +
20087                         pud_t *pud;
20088                         pmd_t *pmd;
20089  
20090 -                       pgd = (pgd_t *)page_address(page) + pgd_index(address);
20091 +                       pgd += pgd_index(address);
20092                         pud = pud_offset(pgd, address);
20093                         pmd = pmd_offset(pud, address);
20094                         set_pte_atomic((pte_t *)pmd, pte);
20095                 }
20096         }
20097  #endif
20098 +       pax_close_kernel();
20099  }
20100  
20101  static int
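The pageattr.c changes are the KERNEXEC half of the story: static_protections() now also forbids write access to everything between _text and _sdata, only forces _PAGE_NX when the CPU actually supports it (hence the "& __supported_pte_mask"), and __set_pmd_pte() brackets its page-table updates with pax_open_kernel()/pax_close_kernel(), presumably because those tables are otherwise write-protected under this configuration. As a rough userspace analogue of that open-write-close discipline (not the kernel mechanism, which manipulates page-table/CR0 state), the sketch below re-enables write permission on an otherwise read-only page just long enough for one update:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        /* Start read-only: the analogue of write-protected kernel data. */
        unsigned char *p = mmap(NULL, pagesz, PROT_READ,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        mprotect(p, pagesz, PROT_READ | PROT_WRITE);    /* "open" the window */
        memset(p, 0xAA, 16);                            /* the one permitted update */
        mprotect(p, pagesz, PROT_READ);                 /* "close" it again */

        printf("first byte: %#x\n", p[0]);
        munmap(p, pagesz);
        return 0;
}

Keeping the writable window this narrow is the point of the pattern: most of the time the mappings stay read-only, so a stray kernel write faults instead of silently corrupting text or page tables.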
20102 diff -urNp linux-3.0.4/arch/x86/mm/pageattr-test.c linux-3.0.4/arch/x86/mm/pageattr-test.c
20103 --- linux-3.0.4/arch/x86/mm/pageattr-test.c     2011-07-21 22:17:23.000000000 -0400
20104 +++ linux-3.0.4/arch/x86/mm/pageattr-test.c     2011-08-23 21:47:55.000000000 -0400
20105 @@ -36,7 +36,7 @@ enum {
20106  
20107  static int pte_testbit(pte_t pte)
20108  {
20109 -       return pte_flags(pte) & _PAGE_UNUSED1;
20110 +       return pte_flags(pte) & _PAGE_CPA_TEST;
20111  }
20112  
20113  struct split_state {
20114 diff -urNp linux-3.0.4/arch/x86/mm/pat.c linux-3.0.4/arch/x86/mm/pat.c
20115 --- linux-3.0.4/arch/x86/mm/pat.c       2011-07-21 22:17:23.000000000 -0400
20116 +++ linux-3.0.4/arch/x86/mm/pat.c       2011-08-23 21:47:55.000000000 -0400
20117 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20118  
20119         if (!entry) {
20120                 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20121 -                       current->comm, current->pid, start, end);
20122 +                       current->comm, task_pid_nr(current), start, end);
20123                 return -EINVAL;
20124         }
20125  
20126 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20127         while (cursor < to) {
20128                 if (!devmem_is_allowed(pfn)) {
20129                         printk(KERN_INFO
20130 -               "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20131 -                               current->comm, from, to);
20132 +               "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20133 +                               current->comm, from, to, cursor);
20134                         return 0;
20135                 }
20136                 cursor += PAGE_SIZE;
20137 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20138                 printk(KERN_INFO
20139                         "%s:%d ioremap_change_attr failed %s "
20140                         "for %Lx-%Lx\n",
20141 -                       current->comm, current->pid,
20142 +                       current->comm, task_pid_nr(current),
20143                         cattr_name(flags),
20144                         base, (unsigned long long)(base + size));
20145                 return -EINVAL;
20146 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, 
20147                 if (want_flags != flags) {
20148                         printk(KERN_WARNING
20149                         "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20150 -                               current->comm, current->pid,
20151 +                               current->comm, task_pid_nr(current),
20152                                 cattr_name(want_flags),
20153                                 (unsigned long long)paddr,
20154                                 (unsigned long long)(paddr + size),
20155 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, 
20156                         free_memtype(paddr, paddr + size);
20157                         printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20158                                 " for %Lx-%Lx, got %s\n",
20159 -                               current->comm, current->pid,
20160 +                               current->comm, task_pid_nr(current),
20161                                 cattr_name(want_flags),
20162                                 (unsigned long long)paddr,
20163                                 (unsigned long long)(paddr + size),
20164 diff -urNp linux-3.0.4/arch/x86/mm/pf_in.c linux-3.0.4/arch/x86/mm/pf_in.c
20165 --- linux-3.0.4/arch/x86/mm/pf_in.c     2011-07-21 22:17:23.000000000 -0400
20166 +++ linux-3.0.4/arch/x86/mm/pf_in.c     2011-08-23 21:47:55.000000000 -0400
20167 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20168         int i;
20169         enum reason_type rv = OTHERS;
20170  
20171 -       p = (unsigned char *)ins_addr;
20172 +       p = (unsigned char *)ktla_ktva(ins_addr);
20173         p += skip_prefix(p, &prf);
20174         p += get_opcode(p, &opcode);
20175  
20176 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20177         struct prefix_bits prf;
20178         int i;
20179  
20180 -       p = (unsigned char *)ins_addr;
20181 +       p = (unsigned char *)ktla_ktva(ins_addr);
20182         p += skip_prefix(p, &prf);
20183         p += get_opcode(p, &opcode);
20184  
20185 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned 
20186         struct prefix_bits prf;
20187         int i;
20188  
20189 -       p = (unsigned char *)ins_addr;
20190 +       p = (unsigned char *)ktla_ktva(ins_addr);
20191         p += skip_prefix(p, &prf);
20192         p += get_opcode(p, &opcode);
20193  
20194 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20195         struct prefix_bits prf;
20196         int i;
20197  
20198 -       p = (unsigned char *)ins_addr;
20199 +       p = (unsigned char *)ktla_ktva(ins_addr);
20200         p += skip_prefix(p, &prf);
20201         p += get_opcode(p, &opcode);
20202         for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20203 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20204         struct prefix_bits prf;
20205         int i;
20206  
20207 -       p = (unsigned char *)ins_addr;
20208 +       p = (unsigned char *)ktla_ktva(ins_addr);
20209         p += skip_prefix(p, &prf);
20210         p += get_opcode(p, &opcode);
20211         for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20212 diff -urNp linux-3.0.4/arch/x86/mm/pgtable_32.c linux-3.0.4/arch/x86/mm/pgtable_32.c
20213 --- linux-3.0.4/arch/x86/mm/pgtable_32.c        2011-07-21 22:17:23.000000000 -0400
20214 +++ linux-3.0.4/arch/x86/mm/pgtable_32.c        2011-08-23 21:47:55.000000000 -0400
20215 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, 
20216                 return;
20217         }
20218         pte = pte_offset_kernel(pmd, vaddr);
20219 +
20220 +       pax_open_kernel();
20221         if (pte_val(pteval))
20222                 set_pte_at(&init_mm, vaddr, pte, pteval);
20223         else
20224                 pte_clear(&init_mm, vaddr, pte);
20225 +       pax_close_kernel();
20226  
20227         /*
20228          * It's enough to flush this one mapping.
20229 diff -urNp linux-3.0.4/arch/x86/mm/pgtable.c linux-3.0.4/arch/x86/mm/pgtable.c
20230 --- linux-3.0.4/arch/x86/mm/pgtable.c   2011-07-21 22:17:23.000000000 -0400
20231 +++ linux-3.0.4/arch/x86/mm/pgtable.c   2011-08-23 21:47:55.000000000 -0400
20232 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20233         list_del(&page->lru);
20234  }
20235  
20236 -#define UNSHARED_PTRS_PER_PGD                          \
20237 -       (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20238 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20239 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20240  
20241 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20242 +{
20243 +       while (count--)
20244 +               *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20245 +}
20246 +#endif
20247 +
20248 +#ifdef CONFIG_PAX_PER_CPU_PGD
20249 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20250 +{
20251 +       while (count--)
20252 +
20253 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20254 +               *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20255 +#else
20256 +               *dst++ = *src++;
20257 +#endif
20258  
20259 +}
20260 +#endif
20261 +
20262 +#ifdef CONFIG_X86_64
20263 +#define pxd_t                          pud_t
20264 +#define pyd_t                          pgd_t
20265 +#define paravirt_release_pxd(pfn)      paravirt_release_pud(pfn)
20266 +#define pxd_free(mm, pud)              pud_free((mm), (pud))
20267 +#define pyd_populate(mm, pgd, pud)     pgd_populate((mm), (pgd), (pud))
20268 +#define pyd_offset(mm ,address)                pgd_offset((mm), (address))
20269 +#define PYD_SIZE                       PGDIR_SIZE
20270 +#else
20271 +#define pxd_t                          pmd_t
20272 +#define pyd_t                          pud_t
20273 +#define paravirt_release_pxd(pfn)      paravirt_release_pmd(pfn)
20274 +#define pxd_free(mm, pud)              pmd_free((mm), (pud))
20275 +#define pyd_populate(mm, pgd, pud)     pud_populate((mm), (pgd), (pud))
20276 +#define pyd_offset(mm ,address)                pud_offset((mm), (address))
20277 +#define PYD_SIZE                       PUD_SIZE
20278 +#endif
20279 +
20280 +#ifdef CONFIG_PAX_PER_CPU_PGD
20281 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20282 +static inline void pgd_dtor(pgd_t *pgd) {}
20283 +#else
20284  static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20285  {
20286         BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20287 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20288         pgd_list_del(pgd);
20289         spin_unlock(&pgd_lock);
20290  }
20291 +#endif
20292  
20293  /*
20294   * List of all pgd's needed for non-PAE so it can invalidate entries
20295 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20296   * -- wli
20297   */
20298  
20299 -#ifdef CONFIG_X86_PAE
20300 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20301  /*
20302   * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20303   * updating the top-level pagetable entries to guarantee the
20304 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20305   * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20306   * and initialize the kernel pmds here.
20307   */
20308 -#define PREALLOCATED_PMDS      UNSHARED_PTRS_PER_PGD
20309 +#define PREALLOCATED_PXDS      (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20310  
20311  void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20312  {
20313 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, 
20314          */
20315         flush_tlb_mm(mm);
20316  }
20317 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20318 +#define PREALLOCATED_PXDS      USER_PGD_PTRS
20319  #else  /* !CONFIG_X86_PAE */
20320  
20321  /* No need to prepopulate any pagetable entries in non-PAE modes. */
20322 -#define PREALLOCATED_PMDS      0
20323 +#define PREALLOCATED_PXDS      0
20324  
20325  #endif /* CONFIG_X86_PAE */
20326  
20327 -static void free_pmds(pmd_t *pmds[])
20328 +static void free_pxds(pxd_t *pxds[])
20329  {
20330         int i;
20331  
20332 -       for(i = 0; i < PREALLOCATED_PMDS; i++)
20333 -               if (pmds[i])
20334 -                       free_page((unsigned long)pmds[i]);
20335 +       for(i = 0; i < PREALLOCATED_PXDS; i++)
20336 +               if (pxds[i])
20337 +                       free_page((unsigned long)pxds[i]);
20338  }
20339  
20340 -static int preallocate_pmds(pmd_t *pmds[])
20341 +static int preallocate_pxds(pxd_t *pxds[])
20342  {
20343         int i;
20344         bool failed = false;
20345  
20346 -       for(i = 0; i < PREALLOCATED_PMDS; i++) {
20347 -               pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20348 -               if (pmd == NULL)
20349 +       for(i = 0; i < PREALLOCATED_PXDS; i++) {
20350 +               pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20351 +               if (pxd == NULL)
20352                         failed = true;
20353 -               pmds[i] = pmd;
20354 +               pxds[i] = pxd;
20355         }
20356  
20357         if (failed) {
20358 -               free_pmds(pmds);
20359 +               free_pxds(pxds);
20360                 return -ENOMEM;
20361         }
20362  
20363 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20364   * preallocate which never got a corresponding vma will need to be
20365   * freed manually.
20366   */
20367 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20368 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20369  {
20370         int i;
20371  
20372 -       for(i = 0; i < PREALLOCATED_PMDS; i++) {
20373 +       for(i = 0; i < PREALLOCATED_PXDS; i++) {
20374                 pgd_t pgd = pgdp[i];
20375  
20376                 if (pgd_val(pgd) != 0) {
20377 -                       pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20378 +                       pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20379  
20380 -                       pgdp[i] = native_make_pgd(0);
20381 +                       set_pgd(pgdp + i, native_make_pgd(0));
20382  
20383 -                       paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20384 -                       pmd_free(mm, pmd);
20385 +                       paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20386 +                       pxd_free(mm, pxd);
20387                 }
20388         }
20389  }
20390  
20391 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20392 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20393  {
20394 -       pud_t *pud;
20395 +       pyd_t *pyd;
20396         unsigned long addr;
20397         int i;
20398  
20399 -       if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20400 +       if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20401                 return;
20402  
20403 -       pud = pud_offset(pgd, 0);
20404 +#ifdef CONFIG_X86_64
20405 +       pyd = pyd_offset(mm, 0L);
20406 +#else
20407 +       pyd = pyd_offset(pgd, 0L);
20408 +#endif
20409  
20410 -       for (addr = i = 0; i < PREALLOCATED_PMDS;
20411 -            i++, pud++, addr += PUD_SIZE) {
20412 -               pmd_t *pmd = pmds[i];
20413 +       for (addr = i = 0; i < PREALLOCATED_PXDS;
20414 +            i++, pyd++, addr += PYD_SIZE) {
20415 +               pxd_t *pxd = pxds[i];
20416  
20417                 if (i >= KERNEL_PGD_BOUNDARY)
20418 -                       memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20419 -                              sizeof(pmd_t) * PTRS_PER_PMD);
20420 +                       memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20421 +                              sizeof(pxd_t) * PTRS_PER_PMD);
20422  
20423 -               pud_populate(mm, pud, pmd);
20424 +               pyd_populate(mm, pyd, pxd);
20425         }
20426  }
20427  
20428  pgd_t *pgd_alloc(struct mm_struct *mm)
20429  {
20430         pgd_t *pgd;
20431 -       pmd_t *pmds[PREALLOCATED_PMDS];
20432 +       pxd_t *pxds[PREALLOCATED_PXDS];
20433  
20434         pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20435  
20436 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20437  
20438         mm->pgd = pgd;
20439  
20440 -       if (preallocate_pmds(pmds) != 0)
20441 +       if (preallocate_pxds(pxds) != 0)
20442                 goto out_free_pgd;
20443  
20444         if (paravirt_pgd_alloc(mm) != 0)
20445 -               goto out_free_pmds;
20446 +               goto out_free_pxds;
20447  
20448         /*
20449          * Make sure that pre-populating the pmds is atomic with
20450 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20451         spin_lock(&pgd_lock);
20452  
20453         pgd_ctor(mm, pgd);
20454 -       pgd_prepopulate_pmd(mm, pgd, pmds);
20455 +       pgd_prepopulate_pxd(mm, pgd, pxds);
20456  
20457         spin_unlock(&pgd_lock);
20458  
20459         return pgd;
20460  
20461 -out_free_pmds:
20462 -       free_pmds(pmds);
20463 +out_free_pxds:
20464 +       free_pxds(pxds);
20465  out_free_pgd:
20466         free_page((unsigned long)pgd);
20467  out:
20468 @@ -295,7 +344,7 @@ out:
20469  
20470  void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20471  {
20472 -       pgd_mop_up_pmds(mm, pgd);
20473 +       pgd_mop_up_pxds(mm, pgd);
20474         pgd_dtor(pgd);
20475         paravirt_pgd_free(mm, pgd);
20476         free_page((unsigned long)pgd);
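The pgtable.c rewrite merges the 32-bit (pmd-under-pud) and 64-bit (pud-under-pgd) preallocation paths by hiding the two page-table levels behind the pxd_t/pyd_t macro aliases, so free_pxds(), preallocate_pxds(), pgd_mop_up_pxds() and pgd_prepopulate_pxd() are written once and compile against whichever pair the configuration selects. The fragment below is only a schematic of that aliasing trick with invented level_a/level_b types; the real definitions are the #define block in the hunk above.

#include <stdio.h>

struct level_a { int id; };     /* stand-in for pmd_t */
struct level_b { int id; };     /* stand-in for pud_t */

#ifdef WIDE_TABLES
# define pxd_t          struct level_b
# define pxd_name       "level_b"
#else
# define pxd_t          struct level_a
# define pxd_name       "level_a"
#endif

/* One implementation serves both configurations. */
static void init_pxds(pxd_t *pxds, int n)
{
        for (int i = 0; i < n; i++)
                pxds[i].id = i;
}

int main(void)
{
        pxd_t table[4];
        init_pxds(table, 4);
        printf("initialised 4 %s entries, last id = %d\n", pxd_name, table[3].id);
        return 0;
}

Building with -DWIDE_TABLES switches every use of pxd_t at once, which is why the kernel hunk can keep a single body of code for both CONFIG_X86_32 and CONFIG_X86_64.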
20477 diff -urNp linux-3.0.4/arch/x86/mm/setup_nx.c linux-3.0.4/arch/x86/mm/setup_nx.c
20478 --- linux-3.0.4/arch/x86/mm/setup_nx.c  2011-07-21 22:17:23.000000000 -0400
20479 +++ linux-3.0.4/arch/x86/mm/setup_nx.c  2011-08-23 21:47:55.000000000 -0400
20480 @@ -5,8 +5,10 @@
20481  #include <asm/pgtable.h>
20482  #include <asm/proto.h>
20483  
20484 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20485  static int disable_nx __cpuinitdata;
20486  
20487 +#ifndef CONFIG_PAX_PAGEEXEC
20488  /*
20489   * noexec = on|off
20490   *
20491 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20492         return 0;
20493  }
20494  early_param("noexec", noexec_setup);
20495 +#endif
20496 +
20497 +#endif
20498  
20499  void __cpuinit x86_configure_nx(void)
20500  {
20501 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20502         if (cpu_has_nx && !disable_nx)
20503                 __supported_pte_mask |= _PAGE_NX;
20504         else
20505 +#endif
20506                 __supported_pte_mask &= ~_PAGE_NX;
20507  }
20508  
20509 diff -urNp linux-3.0.4/arch/x86/mm/tlb.c linux-3.0.4/arch/x86/mm/tlb.c
20510 --- linux-3.0.4/arch/x86/mm/tlb.c       2011-07-21 22:17:23.000000000 -0400
20511 +++ linux-3.0.4/arch/x86/mm/tlb.c       2011-08-23 21:47:55.000000000 -0400
20512 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
20513                 BUG();
20514         cpumask_clear_cpu(cpu,
20515                           mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20516 +
20517 +#ifndef CONFIG_PAX_PER_CPU_PGD
20518         load_cr3(swapper_pg_dir);
20519 +#endif
20520 +
20521  }
20522  EXPORT_SYMBOL_GPL(leave_mm);
20523  
20524 diff -urNp linux-3.0.4/arch/x86/net/bpf_jit_comp.c linux-3.0.4/arch/x86/net/bpf_jit_comp.c
20525 --- linux-3.0.4/arch/x86/net/bpf_jit_comp.c     2011-07-21 22:17:23.000000000 -0400
20526 +++ linux-3.0.4/arch/x86/net/bpf_jit_comp.c     2011-08-23 21:47:55.000000000 -0400
20527 @@ -589,7 +589,9 @@ cond_branch:                        f_offset = addrs[i + filt
20528                                         module_free(NULL, image);
20529                                         return;
20530                                 }
20531 +                               pax_open_kernel();
20532                                 memcpy(image + proglen, temp, ilen);
20533 +                               pax_close_kernel();
20534                         }
20535                         proglen += ilen;
20536                         addrs[i] = proglen;
20537 @@ -609,7 +611,7 @@ cond_branch:                        f_offset = addrs[i + filt
20538                         break;
20539                 }
20540                 if (proglen == oldproglen) {
20541 -                       image = module_alloc(max_t(unsigned int,
20542 +                       image = module_alloc_exec(max_t(unsigned int,
20543                                                    proglen,
20544                                                    sizeof(struct work_struct)));
20545                         if (!image)
20546 diff -urNp linux-3.0.4/arch/x86/oprofile/backtrace.c linux-3.0.4/arch/x86/oprofile/backtrace.c
20547 --- linux-3.0.4/arch/x86/oprofile/backtrace.c   2011-08-23 21:44:40.000000000 -0400
20548 +++ linux-3.0.4/arch/x86/oprofile/backtrace.c   2011-08-23 21:47:55.000000000 -0400
20549 @@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
20550  {
20551         struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20552  
20553 -       if (!user_mode_vm(regs)) {
20554 +       if (!user_mode(regs)) {
20555                 unsigned long stack = kernel_stack_pointer(regs);
20556                 if (depth)
20557                         dump_trace(NULL, regs, (unsigned long *)stack, 0,
20558 diff -urNp linux-3.0.4/arch/x86/pci/mrst.c linux-3.0.4/arch/x86/pci/mrst.c
20559 --- linux-3.0.4/arch/x86/pci/mrst.c     2011-07-21 22:17:23.000000000 -0400
20560 +++ linux-3.0.4/arch/x86/pci/mrst.c     2011-08-23 21:47:55.000000000 -0400
20561 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20562         printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20563         pci_mmcfg_late_init();
20564         pcibios_enable_irq = mrst_pci_irq_enable;
20565 -       pci_root_ops = pci_mrst_ops;
20566 +       pax_open_kernel();
20567 +       memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20568 +       pax_close_kernel();
20569         /* Continue with standard init */
20570         return 1;
20571  }
20572 diff -urNp linux-3.0.4/arch/x86/pci/pcbios.c linux-3.0.4/arch/x86/pci/pcbios.c
20573 --- linux-3.0.4/arch/x86/pci/pcbios.c   2011-07-21 22:17:23.000000000 -0400
20574 +++ linux-3.0.4/arch/x86/pci/pcbios.c   2011-08-23 21:47:55.000000000 -0400
20575 @@ -79,50 +79,93 @@ union bios32 {
20576  static struct {
20577         unsigned long address;
20578         unsigned short segment;
20579 -} bios32_indirect = { 0, __KERNEL_CS };
20580 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20581  
20582  /*
20583   * Returns the entry point for the given service, NULL on error
20584   */
20585  
20586 -static unsigned long bios32_service(unsigned long service)
20587 +static unsigned long __devinit bios32_service(unsigned long service)
20588  {
20589         unsigned char return_code;      /* %al */
20590         unsigned long address;          /* %ebx */
20591         unsigned long length;           /* %ecx */
20592         unsigned long entry;            /* %edx */
20593         unsigned long flags;
20594 +       struct desc_struct d, *gdt;
20595  
20596         local_irq_save(flags);
20597 -       __asm__("lcall *(%%edi); cld"
20598 +
20599 +       gdt = get_cpu_gdt_table(smp_processor_id());
20600 +
20601 +       pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20602 +       write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20603 +       pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20604 +       write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20605 +
20606 +       __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20607                 : "=a" (return_code),
20608                   "=b" (address),
20609                   "=c" (length),
20610                   "=d" (entry)
20611                 : "0" (service),
20612                   "1" (0),
20613 -                 "D" (&bios32_indirect));
20614 +                 "D" (&bios32_indirect),
20615 +                 "r"(__PCIBIOS_DS)
20616 +               : "memory");
20617 +
20618 +       pax_open_kernel();
20619 +       gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20620 +       gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20621 +       gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20622 +       gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20623 +       pax_close_kernel();
20624 +
20625         local_irq_restore(flags);
20626  
20627         switch (return_code) {
20628 -               case 0:
20629 -                       return address + entry;
20630 -               case 0x80:      /* Not present */
20631 -                       printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20632 -                       return 0;
20633 -               default: /* Shouldn't happen */
20634 -                       printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20635 -                               service, return_code);
20636 +       case 0: {
20637 +               int cpu;
20638 +               unsigned char flags;
20639 +
20640 +               printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20641 +               if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20642 +                       printk(KERN_WARNING "bios32_service: not valid\n");
20643                         return 0;
20644 +               }
20645 +               address = address + PAGE_OFFSET;
20646 +               length += 16UL; /* some BIOSs underreport this... */
20647 +               flags = 4;
20648 +               if (length >= 64*1024*1024) {
20649 +                       length >>= PAGE_SHIFT;
20650 +                       flags |= 8;
20651 +               }
20652 +
20653 +               for (cpu = 0; cpu < NR_CPUS; cpu++) {
20654 +                       gdt = get_cpu_gdt_table(cpu);
20655 +                       pack_descriptor(&d, address, length, 0x9b, flags);
20656 +                       write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20657 +                       pack_descriptor(&d, address, length, 0x93, flags);
20658 +                       write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20659 +               }
20660 +               return entry;
20661 +       }
20662 +       case 0x80:      /* Not present */
20663 +               printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20664 +               return 0;
20665 +       default: /* Shouldn't happen */
20666 +               printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20667 +                       service, return_code);
20668 +               return 0;
20669         }
20670  }
20671  
20672  static struct {
20673         unsigned long address;
20674         unsigned short segment;
20675 -} pci_indirect = { 0, __KERNEL_CS };
20676 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20677  
20678 -static int pci_bios_present;
20679 +static int pci_bios_present __read_only;
20680  
20681  static int __devinit check_pcibios(void)
20682  {
20683 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20684         unsigned long flags, pcibios_entry;
20685  
20686         if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20687 -               pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20688 +               pci_indirect.address = pcibios_entry;
20689  
20690                 local_irq_save(flags);
20691 -               __asm__(
20692 -                       "lcall *(%%edi); cld\n\t"
20693 +               __asm__("movw %w6, %%ds\n\t"
20694 +                       "lcall *%%ss:(%%edi); cld\n\t"
20695 +                       "push %%ss\n\t"
20696 +                       "pop %%ds\n\t"
20697                         "jc 1f\n\t"
20698                         "xor %%ah, %%ah\n"
20699                         "1:"
20700 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20701                           "=b" (ebx),
20702                           "=c" (ecx)
20703                         : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20704 -                         "D" (&pci_indirect)
20705 +                         "D" (&pci_indirect),
20706 +                         "r" (__PCIBIOS_DS)
20707                         : "memory");
20708                 local_irq_restore(flags);
20709  
20710 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20711  
20712         switch (len) {
20713         case 1:
20714 -               __asm__("lcall *(%%esi); cld\n\t"
20715 +               __asm__("movw %w6, %%ds\n\t"
20716 +                       "lcall *%%ss:(%%esi); cld\n\t"
20717 +                       "push %%ss\n\t"
20718 +                       "pop %%ds\n\t"
20719                         "jc 1f\n\t"
20720                         "xor %%ah, %%ah\n"
20721                         "1:"
20722 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20723                         : "1" (PCIBIOS_READ_CONFIG_BYTE),
20724                           "b" (bx),
20725                           "D" ((long)reg),
20726 -                         "S" (&pci_indirect));
20727 +                         "S" (&pci_indirect),
20728 +                         "r" (__PCIBIOS_DS));
20729                 /*
20730                  * Zero-extend the result beyond 8 bits, do not trust the
20731                  * BIOS having done it:
20732 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20733                 *value &= 0xff;
20734                 break;
20735         case 2:
20736 -               __asm__("lcall *(%%esi); cld\n\t"
20737 +               __asm__("movw %w6, %%ds\n\t"
20738 +                       "lcall *%%ss:(%%esi); cld\n\t"
20739 +                       "push %%ss\n\t"
20740 +                       "pop %%ds\n\t"
20741                         "jc 1f\n\t"
20742                         "xor %%ah, %%ah\n"
20743                         "1:"
20744 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20745                         : "1" (PCIBIOS_READ_CONFIG_WORD),
20746                           "b" (bx),
20747                           "D" ((long)reg),
20748 -                         "S" (&pci_indirect));
20749 +                         "S" (&pci_indirect),
20750 +                         "r" (__PCIBIOS_DS));
20751                 /*
20752                  * Zero-extend the result beyond 16 bits, do not trust the
20753                  * BIOS having done it:
20754 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20755                 *value &= 0xffff;
20756                 break;
20757         case 4:
20758 -               __asm__("lcall *(%%esi); cld\n\t"
20759 +               __asm__("movw %w6, %%ds\n\t"
20760 +                       "lcall *%%ss:(%%esi); cld\n\t"
20761 +                       "push %%ss\n\t"
20762 +                       "pop %%ds\n\t"
20763                         "jc 1f\n\t"
20764                         "xor %%ah, %%ah\n"
20765                         "1:"
20766 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20767                         : "1" (PCIBIOS_READ_CONFIG_DWORD),
20768                           "b" (bx),
20769                           "D" ((long)reg),
20770 -                         "S" (&pci_indirect));
20771 +                         "S" (&pci_indirect),
20772 +                         "r" (__PCIBIOS_DS));
20773                 break;
20774         }
20775  
20776 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20777  
20778         switch (len) {
20779         case 1:
20780 -               __asm__("lcall *(%%esi); cld\n\t"
20781 +               __asm__("movw %w6, %%ds\n\t"
20782 +                       "lcall *%%ss:(%%esi); cld\n\t"
20783 +                       "push %%ss\n\t"
20784 +                       "pop %%ds\n\t"
20785                         "jc 1f\n\t"
20786                         "xor %%ah, %%ah\n"
20787                         "1:"
20788 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20789                           "c" (value),
20790                           "b" (bx),
20791                           "D" ((long)reg),
20792 -                         "S" (&pci_indirect));
20793 +                         "S" (&pci_indirect),
20794 +                         "r" (__PCIBIOS_DS));
20795                 break;
20796         case 2:
20797 -               __asm__("lcall *(%%esi); cld\n\t"
20798 +               __asm__("movw %w6, %%ds\n\t"
20799 +                       "lcall *%%ss:(%%esi); cld\n\t"
20800 +                       "push %%ss\n\t"
20801 +                       "pop %%ds\n\t"
20802                         "jc 1f\n\t"
20803                         "xor %%ah, %%ah\n"
20804                         "1:"
20805 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20806                           "c" (value),
20807                           "b" (bx),
20808                           "D" ((long)reg),
20809 -                         "S" (&pci_indirect));
20810 +                         "S" (&pci_indirect),
20811 +                         "r" (__PCIBIOS_DS));
20812                 break;
20813         case 4:
20814 -               __asm__("lcall *(%%esi); cld\n\t"
20815 +               __asm__("movw %w6, %%ds\n\t"
20816 +                       "lcall *%%ss:(%%esi); cld\n\t"
20817 +                       "push %%ss\n\t"
20818 +                       "pop %%ds\n\t"
20819                         "jc 1f\n\t"
20820                         "xor %%ah, %%ah\n"
20821                         "1:"
20822 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20823                           "c" (value),
20824                           "b" (bx),
20825                           "D" ((long)reg),
20826 -                         "S" (&pci_indirect));
20827 +                         "S" (&pci_indirect),
20828 +                         "r" (__PCIBIOS_DS));
20829                 break;
20830         }
20831  
20832 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20833  
20834         DBG("PCI: Fetching IRQ routing table... ");
20835         __asm__("push %%es\n\t"
20836 +               "movw %w8, %%ds\n\t"
20837                 "push %%ds\n\t"
20838                 "pop  %%es\n\t"
20839 -               "lcall *(%%esi); cld\n\t"
20840 +               "lcall *%%ss:(%%esi); cld\n\t"
20841                 "pop %%es\n\t"
20842 +               "push %%ss\n\t"
20843 +               "pop %%ds\n"
20844                 "jc 1f\n\t"
20845                 "xor %%ah, %%ah\n"
20846                 "1:"
20847 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20848                   "1" (0),
20849                   "D" ((long) &opt),
20850                   "S" (&pci_indirect),
20851 -                 "m" (opt)
20852 +                 "m" (opt),
20853 +                 "r" (__PCIBIOS_DS)
20854                 : "memory");
20855         DBG("OK  ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20856         if (ret & 0xff00)
20857 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20858  {
20859         int ret;
20860  
20861 -       __asm__("lcall *(%%esi); cld\n\t"
20862 +       __asm__("movw %w5, %%ds\n\t"
20863 +               "lcall *%%ss:(%%esi); cld\n\t"
20864 +               "push %%ss\n\t"
20865 +               "pop %%ds\n"
20866                 "jc 1f\n\t"
20867                 "xor %%ah, %%ah\n"
20868                 "1:"
20869 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20870                 : "0" (PCIBIOS_SET_PCI_HW_INT),
20871                   "b" ((dev->bus->number << 8) | dev->devfn),
20872                   "c" ((irq << 8) | (pin + 10)),
20873 -                 "S" (&pci_indirect));
20874 +                 "S" (&pci_indirect),
20875 +                 "r" (__PCIBIOS_DS));
20876         return !(ret & 0xff00);
20877  }
20878  EXPORT_SYMBOL(pcibios_set_irq_routing);
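Instead of letting the 32-bit PCI BIOS run on the flat __KERNEL_CS/__KERNEL_DS segments, the pcbios.c hunks install dedicated GDT_ENTRY_PCIBIOS_CS/DS descriptors whose base and limit are clamped to the BIOS image (via pack_descriptor() and write_gdt_entry()), and every lcall now loads %ds with __PCIBIOS_DS and restores it from %ss afterwards, so a misbehaving BIOS can only address its own window. For orientation, the sketch below packs a base/limit/access/flags tuple into the two 32-bit words of a classic x86 segment descriptor; it is an independent illustration of the descriptor layout, not the kernel's pack_descriptor() helper.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a legacy 8-byte x86 segment descriptor from its fields. */
static void pack_desc(uint32_t out[2], uint32_t base, uint32_t limit,
                      uint8_t access, uint8_t flags)
{
        out[0] = (limit & 0xffff) | ((base & 0xffff) << 16);
        out[1] = ((base >> 16) & 0xff) |
                 ((uint32_t)access << 8) |
                 (((limit >> 16) & 0xf) << 16) |
                 (((uint32_t)flags & 0xf) << 20) |
                 ((base >> 24) << 24);
}

int main(void)
{
        uint32_t d[2];
        /* Flat 4 GiB code segment: base 0, limit 0xFFFFF pages, access 0x9B, flags 0xC. */
        pack_desc(d, 0x00000000, 0xFFFFF, 0x9B, 0xC);
        printf("descriptor = %08" PRIx32 "%08" PRIx32 "\n", d[1], d[0]);  /* 00cf9b000000ffff */
        return 0;
}

With the PCIBIOS descriptors the same packing is fed the BIOS base and length rather than 0/0xFFFFF, which is exactly how the patch shrinks what the firmware code can reach.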
20879 diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_32.c linux-3.0.4/arch/x86/platform/efi/efi_32.c
20880 --- linux-3.0.4/arch/x86/platform/efi/efi_32.c  2011-07-21 22:17:23.000000000 -0400
20881 +++ linux-3.0.4/arch/x86/platform/efi/efi_32.c  2011-08-23 21:47:55.000000000 -0400
20882 @@ -38,70 +38,37 @@
20883   */
20884  
20885  static unsigned long efi_rt_eflags;
20886 -static pgd_t efi_bak_pg_dir_pointer[2];
20887 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
20888  
20889 -void efi_call_phys_prelog(void)
20890 +void __init efi_call_phys_prelog(void)
20891  {
20892 -       unsigned long cr4;
20893 -       unsigned long temp;
20894         struct desc_ptr gdt_descr;
20895  
20896         local_irq_save(efi_rt_eflags);
20897  
20898 -       /*
20899 -        * If I don't have PAE, I should just duplicate two entries in page
20900 -        * directory. If I have PAE, I just need to duplicate one entry in
20901 -        * page directory.
20902 -        */
20903 -       cr4 = read_cr4_safe();
20904 -
20905 -       if (cr4 & X86_CR4_PAE) {
20906 -               efi_bak_pg_dir_pointer[0].pgd =
20907 -                   swapper_pg_dir[pgd_index(0)].pgd;
20908 -               swapper_pg_dir[0].pgd =
20909 -                   swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20910 -       } else {
20911 -               efi_bak_pg_dir_pointer[0].pgd =
20912 -                   swapper_pg_dir[pgd_index(0)].pgd;
20913 -               efi_bak_pg_dir_pointer[1].pgd =
20914 -                   swapper_pg_dir[pgd_index(0x400000)].pgd;
20915 -               swapper_pg_dir[pgd_index(0)].pgd =
20916 -                   swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20917 -               temp = PAGE_OFFSET + 0x400000;
20918 -               swapper_pg_dir[pgd_index(0x400000)].pgd =
20919 -                   swapper_pg_dir[pgd_index(temp)].pgd;
20920 -       }
20921 +       clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
20922 +       clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20923 +                       min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20924  
20925         /*
20926          * After the lock is released, the original page table is restored.
20927          */
20928         __flush_tlb_all();
20929  
20930 -       gdt_descr.address = __pa(get_cpu_gdt_table(0));
20931 +       gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
20932         gdt_descr.size = GDT_SIZE - 1;
20933         load_gdt(&gdt_descr);
20934  }
20935  
20936 -void efi_call_phys_epilog(void)
20937 +void __init efi_call_phys_epilog(void)
20938  {
20939 -       unsigned long cr4;
20940         struct desc_ptr gdt_descr;
20941  
20942 -       gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
20943 +       gdt_descr.address = get_cpu_gdt_table(0);
20944         gdt_descr.size = GDT_SIZE - 1;
20945         load_gdt(&gdt_descr);
20946  
20947 -       cr4 = read_cr4_safe();
20948 -
20949 -       if (cr4 & X86_CR4_PAE) {
20950 -               swapper_pg_dir[pgd_index(0)].pgd =
20951 -                   efi_bak_pg_dir_pointer[0].pgd;
20952 -       } else {
20953 -               swapper_pg_dir[pgd_index(0)].pgd =
20954 -                   efi_bak_pg_dir_pointer[0].pgd;
20955 -               swapper_pg_dir[pgd_index(0x400000)].pgd =
20956 -                   efi_bak_pg_dir_pointer[1].pgd;
20957 -       }
20958 +       clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
20959  
20960         /*
20961          * After the lock is released, the original page table is restored.
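The efi_32.c hunks replace the hand-rolled PAE/non-PAE special cases with a straightforward save-alias-restore of the kernel PGD range: clone_pgd_range() backs up KERNEL_PGD_PTRS entries, copies the kernel-half entries down to index 0 so the firmware can be called through low addresses, and the epilog copies the backup straight back. The sketch below is only a toy model of that save/alias/restore pattern using plain arrays and memcpy; live, backup and ENTRIES are invented names.

#include <stdio.h>
#include <string.h>

#define ENTRIES 8

/* Stand-ins: "pgd" entries are just longs here. */
static long live[ENTRIES]   = { 10, 11, 12, 13, 14, 15, 16, 17 };
static long backup[ENTRIES];

int main(void)
{
        /* Save the whole range, then alias the upper half over the lower half. */
        memcpy(backup, live, sizeof(live));
        memcpy(live, live + ENTRIES / 2, sizeof(live) / 2);
        printf("during call: live[0] = %ld\n", live[0]);        /* 14: temporary alias */

        /* Restore the original entries once the firmware call is done. */
        memcpy(live, backup, sizeof(live));
        printf("after:       live[0] = %ld\n", live[0]);        /* 10 again */
        return 0;
}

The prelog/epilog split in the hunk has the same shape: one clone_pgd_range() to save, one to alias, one to restore.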
20962 diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S
20963 --- linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S     2011-07-21 22:17:23.000000000 -0400
20964 +++ linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S     2011-08-23 21:47:55.000000000 -0400
20965 @@ -6,6 +6,7 @@
20966   */
20967  
20968  #include <linux/linkage.h>
20969 +#include <linux/init.h>
20970  #include <asm/page_types.h>
20971  
20972  /*
20973 @@ -20,7 +21,7 @@
20974   * service functions will comply with gcc calling convention, too.
20975   */
20976  
20977 -.text
20978 +__INIT
20979  ENTRY(efi_call_phys)
20980         /*
20981          * 0. The function can only be called in Linux kernel. So CS has been
20982 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
20983          * The mapping of lower virtual memory has been created in prelog and
20984          * epilog.
20985          */
20986 -       movl    $1f, %edx
20987 -       subl    $__PAGE_OFFSET, %edx
20988 -       jmp     *%edx
20989 +       jmp     1f-__PAGE_OFFSET
20990  1:
20991  
20992         /*
20993 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
20994          * parameter 2, ..., param n. To make things easy, we save the return
20995          * address of efi_call_phys in a global variable.
20996          */
20997 -       popl    %edx
20998 -       movl    %edx, saved_return_addr
20999 -       /* get the function pointer into ECX*/
21000 -       popl    %ecx
21001 -       movl    %ecx, efi_rt_function_ptr
21002 -       movl    $2f, %edx
21003 -       subl    $__PAGE_OFFSET, %edx
21004 -       pushl   %edx
21005 +       popl    (saved_return_addr)
21006 +       popl    (efi_rt_function_ptr)
21007  
21008         /*
21009          * 3. Clear PG bit in %CR0.
21010 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21011         /*
21012          * 5. Call the physical function.
21013          */
21014 -       jmp     *%ecx
21015 +       call    *(efi_rt_function_ptr-__PAGE_OFFSET)
21016  
21017 -2:
21018         /*
21019          * 6. After EFI runtime service returns, control will return to
21020          * following instruction. We'd better readjust stack pointer first.
21021 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21022         movl    %cr0, %edx
21023         orl     $0x80000000, %edx
21024         movl    %edx, %cr0
21025 -       jmp     1f
21026 -1:
21027 +
21028         /*
21029          * 8. Now restore the virtual mode from flat mode by
21030          * adding EIP with PAGE_OFFSET.
21031          */
21032 -       movl    $1f, %edx
21033 -       jmp     *%edx
21034 +       jmp     1f+__PAGE_OFFSET
21035  1:
21036  
21037         /*
21038          * 9. Balance the stack. And because EAX contain the return value,
21039          * we'd better not clobber it.
21040          */
21041 -       leal    efi_rt_function_ptr, %edx
21042 -       movl    (%edx), %ecx
21043 -       pushl   %ecx
21044 +       pushl   (efi_rt_function_ptr)
21045  
21046         /*
21047 -        * 10. Push the saved return address onto the stack and return.
21048 +        * 10. Return to the saved return address.
21049          */
21050 -       leal    saved_return_addr, %edx
21051 -       movl    (%edx), %ecx
21052 -       pushl   %ecx
21053 -       ret
21054 +       jmpl    *(saved_return_addr)
21055  ENDPROC(efi_call_phys)
21056  .previous
21057  
21058 -.data
21059 +__INITDATA
21060  saved_return_addr:
21061         .long 0
21062  efi_rt_function_ptr:
21063 diff -urNp linux-3.0.4/arch/x86/platform/mrst/mrst.c linux-3.0.4/arch/x86/platform/mrst/mrst.c
21064 --- linux-3.0.4/arch/x86/platform/mrst/mrst.c   2011-07-21 22:17:23.000000000 -0400
21065 +++ linux-3.0.4/arch/x86/platform/mrst/mrst.c   2011-08-23 21:47:55.000000000 -0400
21066 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21067  }
21068  
21069  /* Reboot and power off are handled by the SCU on a MID device */
21070 -static void mrst_power_off(void)
21071 +static __noreturn void mrst_power_off(void)
21072  {
21073         intel_scu_ipc_simple_command(0xf1, 1);
21074 +       BUG();
21075  }
21076  
21077 -static void mrst_reboot(void)
21078 +static __noreturn void mrst_reboot(void)
21079  {
21080         intel_scu_ipc_simple_command(0xf1, 0);
21081 +       BUG();
21082  }
21083  
21084  /*
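mrst_power_off() and mrst_reboot() are marked __noreturn here, and the added BUG() is what keeps that annotation honest: if the SCU IPC command ever did come back, the function would otherwise fall off its end while the compiler had already assumed no return path exists. A minimal illustration of that pairing, using the plain GCC attribute instead of the kernel's __noreturn macro and an invented request_power_off() stand-in for the IPC call:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the hardware request; in this sketch it simply returns. */
static void request_power_off(void)
{
}

__attribute__((noreturn)) static void power_off(void)
{
        request_power_off();
        /* Backstop so the noreturn promise holds even if the request returns. */
        fprintf(stderr, "power-off request unexpectedly returned\n");
        abort();
}

int main(void)
{
        power_off();
        /* not reached */
}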
21085 diff -urNp linux-3.0.4/arch/x86/platform/olpc/olpc_dt.c linux-3.0.4/arch/x86/platform/olpc/olpc_dt.c
21086 --- linux-3.0.4/arch/x86/platform/olpc/olpc_dt.c        2011-07-21 22:17:23.000000000 -0400
21087 +++ linux-3.0.4/arch/x86/platform/olpc/olpc_dt.c        2011-08-29 22:31:19.000000000 -0400
21088 @@ -163,7 +163,7 @@ static struct of_pdt_ops prom_olpc_ops _
21089         .getchild = olpc_dt_getchild,
21090         .getsibling = olpc_dt_getsibling,
21091         .pkg2path = olpc_dt_pkg2path,
21092 -};
21093 +} __no_const;
21094  
21095  void __init olpc_dt_build_devicetree(void)
21096  {
21097 diff -urNp linux-3.0.4/arch/x86/platform/uv/tlb_uv.c linux-3.0.4/arch/x86/platform/uv/tlb_uv.c
21098 --- linux-3.0.4/arch/x86/platform/uv/tlb_uv.c   2011-07-21 22:17:23.000000000 -0400
21099 +++ linux-3.0.4/arch/x86/platform/uv/tlb_uv.c   2011-08-23 21:48:14.000000000 -0400
21100 @@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21101         cpumask_t mask;
21102         struct reset_args reset_args;
21103  
21104 +       pax_track_stack();
21105 +
21106         reset_args.sender = sender;
21107         cpus_clear(mask);
21108         /* find a single cpu for each uvhub in this distribution mask */
21109 diff -urNp linux-3.0.4/arch/x86/power/cpu.c linux-3.0.4/arch/x86/power/cpu.c
21110 --- linux-3.0.4/arch/x86/power/cpu.c    2011-07-21 22:17:23.000000000 -0400
21111 +++ linux-3.0.4/arch/x86/power/cpu.c    2011-08-23 21:47:55.000000000 -0400
21112 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
21113  static void fix_processor_context(void)
21114  {
21115         int cpu = smp_processor_id();
21116 -       struct tss_struct *t = &per_cpu(init_tss, cpu);
21117 +       struct tss_struct *t = init_tss + cpu;
21118  
21119         set_tss_desc(cpu, t);   /*
21120                                  * This just modifies memory; should not be
21121 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
21122                                  */
21123  
21124  #ifdef CONFIG_X86_64
21125 +       pax_open_kernel();
21126         get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21127 +       pax_close_kernel();
21128  
21129         syscall_init();                         /* This sets MSR_*STAR and related */
21130  #endif
21131 diff -urNp linux-3.0.4/arch/x86/vdso/Makefile linux-3.0.4/arch/x86/vdso/Makefile
21132 --- linux-3.0.4/arch/x86/vdso/Makefile  2011-07-21 22:17:23.000000000 -0400
21133 +++ linux-3.0.4/arch/x86/vdso/Makefile  2011-08-23 21:47:55.000000000 -0400
21134 @@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO    $@
21135                        -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21136                  sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21137  
21138 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21139 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21140  GCOV_PROFILE := n
21141  
21142  #
21143 diff -urNp linux-3.0.4/arch/x86/vdso/vdso32-setup.c linux-3.0.4/arch/x86/vdso/vdso32-setup.c
21144 --- linux-3.0.4/arch/x86/vdso/vdso32-setup.c    2011-07-21 22:17:23.000000000 -0400
21145 +++ linux-3.0.4/arch/x86/vdso/vdso32-setup.c    2011-08-23 21:47:55.000000000 -0400
21146 @@ -25,6 +25,7 @@
21147  #include <asm/tlbflush.h>
21148  #include <asm/vdso.h>
21149  #include <asm/proto.h>
21150 +#include <asm/mman.h>
21151  
21152  enum {
21153         VDSO_DISABLED = 0,
21154 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21155  void enable_sep_cpu(void)
21156  {
21157         int cpu = get_cpu();
21158 -       struct tss_struct *tss = &per_cpu(init_tss, cpu);
21159 +       struct tss_struct *tss = init_tss + cpu;
21160  
21161         if (!boot_cpu_has(X86_FEATURE_SEP)) {
21162                 put_cpu();
21163 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21164         gate_vma.vm_start = FIXADDR_USER_START;
21165         gate_vma.vm_end = FIXADDR_USER_END;
21166         gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21167 -       gate_vma.vm_page_prot = __P101;
21168 +       gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21169         /*
21170          * Make sure the vDSO gets into every core dump.
21171          * Dumping its contents makes post-mortem fully interpretable later
21172 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21173         if (compat)
21174                 addr = VDSO_HIGH_BASE;
21175         else {
21176 -               addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21177 +               addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21178                 if (IS_ERR_VALUE(addr)) {
21179                         ret = addr;
21180                         goto up_fail;
21181                 }
21182         }
21183  
21184 -       current->mm->context.vdso = (void *)addr;
21185 +       current->mm->context.vdso = addr;
21186  
21187         if (compat_uses_vma || !compat) {
21188                 /*
21189 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21190         }
21191  
21192         current_thread_info()->sysenter_return =
21193 -               VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21194 +               (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21195  
21196    up_fail:
21197         if (ret)
21198 -               current->mm->context.vdso = NULL;
21199 +               current->mm->context.vdso = 0;
21200  
21201         up_write(&mm->mmap_sem);
21202  
21203 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21204  
21205  const char *arch_vma_name(struct vm_area_struct *vma)
21206  {
21207 -       if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21208 +       if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21209                 return "[vdso]";
21210 +
21211 +#ifdef CONFIG_PAX_SEGMEXEC
21212 +       if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21213 +               return "[vdso]";
21214 +#endif
21215 +
21216         return NULL;
21217  }
21218  
21219 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21220          * Check to see if the corresponding task was created in compat vdso
21221          * mode.
21222          */
21223 -       if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21224 +       if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21225                 return &gate_vma;
21226         return NULL;
21227  }
21228 diff -urNp linux-3.0.4/arch/x86/vdso/vma.c linux-3.0.4/arch/x86/vdso/vma.c
21229 --- linux-3.0.4/arch/x86/vdso/vma.c     2011-07-21 22:17:23.000000000 -0400
21230 +++ linux-3.0.4/arch/x86/vdso/vma.c     2011-08-23 21:47:55.000000000 -0400
21231 @@ -15,18 +15,19 @@
21232  #include <asm/proto.h>
21233  #include <asm/vdso.h>
21234  
21235 -unsigned int __read_mostly vdso_enabled = 1;
21236 -
21237  extern char vdso_start[], vdso_end[];
21238  extern unsigned short vdso_sync_cpuid;
21239 +extern char __vsyscall_0;
21240  
21241  static struct page **vdso_pages;
21242 +static struct page *vsyscall_page;
21243  static unsigned vdso_size;
21244  
21245  static int __init init_vdso_vars(void)
21246  {
21247 -       int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
21248 -       int i;
21249 +       size_t nbytes = vdso_end - vdso_start;
21250 +       size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
21251 +       size_t i;
21252  
21253         vdso_size = npages << PAGE_SHIFT;
21254         vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
21255 @@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
21256                 goto oom;
21257         for (i = 0; i < npages; i++) {
21258                 struct page *p;
21259 -               p = alloc_page(GFP_KERNEL);
21260 +               p = alloc_page(GFP_KERNEL | __GFP_ZERO);
21261                 if (!p)
21262                         goto oom;
21263                 vdso_pages[i] = p;
21264 -               copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
21265 +               memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
21266 +               nbytes -= PAGE_SIZE;
21267         }
21268 +       vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
21269  
21270         return 0;
21271  
21272   oom:
21273 -       printk("Cannot allocate vdso\n");
21274 -       vdso_enabled = 0;
21275 -       return -ENOMEM;
21276 +       panic("Cannot allocate vdso\n");
21277  }
21278  subsys_initcall(init_vdso_vars);
21279  
21280 @@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
21281         unsigned long addr;
21282         int ret;
21283  
21284 -       if (!vdso_enabled)
21285 -               return 0;
21286 -
21287         down_write(&mm->mmap_sem);
21288 -       addr = vdso_addr(mm->start_stack, vdso_size);
21289 -       addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
21290 +       addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
21291 +       addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
21292         if (IS_ERR_VALUE(addr)) {
21293                 ret = addr;
21294                 goto up_fail;
21295         }
21296  
21297 -       current->mm->context.vdso = (void *)addr;
21298 +       mm->context.vdso = addr + PAGE_SIZE;
21299  
21300 -       ret = install_special_mapping(mm, addr, vdso_size,
21301 +       ret = install_special_mapping(mm, addr, PAGE_SIZE,
21302                                       VM_READ|VM_EXEC|
21303 -                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21304 +                                     VM_MAYREAD|VM_MAYEXEC|
21305                                       VM_ALWAYSDUMP,
21306 -                                     vdso_pages);
21307 +                                     &vsyscall_page);
21308         if (ret) {
21309 -               current->mm->context.vdso = NULL;
21310 +               mm->context.vdso = 0;
21311                 goto up_fail;
21312         }
21313  
21314 +       ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
21315 +                                     VM_READ|VM_EXEC|
21316 +                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21317 +                                     VM_ALWAYSDUMP,
21318 +                                     vdso_pages);
21319 +       if (ret)
21320 +               mm->context.vdso = 0;
21321 +
21322  up_fail:
21323         up_write(&mm->mmap_sem);
21324         return ret;
21325  }
21326 -
21327 -static __init int vdso_setup(char *s)
21328 -{
21329 -       vdso_enabled = simple_strtoul(s, NULL, 0);
21330 -       return 0;
21331 -}
21332 -__setup("vdso=", vdso_setup);
21333 diff -urNp linux-3.0.4/arch/x86/xen/enlighten.c linux-3.0.4/arch/x86/xen/enlighten.c
21334 --- linux-3.0.4/arch/x86/xen/enlighten.c        2011-08-29 23:26:13.000000000 -0400
21335 +++ linux-3.0.4/arch/x86/xen/enlighten.c        2011-08-29 23:26:21.000000000 -0400
21336 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21337  
21338  struct shared_info xen_dummy_shared_info;
21339  
21340 -void *xen_initial_gdt;
21341 -
21342  RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21343  __read_mostly int xen_have_vector_callback;
21344  EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21345 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21346  #endif
21347  };
21348  
21349 -static void xen_reboot(int reason)
21350 +static __noreturn void xen_reboot(int reason)
21351  {
21352         struct sched_shutdown r = { .reason = reason };
21353  
21354 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21355                 BUG();
21356  }
21357  
21358 -static void xen_restart(char *msg)
21359 +static __noreturn void xen_restart(char *msg)
21360  {
21361         xen_reboot(SHUTDOWN_reboot);
21362  }
21363  
21364 -static void xen_emergency_restart(void)
21365 +static __noreturn void xen_emergency_restart(void)
21366  {
21367         xen_reboot(SHUTDOWN_reboot);
21368  }
21369  
21370 -static void xen_machine_halt(void)
21371 +static __noreturn void xen_machine_halt(void)
21372  {
21373         xen_reboot(SHUTDOWN_poweroff);
21374  }
21375 @@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
21376         __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21377  
21378         /* Work out if we support NX */
21379 -       x86_configure_nx();
21380 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21381 +       if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21382 +           (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21383 +               unsigned l, h;
21384 +
21385 +               __supported_pte_mask |= _PAGE_NX;
21386 +               rdmsr(MSR_EFER, l, h);
21387 +               l |= EFER_NX;
21388 +               wrmsr(MSR_EFER, l, h);
21389 +       }
21390 +#endif
21391  
21392         xen_setup_features();
21393  
21394 @@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
21395  
21396         machine_ops = xen_machine_ops;
21397  
21398 -       /*
21399 -        * The only reliable way to retain the initial address of the
21400 -        * percpu gdt_page is to remember it here, so we can go and
21401 -        * mark it RW later, when the initial percpu area is freed.
21402 -        */
21403 -       xen_initial_gdt = &per_cpu(gdt_page, 0);
21404 -
21405         xen_smp_init();
21406  
21407  #ifdef CONFIG_ACPI_NUMA
21408 diff -urNp linux-3.0.4/arch/x86/xen/mmu.c linux-3.0.4/arch/x86/xen/mmu.c
21409 --- linux-3.0.4/arch/x86/xen/mmu.c      2011-08-29 23:26:13.000000000 -0400
21410 +++ linux-3.0.4/arch/x86/xen/mmu.c      2011-08-29 23:26:21.000000000 -0400
21411 @@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
21412         convert_pfn_mfn(init_level4_pgt);
21413         convert_pfn_mfn(level3_ident_pgt);
21414         convert_pfn_mfn(level3_kernel_pgt);
21415 +       convert_pfn_mfn(level3_vmalloc_pgt);
21416 +       convert_pfn_mfn(level3_vmemmap_pgt);
21417  
21418         l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21419         l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21420 @@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
21421         set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21422         set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21423         set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21424 +       set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21425 +       set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21426         set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21427 +       set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21428         set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21429         set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21430  
21431 @@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
21432         pv_mmu_ops.set_pud = xen_set_pud;
21433  #if PAGETABLE_LEVELS == 4
21434         pv_mmu_ops.set_pgd = xen_set_pgd;
21435 +       pv_mmu_ops.set_pgd_batched = xen_set_pgd;
21436  #endif
21437  
21438         /* This will work as long as patching hasn't happened yet
21439 @@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
21440         .pud_val = PV_CALLEE_SAVE(xen_pud_val),
21441         .make_pud = PV_CALLEE_SAVE(xen_make_pud),
21442         .set_pgd = xen_set_pgd_hyper,
21443 +       .set_pgd_batched = xen_set_pgd_hyper,
21444  
21445         .alloc_pud = xen_alloc_pmd_init,
21446         .release_pud = xen_release_pmd_init,
21447 diff -urNp linux-3.0.4/arch/x86/xen/smp.c linux-3.0.4/arch/x86/xen/smp.c
21448 --- linux-3.0.4/arch/x86/xen/smp.c      2011-08-29 23:26:13.000000000 -0400
21449 +++ linux-3.0.4/arch/x86/xen/smp.c      2011-08-29 23:26:21.000000000 -0400
21450 @@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
21451  {
21452         BUG_ON(smp_processor_id() != 0);
21453         native_smp_prepare_boot_cpu();
21454 -
21455 -       /* We've switched to the "real" per-cpu gdt, so make sure the
21456 -          old memory can be recycled */
21457 -       make_lowmem_page_readwrite(xen_initial_gdt);
21458 -
21459         xen_filter_cpu_maps();
21460         xen_setup_vcpu_info_placement();
21461  }
21462 @@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
21463         gdt = get_cpu_gdt_table(cpu);
21464  
21465         ctxt->flags = VGCF_IN_KERNEL;
21466 -       ctxt->user_regs.ds = __USER_DS;
21467 -       ctxt->user_regs.es = __USER_DS;
21468 +       ctxt->user_regs.ds = __KERNEL_DS;
21469 +       ctxt->user_regs.es = __KERNEL_DS;
21470         ctxt->user_regs.ss = __KERNEL_DS;
21471  #ifdef CONFIG_X86_32
21472         ctxt->user_regs.fs = __KERNEL_PERCPU;
21473 -       ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21474 +       savesegment(gs, ctxt->user_regs.gs);
21475  #else
21476         ctxt->gs_base_kernel = per_cpu_offset(cpu);
21477  #endif
21478 @@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
21479         int rc;
21480  
21481         per_cpu(current_task, cpu) = idle;
21482 +       per_cpu(current_tinfo, cpu) = &idle->tinfo;
21483  #ifdef CONFIG_X86_32
21484         irq_ctx_init(cpu);
21485  #else
21486         clear_tsk_thread_flag(idle, TIF_FORK);
21487 -       per_cpu(kernel_stack, cpu) =
21488 -               (unsigned long)task_stack_page(idle) -
21489 -               KERNEL_STACK_OFFSET + THREAD_SIZE;
21490 +       per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21491  #endif
21492         xen_setup_runstate_info(cpu);
21493         xen_setup_timer(cpu);
21494 diff -urNp linux-3.0.4/arch/x86/xen/xen-asm_32.S linux-3.0.4/arch/x86/xen/xen-asm_32.S
21495 --- linux-3.0.4/arch/x86/xen/xen-asm_32.S       2011-07-21 22:17:23.000000000 -0400
21496 +++ linux-3.0.4/arch/x86/xen/xen-asm_32.S       2011-08-23 21:47:55.000000000 -0400
21497 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
21498         ESP_OFFSET=4    # bytes pushed onto stack
21499  
21500         /*
21501 -        * Store vcpu_info pointer for easy access.  Do it this way to
21502 -        * avoid having to reload %fs
21503 +        * Store vcpu_info pointer for easy access.
21504          */
21505  #ifdef CONFIG_SMP
21506 -       GET_THREAD_INFO(%eax)
21507 -       movl TI_cpu(%eax), %eax
21508 -       movl __per_cpu_offset(,%eax,4), %eax
21509 -       mov xen_vcpu(%eax), %eax
21510 +       push %fs
21511 +       mov $(__KERNEL_PERCPU), %eax
21512 +       mov %eax, %fs
21513 +       mov PER_CPU_VAR(xen_vcpu), %eax
21514 +       pop %fs
21515  #else
21516         movl xen_vcpu, %eax
21517  #endif
21518 diff -urNp linux-3.0.4/arch/x86/xen/xen-head.S linux-3.0.4/arch/x86/xen/xen-head.S
21519 --- linux-3.0.4/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
21520 +++ linux-3.0.4/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
21521 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
21522  #ifdef CONFIG_X86_32
21523         mov %esi,xen_start_info
21524         mov $init_thread_union+THREAD_SIZE,%esp
21525 +#ifdef CONFIG_SMP
21526 +       movl $cpu_gdt_table,%edi
21527 +       movl $__per_cpu_load,%eax
21528 +       movw %ax,__KERNEL_PERCPU + 2(%edi)
21529 +       rorl $16,%eax
21530 +       movb %al,__KERNEL_PERCPU + 4(%edi)
21531 +       movb %ah,__KERNEL_PERCPU + 7(%edi)
21532 +       movl $__per_cpu_end - 1,%eax
21533 +       subl $__per_cpu_start,%eax
21534 +       movw %ax,__KERNEL_PERCPU + 0(%edi)
21535 +#endif
21536  #else
21537         mov %rsi,xen_start_info
21538         mov $init_thread_union+THREAD_SIZE,%rsp
21539 diff -urNp linux-3.0.4/arch/x86/xen/xen-ops.h linux-3.0.4/arch/x86/xen/xen-ops.h
21540 --- linux-3.0.4/arch/x86/xen/xen-ops.h  2011-08-23 21:44:40.000000000 -0400
21541 +++ linux-3.0.4/arch/x86/xen/xen-ops.h  2011-08-23 21:47:55.000000000 -0400
21542 @@ -10,8 +10,6 @@
21543  extern const char xen_hypervisor_callback[];
21544  extern const char xen_failsafe_callback[];
21545  
21546 -extern void *xen_initial_gdt;
21547 -
21548  struct trap_info;
21549  void xen_copy_trap_info(struct trap_info *traps);
21550  
21551 diff -urNp linux-3.0.4/block/blk-iopoll.c linux-3.0.4/block/blk-iopoll.c
21552 --- linux-3.0.4/block/blk-iopoll.c      2011-07-21 22:17:23.000000000 -0400
21553 +++ linux-3.0.4/block/blk-iopoll.c      2011-08-23 21:47:55.000000000 -0400
21554 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21555  }
21556  EXPORT_SYMBOL(blk_iopoll_complete);
21557  
21558 -static void blk_iopoll_softirq(struct softirq_action *h)
21559 +static void blk_iopoll_softirq(void)
21560  {
21561         struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21562         int rearm = 0, budget = blk_iopoll_budget;
21563 diff -urNp linux-3.0.4/block/blk-map.c linux-3.0.4/block/blk-map.c
21564 --- linux-3.0.4/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
21565 +++ linux-3.0.4/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
21566 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21567         if (!len || !kbuf)
21568                 return -EINVAL;
21569  
21570 -       do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21571 +       do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21572         if (do_copy)
21573                 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21574         else
21575 diff -urNp linux-3.0.4/block/blk-softirq.c linux-3.0.4/block/blk-softirq.c
21576 --- linux-3.0.4/block/blk-softirq.c     2011-07-21 22:17:23.000000000 -0400
21577 +++ linux-3.0.4/block/blk-softirq.c     2011-08-23 21:47:55.000000000 -0400
21578 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, 
21579   * Softirq action handler - move entries to local list and loop over them
21580   * while passing them to the queue registered handler.
21581   */
21582 -static void blk_done_softirq(struct softirq_action *h)
21583 +static void blk_done_softirq(void)
21584  {
21585         struct list_head *cpu_list, local_list;
21586  
21587 diff -urNp linux-3.0.4/block/bsg.c linux-3.0.4/block/bsg.c
21588 --- linux-3.0.4/block/bsg.c     2011-07-21 22:17:23.000000000 -0400
21589 +++ linux-3.0.4/block/bsg.c     2011-08-23 21:47:55.000000000 -0400
21590 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21591                                 struct sg_io_v4 *hdr, struct bsg_device *bd,
21592                                 fmode_t has_write_perm)
21593  {
21594 +       unsigned char tmpcmd[sizeof(rq->__cmd)];
21595 +       unsigned char *cmdptr;
21596 +
21597         if (hdr->request_len > BLK_MAX_CDB) {
21598                 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21599                 if (!rq->cmd)
21600                         return -ENOMEM;
21601 -       }
21602 +               cmdptr = rq->cmd;
21603 +       } else
21604 +               cmdptr = tmpcmd;
21605  
21606 -       if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21607 +       if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21608                            hdr->request_len))
21609                 return -EFAULT;
21610  
21611 +       if (cmdptr != rq->cmd)
21612 +               memcpy(rq->cmd, cmdptr, hdr->request_len);
21613 +
21614         if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21615                 if (blk_verify_command(rq->cmd, has_write_perm))
21616                         return -EPERM;
21617 diff -urNp linux-3.0.4/block/scsi_ioctl.c linux-3.0.4/block/scsi_ioctl.c
21618 --- linux-3.0.4/block/scsi_ioctl.c      2011-07-21 22:17:23.000000000 -0400
21619 +++ linux-3.0.4/block/scsi_ioctl.c      2011-08-23 21:47:55.000000000 -0400
21620 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21621  static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21622                              struct sg_io_hdr *hdr, fmode_t mode)
21623  {
21624 -       if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21625 +       unsigned char tmpcmd[sizeof(rq->__cmd)];
21626 +       unsigned char *cmdptr;
21627 +
21628 +       if (rq->cmd != rq->__cmd)
21629 +               cmdptr = rq->cmd;
21630 +       else
21631 +               cmdptr = tmpcmd;
21632 +
21633 +       if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21634                 return -EFAULT;
21635 +
21636 +       if (cmdptr != rq->cmd)
21637 +               memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21638 +
21639         if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21640                 return -EPERM;
21641  
21642 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21643         int err;
21644         unsigned int in_len, out_len, bytes, opcode, cmdlen;
21645         char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21646 +       unsigned char tmpcmd[sizeof(rq->__cmd)];
21647 +       unsigned char *cmdptr;
21648  
21649         if (!sic)
21650                 return -EINVAL;
21651 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21652          */
21653         err = -EFAULT;
21654         rq->cmd_len = cmdlen;
21655 -       if (copy_from_user(rq->cmd, sic->data, cmdlen))
21656 +
21657 +       if (rq->cmd != rq->__cmd)
21658 +               cmdptr = rq->cmd;
21659 +       else
21660 +               cmdptr = tmpcmd;
21661 +
21662 +       if (copy_from_user(cmdptr, sic->data, cmdlen))
21663                 goto error;
21664  
21665 +       if (rq->cmd != cmdptr)
21666 +               memcpy(rq->cmd, cmdptr, cmdlen);
21667 +
21668         if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21669                 goto error;
21670  
21671 diff -urNp linux-3.0.4/crypto/cryptd.c linux-3.0.4/crypto/cryptd.c
21672 --- linux-3.0.4/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
21673 +++ linux-3.0.4/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
21674 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21675  
21676  struct cryptd_blkcipher_request_ctx {
21677         crypto_completion_t complete;
21678 -};
21679 +} __no_const;
21680  
21681  struct cryptd_hash_ctx {
21682         struct crypto_shash *child;
21683 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21684  
21685  struct cryptd_aead_request_ctx {
21686         crypto_completion_t complete;
21687 -};
21688 +} __no_const;
21689  
21690  static void cryptd_queue_worker(struct work_struct *work);
21691  
21692 diff -urNp linux-3.0.4/crypto/gf128mul.c linux-3.0.4/crypto/gf128mul.c
21693 --- linux-3.0.4/crypto/gf128mul.c       2011-07-21 22:17:23.000000000 -0400
21694 +++ linux-3.0.4/crypto/gf128mul.c       2011-08-23 21:47:55.000000000 -0400
21695 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 
21696         for (i = 0; i < 7; ++i)
21697                 gf128mul_x_lle(&p[i + 1], &p[i]);
21698  
21699 -       memset(r, 0, sizeof(r));
21700 +       memset(r, 0, sizeof(*r));
21701         for (i = 0;;) {
21702                 u8 ch = ((u8 *)b)[15 - i];
21703  
21704 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 
21705         for (i = 0; i < 7; ++i)
21706                 gf128mul_x_bbe(&p[i + 1], &p[i]);
21707  
21708 -       memset(r, 0, sizeof(r));
21709 +       memset(r, 0, sizeof(*r));
21710         for (i = 0;;) {
21711                 u8 ch = ((u8 *)b)[i];
21712  
21713 diff -urNp linux-3.0.4/crypto/serpent.c linux-3.0.4/crypto/serpent.c
21714 --- linux-3.0.4/crypto/serpent.c        2011-07-21 22:17:23.000000000 -0400
21715 +++ linux-3.0.4/crypto/serpent.c        2011-08-23 21:48:14.000000000 -0400
21716 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21717         u32 r0,r1,r2,r3,r4;
21718         int i;
21719  
21720 +       pax_track_stack();
21721 +
21722         /* Copy key, add padding */
21723  
21724         for (i = 0; i < keylen; ++i)
21725 diff -urNp linux-3.0.4/Documentation/dontdiff linux-3.0.4/Documentation/dontdiff
21726 --- linux-3.0.4/Documentation/dontdiff  2011-07-21 22:17:23.000000000 -0400
21727 +++ linux-3.0.4/Documentation/dontdiff  2011-08-23 21:47:55.000000000 -0400
21728 @@ -5,6 +5,7 @@
21729  *.cis
21730  *.cpio
21731  *.csp
21732 +*.dbg
21733  *.dsp
21734  *.dvi
21735  *.elf
21736 @@ -48,9 +49,11 @@
21737  *.tab.h
21738  *.tex
21739  *.ver
21740 +*.vim
21741  *.xml
21742  *.xz
21743  *_MODULES
21744 +*_reg_safe.h
21745  *_vga16.c
21746  *~
21747  \#*#
21748 @@ -70,6 +73,7 @@ Kerntypes
21749  Module.markers
21750  Module.symvers
21751  PENDING
21752 +PERF*
21753  SCCS
21754  System.map*
21755  TAGS
21756 @@ -98,6 +102,8 @@ bzImage*
21757  capability_names.h
21758  capflags.c
21759  classlist.h*
21760 +clut_vga16.c
21761 +common-cmds.h
21762  comp*.log
21763  compile.h*
21764  conf
21765 @@ -126,12 +132,14 @@ fore200e_pca_fw.c*
21766  gconf
21767  gconf.glade.h
21768  gen-devlist
21769 +gen-kdb_cmds.c
21770  gen_crc32table
21771  gen_init_cpio
21772  generated
21773  genheaders
21774  genksyms
21775  *_gray256.c
21776 +hash
21777  hpet_example
21778  hugepage-mmap
21779  hugepage-shm
21780 @@ -146,7 +154,6 @@ int32.c
21781  int4.c
21782  int8.c
21783  kallsyms
21784 -kconfig
21785  keywords.c
21786  ksym.c*
21787  ksym.h*
21788 @@ -154,7 +161,6 @@ kxgettext
21789  lkc_defs.h
21790  lex.c
21791  lex.*.c
21792 -linux
21793  logo_*.c
21794  logo_*_clut224.c
21795  logo_*_mono.c
21796 @@ -174,6 +180,7 @@ mkboot
21797  mkbugboot
21798  mkcpustr
21799  mkdep
21800 +mkpiggy
21801  mkprep
21802  mkregtable
21803  mktables
21804 @@ -209,6 +216,7 @@ r300_reg_safe.h
21805  r420_reg_safe.h
21806  r600_reg_safe.h
21807  recordmcount
21808 +regdb.c
21809  relocs
21810  rlim_names.h
21811  rn50_reg_safe.h
21812 @@ -219,6 +227,7 @@ setup
21813  setup.bin
21814  setup.elf
21815  sImage
21816 +slabinfo
21817  sm_tbl*
21818  split-include
21819  syscalltab.h
21820 @@ -246,7 +255,9 @@ vmlinux
21821  vmlinux-*
21822  vmlinux.aout
21823  vmlinux.bin.all
21824 +vmlinux.bin.bz2
21825  vmlinux.lds
21826 +vmlinux.relocs
21827  vmlinuz
21828  voffset.h
21829  vsyscall.lds
21830 @@ -254,6 +265,7 @@ vsyscall_32.lds
21831  wanxlfw.inc
21832  uImage
21833  unifdef
21834 +utsrelease.h
21835  wakeup.bin
21836  wakeup.elf
21837  wakeup.lds
21838 diff -urNp linux-3.0.4/Documentation/kernel-parameters.txt linux-3.0.4/Documentation/kernel-parameters.txt
21839 --- linux-3.0.4/Documentation/kernel-parameters.txt     2011-07-21 22:17:23.000000000 -0400
21840 +++ linux-3.0.4/Documentation/kernel-parameters.txt     2011-08-23 21:47:55.000000000 -0400
21841 @@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
21842                         the specified number of seconds.  This is to be used if
21843                         your oopses keep scrolling off the screen.
21844  
21845 +       pax_nouderef    [X86] disables UDEREF.  Most likely needed under certain
21846 +                       virtualization environments that don't cope well with the
21847 +                       expand down segment used by UDEREF on X86-32 or the frequent
21848 +                       page table updates on X86-64.
21849 +
21850 +       pax_softmode=   0/1 to disable/enable PaX softmode on boot already.
21851 +
21852         pcbit=          [HW,ISDN]
21853  
21854         pcd.            [PARIDE]
21855 diff -urNp linux-3.0.4/drivers/acpi/apei/cper.c linux-3.0.4/drivers/acpi/apei/cper.c
21856 --- linux-3.0.4/drivers/acpi/apei/cper.c        2011-07-21 22:17:23.000000000 -0400
21857 +++ linux-3.0.4/drivers/acpi/apei/cper.c        2011-08-23 21:47:55.000000000 -0400
21858 @@ -38,12 +38,12 @@
21859   */
21860  u64 cper_next_record_id(void)
21861  {
21862 -       static atomic64_t seq;
21863 +       static atomic64_unchecked_t seq;
21864  
21865 -       if (!atomic64_read(&seq))
21866 -               atomic64_set(&seq, ((u64)get_seconds()) << 32);
21867 +       if (!atomic64_read_unchecked(&seq))
21868 +               atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
21869  
21870 -       return atomic64_inc_return(&seq);
21871 +       return atomic64_inc_return_unchecked(&seq);
21872  }
21873  EXPORT_SYMBOL_GPL(cper_next_record_id);
21874  
21875 diff -urNp linux-3.0.4/drivers/acpi/ec_sys.c linux-3.0.4/drivers/acpi/ec_sys.c
21876 --- linux-3.0.4/drivers/acpi/ec_sys.c   2011-07-21 22:17:23.000000000 -0400
21877 +++ linux-3.0.4/drivers/acpi/ec_sys.c   2011-08-24 19:06:55.000000000 -0400
21878 @@ -11,6 +11,7 @@
21879  #include <linux/kernel.h>
21880  #include <linux/acpi.h>
21881  #include <linux/debugfs.h>
21882 +#include <asm/uaccess.h>
21883  #include "internal.h"
21884  
21885  MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
21886 @@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
21887          * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
21888          */
21889         unsigned int size = EC_SPACE_SIZE;
21890 -       u8 *data = (u8 *) buf;
21891 +       u8 data;
21892         loff_t init_off = *off;
21893         int err = 0;
21894  
21895 @@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
21896                 size = count;
21897  
21898         while (size) {
21899 -               err = ec_read(*off, &data[*off - init_off]);
21900 +               err = ec_read(*off, &data);
21901                 if (err)
21902                         return err;
21903 +               if (put_user(data, &buf[*off - init_off]))
21904 +                       return -EFAULT;
21905                 *off += 1;
21906                 size--;
21907         }
21908 @@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
21909  
21910         unsigned int size = count;
21911         loff_t init_off = *off;
21912 -       u8 *data = (u8 *) buf;
21913         int err = 0;
21914  
21915         if (*off >= EC_SPACE_SIZE)
21916 @@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
21917         }
21918  
21919         while (size) {
21920 -               u8 byte_write = data[*off - init_off];
21921 +               u8 byte_write;
21922 +               if (get_user(byte_write, &buf[*off - init_off]))
21923 +                       return -EFAULT;
21924                 err = ec_write(*off, byte_write);
21925                 if (err)
21926                         return err;
21927 diff -urNp linux-3.0.4/drivers/acpi/proc.c linux-3.0.4/drivers/acpi/proc.c
21928 --- linux-3.0.4/drivers/acpi/proc.c     2011-07-21 22:17:23.000000000 -0400
21929 +++ linux-3.0.4/drivers/acpi/proc.c     2011-08-23 21:47:55.000000000 -0400
21930 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
21931                                 size_t count, loff_t * ppos)
21932  {
21933         struct list_head *node, *next;
21934 -       char strbuf[5];
21935 -       char str[5] = "";
21936 -       unsigned int len = count;
21937 -
21938 -       if (len > 4)
21939 -               len = 4;
21940 -       if (len < 0)
21941 -               return -EFAULT;
21942 +       char strbuf[5] = {0};
21943  
21944 -       if (copy_from_user(strbuf, buffer, len))
21945 +       if (count > 4)
21946 +               count = 4;
21947 +       if (copy_from_user(strbuf, buffer, count))
21948                 return -EFAULT;
21949 -       strbuf[len] = '\0';
21950 -       sscanf(strbuf, "%s", str);
21951 +       strbuf[count] = '\0';
21952  
21953         mutex_lock(&acpi_device_lock);
21954         list_for_each_safe(node, next, &acpi_wakeup_device_list) {
21955 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
21956                 if (!dev->wakeup.flags.valid)
21957                         continue;
21958  
21959 -               if (!strncmp(dev->pnp.bus_id, str, 4)) {
21960 +               if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
21961                         if (device_can_wakeup(&dev->dev)) {
21962                                 bool enable = !device_may_wakeup(&dev->dev);
21963                                 device_set_wakeup_enable(&dev->dev, enable);
21964 diff -urNp linux-3.0.4/drivers/acpi/processor_driver.c linux-3.0.4/drivers/acpi/processor_driver.c
21965 --- linux-3.0.4/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
21966 +++ linux-3.0.4/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
21967 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
21968                 return 0;
21969  #endif
21970  
21971 -       BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
21972 +       BUG_ON(pr->id >= nr_cpu_ids);
21973  
21974         /*
21975          * Buggy BIOS check
21976 diff -urNp linux-3.0.4/drivers/ata/libata-core.c linux-3.0.4/drivers/ata/libata-core.c
21977 --- linux-3.0.4/drivers/ata/libata-core.c       2011-07-21 22:17:23.000000000 -0400
21978 +++ linux-3.0.4/drivers/ata/libata-core.c       2011-08-23 21:47:55.000000000 -0400
21979 @@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
21980         struct ata_port *ap;
21981         unsigned int tag;
21982  
21983 -       WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21984 +       BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21985         ap = qc->ap;
21986  
21987         qc->flags = 0;
21988 @@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
21989         struct ata_port *ap;
21990         struct ata_link *link;
21991  
21992 -       WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21993 +       BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21994         WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
21995         ap = qc->ap;
21996         link = qc->dev->link;
21997 @@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
21998                 return;
21999  
22000         spin_lock(&lock);
22001 +       pax_open_kernel();
22002  
22003         for (cur = ops->inherits; cur; cur = cur->inherits) {
22004                 void **inherit = (void **)cur;
22005 @@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
22006                 if (IS_ERR(*pp))
22007                         *pp = NULL;
22008  
22009 -       ops->inherits = NULL;
22010 +       *(struct ata_port_operations **)&ops->inherits = NULL;
22011  
22012 +       pax_close_kernel();
22013         spin_unlock(&lock);
22014  }
22015  
22016 diff -urNp linux-3.0.4/drivers/ata/libata-eh.c linux-3.0.4/drivers/ata/libata-eh.c
22017 --- linux-3.0.4/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
22018 +++ linux-3.0.4/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
22019 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22020  {
22021         struct ata_link *link;
22022  
22023 +       pax_track_stack();
22024 +
22025         ata_for_each_link(link, ap, HOST_FIRST)
22026                 ata_eh_link_report(link);
22027  }
22028 diff -urNp linux-3.0.4/drivers/ata/pata_arasan_cf.c linux-3.0.4/drivers/ata/pata_arasan_cf.c
22029 --- linux-3.0.4/drivers/ata/pata_arasan_cf.c    2011-07-21 22:17:23.000000000 -0400
22030 +++ linux-3.0.4/drivers/ata/pata_arasan_cf.c    2011-08-23 21:47:55.000000000 -0400
22031 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22032         /* Handle platform specific quirks */
22033         if (pdata->quirk) {
22034                 if (pdata->quirk & CF_BROKEN_PIO) {
22035 -                       ap->ops->set_piomode = NULL;
22036 +                       pax_open_kernel();
22037 +                       *(void **)&ap->ops->set_piomode = NULL;
22038 +                       pax_close_kernel();
22039                         ap->pio_mask = 0;
22040                 }
22041                 if (pdata->quirk & CF_BROKEN_MWDMA)
22042 diff -urNp linux-3.0.4/drivers/atm/adummy.c linux-3.0.4/drivers/atm/adummy.c
22043 --- linux-3.0.4/drivers/atm/adummy.c    2011-07-21 22:17:23.000000000 -0400
22044 +++ linux-3.0.4/drivers/atm/adummy.c    2011-08-23 21:47:55.000000000 -0400
22045 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct 
22046                 vcc->pop(vcc, skb);
22047         else
22048                 dev_kfree_skb_any(skb);
22049 -       atomic_inc(&vcc->stats->tx);
22050 +       atomic_inc_unchecked(&vcc->stats->tx);
22051  
22052         return 0;
22053  }
22054 diff -urNp linux-3.0.4/drivers/atm/ambassador.c linux-3.0.4/drivers/atm/ambassador.c
22055 --- linux-3.0.4/drivers/atm/ambassador.c        2011-07-21 22:17:23.000000000 -0400
22056 +++ linux-3.0.4/drivers/atm/ambassador.c        2011-08-23 21:47:55.000000000 -0400
22057 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, 
22058    PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22059    
22060    // VC layer stats
22061 -  atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22062 +  atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22063    
22064    // free the descriptor
22065    kfree (tx_descr);
22066 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, 
22067           dump_skb ("<<<", vc, skb);
22068           
22069           // VC layer stats
22070 -         atomic_inc(&atm_vcc->stats->rx);
22071 +         atomic_inc_unchecked(&atm_vcc->stats->rx);
22072           __net_timestamp(skb);
22073           // end of our responsibility
22074           atm_vcc->push (atm_vcc, skb);
22075 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, 
22076        } else {
22077         PRINTK (KERN_INFO, "dropped over-size frame");
22078         // should we count this?
22079 -       atomic_inc(&atm_vcc->stats->rx_drop);
22080 +       atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22081        }
22082        
22083      } else {
22084 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22085    }
22086    
22087    if (check_area (skb->data, skb->len)) {
22088 -    atomic_inc(&atm_vcc->stats->tx_err);
22089 +    atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22090      return -ENOMEM; // ?
22091    }
22092    
22093 diff -urNp linux-3.0.4/drivers/atm/atmtcp.c linux-3.0.4/drivers/atm/atmtcp.c
22094 --- linux-3.0.4/drivers/atm/atmtcp.c    2011-07-21 22:17:23.000000000 -0400
22095 +++ linux-3.0.4/drivers/atm/atmtcp.c    2011-08-23 21:47:55.000000000 -0400
22096 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc 
22097                 if (vcc->pop) vcc->pop(vcc,skb);
22098                 else dev_kfree_skb(skb);
22099                 if (dev_data) return 0;
22100 -               atomic_inc(&vcc->stats->tx_err);
22101 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22102                 return -ENOLINK;
22103         }
22104         size = skb->len+sizeof(struct atmtcp_hdr);
22105 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc 
22106         if (!new_skb) {
22107                 if (vcc->pop) vcc->pop(vcc,skb);
22108                 else dev_kfree_skb(skb);
22109 -               atomic_inc(&vcc->stats->tx_err);
22110 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22111                 return -ENOBUFS;
22112         }
22113         hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22114 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc 
22115         if (vcc->pop) vcc->pop(vcc,skb);
22116         else dev_kfree_skb(skb);
22117         out_vcc->push(out_vcc,new_skb);
22118 -       atomic_inc(&vcc->stats->tx);
22119 -       atomic_inc(&out_vcc->stats->rx);
22120 +       atomic_inc_unchecked(&vcc->stats->tx);
22121 +       atomic_inc_unchecked(&out_vcc->stats->rx);
22122         return 0;
22123  }
22124  
22125 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc 
22126         out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22127         read_unlock(&vcc_sklist_lock);
22128         if (!out_vcc) {
22129 -               atomic_inc(&vcc->stats->tx_err);
22130 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22131                 goto done;
22132         }
22133         skb_pull(skb,sizeof(struct atmtcp_hdr));
22134 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc 
22135         __net_timestamp(new_skb);
22136         skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22137         out_vcc->push(out_vcc,new_skb);
22138 -       atomic_inc(&vcc->stats->tx);
22139 -       atomic_inc(&out_vcc->stats->rx);
22140 +       atomic_inc_unchecked(&vcc->stats->tx);
22141 +       atomic_inc_unchecked(&out_vcc->stats->rx);
22142  done:
22143         if (vcc->pop) vcc->pop(vcc,skb);
22144         else dev_kfree_skb(skb);
22145 diff -urNp linux-3.0.4/drivers/atm/eni.c linux-3.0.4/drivers/atm/eni.c
22146 --- linux-3.0.4/drivers/atm/eni.c       2011-07-21 22:17:23.000000000 -0400
22147 +++ linux-3.0.4/drivers/atm/eni.c       2011-08-23 21:47:55.000000000 -0400
22148 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22149                 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22150                     vcc->dev->number);
22151                 length = 0;
22152 -               atomic_inc(&vcc->stats->rx_err);
22153 +               atomic_inc_unchecked(&vcc->stats->rx_err);
22154         }
22155         else {
22156                 length = ATM_CELL_SIZE-1; /* no HEC */
22157 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22158                             size);
22159                 }
22160                 eff = length = 0;
22161 -               atomic_inc(&vcc->stats->rx_err);
22162 +               atomic_inc_unchecked(&vcc->stats->rx_err);
22163         }
22164         else {
22165                 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22166 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22167                             "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22168                             vcc->dev->number,vcc->vci,length,size << 2,descr);
22169                         length = eff = 0;
22170 -                       atomic_inc(&vcc->stats->rx_err);
22171 +                       atomic_inc_unchecked(&vcc->stats->rx_err);
22172                 }
22173         }
22174         skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22175 @@ -771,7 +771,7 @@ rx_dequeued++;
22176                         vcc->push(vcc,skb);
22177                         pushed++;
22178                 }
22179 -               atomic_inc(&vcc->stats->rx);
22180 +               atomic_inc_unchecked(&vcc->stats->rx);
22181         }
22182         wake_up(&eni_dev->rx_wait);
22183  }
22184 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22185                     PCI_DMA_TODEVICE);
22186                 if (vcc->pop) vcc->pop(vcc,skb);
22187                 else dev_kfree_skb_irq(skb);
22188 -               atomic_inc(&vcc->stats->tx);
22189 +               atomic_inc_unchecked(&vcc->stats->tx);
22190                 wake_up(&eni_dev->tx_wait);
22191  dma_complete++;
22192         }
22193 diff -urNp linux-3.0.4/drivers/atm/firestream.c linux-3.0.4/drivers/atm/firestream.c
22194 --- linux-3.0.4/drivers/atm/firestream.c        2011-07-21 22:17:23.000000000 -0400
22195 +++ linux-3.0.4/drivers/atm/firestream.c        2011-08-23 21:47:55.000000000 -0400
22196 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22197                                 }
22198                         }
22199  
22200 -                       atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22201 +                       atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22202  
22203                         fs_dprintk (FS_DEBUG_TXMEM, "i");
22204                         fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22205 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22206  #endif
22207                                 skb_put (skb, qe->p1 & 0xffff); 
22208                                 ATM_SKB(skb)->vcc = atm_vcc;
22209 -                               atomic_inc(&atm_vcc->stats->rx);
22210 +                               atomic_inc_unchecked(&atm_vcc->stats->rx);
22211                                 __net_timestamp(skb);
22212                                 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22213                                 atm_vcc->push (atm_vcc, skb);
22214 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22215                                 kfree (pe);
22216                         }
22217                         if (atm_vcc)
22218 -                               atomic_inc(&atm_vcc->stats->rx_drop);
22219 +                               atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22220                         break;
22221                 case 0x1f: /*  Reassembly abort: no buffers. */
22222                         /* Silently increment error counter. */
22223                         if (atm_vcc)
22224 -                               atomic_inc(&atm_vcc->stats->rx_drop);
22225 +                               atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22226                         break;
22227                 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22228                         printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", 
22229 diff -urNp linux-3.0.4/drivers/atm/fore200e.c linux-3.0.4/drivers/atm/fore200e.c
22230 --- linux-3.0.4/drivers/atm/fore200e.c  2011-07-21 22:17:23.000000000 -0400
22231 +++ linux-3.0.4/drivers/atm/fore200e.c  2011-08-23 21:47:55.000000000 -0400
22232 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22233  #endif
22234                 /* check error condition */
22235                 if (*entry->status & STATUS_ERROR)
22236 -                   atomic_inc(&vcc->stats->tx_err);
22237 +                   atomic_inc_unchecked(&vcc->stats->tx_err);
22238                 else
22239 -                   atomic_inc(&vcc->stats->tx);
22240 +                   atomic_inc_unchecked(&vcc->stats->tx);
22241             }
22242         }
22243  
22244 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22245      if (skb == NULL) {
22246         DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22247  
22248 -       atomic_inc(&vcc->stats->rx_drop);
22249 +       atomic_inc_unchecked(&vcc->stats->rx_drop);
22250         return -ENOMEM;
22251      } 
22252  
22253 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22254  
22255         dev_kfree_skb_any(skb);
22256  
22257 -       atomic_inc(&vcc->stats->rx_drop);
22258 +       atomic_inc_unchecked(&vcc->stats->rx_drop);
22259         return -ENOMEM;
22260      }
22261  
22262      ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22263  
22264      vcc->push(vcc, skb);
22265 -    atomic_inc(&vcc->stats->rx);
22266 +    atomic_inc_unchecked(&vcc->stats->rx);
22267  
22268      ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22269  
22270 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22271                 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22272                         fore200e->atm_dev->number,
22273                         entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22274 -               atomic_inc(&vcc->stats->rx_err);
22275 +               atomic_inc_unchecked(&vcc->stats->rx_err);
22276             }
22277         }
22278  
22279 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22280                 goto retry_here;
22281             }
22282  
22283 -           atomic_inc(&vcc->stats->tx_err);
22284 +           atomic_inc_unchecked(&vcc->stats->tx_err);
22285  
22286             fore200e->tx_sat++;
22287             DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22288 diff -urNp linux-3.0.4/drivers/atm/he.c linux-3.0.4/drivers/atm/he.c
22289 --- linux-3.0.4/drivers/atm/he.c        2011-07-21 22:17:23.000000000 -0400
22290 +++ linux-3.0.4/drivers/atm/he.c        2011-08-23 21:47:55.000000000 -0400
22291 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22292  
22293                 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22294                         hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
22295 -                               atomic_inc(&vcc->stats->rx_drop);
22296 +                               atomic_inc_unchecked(&vcc->stats->rx_drop);
22297                         goto return_host_buffers;
22298                 }
22299  
22300 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22301                                 RBRQ_LEN_ERR(he_dev->rbrq_head)
22302                                                         ? "LEN_ERR" : "",
22303                                                         vcc->vpi, vcc->vci);
22304 -                       atomic_inc(&vcc->stats->rx_err);
22305 +                       atomic_inc_unchecked(&vcc->stats->rx_err);
22306                         goto return_host_buffers;
22307                 }
22308  
22309 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22310                 vcc->push(vcc, skb);
22311                 spin_lock(&he_dev->global_lock);
22312  
22313 -               atomic_inc(&vcc->stats->rx);
22314 +               atomic_inc_unchecked(&vcc->stats->rx);
22315  
22316  return_host_buffers:
22317                 ++pdus_assembled;
22318 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22319                                         tpd->vcc->pop(tpd->vcc, tpd->skb);
22320                                 else
22321                                         dev_kfree_skb_any(tpd->skb);
22322 -                               atomic_inc(&tpd->vcc->stats->tx_err);
22323 +                               atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22324                         }
22325                         pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22326                         return;
22327 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22328                         vcc->pop(vcc, skb);
22329                 else
22330                         dev_kfree_skb_any(skb);
22331 -               atomic_inc(&vcc->stats->tx_err);
22332 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22333                 return -EINVAL;
22334         }
22335  
22336 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22337                         vcc->pop(vcc, skb);
22338                 else
22339                         dev_kfree_skb_any(skb);
22340 -               atomic_inc(&vcc->stats->tx_err);
22341 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22342                 return -EINVAL;
22343         }
22344  #endif
22345 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22346                         vcc->pop(vcc, skb);
22347                 else
22348                         dev_kfree_skb_any(skb);
22349 -               atomic_inc(&vcc->stats->tx_err);
22350 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22351                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22352                 return -ENOMEM;
22353         }
22354 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22355                                         vcc->pop(vcc, skb);
22356                                 else
22357                                         dev_kfree_skb_any(skb);
22358 -                               atomic_inc(&vcc->stats->tx_err);
22359 +                               atomic_inc_unchecked(&vcc->stats->tx_err);
22360                                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22361                                 return -ENOMEM;
22362                         }
22363 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22364         __enqueue_tpd(he_dev, tpd, cid);
22365         spin_unlock_irqrestore(&he_dev->global_lock, flags);
22366  
22367 -       atomic_inc(&vcc->stats->tx);
22368 +       atomic_inc_unchecked(&vcc->stats->tx);
22369  
22370         return 0;
22371  }
22372 diff -urNp linux-3.0.4/drivers/atm/horizon.c linux-3.0.4/drivers/atm/horizon.c
22373 --- linux-3.0.4/drivers/atm/horizon.c   2011-07-21 22:17:23.000000000 -0400
22374 +++ linux-3.0.4/drivers/atm/horizon.c   2011-08-23 21:47:55.000000000 -0400
22375 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, 
22376         {
22377           struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22378           // VC layer stats
22379 -         atomic_inc(&vcc->stats->rx);
22380 +         atomic_inc_unchecked(&vcc->stats->rx);
22381           __net_timestamp(skb);
22382           // end of our responsibility
22383           vcc->push (vcc, skb);
22384 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22385         dev->tx_iovec = NULL;
22386         
22387         // VC layer stats
22388 -       atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22389 +       atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22390         
22391         // free the skb
22392         hrz_kfree_skb (skb);
22393 diff -urNp linux-3.0.4/drivers/atm/idt77252.c linux-3.0.4/drivers/atm/idt77252.c
22394 --- linux-3.0.4/drivers/atm/idt77252.c  2011-07-21 22:17:23.000000000 -0400
22395 +++ linux-3.0.4/drivers/atm/idt77252.c  2011-08-23 21:47:55.000000000 -0400
22396 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22397                 else
22398                         dev_kfree_skb(skb);
22399  
22400 -               atomic_inc(&vcc->stats->tx);
22401 +               atomic_inc_unchecked(&vcc->stats->tx);
22402         }
22403  
22404         atomic_dec(&scq->used);
22405 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22406                         if ((sb = dev_alloc_skb(64)) == NULL) {
22407                                 printk("%s: Can't allocate buffers for aal0.\n",
22408                                        card->name);
22409 -                               atomic_add(i, &vcc->stats->rx_drop);
22410 +                               atomic_add_unchecked(i, &vcc->stats->rx_drop);
22411                                 break;
22412                         }
22413                         if (!atm_charge(vcc, sb->truesize)) {
22414                                 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22415                                          card->name);
22416 -                               atomic_add(i - 1, &vcc->stats->rx_drop);
22417 +                               atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22418                                 dev_kfree_skb(sb);
22419                                 break;
22420                         }
22421 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22422                         ATM_SKB(sb)->vcc = vcc;
22423                         __net_timestamp(sb);
22424                         vcc->push(vcc, sb);
22425 -                       atomic_inc(&vcc->stats->rx);
22426 +                       atomic_inc_unchecked(&vcc->stats->rx);
22427  
22428                         cell += ATM_CELL_PAYLOAD;
22429                 }
22430 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22431                                  "(CDC: %08x)\n",
22432                                  card->name, len, rpp->len, readl(SAR_REG_CDC));
22433                         recycle_rx_pool_skb(card, rpp);
22434 -                       atomic_inc(&vcc->stats->rx_err);
22435 +                       atomic_inc_unchecked(&vcc->stats->rx_err);
22436                         return;
22437                 }
22438                 if (stat & SAR_RSQE_CRC) {
22439                         RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22440                         recycle_rx_pool_skb(card, rpp);
22441 -                       atomic_inc(&vcc->stats->rx_err);
22442 +                       atomic_inc_unchecked(&vcc->stats->rx_err);
22443                         return;
22444                 }
22445                 if (skb_queue_len(&rpp->queue) > 1) {
22446 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22447                                 RXPRINTK("%s: Can't alloc RX skb.\n",
22448                                          card->name);
22449                                 recycle_rx_pool_skb(card, rpp);
22450 -                               atomic_inc(&vcc->stats->rx_err);
22451 +                               atomic_inc_unchecked(&vcc->stats->rx_err);
22452                                 return;
22453                         }
22454                         if (!atm_charge(vcc, skb->truesize)) {
22455 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22456                         __net_timestamp(skb);
22457  
22458                         vcc->push(vcc, skb);
22459 -                       atomic_inc(&vcc->stats->rx);
22460 +                       atomic_inc_unchecked(&vcc->stats->rx);
22461  
22462                         return;
22463                 }
22464 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22465                 __net_timestamp(skb);
22466  
22467                 vcc->push(vcc, skb);
22468 -               atomic_inc(&vcc->stats->rx);
22469 +               atomic_inc_unchecked(&vcc->stats->rx);
22470  
22471                 if (skb->truesize > SAR_FB_SIZE_3)
22472                         add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22473 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22474                 if (vcc->qos.aal != ATM_AAL0) {
22475                         RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22476                                 card->name, vpi, vci);
22477 -                       atomic_inc(&vcc->stats->rx_drop);
22478 +                       atomic_inc_unchecked(&vcc->stats->rx_drop);
22479                         goto drop;
22480                 }
22481         
22482                 if ((sb = dev_alloc_skb(64)) == NULL) {
22483                         printk("%s: Can't allocate buffers for AAL0.\n",
22484                                card->name);
22485 -                       atomic_inc(&vcc->stats->rx_err);
22486 +                       atomic_inc_unchecked(&vcc->stats->rx_err);
22487                         goto drop;
22488                 }
22489  
22490 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22491                 ATM_SKB(sb)->vcc = vcc;
22492                 __net_timestamp(sb);
22493                 vcc->push(vcc, sb);
22494 -               atomic_inc(&vcc->stats->rx);
22495 +               atomic_inc_unchecked(&vcc->stats->rx);
22496  
22497  drop:
22498                 skb_pull(queue, 64);
22499 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22500  
22501         if (vc == NULL) {
22502                 printk("%s: NULL connection in send().\n", card->name);
22503 -               atomic_inc(&vcc->stats->tx_err);
22504 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22505                 dev_kfree_skb(skb);
22506                 return -EINVAL;
22507         }
22508         if (!test_bit(VCF_TX, &vc->flags)) {
22509                 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22510 -               atomic_inc(&vcc->stats->tx_err);
22511 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22512                 dev_kfree_skb(skb);
22513                 return -EINVAL;
22514         }
22515 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22516                 break;
22517         default:
22518                 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22519 -               atomic_inc(&vcc->stats->tx_err);
22520 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22521                 dev_kfree_skb(skb);
22522                 return -EINVAL;
22523         }
22524  
22525         if (skb_shinfo(skb)->nr_frags != 0) {
22526                 printk("%s: No scatter-gather yet.\n", card->name);
22527 -               atomic_inc(&vcc->stats->tx_err);
22528 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22529                 dev_kfree_skb(skb);
22530                 return -EINVAL;
22531         }
22532 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22533  
22534         err = queue_skb(card, vc, skb, oam);
22535         if (err) {
22536 -               atomic_inc(&vcc->stats->tx_err);
22537 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22538                 dev_kfree_skb(skb);
22539                 return err;
22540         }
22541 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22542         skb = dev_alloc_skb(64);
22543         if (!skb) {
22544                 printk("%s: Out of memory in send_oam().\n", card->name);
22545 -               atomic_inc(&vcc->stats->tx_err);
22546 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22547                 return -ENOMEM;
22548         }
22549         atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22550 diff -urNp linux-3.0.4/drivers/atm/iphase.c linux-3.0.4/drivers/atm/iphase.c
22551 --- linux-3.0.4/drivers/atm/iphase.c    2011-07-21 22:17:23.000000000 -0400
22552 +++ linux-3.0.4/drivers/atm/iphase.c    2011-08-23 21:47:55.000000000 -0400
22553 @@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)  
22554         status = (u_short) (buf_desc_ptr->desc_mode);  
22555         if (status & (RX_CER | RX_PTE | RX_OFL))  
22556         {  
22557 -                atomic_inc(&vcc->stats->rx_err);
22558 +                atomic_inc_unchecked(&vcc->stats->rx_err);
22559                 IF_ERR(printk("IA: bad packet, dropping it");)  
22560                  if (status & RX_CER) { 
22561                      IF_ERR(printk(" cause: packet CRC error\n");)
22562 @@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)  
22563         len = dma_addr - buf_addr;  
22564          if (len > iadev->rx_buf_sz) {
22565             printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22566 -           atomic_inc(&vcc->stats->rx_err);
22567 +           atomic_inc_unchecked(&vcc->stats->rx_err);
22568            goto out_free_desc;
22569          }
22570                   
22571 @@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
22572            ia_vcc = INPH_IA_VCC(vcc);
22573            if (ia_vcc == NULL)
22574            {
22575 -             atomic_inc(&vcc->stats->rx_err);
22576 +             atomic_inc_unchecked(&vcc->stats->rx_err);
22577               dev_kfree_skb_any(skb);
22578               atm_return(vcc, atm_guess_pdu2truesize(len));
22579               goto INCR_DLE;
22580 @@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
22581            if ((length > iadev->rx_buf_sz) || (length > 
22582                                (skb->len - sizeof(struct cpcs_trailer))))
22583            {
22584 -             atomic_inc(&vcc->stats->rx_err);
22585 +             atomic_inc_unchecked(&vcc->stats->rx_err);
22586               IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
22587                                                              length, skb->len);)
22588               dev_kfree_skb_any(skb);
22589 @@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
22590  
22591           IF_RX(printk("rx_dle_intr: skb push");)  
22592           vcc->push(vcc,skb);  
22593 -         atomic_inc(&vcc->stats->rx);
22594 +         atomic_inc_unchecked(&vcc->stats->rx);
22595            iadev->rx_pkt_cnt++;
22596        }  
22597  INCR_DLE:
22598 @@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
22599           {
22600               struct k_sonet_stats *stats;
22601               stats = &PRIV(_ia_dev[board])->sonet_stats;
22602 -             printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22603 -             printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
22604 -             printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
22605 -             printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
22606 -             printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
22607 -             printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
22608 -             printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22609 -             printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
22610 -             printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
22611 +             printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22612 +             printk("line_bip   : %d\n", atomic_read_unchecked(&stats->line_bip));
22613 +             printk("path_bip   : %d\n", atomic_read_unchecked(&stats->path_bip));
22614 +             printk("line_febe  : %d\n", atomic_read_unchecked(&stats->line_febe));
22615 +             printk("path_febe  : %d\n", atomic_read_unchecked(&stats->path_febe));
22616 +             printk("corr_hcs   : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22617 +             printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22618 +             printk("tx_cells   : %d\n", atomic_read_unchecked(&stats->tx_cells));
22619 +             printk("rx_cells   : %d\n", atomic_read_unchecked(&stats->rx_cells));
22620           }
22621              ia_cmds.status = 0;
22622              break;
22623 @@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22624         if ((desc == 0) || (desc > iadev->num_tx_desc))  
22625         {  
22626                 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
22627 -                atomic_inc(&vcc->stats->tx);
22628 +                atomic_inc_unchecked(&vcc->stats->tx);
22629                 if (vcc->pop)   
22630                     vcc->pop(vcc, skb);   
22631                 else  
22632 @@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22633          ATM_DESC(skb) = vcc->vci;
22634          skb_queue_tail(&iadev->tx_dma_q, skb);
22635  
22636 -        atomic_inc(&vcc->stats->tx);
22637 +        atomic_inc_unchecked(&vcc->stats->tx);
22638          iadev->tx_pkt_cnt++;
22639         /* Increment transaction counter */  
22640         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
22641          
22642  #if 0        
22643          /* add flow control logic */ 
22644 -        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22645 +        if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22646            if (iavcc->vc_desc_cnt > 10) {
22647               vcc->tx_quota =  vcc->tx_quota * 3 / 4;
22648              printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
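
The iphase.c hunks above, and most of the ATM, block-driver and IPMI hunks that follow, apply one recurring transformation: device statistics counters (vcc->stats->rx, tx_err, SONET cell counts, sequence numbers and the like) are moved from atomic_t to atomic_unchecked_t, with the matching atomic_inc_unchecked()/atomic_read_unchecked()/atomic_add_unchecked() calls. PaX's REFCOUNT hardening instruments the ordinary atomic operations to trap on overflow; counters that are allowed to wrap are switched to the unchecked variants so they do not raise false positives, while genuine reference counts stay on the checked type. The sketch below is not taken from the patch: it only illustrates the simplest mapping an architecture could use for the unchecked API (the patch's real definitions are per-arch and live in hunks outside this excerpt) and how a hypothetical driver structure splits its fields between the two types.

    /* Illustrative sketch only -- an assumed fallback mapping, not the
     * patch's actual per-arch definitions. */
    typedef atomic_t atomic_unchecked_t;

    #define atomic_read_unchecked(v)          atomic_read(v)
    #define atomic_set_unchecked(v, i)        atomic_set((v), (i))
    #define atomic_inc_unchecked(v)           atomic_inc(v)
    #define atomic_add_unchecked(i, v)        atomic_add((i), (v))
    #define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))

    /* Hypothetical driver structure: statistics may wrap, the refcount must not. */
    struct demo_dev_stats {
            atomic_unchecked_t rx;       /* wrapping is harmless -> unchecked */
            atomic_unchecked_t tx_err;
            atomic_t           refcnt;   /* overflow here is a real bug -> keep checked */
    };

    static inline void demo_account_rx(struct demo_dev_stats *s)
    {
            atomic_inc_unchecked(&s->rx);   /* same call shape as the hunks above */
    }
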
22649 diff -urNp linux-3.0.4/drivers/atm/lanai.c linux-3.0.4/drivers/atm/lanai.c
22650 --- linux-3.0.4/drivers/atm/lanai.c     2011-07-21 22:17:23.000000000 -0400
22651 +++ linux-3.0.4/drivers/atm/lanai.c     2011-08-23 21:47:55.000000000 -0400
22652 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22653         vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22654         lanai_endtx(lanai, lvcc);
22655         lanai_free_skb(lvcc->tx.atmvcc, skb);
22656 -       atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22657 +       atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22658  }
22659  
22660  /* Try to fill the buffer - don't call unless there is backlog */
22661 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22662         ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22663         __net_timestamp(skb);
22664         lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22665 -       atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22666 +       atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22667      out:
22668         lvcc->rx.buf.ptr = end;
22669         cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22670 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22671                 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22672                     "vcc %d\n", lanai->number, (unsigned int) s, vci);
22673                 lanai->stats.service_rxnotaal5++;
22674 -               atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22675 +               atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22676                 return 0;
22677         }
22678         if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22679 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22680                 int bytes;
22681                 read_unlock(&vcc_sklist_lock);
22682                 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22683 -               atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22684 +               atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22685                 lvcc->stats.x.aal5.service_trash++;
22686                 bytes = (SERVICE_GET_END(s) * 16) -
22687                     (((unsigned long) lvcc->rx.buf.ptr) -
22688 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22689         }
22690         if (s & SERVICE_STREAM) {
22691                 read_unlock(&vcc_sklist_lock);
22692 -               atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22693 +               atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22694                 lvcc->stats.x.aal5.service_stream++;
22695                 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22696                     "PDU on VCI %d!\n", lanai->number, vci);
22697 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22698                 return 0;
22699         }
22700         DPRINTK("got rx crc error on vci %d\n", vci);
22701 -       atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22702 +       atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22703         lvcc->stats.x.aal5.service_rxcrc++;
22704         lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22705         cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22706 diff -urNp linux-3.0.4/drivers/atm/nicstar.c linux-3.0.4/drivers/atm/nicstar.c
22707 --- linux-3.0.4/drivers/atm/nicstar.c   2011-07-21 22:17:23.000000000 -0400
22708 +++ linux-3.0.4/drivers/atm/nicstar.c   2011-08-23 21:47:55.000000000 -0400
22709 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, 
22710         if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22711                 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22712                        card->index);
22713 -               atomic_inc(&vcc->stats->tx_err);
22714 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22715                 dev_kfree_skb_any(skb);
22716                 return -EINVAL;
22717         }
22718 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, 
22719         if (!vc->tx) {
22720                 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22721                        card->index);
22722 -               atomic_inc(&vcc->stats->tx_err);
22723 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22724                 dev_kfree_skb_any(skb);
22725                 return -EINVAL;
22726         }
22727 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, 
22728         if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22729                 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22730                        card->index);
22731 -               atomic_inc(&vcc->stats->tx_err);
22732 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22733                 dev_kfree_skb_any(skb);
22734                 return -EINVAL;
22735         }
22736  
22737         if (skb_shinfo(skb)->nr_frags != 0) {
22738                 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22739 -               atomic_inc(&vcc->stats->tx_err);
22740 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22741                 dev_kfree_skb_any(skb);
22742                 return -EINVAL;
22743         }
22744 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, 
22745         }
22746  
22747         if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22748 -               atomic_inc(&vcc->stats->tx_err);
22749 +               atomic_inc_unchecked(&vcc->stats->tx_err);
22750                 dev_kfree_skb_any(skb);
22751                 return -EIO;
22752         }
22753 -       atomic_inc(&vcc->stats->tx);
22754 +       atomic_inc_unchecked(&vcc->stats->tx);
22755  
22756         return 0;
22757  }
22758 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22759                                 printk
22760                                     ("nicstar%d: Can't allocate buffers for aal0.\n",
22761                                      card->index);
22762 -                               atomic_add(i, &vcc->stats->rx_drop);
22763 +                               atomic_add_unchecked(i, &vcc->stats->rx_drop);
22764                                 break;
22765                         }
22766                         if (!atm_charge(vcc, sb->truesize)) {
22767                                 RXPRINTK
22768                                     ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22769                                      card->index);
22770 -                               atomic_add(i - 1, &vcc->stats->rx_drop);        /* already increased by 1 */
22771 +                               atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);      /* already increased by 1 */
22772                                 dev_kfree_skb_any(sb);
22773                                 break;
22774                         }
22775 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22776                         ATM_SKB(sb)->vcc = vcc;
22777                         __net_timestamp(sb);
22778                         vcc->push(vcc, sb);
22779 -                       atomic_inc(&vcc->stats->rx);
22780 +                       atomic_inc_unchecked(&vcc->stats->rx);
22781                         cell += ATM_CELL_PAYLOAD;
22782                 }
22783  
22784 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22785                         if (iovb == NULL) {
22786                                 printk("nicstar%d: Out of iovec buffers.\n",
22787                                        card->index);
22788 -                               atomic_inc(&vcc->stats->rx_drop);
22789 +                               atomic_inc_unchecked(&vcc->stats->rx_drop);
22790                                 recycle_rx_buf(card, skb);
22791                                 return;
22792                         }
22793 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22794                    small or large buffer itself. */
22795         } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22796                 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22797 -               atomic_inc(&vcc->stats->rx_err);
22798 +               atomic_inc_unchecked(&vcc->stats->rx_err);
22799                 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22800                                       NS_MAX_IOVECS);
22801                 NS_PRV_IOVCNT(iovb) = 0;
22802 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22803                             ("nicstar%d: Expected a small buffer, and this is not one.\n",
22804                              card->index);
22805                         which_list(card, skb);
22806 -                       atomic_inc(&vcc->stats->rx_err);
22807 +                       atomic_inc_unchecked(&vcc->stats->rx_err);
22808                         recycle_rx_buf(card, skb);
22809                         vc->rx_iov = NULL;
22810                         recycle_iov_buf(card, iovb);
22811 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22812                             ("nicstar%d: Expected a large buffer, and this is not one.\n",
22813                              card->index);
22814                         which_list(card, skb);
22815 -                       atomic_inc(&vcc->stats->rx_err);
22816 +                       atomic_inc_unchecked(&vcc->stats->rx_err);
22817                         recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22818                                               NS_PRV_IOVCNT(iovb));
22819                         vc->rx_iov = NULL;
22820 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22821                                 printk(" - PDU size mismatch.\n");
22822                         else
22823                                 printk(".\n");
22824 -                       atomic_inc(&vcc->stats->rx_err);
22825 +                       atomic_inc_unchecked(&vcc->stats->rx_err);
22826                         recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22827                                               NS_PRV_IOVCNT(iovb));
22828                         vc->rx_iov = NULL;
22829 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22830                         /* skb points to a small buffer */
22831                         if (!atm_charge(vcc, skb->truesize)) {
22832                                 push_rxbufs(card, skb);
22833 -                               atomic_inc(&vcc->stats->rx_drop);
22834 +                               atomic_inc_unchecked(&vcc->stats->rx_drop);
22835                         } else {
22836                                 skb_put(skb, len);
22837                                 dequeue_sm_buf(card, skb);
22838 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22839                                 ATM_SKB(skb)->vcc = vcc;
22840                                 __net_timestamp(skb);
22841                                 vcc->push(vcc, skb);
22842 -                               atomic_inc(&vcc->stats->rx);
22843 +                               atomic_inc_unchecked(&vcc->stats->rx);
22844                         }
22845                 } else if (NS_PRV_IOVCNT(iovb) == 2) {  /* One small plus one large buffer */
22846                         struct sk_buff *sb;
22847 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22848                         if (len <= NS_SMBUFSIZE) {
22849                                 if (!atm_charge(vcc, sb->truesize)) {
22850                                         push_rxbufs(card, sb);
22851 -                                       atomic_inc(&vcc->stats->rx_drop);
22852 +                                       atomic_inc_unchecked(&vcc->stats->rx_drop);
22853                                 } else {
22854                                         skb_put(sb, len);
22855                                         dequeue_sm_buf(card, sb);
22856 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
22857                                         ATM_SKB(sb)->vcc = vcc;
22858                                         __net_timestamp(sb);
22859                                         vcc->push(vcc, sb);
22860 -                                       atomic_inc(&vcc->stats->rx);
22861 +                                       atomic_inc_unchecked(&vcc->stats->rx);
22862                                 }
22863  
22864                                 push_rxbufs(card, skb);
22865 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
22866  
22867                                 if (!atm_charge(vcc, skb->truesize)) {
22868                                         push_rxbufs(card, skb);
22869 -                                       atomic_inc(&vcc->stats->rx_drop);
22870 +                                       atomic_inc_unchecked(&vcc->stats->rx_drop);
22871                                 } else {
22872                                         dequeue_lg_buf(card, skb);
22873  #ifdef NS_USE_DESTRUCTORS
22874 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
22875                                         ATM_SKB(skb)->vcc = vcc;
22876                                         __net_timestamp(skb);
22877                                         vcc->push(vcc, skb);
22878 -                                       atomic_inc(&vcc->stats->rx);
22879 +                                       atomic_inc_unchecked(&vcc->stats->rx);
22880                                 }
22881  
22882                                 push_rxbufs(card, sb);
22883 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
22884                                         printk
22885                                             ("nicstar%d: Out of huge buffers.\n",
22886                                              card->index);
22887 -                                       atomic_inc(&vcc->stats->rx_drop);
22888 +                                       atomic_inc_unchecked(&vcc->stats->rx_drop);
22889                                         recycle_iovec_rx_bufs(card,
22890                                                               (struct iovec *)
22891                                                               iovb->data,
22892 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
22893                                         card->hbpool.count++;
22894                                 } else
22895                                         dev_kfree_skb_any(hb);
22896 -                               atomic_inc(&vcc->stats->rx_drop);
22897 +                               atomic_inc_unchecked(&vcc->stats->rx_drop);
22898                         } else {
22899                                 /* Copy the small buffer to the huge buffer */
22900                                 sb = (struct sk_buff *)iov->iov_base;
22901 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
22902  #endif /* NS_USE_DESTRUCTORS */
22903                                 __net_timestamp(hb);
22904                                 vcc->push(vcc, hb);
22905 -                               atomic_inc(&vcc->stats->rx);
22906 +                               atomic_inc_unchecked(&vcc->stats->rx);
22907                         }
22908                 }
22909  
22910 diff -urNp linux-3.0.4/drivers/atm/solos-pci.c linux-3.0.4/drivers/atm/solos-pci.c
22911 --- linux-3.0.4/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
22912 +++ linux-3.0.4/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
22913 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
22914                                 }
22915                                 atm_charge(vcc, skb->truesize);
22916                                 vcc->push(vcc, skb);
22917 -                               atomic_inc(&vcc->stats->rx);
22918 +                               atomic_inc_unchecked(&vcc->stats->rx);
22919                                 break;
22920  
22921                         case PKT_STATUS:
22922 @@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
22923         char msg[500];
22924         char item[10];
22925  
22926 +       pax_track_stack();
22927 +
22928         len = buf->len;
22929         for (i = 0; i < len; i++){
22930                 if(i % 8 == 0)
22931 @@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
22932                         vcc = SKB_CB(oldskb)->vcc;
22933  
22934                         if (vcc) {
22935 -                               atomic_inc(&vcc->stats->tx);
22936 +                               atomic_inc_unchecked(&vcc->stats->tx);
22937                                 solos_pop(vcc, oldskb);
22938                         } else
22939                                 dev_kfree_skb_irq(oldskb);
22940 diff -urNp linux-3.0.4/drivers/atm/suni.c linux-3.0.4/drivers/atm/suni.c
22941 --- linux-3.0.4/drivers/atm/suni.c      2011-07-21 22:17:23.000000000 -0400
22942 +++ linux-3.0.4/drivers/atm/suni.c      2011-08-23 21:47:55.000000000 -0400
22943 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
22944  
22945  
22946  #define ADD_LIMITED(s,v) \
22947 -    atomic_add((v),&stats->s); \
22948 -    if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
22949 +    atomic_add_unchecked((v),&stats->s); \
22950 +    if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
22951  
22952  
22953  static void suni_hz(unsigned long from_timer)
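
The suni.c hunk just above (and the uPD98402.c hunk that follows) keeps the drivers' existing ADD_LIMITED behaviour, where a signed SONET counter that has run past INT_MAX is pinned at INT_MAX rather than reported as a negative value, and simply routes it through the unchecked atomics. Below is a standalone userspace model of that clamping with an invented counter name; the unsigned addition stands in for the kernel build's wrapping signed arithmetic.

    /* Userspace model of the ADD_LIMITED idea: clamp a statistics counter at
     * INT_MAX instead of letting it show up as negative. */
    #include <limits.h>
    #include <stdio.h>

    static int rx_cells;                         /* invented counter name */

    static void add_limited(int v)
    {
            /* the kernel wraps on signed overflow; model that explicitly here */
            rx_cells = (int)((unsigned int)rx_cells + (unsigned int)v);
            if (rx_cells < 0)
                    rx_cells = INT_MAX;          /* pin at the maximum, as the macro does */
    }

    int main(void)
    {
            rx_cells = INT_MAX - 5;
            add_limited(10);                     /* would go negative without the clamp */
            printf("rx_cells = %d\n", rx_cells); /* prints INT_MAX */
            return 0;
    }
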
22954 diff -urNp linux-3.0.4/drivers/atm/uPD98402.c linux-3.0.4/drivers/atm/uPD98402.c
22955 --- linux-3.0.4/drivers/atm/uPD98402.c  2011-07-21 22:17:23.000000000 -0400
22956 +++ linux-3.0.4/drivers/atm/uPD98402.c  2011-08-23 21:47:55.000000000 -0400
22957 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
22958         struct sonet_stats tmp;
22959         int error = 0;
22960  
22961 -       atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22962 +       atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22963         sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
22964         if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
22965         if (zero && !error) {
22966 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
22967  
22968  
22969  #define ADD_LIMITED(s,v) \
22970 -    { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
22971 -    if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
22972 -       atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22973 +    { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
22974 +    if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
22975 +       atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22976  
22977  
22978  static void stat_event(struct atm_dev *dev)
22979 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev 
22980                 if (reason & uPD98402_INT_PFM) stat_event(dev);
22981                 if (reason & uPD98402_INT_PCO) {
22982                         (void) GET(PCOCR); /* clear interrupt cause */
22983 -                       atomic_add(GET(HECCT),
22984 +                       atomic_add_unchecked(GET(HECCT),
22985                             &PRIV(dev)->sonet_stats.uncorr_hcs);
22986                 }
22987                 if ((reason & uPD98402_INT_RFO) && 
22988 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
22989         PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
22990           uPD98402_INT_LOS),PIMR); /* enable them */
22991         (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
22992 -       atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22993 -       atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
22994 -       atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
22995 +       atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22996 +       atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
22997 +       atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
22998         return 0;
22999  }
23000  
23001 diff -urNp linux-3.0.4/drivers/atm/zatm.c linux-3.0.4/drivers/atm/zatm.c
23002 --- linux-3.0.4/drivers/atm/zatm.c      2011-07-21 22:17:23.000000000 -0400
23003 +++ linux-3.0.4/drivers/atm/zatm.c      2011-08-23 21:47:55.000000000 -0400
23004 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23005                 }
23006                 if (!size) {
23007                         dev_kfree_skb_irq(skb);
23008 -                       if (vcc) atomic_inc(&vcc->stats->rx_err);
23009 +                       if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23010                         continue;
23011                 }
23012                 if (!atm_charge(vcc,skb->truesize)) {
23013 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23014                 skb->len = size;
23015                 ATM_SKB(skb)->vcc = vcc;
23016                 vcc->push(vcc,skb);
23017 -               atomic_inc(&vcc->stats->rx);
23018 +               atomic_inc_unchecked(&vcc->stats->rx);
23019         }
23020         zout(pos & 0xffff,MTA(mbx));
23021  #if 0 /* probably a stupid idea */
23022 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23023                         skb_queue_head(&zatm_vcc->backlog,skb);
23024                         break;
23025                 }
23026 -       atomic_inc(&vcc->stats->tx);
23027 +       atomic_inc_unchecked(&vcc->stats->tx);
23028         wake_up(&zatm_vcc->tx_wait);
23029  }
23030  
23031 diff -urNp linux-3.0.4/drivers/base/power/wakeup.c linux-3.0.4/drivers/base/power/wakeup.c
23032 --- linux-3.0.4/drivers/base/power/wakeup.c     2011-07-21 22:17:23.000000000 -0400
23033 +++ linux-3.0.4/drivers/base/power/wakeup.c     2011-08-23 21:47:55.000000000 -0400
23034 @@ -29,14 +29,14 @@ bool events_check_enabled;
23035   * They need to be modified together atomically, so it's better to use one
23036   * atomic variable to hold them both.
23037   */
23038 -static atomic_t combined_event_count = ATOMIC_INIT(0);
23039 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23040  
23041  #define IN_PROGRESS_BITS       (sizeof(int) * 4)
23042  #define MAX_IN_PROGRESS                ((1 << IN_PROGRESS_BITS) - 1)
23043  
23044  static void split_counters(unsigned int *cnt, unsigned int *inpr)
23045  {
23046 -       unsigned int comb = atomic_read(&combined_event_count);
23047 +       unsigned int comb = atomic_read_unchecked(&combined_event_count);
23048  
23049         *cnt = (comb >> IN_PROGRESS_BITS);
23050         *inpr = comb & MAX_IN_PROGRESS;
23051 @@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
23052         ws->last_time = ktime_get();
23053  
23054         /* Increment the counter of events in progress. */
23055 -       atomic_inc(&combined_event_count);
23056 +       atomic_inc_unchecked(&combined_event_count);
23057  }
23058  
23059  /**
23060 @@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
23061          * Increment the counter of registered wakeup events and decrement the
23062          * couter of wakeup events in progress simultaneously.
23063          */
23064 -       atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23065 +       atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23066  }
23067  
23068  /**
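
The wakeup.c hunk above switches combined_event_count to the unchecked type without altering the driver's packing trick: a single word holds the count of registered wakeup events in its upper bits and the count of events in progress in the low IN_PROGRESS_BITS, so one atomic_add_unchecked(MAX_IN_PROGRESS, ...) finishes an event (in-progress drops by one, registered rises by one), and the packed value is expected to wrap eventually. The self-contained model below uses invented names (packed_events, start_event, finish_event) and plain unsigned arithmetic in place of the atomic.

    /* Standalone model of the packed counter used in wakeup.c above. */
    #include <assert.h>
    #include <stdio.h>

    #define IN_PROGRESS_BITS  (sizeof(int) * 4)            /* 16 on common ABIs */
    #define MAX_IN_PROGRESS   ((1U << IN_PROGRESS_BITS) - 1)

    static unsigned int packed_events;                      /* cnt << 16 | inpr */

    static void split(unsigned int comb, unsigned int *cnt, unsigned int *inpr)
    {
            *cnt  = comb >> IN_PROGRESS_BITS;
            *inpr = comb & MAX_IN_PROGRESS;
    }

    static void start_event(void)  { packed_events += 1; }               /* inpr++ */
    static void finish_event(void) { packed_events += MAX_IN_PROGRESS; } /* inpr--, cnt++ */

    int main(void)
    {
            unsigned int cnt, inpr;

            start_event();
            start_event();
            finish_event();
            split(packed_events, &cnt, &inpr);
            printf("registered=%u in_progress=%u\n", cnt, inpr);  /* 1 and 1 */
            assert(cnt == 1 && inpr == 1);
            return 0;
    }
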
23069 diff -urNp linux-3.0.4/drivers/block/cciss.c linux-3.0.4/drivers/block/cciss.c
23070 --- linux-3.0.4/drivers/block/cciss.c   2011-07-21 22:17:23.000000000 -0400
23071 +++ linux-3.0.4/drivers/block/cciss.c   2011-08-23 21:48:14.000000000 -0400
23072 @@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
23073         int err;
23074         u32 cp;
23075  
23076 +       memset(&arg64, 0, sizeof(arg64));
23077 +
23078         err = 0;
23079         err |=
23080             copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23081 @@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
23082         while (!list_empty(&h->reqQ)) {
23083                 c = list_entry(h->reqQ.next, CommandList_struct, list);
23084                 /* can't do anything if fifo is full */
23085 -               if ((h->access.fifo_full(h))) {
23086 +               if ((h->access->fifo_full(h))) {
23087                         dev_warn(&h->pdev->dev, "fifo full\n");
23088                         break;
23089                 }
23090 @@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
23091                 h->Qdepth--;
23092  
23093                 /* Tell the controller execute command */
23094 -               h->access.submit_command(h, c);
23095 +               h->access->submit_command(h, c);
23096  
23097                 /* Put job onto the completed Q */
23098                 addQ(&h->cmpQ, c);
23099 @@ -3422,17 +3424,17 @@ startio:
23100  
23101  static inline unsigned long get_next_completion(ctlr_info_t *h)
23102  {
23103 -       return h->access.command_completed(h);
23104 +       return h->access->command_completed(h);
23105  }
23106  
23107  static inline int interrupt_pending(ctlr_info_t *h)
23108  {
23109 -       return h->access.intr_pending(h);
23110 +       return h->access->intr_pending(h);
23111  }
23112  
23113  static inline long interrupt_not_for_us(ctlr_info_t *h)
23114  {
23115 -       return ((h->access.intr_pending(h) == 0) ||
23116 +       return ((h->access->intr_pending(h) == 0) ||
23117                 (h->interrupts_enabled == 0));
23118  }
23119  
23120 @@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23121         u32 a;
23122  
23123         if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23124 -               return h->access.command_completed(h);
23125 +               return h->access->command_completed(h);
23126  
23127         if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23128                 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23129 @@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23130                 trans_support & CFGTBL_Trans_use_short_tags);
23131  
23132         /* Change the access methods to the performant access methods */
23133 -       h->access = SA5_performant_access;
23134 +       h->access = &SA5_performant_access;
23135         h->transMethod = CFGTBL_Trans_Performant;
23136  
23137         return;
23138 @@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23139         if (prod_index < 0)
23140                 return -ENODEV;
23141         h->product_name = products[prod_index].product_name;
23142 -       h->access = *(products[prod_index].access);
23143 +       h->access = products[prod_index].access;
23144  
23145         if (cciss_board_disabled(h)) {
23146                 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23147 @@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23148         }
23149  
23150         /* make sure the board interrupts are off */
23151 -       h->access.set_intr_mask(h, CCISS_INTR_OFF);
23152 +       h->access->set_intr_mask(h, CCISS_INTR_OFF);
23153         rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23154         if (rc)
23155                 goto clean2;
23156 @@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23157                  * fake ones to scoop up any residual completions.
23158                  */
23159                 spin_lock_irqsave(&h->lock, flags);
23160 -               h->access.set_intr_mask(h, CCISS_INTR_OFF);
23161 +               h->access->set_intr_mask(h, CCISS_INTR_OFF);
23162                 spin_unlock_irqrestore(&h->lock, flags);
23163                 free_irq(h->intr[PERF_MODE_INT], h);
23164                 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23165 @@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23166                 dev_info(&h->pdev->dev, "Board READY.\n");
23167                 dev_info(&h->pdev->dev,
23168                         "Waiting for stale completions to drain.\n");
23169 -               h->access.set_intr_mask(h, CCISS_INTR_ON);
23170 +               h->access->set_intr_mask(h, CCISS_INTR_ON);
23171                 msleep(10000);
23172 -               h->access.set_intr_mask(h, CCISS_INTR_OFF);
23173 +               h->access->set_intr_mask(h, CCISS_INTR_OFF);
23174  
23175                 rc = controller_reset_failed(h->cfgtable);
23176                 if (rc)
23177 @@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
23178         cciss_scsi_setup(h);
23179  
23180         /* Turn the interrupts on so we can service requests */
23181 -       h->access.set_intr_mask(h, CCISS_INTR_ON);
23182 +       h->access->set_intr_mask(h, CCISS_INTR_ON);
23183  
23184         /* Get the firmware version */
23185         inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23186 @@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
23187         kfree(flush_buf);
23188         if (return_code != IO_OK)
23189                 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23190 -       h->access.set_intr_mask(h, CCISS_INTR_OFF);
23191 +       h->access->set_intr_mask(h, CCISS_INTR_OFF);
23192         free_irq(h->intr[PERF_MODE_INT], h);
23193  }
23194  
23195 diff -urNp linux-3.0.4/drivers/block/cciss.h linux-3.0.4/drivers/block/cciss.h
23196 --- linux-3.0.4/drivers/block/cciss.h   2011-08-23 21:44:40.000000000 -0400
23197 +++ linux-3.0.4/drivers/block/cciss.h   2011-08-23 21:47:55.000000000 -0400
23198 @@ -100,7 +100,7 @@ struct ctlr_info
23199         /* information about each logical volume */
23200         drive_info_struct *drv[CISS_MAX_LUN];
23201  
23202 -       struct access_method access;
23203 +       struct access_method *access;
23204  
23205         /* queue and queue Info */ 
23206         struct list_head reqQ;
23207 diff -urNp linux-3.0.4/drivers/block/cpqarray.c linux-3.0.4/drivers/block/cpqarray.c
23208 --- linux-3.0.4/drivers/block/cpqarray.c        2011-07-21 22:17:23.000000000 -0400
23209 +++ linux-3.0.4/drivers/block/cpqarray.c        2011-08-23 21:48:14.000000000 -0400
23210 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23211         if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23212                 goto Enomem4;
23213         }
23214 -       hba[i]->access.set_intr_mask(hba[i], 0);
23215 +       hba[i]->access->set_intr_mask(hba[i], 0);
23216         if (request_irq(hba[i]->intr, do_ida_intr,
23217                 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23218         {
23219 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23220         add_timer(&hba[i]->timer);
23221  
23222         /* Enable IRQ now that spinlock and rate limit timer are set up */
23223 -       hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23224 +       hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23225  
23226         for(j=0; j<NWD; j++) {
23227                 struct gendisk *disk = ida_gendisk[i][j];
23228 @@ -694,7 +694,7 @@ DBGINFO(
23229         for(i=0; i<NR_PRODUCTS; i++) {
23230                 if (board_id == products[i].board_id) {
23231                         c->product_name = products[i].product_name;
23232 -                       c->access = *(products[i].access);
23233 +                       c->access = products[i].access;
23234                         break;
23235                 }
23236         }
23237 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23238                 hba[ctlr]->intr = intr;
23239                 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23240                 hba[ctlr]->product_name = products[j].product_name;
23241 -               hba[ctlr]->access = *(products[j].access);
23242 +               hba[ctlr]->access = products[j].access;
23243                 hba[ctlr]->ctlr = ctlr;
23244                 hba[ctlr]->board_id = board_id;
23245                 hba[ctlr]->pci_dev = NULL; /* not PCI */
23246 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23247         struct scatterlist tmp_sg[SG_MAX];
23248         int i, dir, seg;
23249  
23250 +       pax_track_stack();
23251 +
23252  queue_next:
23253         creq = blk_peek_request(q);
23254         if (!creq)
23255 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23256  
23257         while((c = h->reqQ) != NULL) {
23258                 /* Can't do anything if we're busy */
23259 -               if (h->access.fifo_full(h) == 0)
23260 +               if (h->access->fifo_full(h) == 0)
23261                         return;
23262  
23263                 /* Get the first entry from the request Q */
23264 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23265                 h->Qdepth--;
23266         
23267                 /* Tell the controller to do our bidding */
23268 -               h->access.submit_command(h, c);
23269 +               h->access->submit_command(h, c);
23270  
23271                 /* Get onto the completion Q */
23272                 addQ(&h->cmpQ, c);
23273 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq, 
23274         unsigned long flags;
23275         __u32 a,a1;
23276  
23277 -       istat = h->access.intr_pending(h);
23278 +       istat = h->access->intr_pending(h);
23279         /* Is this interrupt for us? */
23280         if (istat == 0)
23281                 return IRQ_NONE;
23282 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq, 
23283          */
23284         spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23285         if (istat & FIFO_NOT_EMPTY) {
23286 -               while((a = h->access.command_completed(h))) {
23287 +               while((a = h->access->command_completed(h))) {
23288                         a1 = a; a &= ~3;
23289                         if ((c = h->cmpQ) == NULL)
23290                         {  
23291 @@ -1449,11 +1451,11 @@ static int sendcmd(
23292         /*
23293          * Disable interrupt
23294          */
23295 -       info_p->access.set_intr_mask(info_p, 0);
23296 +       info_p->access->set_intr_mask(info_p, 0);
23297         /* Make sure there is room in the command FIFO */
23298         /* Actually it should be completely empty at this time. */
23299         for (i = 200000; i > 0; i--) {
23300 -               temp = info_p->access.fifo_full(info_p);
23301 +               temp = info_p->access->fifo_full(info_p);
23302                 if (temp != 0) {
23303                         break;
23304                 }
23305 @@ -1466,7 +1468,7 @@ DBG(
23306         /*
23307          * Send the cmd
23308          */
23309 -       info_p->access.submit_command(info_p, c);
23310 +       info_p->access->submit_command(info_p, c);
23311         complete = pollcomplete(ctlr);
23312         
23313         pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, 
23314 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23315          * we check the new geometry.  Then turn interrupts back on when
23316          * we're done.
23317          */
23318 -       host->access.set_intr_mask(host, 0);
23319 +       host->access->set_intr_mask(host, 0);
23320         getgeometry(ctlr);
23321 -       host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23322 +       host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23323  
23324         for(i=0; i<NWD; i++) {
23325                 struct gendisk *disk = ida_gendisk[ctlr][i];
23326 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23327         /* Wait (up to 2 seconds) for a command to complete */
23328  
23329         for (i = 200000; i > 0; i--) {
23330 -               done = hba[ctlr]->access.command_completed(hba[ctlr]);
23331 +               done = hba[ctlr]->access->command_completed(hba[ctlr]);
23332                 if (done == 0) {
23333                         udelay(10);     /* a short fixed delay */
23334                 } else
23335 diff -urNp linux-3.0.4/drivers/block/cpqarray.h linux-3.0.4/drivers/block/cpqarray.h
23336 --- linux-3.0.4/drivers/block/cpqarray.h        2011-07-21 22:17:23.000000000 -0400
23337 +++ linux-3.0.4/drivers/block/cpqarray.h        2011-08-23 21:47:55.000000000 -0400
23338 @@ -99,7 +99,7 @@ struct ctlr_info {
23339         drv_info_t      drv[NWD];
23340         struct proc_dir_entry *proc;
23341  
23342 -       struct access_method access;
23343 +       struct access_method *access;
23344  
23345         cmdlist_t *reqQ;
23346         cmdlist_t *cmpQ;
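
The cciss and cpqarray hunks above make the same structural change: ctlr_info used to embed a writable copy of struct access_method, and now it carries only a pointer into the driver's static method table, so every call site goes from h->access.fn(h) to h->access->fn(h) and the probe code stores products[i].access instead of dereferencing it. Holding a pointer means the shared table never has to be copied into writable per-controller memory, in line with the wider patch's habit of keeping such operation tables read-only. The sketch below uses invented names (demo_ops, demo_ctlr) to show the before/after shape.

    /* Sketch of the ops-table change; the real structures are
     * access_method/ctlr_info in the hunks above. */
    #include <stdio.h>

    struct demo_ops {
            void (*submit)(const char *what);
    };

    static void real_submit(const char *what) { printf("submit %s\n", what); }

    /* With a pointer member, the table itself can live in read-only storage. */
    static const struct demo_ops performant_ops = { .submit = real_submit };

    struct demo_ctlr {
            const struct demo_ops *ops;   /* was: struct demo_ops ops; (a writable copy) */
    };

    int main(void)
    {
            struct demo_ctlr h = { .ops = &performant_ops }; /* was: h.ops = performant_ops; */

            h.ops->submit("cmd");                            /* was: h.ops.submit("cmd"); */
            return 0;
    }
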
23347 diff -urNp linux-3.0.4/drivers/block/DAC960.c linux-3.0.4/drivers/block/DAC960.c
23348 --- linux-3.0.4/drivers/block/DAC960.c  2011-07-21 22:17:23.000000000 -0400
23349 +++ linux-3.0.4/drivers/block/DAC960.c  2011-08-23 21:48:14.000000000 -0400
23350 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23351    unsigned long flags;
23352    int Channel, TargetID;
23353  
23354 +  pax_track_stack();
23355 +
23356    if (!init_dma_loaf(Controller->PCIDevice, &local_dma, 
23357                 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23358                         sizeof(DAC960_SCSI_Inquiry_T) +
23359 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_int.h linux-3.0.4/drivers/block/drbd/drbd_int.h
23360 --- linux-3.0.4/drivers/block/drbd/drbd_int.h   2011-07-21 22:17:23.000000000 -0400
23361 +++ linux-3.0.4/drivers/block/drbd/drbd_int.h   2011-08-23 21:47:55.000000000 -0400
23362 @@ -737,7 +737,7 @@ struct drbd_request;
23363  struct drbd_epoch {
23364         struct list_head list;
23365         unsigned int barrier_nr;
23366 -       atomic_t epoch_size; /* increased on every request added. */
23367 +       atomic_unchecked_t epoch_size; /* increased on every request added. */
23368         atomic_t active;     /* increased on every req. added, and dec on every finished. */
23369         unsigned long flags;
23370  };
23371 @@ -1109,7 +1109,7 @@ struct drbd_conf {
23372         void *int_dig_in;
23373         void *int_dig_vv;
23374         wait_queue_head_t seq_wait;
23375 -       atomic_t packet_seq;
23376 +       atomic_unchecked_t packet_seq;
23377         unsigned int peer_seq;
23378         spinlock_t peer_seq_lock;
23379         unsigned int minor;
23380 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_main.c linux-3.0.4/drivers/block/drbd/drbd_main.c
23381 --- linux-3.0.4/drivers/block/drbd/drbd_main.c  2011-07-21 22:17:23.000000000 -0400
23382 +++ linux-3.0.4/drivers/block/drbd/drbd_main.c  2011-08-23 21:47:55.000000000 -0400
23383 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
23384         p.sector   = sector;
23385         p.block_id = block_id;
23386         p.blksize  = blksize;
23387 -       p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23388 +       p.seq_num  = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23389  
23390         if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23391                 return false;
23392 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
23393         p.sector   = cpu_to_be64(req->sector);
23394         p.block_id = (unsigned long)req;
23395         p.seq_num  = cpu_to_be32(req->seq_num =
23396 -                                atomic_add_return(1, &mdev->packet_seq));
23397 +                                atomic_add_return_unchecked(1, &mdev->packet_seq));
23398  
23399         dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23400  
23401 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
23402         atomic_set(&mdev->unacked_cnt, 0);
23403         atomic_set(&mdev->local_cnt, 0);
23404         atomic_set(&mdev->net_cnt, 0);
23405 -       atomic_set(&mdev->packet_seq, 0);
23406 +       atomic_set_unchecked(&mdev->packet_seq, 0);
23407         atomic_set(&mdev->pp_in_use, 0);
23408         atomic_set(&mdev->pp_in_use_by_net, 0);
23409         atomic_set(&mdev->rs_sect_in, 0);
23410 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf 
23411                                 mdev->receiver.t_state);
23412  
23413         /* no need to lock it, I'm the only thread alive */
23414 -       if (atomic_read(&mdev->current_epoch->epoch_size) !=  0)
23415 -               dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23416 +       if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) !=  0)
23417 +               dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23418         mdev->al_writ_cnt  =
23419         mdev->bm_writ_cnt  =
23420         mdev->read_cnt     =
23421 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_nl.c linux-3.0.4/drivers/block/drbd/drbd_nl.c
23422 --- linux-3.0.4/drivers/block/drbd/drbd_nl.c    2011-07-21 22:17:23.000000000 -0400
23423 +++ linux-3.0.4/drivers/block/drbd/drbd_nl.c    2011-08-23 21:47:55.000000000 -0400
23424 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
23425         module_put(THIS_MODULE);
23426  }
23427  
23428 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23429 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23430  
23431  static unsigned short *
23432  __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23433 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
23434         cn_reply->id.idx = CN_IDX_DRBD;
23435         cn_reply->id.val = CN_VAL_DRBD;
23436  
23437 -       cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23438 +       cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23439         cn_reply->ack = 0; /* not used here. */
23440         cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23441                 (int)((char *)tl - (char *)reply->tag_list);
23442 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23443         cn_reply->id.idx = CN_IDX_DRBD;
23444         cn_reply->id.val = CN_VAL_DRBD;
23445  
23446 -       cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23447 +       cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23448         cn_reply->ack = 0; /* not used here. */
23449         cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23450                 (int)((char *)tl - (char *)reply->tag_list);
23451 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23452         cn_reply->id.idx = CN_IDX_DRBD;
23453         cn_reply->id.val = CN_VAL_DRBD;
23454  
23455 -       cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23456 +       cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23457         cn_reply->ack = 0; // not used here.
23458         cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23459                 (int)((char*)tl - (char*)reply->tag_list);
23460 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
23461         cn_reply->id.idx = CN_IDX_DRBD;
23462         cn_reply->id.val = CN_VAL_DRBD;
23463  
23464 -       cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23465 +       cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23466         cn_reply->ack = 0; /* not used here. */
23467         cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23468                 (int)((char *)tl - (char *)reply->tag_list);
23469 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_receiver.c linux-3.0.4/drivers/block/drbd/drbd_receiver.c
23470 --- linux-3.0.4/drivers/block/drbd/drbd_receiver.c      2011-07-21 22:17:23.000000000 -0400
23471 +++ linux-3.0.4/drivers/block/drbd/drbd_receiver.c      2011-08-23 21:47:55.000000000 -0400
23472 @@ -894,7 +894,7 @@ retry:
23473         sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23474         sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23475  
23476 -       atomic_set(&mdev->packet_seq, 0);
23477 +       atomic_set_unchecked(&mdev->packet_seq, 0);
23478         mdev->peer_seq = 0;
23479  
23480         drbd_thread_start(&mdev->asender);
23481 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
23482         do {
23483                 next_epoch = NULL;
23484  
23485 -               epoch_size = atomic_read(&epoch->epoch_size);
23486 +               epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23487  
23488                 switch (ev & ~EV_CLEANUP) {
23489                 case EV_PUT:
23490 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
23491                                         rv = FE_DESTROYED;
23492                         } else {
23493                                 epoch->flags = 0;
23494 -                               atomic_set(&epoch->epoch_size, 0);
23495 +                               atomic_set_unchecked(&epoch->epoch_size, 0);
23496                                 /* atomic_set(&epoch->active, 0); is already zero */
23497                                 if (rv == FE_STILL_LIVE)
23498                                         rv = FE_RECYCLED;
23499 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
23500                 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23501                 drbd_flush(mdev);
23502  
23503 -               if (atomic_read(&mdev->current_epoch->epoch_size)) {
23504 +               if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23505                         epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23506                         if (epoch)
23507                                 break;
23508                 }
23509  
23510                 epoch = mdev->current_epoch;
23511 -               wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23512 +               wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23513  
23514                 D_ASSERT(atomic_read(&epoch->active) == 0);
23515                 D_ASSERT(epoch->flags == 0);
23516 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
23517         }
23518  
23519         epoch->flags = 0;
23520 -       atomic_set(&epoch->epoch_size, 0);
23521 +       atomic_set_unchecked(&epoch->epoch_size, 0);
23522         atomic_set(&epoch->active, 0);
23523  
23524         spin_lock(&mdev->epoch_lock);
23525 -       if (atomic_read(&mdev->current_epoch->epoch_size)) {
23526 +       if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23527                 list_add(&epoch->list, &mdev->current_epoch->list);
23528                 mdev->current_epoch = epoch;
23529                 mdev->epochs++;
23530 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
23531                 spin_unlock(&mdev->peer_seq_lock);
23532  
23533                 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23534 -               atomic_inc(&mdev->current_epoch->epoch_size);
23535 +               atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23536                 return drbd_drain_block(mdev, data_size);
23537         }
23538  
23539 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
23540  
23541         spin_lock(&mdev->epoch_lock);
23542         e->epoch = mdev->current_epoch;
23543 -       atomic_inc(&e->epoch->epoch_size);
23544 +       atomic_inc_unchecked(&e->epoch->epoch_size);
23545         atomic_inc(&e->epoch->active);
23546         spin_unlock(&mdev->epoch_lock);
23547  
23548 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
23549         D_ASSERT(list_empty(&mdev->done_ee));
23550  
23551         /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23552 -       atomic_set(&mdev->current_epoch->epoch_size, 0);
23553 +       atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23554         D_ASSERT(list_empty(&mdev->current_epoch->list));
23555  }
23556  
23557 diff -urNp linux-3.0.4/drivers/block/nbd.c linux-3.0.4/drivers/block/nbd.c
23558 --- linux-3.0.4/drivers/block/nbd.c     2011-07-21 22:17:23.000000000 -0400
23559 +++ linux-3.0.4/drivers/block/nbd.c     2011-08-23 21:48:14.000000000 -0400
23560 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23561         struct kvec iov;
23562         sigset_t blocked, oldset;
23563  
23564 +       pax_track_stack();
23565 +
23566         if (unlikely(!sock)) {
23567                 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23568                        lo->disk->disk_name, (send ? "send" : "recv"));
23569 @@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
23570  static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23571                        unsigned int cmd, unsigned long arg)
23572  {
23573 +       pax_track_stack();
23574 +
23575         switch (cmd) {
23576         case NBD_DISCONNECT: {
23577                 struct request sreq;
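
Several hunks above (DAC960, nbd's sock_xmit() and __nbd_ioctl(), cpqarray's do_ida_request(), solos-pci's print_buffer(), the IPMI panic-event path) insert a pax_track_stack() call at the top of functions that place large objects on the kernel stack, so PaX's stack tracking sees the deep frame before it is used. The helper itself is defined elsewhere in this patch; the snippet below only models the usage pattern in userspace, with a stand-in stub and an invented function name, not the real implementation.

    /* Usage-pattern sketch only (userspace model). */
    #include <stdio.h>
    #include <string.h>

    static void pax_track_stack(void) { /* stand-in; the real helper comes from the patch */ }

    static void demo_deep_frame(void)
    {
            char scratch[600];              /* large on-stack buffer, as in the hunks above */

            pax_track_stack();              /* called first, before the frame is used */

            memset(scratch, 'x', sizeof(scratch) - 1);
            scratch[sizeof(scratch) - 1] = '\0';
            printf("frame uses %zu bytes of stack\n", sizeof(scratch));
    }

    int main(void) { demo_deep_frame(); return 0; }
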
23578 diff -urNp linux-3.0.4/drivers/char/agp/frontend.c linux-3.0.4/drivers/char/agp/frontend.c
23579 --- linux-3.0.4/drivers/char/agp/frontend.c     2011-07-21 22:17:23.000000000 -0400
23580 +++ linux-3.0.4/drivers/char/agp/frontend.c     2011-08-23 21:47:55.000000000 -0400
23581 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23582         if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23583                 return -EFAULT;
23584  
23585 -       if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23586 +       if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23587                 return -EFAULT;
23588  
23589         client = agp_find_client_by_pid(reserve.pid);
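
The agp/frontend.c hunk above tightens the guard on the user-supplied reserve.seg_count, dividing the limit by sizeof(struct agp_segment_priv) instead of sizeof(struct agp_segment), presumably because that is the per-segment object the reserve path goes on to allocate; the guard exists so that the later count-times-size arithmetic cannot wrap. The standalone sketch below shows the general form of that check with invented names (demo_elem, demo_reserve).

    /* Generic "divide the limit by the element size before multiplying" guard. */
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct demo_elem { unsigned long start, end; int prot; };

    static void *demo_reserve(unsigned int count)
    {
            /* refuse counts for which count * sizeof(elem) would overflow */
            if (count >= UINT_MAX / sizeof(struct demo_elem))
                    return NULL;
            return malloc(count * sizeof(struct demo_elem));
    }

    int main(void)
    {
            void *ok  = demo_reserve(4);
            void *bad = demo_reserve(UINT_MAX);     /* rejected by the guard */

            printf("ok=%s bad=%s\n", ok ? "allocated" : "NULL", bad ? "allocated" : "NULL");
            free(ok);
            return 0;
    }
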
23590 diff -urNp linux-3.0.4/drivers/char/briq_panel.c linux-3.0.4/drivers/char/briq_panel.c
23591 --- linux-3.0.4/drivers/char/briq_panel.c       2011-07-21 22:17:23.000000000 -0400
23592 +++ linux-3.0.4/drivers/char/briq_panel.c       2011-08-23 21:48:14.000000000 -0400
23593 @@ -9,6 +9,7 @@
23594  #include <linux/types.h>
23595  #include <linux/errno.h>
23596  #include <linux/tty.h>
23597 +#include <linux/mutex.h>
23598  #include <linux/timer.h>
23599  #include <linux/kernel.h>
23600  #include <linux/wait.h>
23601 @@ -34,6 +35,7 @@ static int            vfd_is_open;
23602  static unsigned char   vfd[40];
23603  static int             vfd_cursor;
23604  static unsigned char   ledpb, led;
23605 +static DEFINE_MUTEX(vfd_mutex);
23606  
23607  static void update_vfd(void)
23608  {
23609 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23610         if (!vfd_is_open)
23611                 return -EBUSY;
23612  
23613 +       mutex_lock(&vfd_mutex);
23614         for (;;) {
23615                 char c;
23616                 if (!indx)
23617                         break;
23618 -               if (get_user(c, buf))
23619 +               if (get_user(c, buf)) {
23620 +                       mutex_unlock(&vfd_mutex);
23621                         return -EFAULT;
23622 +               }
23623                 if (esc) {
23624                         set_led(c);
23625                         esc = 0;
23626 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23627                 buf++;
23628         }
23629         update_vfd();
23630 +       mutex_unlock(&vfd_mutex);
23631  
23632         return len;
23633  }
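
The briq_panel.c hunk above adds vfd_mutex around the body of briq_panel_write(), so concurrent writers can no longer interleave their updates to the shared vfd[] buffer and vfd_cursor, and it drops the lock on the early get_user() failure path as well as at the normal exit. Below is a small pthread model of that shape, with invented names (panel_mutex, panel_write) and a stand-in for get_user().

    /* "Unlock on every exit path" shape, modelled with pthreads. */
    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t panel_mutex = PTHREAD_MUTEX_INITIALIZER;
    static char panel_buf[40];
    static size_t panel_cursor;

    static int copy_in(char *dst, const char *src)   /* stand-in for get_user() */
    {
            if (!src)
                    return -1;
            *dst = *src;
            return 0;
    }

    static int panel_write(const char *buf, size_t len)
    {
            pthread_mutex_lock(&panel_mutex);
            for (size_t i = 0; i < len; i++) {
                    char c;

                    if (copy_in(&c, buf + i)) {
                            pthread_mutex_unlock(&panel_mutex);  /* unlock on the error path too */
                            return -EFAULT;
                    }
                    panel_buf[panel_cursor++ % sizeof(panel_buf)] = c;
            }
            pthread_mutex_unlock(&panel_mutex);
            return (int)len;
    }

    int main(void)
    {
            return panel_write("hello", 5) == 5 ? 0 : 1;  /* link with -lpthread */
    }
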
23634 diff -urNp linux-3.0.4/drivers/char/genrtc.c linux-3.0.4/drivers/char/genrtc.c
23635 --- linux-3.0.4/drivers/char/genrtc.c   2011-07-21 22:17:23.000000000 -0400
23636 +++ linux-3.0.4/drivers/char/genrtc.c   2011-08-23 21:48:14.000000000 -0400
23637 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23638         switch (cmd) {
23639  
23640         case RTC_PLL_GET:
23641 +           memset(&pll, 0, sizeof(pll));
23642             if (get_rtc_pll(&pll))
23643                     return -EINVAL;
23644             else
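
The genrtc.c hunk above zeroes the on-stack rtc_pll_info before it is filled and copied out, and the earlier cciss hunk does the same for the compat ioctl's arg64; without the memset, structure padding and any members the handler does not set would carry stale kernel stack bytes out to user space. The standalone sketch below models the pattern with an invented struct (demo_pll) and memcpy standing in for copy_to_user().

    /* Infoleak-avoidance pattern from the hunks above, modelled in userspace. */
    #include <stdio.h>
    #include <string.h>

    struct demo_pll {
            int  value;
            char flag;          /* padding bytes typically follow on most ABIs */
            int  max;
    };

    static void report_pll(void *user_buf)
    {
            struct demo_pll pll;

            memset(&pll, 0, sizeof(pll));        /* clear padding and unset fields first */
            pll.value = 42;                      /* only some fields get filled in */

            memcpy(user_buf, &pll, sizeof(pll)); /* stand-in for copy_to_user() */
    }

    int main(void)
    {
            unsigned char out[sizeof(struct demo_pll)];

            report_pll(out);
            printf("first byte of copied struct: %u\n", out[0]);
            return 0;
    }
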
23645 diff -urNp linux-3.0.4/drivers/char/hpet.c linux-3.0.4/drivers/char/hpet.c
23646 --- linux-3.0.4/drivers/char/hpet.c     2011-07-21 22:17:23.000000000 -0400
23647 +++ linux-3.0.4/drivers/char/hpet.c     2011-08-23 21:47:55.000000000 -0400
23648 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
23649  }
23650  
23651  static int
23652 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23653 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23654                   struct hpet_info *info)
23655  {
23656         struct hpet_timer __iomem *timer;
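
The hpet.c hunk above changes hpet_ioctl_common()'s cmd parameter from int to unsigned int, matching the unsigned command word the ioctl entry points receive; command numbers built with the _IO* macros carry direction bits in the top of the 32-bit word, so holding them in a signed int turns them negative and invites sign-extension and comparison surprises. The tiny example below demonstrates the effect with a made-up command encoding modelled on _IOC_READ.

    /* Why ioctl command numbers are kept unsigned. */
    #include <stdio.h>

    #define DEMO_IOC_READ  2U                                /* same idea as _IOC_READ */
    #define DEMO_CMD       ((DEMO_IOC_READ << 30) | 0x1234U) /* top bit set */

    int main(void)
    {
            unsigned int ucmd = DEMO_CMD;
            int          scmd = (int)DEMO_CMD;   /* what an 'int cmd' parameter would hold */

            /* scmd is typically negative on two's-complement targets */
            printf("unsigned: %#x  signed: %d\n", ucmd, scmd);
            return 0;
    }
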
23657 diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c
23658 --- linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c     2011-07-21 22:17:23.000000000 -0400
23659 +++ linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c     2011-08-23 21:48:14.000000000 -0400
23660 @@ -415,7 +415,7 @@ struct ipmi_smi {
23661         struct proc_dir_entry *proc_dir;
23662         char                  proc_dir_name[10];
23663  
23664 -       atomic_t stats[IPMI_NUM_STATS];
23665 +       atomic_unchecked_t stats[IPMI_NUM_STATS];
23666  
23667         /*
23668          * run_to_completion duplicate of smb_info, smi_info
23669 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23670  
23671  
23672  #define ipmi_inc_stat(intf, stat) \
23673 -       atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23674 +       atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23675  #define ipmi_get_stat(intf, stat) \
23676 -       ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23677 +       ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23678  
23679  static int is_lan_addr(struct ipmi_addr *addr)
23680  {
23681 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23682         INIT_LIST_HEAD(&intf->cmd_rcvrs);
23683         init_waitqueue_head(&intf->waitq);
23684         for (i = 0; i < IPMI_NUM_STATS; i++)
23685 -               atomic_set(&intf->stats[i], 0);
23686 +               atomic_set_unchecked(&intf->stats[i], 0);
23687  
23688         intf->proc_dir = NULL;
23689  
23690 @@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
23691         struct ipmi_smi_msg               smi_msg;
23692         struct ipmi_recv_msg              recv_msg;
23693  
23694 +       pax_track_stack();
23695 +
23696         si = (struct ipmi_system_interface_addr *) &addr;
23697         si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23698         si->channel = IPMI_BMC_CHANNEL;
23699 diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c
23700 --- linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c        2011-07-21 22:17:23.000000000 -0400
23701 +++ linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c        2011-08-23 21:47:55.000000000 -0400
23702 @@ -277,7 +277,7 @@ struct smi_info {
23703         unsigned char slave_addr;
23704  
23705         /* Counters and things for the proc filesystem. */
23706 -       atomic_t stats[SI_NUM_STATS];
23707 +       atomic_unchecked_t stats[SI_NUM_STATS];
23708  
23709         struct task_struct *thread;
23710  
23711 @@ -286,9 +286,9 @@ struct smi_info {
23712  };
23713  
23714  #define smi_inc_stat(smi, stat) \
23715 -       atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23716 +       atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23717  #define smi_get_stat(smi, stat) \
23718 -       ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23719 +       ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23720  
23721  #define SI_MAX_PARMS 4
23722  
23723 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info 
23724         atomic_set(&new_smi->req_events, 0);
23725         new_smi->run_to_completion = 0;
23726         for (i = 0; i < SI_NUM_STATS; i++)
23727 -               atomic_set(&new_smi->stats[i], 0);
23728 +               atomic_set_unchecked(&new_smi->stats[i], 0);
23729  
23730         new_smi->interrupt_disabled = 1;
23731         atomic_set(&new_smi->stop_operation, 0);
23732 diff -urNp linux-3.0.4/drivers/char/Kconfig linux-3.0.4/drivers/char/Kconfig
23733 --- linux-3.0.4/drivers/char/Kconfig    2011-07-21 22:17:23.000000000 -0400
23734 +++ linux-3.0.4/drivers/char/Kconfig    2011-08-23 21:48:14.000000000 -0400
23735 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23736  
23737  config DEVKMEM
23738         bool "/dev/kmem virtual device support"
23739 -       default y
23740 +       default n
23741 +       depends on !GRKERNSEC_KMEM
23742         help
23743           Say Y here if you want to support the /dev/kmem device. The
23744           /dev/kmem device is rarely used, but can be used for certain
23745 @@ -596,6 +597,7 @@ config DEVPORT
23746         bool
23747         depends on !M68K
23748         depends on ISA || PCI
23749 +       depends on !GRKERNSEC_KMEM
23750         default y
23751  
23752  source "drivers/s390/char/Kconfig"
23753 diff -urNp linux-3.0.4/drivers/char/mem.c linux-3.0.4/drivers/char/mem.c
23754 --- linux-3.0.4/drivers/char/mem.c      2011-07-21 22:17:23.000000000 -0400
23755 +++ linux-3.0.4/drivers/char/mem.c      2011-08-23 21:48:14.000000000 -0400
23756 @@ -18,6 +18,7 @@
23757  #include <linux/raw.h>
23758  #include <linux/tty.h>
23759  #include <linux/capability.h>
23760 +#include <linux/security.h>
23761  #include <linux/ptrace.h>
23762  #include <linux/device.h>
23763  #include <linux/highmem.h>
23764 @@ -34,6 +35,10 @@
23765  # include <linux/efi.h>
23766  #endif
23767  
23768 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23769 +extern struct file_operations grsec_fops;
23770 +#endif
23771 +
23772  static inline unsigned long size_inside_page(unsigned long start,
23773                                              unsigned long size)
23774  {
23775 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23776  
23777         while (cursor < to) {
23778                 if (!devmem_is_allowed(pfn)) {
23779 +#ifdef CONFIG_GRKERNSEC_KMEM
23780 +                       gr_handle_mem_readwrite(from, to);
23781 +#else
23782                         printk(KERN_INFO
23783                 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23784                                 current->comm, from, to);
23785 +#endif
23786                         return 0;
23787                 }
23788                 cursor += PAGE_SIZE;
23789 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23790         }
23791         return 1;
23792  }
23793 +#elif defined(CONFIG_GRKERNSEC_KMEM)
23794 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23795 +{
23796 +       return 0;
23797 +}
23798  #else
23799  static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23800  {
23801 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23802  
23803         while (count > 0) {
23804                 unsigned long remaining;
23805 +               char *temp;
23806  
23807                 sz = size_inside_page(p, count);
23808  
23809 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23810                 if (!ptr)
23811                         return -EFAULT;
23812  
23813 -               remaining = copy_to_user(buf, ptr, sz);
23814 +#ifdef CONFIG_PAX_USERCOPY
23815 +               temp = kmalloc(sz, GFP_KERNEL);
23816 +               if (!temp) {
23817 +                       unxlate_dev_mem_ptr(p, ptr);
23818 +                       return -ENOMEM;
23819 +               }
23820 +               memcpy(temp, ptr, sz);
23821 +#else
23822 +               temp = ptr;
23823 +#endif
23824 +
23825 +               remaining = copy_to_user(buf, temp, sz);
23826 +
23827 +#ifdef CONFIG_PAX_USERCOPY
23828 +               kfree(temp);
23829 +#endif
23830 +
23831                 unxlate_dev_mem_ptr(p, ptr);
23832                 if (remaining)
23833                         return -EFAULT;
23834 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23835                          size_t count, loff_t *ppos)
23836  {
23837         unsigned long p = *ppos;
23838 -       ssize_t low_count, read, sz;
23839 +       ssize_t low_count, read, sz, err = 0;
23840         char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23841 -       int err = 0;
23842  
23843         read = 0;
23844         if (p < (unsigned long) high_memory) {
23845 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23846                 }
23847  #endif
23848                 while (low_count > 0) {
23849 +                       char *temp;
23850 +
23851                         sz = size_inside_page(p, low_count);
23852  
23853                         /*
23854 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23855                          */
23856                         kbuf = xlate_dev_kmem_ptr((char *)p);
23857  
23858 -                       if (copy_to_user(buf, kbuf, sz))
23859 +#ifdef CONFIG_PAX_USERCOPY
23860 +                       temp = kmalloc(sz, GFP_KERNEL);
23861 +                       if (!temp)
23862 +                               return -ENOMEM;
23863 +                       memcpy(temp, kbuf, sz);
23864 +#else
23865 +                       temp = kbuf;
23866 +#endif
23867 +
23868 +                       err = copy_to_user(buf, temp, sz);
23869 +
23870 +#ifdef CONFIG_PAX_USERCOPY
23871 +                       kfree(temp);
23872 +#endif
23873 +
23874 +                       if (err)
23875                                 return -EFAULT;
23876                         buf += sz;
23877                         p += sz;
23878 @@ -866,6 +913,9 @@ static const struct memdev {
23879  #ifdef CONFIG_CRASH_DUMP
23880         [12] = { "oldmem", 0, &oldmem_fops, NULL },
23881  #endif
23882 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23883 +       [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
23884 +#endif
23885  };
23886  
23887  static int memory_open(struct inode *inode, struct file *filp)
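
Under CONFIG_PAX_USERCOPY, read_mem() and read_kmem() stop handing raw /dev/mem and /dev/kmem mappings straight to copy_to_user(); the data is first staged in a kmalloc() object of exactly sz bytes and freed afterwards, so the USERCOPY object-size checks always see a heap object whose bounds they can verify. A userspace sketch of the staging step, with invented helper names (model code, not the kernel API):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Models the PAX_USERCOPY path added to read_mem(): stage `sz` bytes from an
 * arbitrary source mapping into a heap object of exactly `sz` bytes before
 * handing it to the user copy, then free the staging buffer. */
int copy_out_staged(void *user_buf, const void *src, size_t sz)
{
        void *temp = malloc(sz);          /* models kmalloc(sz, GFP_KERNEL) */

        if (!temp)
                return -ENOMEM;
        memcpy(temp, src, sz);            /* bounded staging copy */
        memcpy(user_buf, temp, sz);       /* models copy_to_user(buf, temp, sz) */
        free(temp);                       /* models kfree(temp) */
        return 0;
}
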
23888 diff -urNp linux-3.0.4/drivers/char/nvram.c linux-3.0.4/drivers/char/nvram.c
23889 --- linux-3.0.4/drivers/char/nvram.c    2011-07-21 22:17:23.000000000 -0400
23890 +++ linux-3.0.4/drivers/char/nvram.c    2011-08-23 21:47:55.000000000 -0400
23891 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
23892  
23893         spin_unlock_irq(&rtc_lock);
23894  
23895 -       if (copy_to_user(buf, contents, tmp - contents))
23896 +       if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
23897                 return -EFAULT;
23898  
23899         *ppos = i;
23900 diff -urNp linux-3.0.4/drivers/char/random.c linux-3.0.4/drivers/char/random.c
23901 --- linux-3.0.4/drivers/char/random.c   2011-08-23 21:44:40.000000000 -0400
23902 +++ linux-3.0.4/drivers/char/random.c   2011-08-23 21:48:14.000000000 -0400
23903 @@ -261,8 +261,13 @@
23904  /*
23905   * Configuration information
23906   */
23907 +#ifdef CONFIG_GRKERNSEC_RANDNET
23908 +#define INPUT_POOL_WORDS 512
23909 +#define OUTPUT_POOL_WORDS 128
23910 +#else
23911  #define INPUT_POOL_WORDS 128
23912  #define OUTPUT_POOL_WORDS 32
23913 +#endif
23914  #define SEC_XFER_SIZE 512
23915  #define EXTRACT_SIZE 10
23916  
23917 @@ -300,10 +305,17 @@ static struct poolinfo {
23918         int poolwords;
23919         int tap1, tap2, tap3, tap4, tap5;
23920  } poolinfo_table[] = {
23921 +#ifdef CONFIG_GRKERNSEC_RANDNET
23922 +       /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
23923 +       { 512,  411,    308,    208,    104,    1 },
23924 +       /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
23925 +       { 128,  103,    76,     51,     25,     1 },
23926 +#else
23927         /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
23928         { 128,  103,    76,     51,     25,     1 },
23929         /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
23930         { 32,   26,     20,     14,     7,      1 },
23931 +#endif
23932  #if 0
23933         /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
23934         { 2048, 1638,   1231,   819,    411,    1 },
23935 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
23936  
23937                 extract_buf(r, tmp);
23938                 i = min_t(int, nbytes, EXTRACT_SIZE);
23939 -               if (copy_to_user(buf, tmp, i)) {
23940 +               if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
23941                         ret = -EFAULT;
23942                         break;
23943                 }
23944 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
23945  #include <linux/sysctl.h>
23946  
23947  static int min_read_thresh = 8, min_write_thresh;
23948 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
23949 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
23950  static int max_write_thresh = INPUT_POOL_WORDS * 32;
23951  static char sysctl_bootid[16];
23952  
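
nvram_read() and extract_entropy_user() both gain the same defensive guard: the length handed to copy_to_user() is first compared against the size of the local buffer, so a miscomputed length can never read past the end of the stack object. (The RANDNET-conditional hunk above simply enlarges both entropy pools and bases max_read_thresh on OUTPUT_POOL_WORDS instead of INPUT_POOL_WORDS.) The guard, modeled in plain C with a hypothetical helper:

#include <string.h>

/* Models the added `len > sizeof(buf) || copy_to_user(...)` guard: refuse any
 * request that would read past the end of the local staging buffer. */
int copy_from_local_buf(void *dst, const char *buf, size_t buf_size, size_t len)
{
        if (len > buf_size)               /* models: tmp - contents > sizeof(contents) */
                return -1;                /* models returning -EFAULT */
        memcpy(dst, buf, len);            /* models copy_to_user(dst, buf, len) */
        return 0;
}
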
23953 diff -urNp linux-3.0.4/drivers/char/sonypi.c linux-3.0.4/drivers/char/sonypi.c
23954 --- linux-3.0.4/drivers/char/sonypi.c   2011-07-21 22:17:23.000000000 -0400
23955 +++ linux-3.0.4/drivers/char/sonypi.c   2011-08-23 21:47:55.000000000 -0400
23956 @@ -55,6 +55,7 @@
23957  #include <asm/uaccess.h>
23958  #include <asm/io.h>
23959  #include <asm/system.h>
23960 +#include <asm/local.h>
23961  
23962  #include <linux/sonypi.h>
23963  
23964 @@ -491,7 +492,7 @@ static struct sonypi_device {
23965         spinlock_t fifo_lock;
23966         wait_queue_head_t fifo_proc_list;
23967         struct fasync_struct *fifo_async;
23968 -       int open_count;
23969 +       local_t open_count;
23970         int model;
23971         struct input_dev *input_jog_dev;
23972         struct input_dev *input_key_dev;
23973 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
23974  static int sonypi_misc_release(struct inode *inode, struct file *file)
23975  {
23976         mutex_lock(&sonypi_device.lock);
23977 -       sonypi_device.open_count--;
23978 +       local_dec(&sonypi_device.open_count);
23979         mutex_unlock(&sonypi_device.lock);
23980         return 0;
23981  }
23982 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
23983  {
23984         mutex_lock(&sonypi_device.lock);
23985         /* Flush input queue on first open */
23986 -       if (!sonypi_device.open_count)
23987 +       if (!local_read(&sonypi_device.open_count))
23988                 kfifo_reset(&sonypi_device.fifo);
23989 -       sonypi_device.open_count++;
23990 +       local_inc(&sonypi_device.open_count);
23991         mutex_unlock(&sonypi_device.lock);
23992  
23993         return 0;
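
sonypi's open_count becomes a local_t read and updated via local_read()/local_inc()/local_dec(); the same conversion is applied to the DRM core's dev->open_count further below, where local_inc_return() and local_dec_and_test() drive the first-open/last-close decisions. The point is that the open/close bookkeeping no longer relies on non-atomic ++/-- of a shared int. A C11-atomics sketch of the idiom (userspace model, not the kernel local_t API):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_long open_count = 0;                /* models local_t open_count */

/* Models sonypi_misc_open(): flush state only for the very first opener
 * (the kernel code additionally holds a mutex around this sequence). */
bool device_open(void)
{
        bool first = (atomic_load(&open_count) == 0);   /* models local_read() */

        atomic_fetch_add(&open_count, 1);               /* models local_inc() */
        return first;
}

/* Models sonypi_misc_release(). */
void device_release(void)
{
        atomic_fetch_sub(&open_count, 1);               /* models local_dec() */
}
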
23994 diff -urNp linux-3.0.4/drivers/char/tpm/tpm_bios.c linux-3.0.4/drivers/char/tpm/tpm_bios.c
23995 --- linux-3.0.4/drivers/char/tpm/tpm_bios.c     2011-07-21 22:17:23.000000000 -0400
23996 +++ linux-3.0.4/drivers/char/tpm/tpm_bios.c     2011-08-23 21:47:55.000000000 -0400
23997 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
23998         event = addr;
23999  
24000         if ((event->event_type == 0 && event->event_size == 0) ||
24001 -           ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24002 +           (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24003                 return NULL;
24004  
24005         return addr;
24006 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24007                 return NULL;
24008  
24009         if ((event->event_type == 0 && event->event_size == 0) ||
24010 -           ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24011 +           (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24012                 return NULL;
24013  
24014         (*pos)++;
24015 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24016         int i;
24017  
24018         for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24019 -               seq_putc(m, data[i]);
24020 +               if (!seq_putc(m, data[i]))
24021 +                       return -EFAULT;
24022  
24023         return 0;
24024  }
24025 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log 
24026         log->bios_event_log_end = log->bios_event_log + len;
24027  
24028         virt = acpi_os_map_memory(start, len);
24029 +       if (!virt) {
24030 +               kfree(log->bios_event_log);
24031 +               log->bios_event_log = NULL;
24032 +               return -EFAULT;
24033 +       }
24034  
24035         memcpy(log->bios_event_log, virt, len);
24036  
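
The two measurement-iterator hunks in tpm_bios.c rewrite the bounds check from `addr + sizeof(struct tcpa_event) + event->event_size >= limit` to `event->event_size >= limit - addr - sizeof(struct tcpa_event)`. With an attacker-influenced event_size, the original sum can wrap around and pass the check; the rewritten form keeps the arithmetic on quantities known to be small (the same file also gains a NULL check on acpi_os_map_memory() and a check on seq_putc() failures). A small self-contained demonstration, using a stand-in header size and assuming addr <= limit as in the original code path:

#include <stdint.h>
#include <stdio.h>

#define HEADER  32u   /* stand-in for sizeof(struct tcpa_event) */

/* Returns 1 when the event is rejected as out of bounds. */
static int unsafe_check(uintptr_t addr, uintptr_t limit, size_t event_size)
{
        return (addr + HEADER + event_size) >= limit;    /* sum may wrap */
}

static int safe_check(uintptr_t addr, uintptr_t limit, size_t event_size)
{
        return event_size >= limit - addr - HEADER;      /* no wrap possible */
}

int main(void)
{
        uintptr_t addr = 0x1000, limit = 0x2000;
        size_t huge = SIZE_MAX - 16;                     /* wraps the naive sum */

        printf("unsafe check rejects huge event: %d\n", unsafe_check(addr, limit, huge));
        printf("safe   check rejects huge event: %d\n", safe_check(addr, limit, huge));
        return 0;
}
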
24037 diff -urNp linux-3.0.4/drivers/char/tpm/tpm.c linux-3.0.4/drivers/char/tpm/tpm.c
24038 --- linux-3.0.4/drivers/char/tpm/tpm.c  2011-07-21 22:17:23.000000000 -0400
24039 +++ linux-3.0.4/drivers/char/tpm/tpm.c  2011-08-23 21:48:14.000000000 -0400
24040 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24041                     chip->vendor.req_complete_val)
24042                         goto out_recv;
24043  
24044 -               if ((status == chip->vendor.req_canceled)) {
24045 +               if (status == chip->vendor.req_canceled) {
24046                         dev_err(chip->dev, "Operation Canceled\n");
24047                         rc = -ECANCELED;
24048                         goto out;
24049 @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24050  
24051         struct tpm_chip *chip = dev_get_drvdata(dev);
24052  
24053 +       pax_track_stack();
24054 +
24055         tpm_cmd.header.in = tpm_readpubek_header;
24056         err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24057                         "attempting to read the PUBEK");
24058 diff -urNp linux-3.0.4/drivers/crypto/hifn_795x.c linux-3.0.4/drivers/crypto/hifn_795x.c
24059 --- linux-3.0.4/drivers/crypto/hifn_795x.c      2011-07-21 22:17:23.000000000 -0400
24060 +++ linux-3.0.4/drivers/crypto/hifn_795x.c      2011-08-23 21:48:14.000000000 -0400
24061 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device 
24062                 0xCA, 0x34, 0x2B, 0x2E};
24063         struct scatterlist sg;
24064  
24065 +       pax_track_stack();
24066 +
24067         memset(src, 0, sizeof(src));
24068         memset(ctx.key, 0, sizeof(ctx.key));
24069  
24070 diff -urNp linux-3.0.4/drivers/crypto/padlock-aes.c linux-3.0.4/drivers/crypto/padlock-aes.c
24071 --- linux-3.0.4/drivers/crypto/padlock-aes.c    2011-07-21 22:17:23.000000000 -0400
24072 +++ linux-3.0.4/drivers/crypto/padlock-aes.c    2011-08-23 21:48:14.000000000 -0400
24073 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24074         struct crypto_aes_ctx gen_aes;
24075         int cpu;
24076  
24077 +       pax_track_stack();
24078 +
24079         if (key_len % 8) {
24080                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24081                 return -EINVAL;
24082 diff -urNp linux-3.0.4/drivers/edac/edac_pci_sysfs.c linux-3.0.4/drivers/edac/edac_pci_sysfs.c
24083 --- linux-3.0.4/drivers/edac/edac_pci_sysfs.c   2011-07-21 22:17:23.000000000 -0400
24084 +++ linux-3.0.4/drivers/edac/edac_pci_sysfs.c   2011-08-23 21:47:55.000000000 -0400
24085 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1;               /* log 
24086  static int edac_pci_log_npe = 1;       /* log PCI non-parity error errors */
24087  static int edac_pci_poll_msec = 1000;  /* one second workq period */
24088  
24089 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
24090 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24091 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24092 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24093  
24094  static struct kobject *edac_pci_top_main_kobj;
24095  static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24096 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24097                         edac_printk(KERN_CRIT, EDAC_PCI,
24098                                 "Signaled System Error on %s\n",
24099                                 pci_name(dev));
24100 -                       atomic_inc(&pci_nonparity_count);
24101 +                       atomic_inc_unchecked(&pci_nonparity_count);
24102                 }
24103  
24104                 if (status & (PCI_STATUS_PARITY)) {
24105 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24106                                 "Master Data Parity Error on %s\n",
24107                                 pci_name(dev));
24108  
24109 -                       atomic_inc(&pci_parity_count);
24110 +                       atomic_inc_unchecked(&pci_parity_count);
24111                 }
24112  
24113                 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24114 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24115                                 "Detected Parity Error on %s\n",
24116                                 pci_name(dev));
24117  
24118 -                       atomic_inc(&pci_parity_count);
24119 +                       atomic_inc_unchecked(&pci_parity_count);
24120                 }
24121         }
24122  
24123 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24124                                 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24125                                         "Signaled System Error on %s\n",
24126                                         pci_name(dev));
24127 -                               atomic_inc(&pci_nonparity_count);
24128 +                               atomic_inc_unchecked(&pci_nonparity_count);
24129                         }
24130  
24131                         if (status & (PCI_STATUS_PARITY)) {
24132 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24133                                         "Master Data Parity Error on "
24134                                         "%s\n", pci_name(dev));
24135  
24136 -                               atomic_inc(&pci_parity_count);
24137 +                               atomic_inc_unchecked(&pci_parity_count);
24138                         }
24139  
24140                         if (status & (PCI_STATUS_DETECTED_PARITY)) {
24141 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24142                                         "Detected Parity Error on %s\n",
24143                                         pci_name(dev));
24144  
24145 -                               atomic_inc(&pci_parity_count);
24146 +                               atomic_inc_unchecked(&pci_parity_count);
24147                         }
24148                 }
24149         }
24150 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24151         if (!check_pci_errors)
24152                 return;
24153  
24154 -       before_count = atomic_read(&pci_parity_count);
24155 +       before_count = atomic_read_unchecked(&pci_parity_count);
24156  
24157         /* scan all PCI devices looking for a Parity Error on devices and
24158          * bridges.
24159 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24160         /* Only if operator has selected panic on PCI Error */
24161         if (edac_pci_get_panic_on_pe()) {
24162                 /* If the count is different 'after' from 'before' */
24163 -               if (before_count != atomic_read(&pci_parity_count))
24164 +               if (before_count != atomic_read_unchecked(&pci_parity_count))
24165                         panic("EDAC: PCI Parity Error");
24166         }
24167  }
24168 diff -urNp linux-3.0.4/drivers/edac/mce_amd.h linux-3.0.4/drivers/edac/mce_amd.h
24169 --- linux-3.0.4/drivers/edac/mce_amd.h  2011-07-21 22:17:23.000000000 -0400
24170 +++ linux-3.0.4/drivers/edac/mce_amd.h  2011-08-23 21:47:55.000000000 -0400
24171 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
24172         bool (*dc_mce)(u16, u8);
24173         bool (*ic_mce)(u16, u8);
24174         bool (*nb_mce)(u16, u8);
24175 -};
24176 +} __no_const;
24177  
24178  void amd_report_gart_errors(bool);
24179  void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
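
The __no_const annotation added to struct amd_decoder_ops (and, below, to several firewire and nouveau engine/ops structures, alongside __do_const on nouveau's statically initialized shadow_methods table) tells PaX's constify gcc plugin not to force the structure into read-only memory, because its instances are legitimately assigned at runtime. A plain-C analogue of the distinction with hypothetical names; the real markers are plugin attributes, modeled here only by whether the object can be declared const:

#include <stdbool.h>

struct decoder_ops {
        bool (*dc_mce)(unsigned int bank, unsigned char xec);
        bool (*ic_mce)(unsigned int bank, unsigned char xec);
};

static bool k8_dc_mce(unsigned int bank, unsigned char xec)
{
        (void)bank; (void)xec;
        return true;
}

static bool k8_ic_mce(unsigned int bank, unsigned char xec)
{
        (void)bank; (void)xec;
        return false;
}

/* Fully known at build time: can live in read-only memory (the __do_const case). */
static const struct decoder_ops k8_ops = {
        .dc_mce = k8_dc_mce,
        .ic_mce = k8_ic_mce,
};

/* Selected per CPU family at runtime, so it must stay writable — the case the
 * patch marks with __no_const. */
static struct decoder_ops active_ops;

void select_decoder(void)
{
        active_ops = k8_ops;              /* runtime assignment rules out const */
}
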
24180 diff -urNp linux-3.0.4/drivers/firewire/core-card.c linux-3.0.4/drivers/firewire/core-card.c
24181 --- linux-3.0.4/drivers/firewire/core-card.c    2011-07-21 22:17:23.000000000 -0400
24182 +++ linux-3.0.4/drivers/firewire/core-card.c    2011-08-23 21:47:55.000000000 -0400
24183 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
24184  
24185  void fw_core_remove_card(struct fw_card *card)
24186  {
24187 -       struct fw_card_driver dummy_driver = dummy_driver_template;
24188 +       fw_card_driver_no_const dummy_driver = dummy_driver_template;
24189  
24190         card->driver->update_phy_reg(card, 4,
24191                                      PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24192 diff -urNp linux-3.0.4/drivers/firewire/core-cdev.c linux-3.0.4/drivers/firewire/core-cdev.c
24193 --- linux-3.0.4/drivers/firewire/core-cdev.c    2011-08-23 21:44:40.000000000 -0400
24194 +++ linux-3.0.4/drivers/firewire/core-cdev.c    2011-08-23 21:47:55.000000000 -0400
24195 @@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
24196         int ret;
24197  
24198         if ((request->channels == 0 && request->bandwidth == 0) ||
24199 -           request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24200 -           request->bandwidth < 0)
24201 +           request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24202                 return -EINVAL;
24203  
24204         r  = kmalloc(sizeof(*r), GFP_KERNEL);
24205 diff -urNp linux-3.0.4/drivers/firewire/core.h linux-3.0.4/drivers/firewire/core.h
24206 --- linux-3.0.4/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
24207 +++ linux-3.0.4/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
24208 @@ -101,6 +101,7 @@ struct fw_card_driver {
24209  
24210         int (*stop_iso)(struct fw_iso_context *ctx);
24211  };
24212 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24213  
24214  void fw_card_initialize(struct fw_card *card,
24215                 const struct fw_card_driver *driver, struct device *device);
24216 diff -urNp linux-3.0.4/drivers/firewire/core-transaction.c linux-3.0.4/drivers/firewire/core-transaction.c
24217 --- linux-3.0.4/drivers/firewire/core-transaction.c     2011-07-21 22:17:23.000000000 -0400
24218 +++ linux-3.0.4/drivers/firewire/core-transaction.c     2011-08-23 21:48:14.000000000 -0400
24219 @@ -37,6 +37,7 @@
24220  #include <linux/timer.h>
24221  #include <linux/types.h>
24222  #include <linux/workqueue.h>
24223 +#include <linux/sched.h>
24224  
24225  #include <asm/byteorder.h>
24226  
24227 @@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
24228         struct transaction_callback_data d;
24229         struct fw_transaction t;
24230  
24231 +       pax_track_stack();
24232 +
24233         init_timer_on_stack(&t.split_timeout_timer);
24234         init_completion(&d.done);
24235         d.payload = payload;
24236 diff -urNp linux-3.0.4/drivers/firmware/dmi_scan.c linux-3.0.4/drivers/firmware/dmi_scan.c
24237 --- linux-3.0.4/drivers/firmware/dmi_scan.c     2011-07-21 22:17:23.000000000 -0400
24238 +++ linux-3.0.4/drivers/firmware/dmi_scan.c     2011-08-23 21:47:55.000000000 -0400
24239 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24240                 }
24241         }
24242         else {
24243 -               /*
24244 -                * no iounmap() for that ioremap(); it would be a no-op, but
24245 -                * it's so early in setup that sucker gets confused into doing
24246 -                * what it shouldn't if we actually call it.
24247 -                */
24248                 p = dmi_ioremap(0xF0000, 0x10000);
24249                 if (p == NULL)
24250                         goto error;
24251 diff -urNp linux-3.0.4/drivers/gpio/vr41xx_giu.c linux-3.0.4/drivers/gpio/vr41xx_giu.c
24252 --- linux-3.0.4/drivers/gpio/vr41xx_giu.c       2011-07-21 22:17:23.000000000 -0400
24253 +++ linux-3.0.4/drivers/gpio/vr41xx_giu.c       2011-08-23 21:47:55.000000000 -0400
24254 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24255         printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24256                maskl, pendl, maskh, pendh);
24257  
24258 -       atomic_inc(&irq_err_count);
24259 +       atomic_inc_unchecked(&irq_err_count);
24260  
24261         return -EINVAL;
24262  }
24263 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c
24264 --- linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c       2011-07-21 22:17:23.000000000 -0400
24265 +++ linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c       2011-08-23 21:48:14.000000000 -0400
24266 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24267         struct drm_crtc *tmp;
24268         int crtc_mask = 1;
24269  
24270 -       WARN(!crtc, "checking null crtc?\n");
24271 +       BUG_ON(!crtc);
24272  
24273         dev = crtc->dev;
24274  
24275 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24276         struct drm_encoder *encoder;
24277         bool ret = true;
24278  
24279 +       pax_track_stack();
24280 +
24281         crtc->enabled = drm_helper_crtc_in_use(crtc);
24282         if (!crtc->enabled)
24283                 return true;
24284 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_drv.c linux-3.0.4/drivers/gpu/drm/drm_drv.c
24285 --- linux-3.0.4/drivers/gpu/drm/drm_drv.c       2011-07-21 22:17:23.000000000 -0400
24286 +++ linux-3.0.4/drivers/gpu/drm/drm_drv.c       2011-08-23 21:47:55.000000000 -0400
24287 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24288  
24289         dev = file_priv->minor->dev;
24290         atomic_inc(&dev->ioctl_count);
24291 -       atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24292 +       atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24293         ++file_priv->ioctl_count;
24294  
24295         DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24296 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_fops.c linux-3.0.4/drivers/gpu/drm/drm_fops.c
24297 --- linux-3.0.4/drivers/gpu/drm/drm_fops.c      2011-07-21 22:17:23.000000000 -0400
24298 +++ linux-3.0.4/drivers/gpu/drm/drm_fops.c      2011-08-23 21:47:55.000000000 -0400
24299 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24300         }
24301  
24302         for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24303 -               atomic_set(&dev->counts[i], 0);
24304 +               atomic_set_unchecked(&dev->counts[i], 0);
24305  
24306         dev->sigdata.lock = NULL;
24307  
24308 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24309  
24310         retcode = drm_open_helper(inode, filp, dev);
24311         if (!retcode) {
24312 -               atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24313 -               if (!dev->open_count++)
24314 +               atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24315 +               if (local_inc_return(&dev->open_count) == 1)
24316                         retcode = drm_setup(dev);
24317         }
24318         if (!retcode) {
24319 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24320  
24321         mutex_lock(&drm_global_mutex);
24322  
24323 -       DRM_DEBUG("open_count = %d\n", dev->open_count);
24324 +       DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24325  
24326         if (dev->driver->preclose)
24327                 dev->driver->preclose(dev, file_priv);
24328 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24329         DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24330                   task_pid_nr(current),
24331                   (long)old_encode_dev(file_priv->minor->device),
24332 -                 dev->open_count);
24333 +                 local_read(&dev->open_count));
24334  
24335         /* if the master has gone away we can't do anything with the lock */
24336         if (file_priv->minor->master)
24337 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24338          * End inline drm_release
24339          */
24340  
24341 -       atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24342 -       if (!--dev->open_count) {
24343 +       atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24344 +       if (local_dec_and_test(&dev->open_count)) {
24345                 if (atomic_read(&dev->ioctl_count)) {
24346                         DRM_ERROR("Device busy: %d\n",
24347                                   atomic_read(&dev->ioctl_count));
24348 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_global.c linux-3.0.4/drivers/gpu/drm/drm_global.c
24349 --- linux-3.0.4/drivers/gpu/drm/drm_global.c    2011-07-21 22:17:23.000000000 -0400
24350 +++ linux-3.0.4/drivers/gpu/drm/drm_global.c    2011-08-23 21:47:55.000000000 -0400
24351 @@ -36,7 +36,7 @@
24352  struct drm_global_item {
24353         struct mutex mutex;
24354         void *object;
24355 -       int refcount;
24356 +       atomic_t refcount;
24357  };
24358  
24359  static struct drm_global_item glob[DRM_GLOBAL_NUM];
24360 @@ -49,7 +49,7 @@ void drm_global_init(void)
24361                 struct drm_global_item *item = &glob[i];
24362                 mutex_init(&item->mutex);
24363                 item->object = NULL;
24364 -               item->refcount = 0;
24365 +               atomic_set(&item->refcount, 0);
24366         }
24367  }
24368  
24369 @@ -59,7 +59,7 @@ void drm_global_release(void)
24370         for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24371                 struct drm_global_item *item = &glob[i];
24372                 BUG_ON(item->object != NULL);
24373 -               BUG_ON(item->refcount != 0);
24374 +               BUG_ON(atomic_read(&item->refcount) != 0);
24375         }
24376  }
24377  
24378 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24379         void *object;
24380  
24381         mutex_lock(&item->mutex);
24382 -       if (item->refcount == 0) {
24383 +       if (atomic_read(&item->refcount) == 0) {
24384                 item->object = kzalloc(ref->size, GFP_KERNEL);
24385                 if (unlikely(item->object == NULL)) {
24386                         ret = -ENOMEM;
24387 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24388                         goto out_err;
24389  
24390         }
24391 -       ++item->refcount;
24392 +       atomic_inc(&item->refcount);
24393         ref->object = item->object;
24394         object = item->object;
24395         mutex_unlock(&item->mutex);
24396 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24397         struct drm_global_item *item = &glob[ref->global_type];
24398  
24399         mutex_lock(&item->mutex);
24400 -       BUG_ON(item->refcount == 0);
24401 +       BUG_ON(atomic_read(&item->refcount) == 0);
24402         BUG_ON(ref->object != item->object);
24403 -       if (--item->refcount == 0) {
24404 +       if (atomic_dec_and_test(&item->refcount)) {
24405                 ref->release(ref);
24406                 item->object = NULL;
24407         }
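
drm_global_item's bare `int refcount` becomes an atomic_t manipulated through atomic_set(), atomic_inc(), atomic_read() and atomic_dec_and_test(), so the release path in drm_global_item_unref() fires exactly once for the final reference. A short model of the dec-and-test idiom (C11 atomics, userspace sketch):

#include <stdatomic.h>
#include <stdbool.h>

struct global_item {
        atomic_int refcount;              /* models atomic_t refcount */
        void *object;
};

void item_ref(struct global_item *item)
{
        atomic_fetch_add(&item->refcount, 1);            /* models atomic_inc() */
}

/* Returns true exactly once, for the caller that drops the last reference
 * (models atomic_dec_and_test()); that caller runs the release hook. */
bool item_unref(struct global_item *item)
{
        return atomic_fetch_sub(&item->refcount, 1) == 1;
}
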
24408 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_info.c linux-3.0.4/drivers/gpu/drm/drm_info.c
24409 --- linux-3.0.4/drivers/gpu/drm/drm_info.c      2011-07-21 22:17:23.000000000 -0400
24410 +++ linux-3.0.4/drivers/gpu/drm/drm_info.c      2011-08-23 21:48:14.000000000 -0400
24411 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24412         struct drm_local_map *map;
24413         struct drm_map_list *r_list;
24414  
24415 -       /* Hardcoded from _DRM_FRAME_BUFFER,
24416 -          _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24417 -          _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24418 -       const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24419 +       static const char * const types[] = {
24420 +               [_DRM_FRAME_BUFFER] = "FB",
24421 +               [_DRM_REGISTERS] = "REG",
24422 +               [_DRM_SHM] = "SHM",
24423 +               [_DRM_AGP] = "AGP",
24424 +               [_DRM_SCATTER_GATHER] = "SG",
24425 +               [_DRM_CONSISTENT] = "PCI",
24426 +               [_DRM_GEM] = "GEM" };
24427         const char *type;
24428         int i;
24429  
24430 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24431                 map = r_list->map;
24432                 if (!map)
24433                         continue;
24434 -               if (map->type < 0 || map->type > 5)
24435 +               if (map->type >= ARRAY_SIZE(types))
24436                         type = "??";
24437                 else
24438                         type = types[map->type];
24439 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24440                            vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24441                            vma->vm_flags & VM_LOCKED ? 'l' : '-',
24442                            vma->vm_flags & VM_IO ? 'i' : '-',
24443 +#ifdef CONFIG_GRKERNSEC_HIDESYM
24444 +                          0);
24445 +#else
24446                            vma->vm_pgoff);
24447 +#endif
24448  
24449  #if defined(__i386__)
24450                 pgprot = pgprot_val(vma->vm_page_prot);
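
drm_vm_info()'s hardcoded name array and its `map->type < 0 || map->type > 5` check are replaced by a designated-initializer table indexed by the _DRM_* map types plus an ARRAY_SIZE() bound, so adding a new map type (here _DRM_GEM) can no longer silently index past the end of the array; the vm_pgoff value is additionally hidden under GRKERNSEC_HIDESYM. A self-contained userspace model of the table-plus-bound pattern, with invented enum names standing in for the _DRM_* constants:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { FRAME_BUFFER, REGISTERS, SHM, AGP, SCATTER_GATHER, CONSISTENT, GEM };

static const char *const type_names[] = {
        [FRAME_BUFFER]   = "FB",
        [REGISTERS]      = "REG",
        [SHM]            = "SHM",
        [AGP]            = "AGP",
        [SCATTER_GATHER] = "SG",
        [CONSISTENT]     = "PCI",
        [GEM]            = "GEM",
};

static const char *map_type_name(unsigned int type)
{
        /* Bound against the table itself, not a magic constant. */
        if (type >= ARRAY_SIZE(type_names) || !type_names[type])
                return "??";
        return type_names[type];
}

int main(void)
{
        printf("%s %s %s\n", map_type_name(GEM), map_type_name(CONSISTENT),
               map_type_name(42));
        return 0;
}
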
24451 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_ioctl.c linux-3.0.4/drivers/gpu/drm/drm_ioctl.c
24452 --- linux-3.0.4/drivers/gpu/drm/drm_ioctl.c     2011-07-21 22:17:23.000000000 -0400
24453 +++ linux-3.0.4/drivers/gpu/drm/drm_ioctl.c     2011-08-23 21:47:55.000000000 -0400
24454 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24455                         stats->data[i].value =
24456                             (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24457                 else
24458 -                       stats->data[i].value = atomic_read(&dev->counts[i]);
24459 +                       stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24460                 stats->data[i].type = dev->types[i];
24461         }
24462  
24463 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_lock.c linux-3.0.4/drivers/gpu/drm/drm_lock.c
24464 --- linux-3.0.4/drivers/gpu/drm/drm_lock.c      2011-07-21 22:17:23.000000000 -0400
24465 +++ linux-3.0.4/drivers/gpu/drm/drm_lock.c      2011-08-23 21:47:55.000000000 -0400
24466 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24467                 if (drm_lock_take(&master->lock, lock->context)) {
24468                         master->lock.file_priv = file_priv;
24469                         master->lock.lock_time = jiffies;
24470 -                       atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24471 +                       atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24472                         break;  /* Got lock */
24473                 }
24474  
24475 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24476                 return -EINVAL;
24477         }
24478  
24479 -       atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24480 +       atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24481  
24482         if (drm_lock_free(&master->lock, lock->context)) {
24483                 /* FIXME: Should really bail out here. */
24484 diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c
24485 --- linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
24486 +++ linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
24487 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24488                                  dma->buflist[vertex->idx],
24489                                  vertex->discard, vertex->used);
24490  
24491 -       atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24492 -       atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24493 +       atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24494 +       atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24495         sarea_priv->last_enqueue = dev_priv->counter - 1;
24496         sarea_priv->last_dispatch = (int)hw_status[5];
24497  
24498 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24499         i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24500                              mc->last_render);
24501  
24502 -       atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24503 -       atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24504 +       atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24505 +       atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24506         sarea_priv->last_enqueue = dev_priv->counter - 1;
24507         sarea_priv->last_dispatch = (int)hw_status[5];
24508  
24509 diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h
24510 --- linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
24511 +++ linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
24512 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24513         int page_flipping;
24514  
24515         wait_queue_head_t irq_queue;
24516 -       atomic_t irq_received;
24517 -       atomic_t irq_emitted;
24518 +       atomic_unchecked_t irq_received;
24519 +       atomic_unchecked_t irq_emitted;
24520  
24521         int front_offset;
24522  } drm_i810_private_t;
24523 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c
24524 --- linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c     2011-07-21 22:17:23.000000000 -0400
24525 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c     2011-08-23 21:47:55.000000000 -0400
24526 @@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
24527                            I915_READ(GTIMR));
24528         }
24529         seq_printf(m, "Interrupts received: %d\n",
24530 -                  atomic_read(&dev_priv->irq_received));
24531 +                  atomic_read_unchecked(&dev_priv->irq_received));
24532         for (i = 0; i < I915_NUM_RINGS; i++) {
24533                 if (IS_GEN6(dev)) {
24534                         seq_printf(m, "Graphics Interrupt mask (%s):    %08x\n",
24535 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c
24536 --- linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:44:40.000000000 -0400
24537 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
24538 @@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
24539         bool can_switch;
24540  
24541         spin_lock(&dev->count_lock);
24542 -       can_switch = (dev->open_count == 0);
24543 +       can_switch = (local_read(&dev->open_count) == 0);
24544         spin_unlock(&dev->count_lock);
24545         return can_switch;
24546  }
24547 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h
24548 --- linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
24549 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
24550 @@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
24551         /* render clock increase/decrease */
24552         /* display clock increase/decrease */
24553         /* pll clock increase/decrease */
24554 -};
24555 +} __no_const;
24556  
24557  struct intel_device_info {
24558         u8 gen;
24559 @@ -300,7 +300,7 @@ typedef struct drm_i915_private {
24560         int current_page;
24561         int page_flipping;
24562  
24563 -       atomic_t irq_received;
24564 +       atomic_unchecked_t irq_received;
24565  
24566         /* protects the irq masks */
24567         spinlock_t irq_lock;
24568 @@ -874,7 +874,7 @@ struct drm_i915_gem_object {
24569          * will be page flipped away on the next vblank.  When it
24570          * reaches 0, dev_priv->pending_flip_queue will be woken up.
24571          */
24572 -       atomic_t pending_flip;
24573 +       atomic_unchecked_t pending_flip;
24574  };
24575  
24576  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24577 @@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
24578  extern void intel_teardown_gmbus(struct drm_device *dev);
24579  extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24580  extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24581 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24582 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24583  {
24584         return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24585  }
24586 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24587 --- linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c      2011-07-21 22:17:23.000000000 -0400
24588 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c      2011-08-23 21:47:55.000000000 -0400
24589 @@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
24590                 i915_gem_clflush_object(obj);
24591  
24592         if (obj->base.pending_write_domain)
24593 -               cd->flips |= atomic_read(&obj->pending_flip);
24594 +               cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24595  
24596         /* The actual obj->write_domain will be updated with
24597          * pending_write_domain after we emit the accumulated flush for all
24598 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c
24599 --- linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:44:40.000000000 -0400
24600 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
24601 @@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
24602         u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
24603         struct drm_i915_master_private *master_priv;
24604  
24605 -       atomic_inc(&dev_priv->irq_received);
24606 +       atomic_inc_unchecked(&dev_priv->irq_received);
24607  
24608         /* disable master interrupt before clearing iir  */
24609         de_ier = I915_READ(DEIER);
24610 @@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
24611         struct drm_i915_master_private *master_priv;
24612         u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
24613  
24614 -       atomic_inc(&dev_priv->irq_received);
24615 +       atomic_inc_unchecked(&dev_priv->irq_received);
24616  
24617         if (IS_GEN6(dev))
24618                 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
24619 @@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
24620         int ret = IRQ_NONE, pipe;
24621         bool blc_event = false;
24622  
24623 -       atomic_inc(&dev_priv->irq_received);
24624 +       atomic_inc_unchecked(&dev_priv->irq_received);
24625  
24626         iir = I915_READ(IIR);
24627  
24628 @@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
24629  {
24630         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24631  
24632 -       atomic_set(&dev_priv->irq_received, 0);
24633 +       atomic_set_unchecked(&dev_priv->irq_received, 0);
24634  
24635         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24636         INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24637 @@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
24638         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24639         int pipe;
24640  
24641 -       atomic_set(&dev_priv->irq_received, 0);
24642 +       atomic_set_unchecked(&dev_priv->irq_received, 0);
24643  
24644         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24645         INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24646 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/intel_display.c linux-3.0.4/drivers/gpu/drm/i915/intel_display.c
24647 --- linux-3.0.4/drivers/gpu/drm/i915/intel_display.c    2011-08-23 21:44:40.000000000 -0400
24648 +++ linux-3.0.4/drivers/gpu/drm/i915/intel_display.c    2011-08-23 21:47:55.000000000 -0400
24649 @@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24650  
24651                 wait_event(dev_priv->pending_flip_queue,
24652                            atomic_read(&dev_priv->mm.wedged) ||
24653 -                          atomic_read(&obj->pending_flip) == 0);
24654 +                          atomic_read_unchecked(&obj->pending_flip) == 0);
24655  
24656                 /* Big Hammer, we also need to ensure that any pending
24657                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24658 @@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
24659         obj = to_intel_framebuffer(crtc->fb)->obj;
24660         dev_priv = crtc->dev->dev_private;
24661         wait_event(dev_priv->pending_flip_queue,
24662 -                  atomic_read(&obj->pending_flip) == 0);
24663 +                  atomic_read_unchecked(&obj->pending_flip) == 0);
24664  }
24665  
24666  static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24667 @@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
24668  
24669         atomic_clear_mask(1 << intel_crtc->plane,
24670                           &obj->pending_flip.counter);
24671 -       if (atomic_read(&obj->pending_flip) == 0)
24672 +       if (atomic_read_unchecked(&obj->pending_flip) == 0)
24673                 wake_up(&dev_priv->pending_flip_queue);
24674  
24675         schedule_work(&work->work);
24676 @@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
24677         /* Block clients from rendering to the new back buffer until
24678          * the flip occurs and the object is no longer visible.
24679          */
24680 -       atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24681 +       atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24682  
24683         ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
24684         if (ret)
24685 @@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
24686         return 0;
24687  
24688  cleanup_pending:
24689 -       atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24690 +       atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24691  cleanup_objs:
24692         drm_gem_object_unreference(&work->old_fb_obj->base);
24693         drm_gem_object_unreference(&obj->base);
24694 diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h
24695 --- linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h   2011-07-21 22:17:23.000000000 -0400
24696 +++ linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h   2011-08-23 21:47:55.000000000 -0400
24697 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24698         u32 clear_cmd;
24699         u32 maccess;
24700  
24701 -       atomic_t vbl_received;          /**< Number of vblanks received. */
24702 +       atomic_unchecked_t vbl_received;          /**< Number of vblanks received. */
24703         wait_queue_head_t fence_queue;
24704 -       atomic_t last_fence_retired;
24705 +       atomic_unchecked_t last_fence_retired;
24706         u32 next_fence_to_post;
24707  
24708         unsigned int fb_cpp;
24709 diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c
24710 --- linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c   2011-07-21 22:17:23.000000000 -0400
24711 +++ linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c   2011-08-23 21:47:55.000000000 -0400
24712 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24713         if (crtc != 0)
24714                 return 0;
24715  
24716 -       return atomic_read(&dev_priv->vbl_received);
24717 +       return atomic_read_unchecked(&dev_priv->vbl_received);
24718  }
24719  
24720  
24721 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24722         /* VBLANK interrupt */
24723         if (status & MGA_VLINEPEN) {
24724                 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24725 -               atomic_inc(&dev_priv->vbl_received);
24726 +               atomic_inc_unchecked(&dev_priv->vbl_received);
24727                 drm_handle_vblank(dev, 0);
24728                 handled = 1;
24729         }
24730 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24731                 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24732                         MGA_WRITE(MGA_PRIMEND, prim_end);
24733  
24734 -               atomic_inc(&dev_priv->last_fence_retired);
24735 +               atomic_inc_unchecked(&dev_priv->last_fence_retired);
24736                 DRM_WAKEUP(&dev_priv->fence_queue);
24737                 handled = 1;
24738         }
24739 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24740          * using fences.
24741          */
24742         DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24743 -                   (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24744 +                   (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24745                       - *sequence) <= (1 << 23)));
24746  
24747         *sequence = cur_fence;
24748 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c
24749 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c  2011-07-21 22:17:23.000000000 -0400
24750 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c  2011-08-26 19:49:56.000000000 -0400
24751 @@ -200,7 +200,7 @@ struct methods {
24752         const char desc[8];
24753         void (*loadbios)(struct drm_device *, uint8_t *);
24754         const bool rw;
24755 -};
24756 +} __do_const;
24757  
24758  static struct methods shadow_methods[] = {
24759         { "PRAMIN", load_vbios_pramin, true },
24760 @@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
24761  struct bit_table {
24762         const char id;
24763         int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
24764 -};
24765 +} __no_const;
24766  
24767  #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
24768  
24769 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h
24770 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h   2011-07-21 22:17:23.000000000 -0400
24771 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h   2011-08-23 21:47:55.000000000 -0400
24772 @@ -227,7 +227,7 @@ struct nouveau_channel {
24773                 struct list_head pending;
24774                 uint32_t sequence;
24775                 uint32_t sequence_ack;
24776 -               atomic_t last_sequence_irq;
24777 +               atomic_unchecked_t last_sequence_irq;
24778         } fence;
24779  
24780         /* DMA push buffer */
24781 @@ -304,7 +304,7 @@ struct nouveau_exec_engine {
24782                            u32 handle, u16 class);
24783         void (*set_tile_region)(struct drm_device *dev, int i);
24784         void (*tlb_flush)(struct drm_device *, int engine);
24785 -};
24786 +} __no_const;
24787  
24788  struct nouveau_instmem_engine {
24789         void    *priv;
24790 @@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
24791  struct nouveau_mc_engine {
24792         int  (*init)(struct drm_device *dev);
24793         void (*takedown)(struct drm_device *dev);
24794 -};
24795 +} __no_const;
24796  
24797  struct nouveau_timer_engine {
24798         int      (*init)(struct drm_device *dev);
24799         void     (*takedown)(struct drm_device *dev);
24800         uint64_t (*read)(struct drm_device *dev);
24801 -};
24802 +} __no_const;
24803  
24804  struct nouveau_fb_engine {
24805         int num_tiles;
24806 @@ -494,7 +494,7 @@ struct nouveau_vram_engine {
24807         void (*put)(struct drm_device *, struct nouveau_mem **);
24808  
24809         bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24810 -};
24811 +} __no_const;
24812  
24813  struct nouveau_engine {
24814         struct nouveau_instmem_engine instmem;
24815 @@ -640,7 +640,7 @@ struct drm_nouveau_private {
24816                 struct drm_global_reference mem_global_ref;
24817                 struct ttm_bo_global_ref bo_global_ref;
24818                 struct ttm_bo_device bdev;
24819 -               atomic_t validate_sequence;
24820 +               atomic_unchecked_t validate_sequence;
24821         } ttm;
24822  
24823         struct {
24824 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c
24825 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
24826 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
24827 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24828                 if (USE_REFCNT(dev))
24829                         sequence = nvchan_rd32(chan, 0x48);
24830                 else
24831 -                       sequence = atomic_read(&chan->fence.last_sequence_irq);
24832 +                       sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24833  
24834                 if (chan->fence.sequence_ack == sequence)
24835                         goto out;
24836 @@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
24837  
24838         INIT_LIST_HEAD(&chan->fence.pending);
24839         spin_lock_init(&chan->fence.lock);
24840 -       atomic_set(&chan->fence.last_sequence_irq, 0);
24841 +       atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24842         return 0;
24843  }
24844  
24845 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c
24846 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c   2011-07-21 22:17:23.000000000 -0400
24847 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c   2011-08-23 21:47:55.000000000 -0400
24848 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24849         int trycnt = 0;
24850         int ret, i;
24851  
24852 -       sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24853 +       sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24854  retry:
24855         if (++trycnt > 100000) {
24856                 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24857 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c
24858 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
24859 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
24860 @@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
24861         bool can_switch;
24862  
24863         spin_lock(&dev->count_lock);
24864 -       can_switch = (dev->open_count == 0);
24865 +       can_switch = (local_read(&dev->open_count) == 0);
24866         spin_unlock(&dev->count_lock);
24867         return can_switch;
24868  }
24869 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c
24870 --- linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c    2011-07-21 22:17:23.000000000 -0400
24871 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c    2011-08-23 21:47:55.000000000 -0400
24872 @@ -560,7 +560,7 @@ static int
24873  nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24874                         u32 class, u32 mthd, u32 data)
24875  {
24876 -       atomic_set(&chan->fence.last_sequence_irq, data);
24877 +       atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24878         return 0;
24879  }
24880  
24881 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c
24882 --- linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
24883 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
24884 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24885  
24886         /* GH: Simple idle check.
24887          */
24888 -       atomic_set(&dev_priv->idle_count, 0);
24889 +       atomic_set_unchecked(&dev_priv->idle_count, 0);
24890  
24891         /* We don't support anything other than bus-mastering ring mode,
24892          * but the ring can be in either AGP or PCI space for the ring
24893 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h
24894 --- linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
24895 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
24896 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24897         int is_pci;
24898         unsigned long cce_buffers_offset;
24899  
24900 -       atomic_t idle_count;
24901 +       atomic_unchecked_t idle_count;
24902  
24903         int page_flipping;
24904         int current_page;
24905         u32 crtc_offset;
24906         u32 crtc_offset_cntl;
24907  
24908 -       atomic_t vbl_received;
24909 +       atomic_unchecked_t vbl_received;
24910  
24911         u32 color_fmt;
24912         unsigned int front_offset;
24913 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c
24914 --- linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
24915 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
24916 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24917         if (crtc != 0)
24918                 return 0;
24919  
24920 -       return atomic_read(&dev_priv->vbl_received);
24921 +       return atomic_read_unchecked(&dev_priv->vbl_received);
24922  }
24923  
24924  irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24925 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24926         /* VBLANK interrupt */
24927         if (status & R128_CRTC_VBLANK_INT) {
24928                 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24929 -               atomic_inc(&dev_priv->vbl_received);
24930 +               atomic_inc_unchecked(&dev_priv->vbl_received);
24931                 drm_handle_vblank(dev, 0);
24932                 return IRQ_HANDLED;
24933         }
24934 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_state.c linux-3.0.4/drivers/gpu/drm/r128/r128_state.c
24935 --- linux-3.0.4/drivers/gpu/drm/r128/r128_state.c       2011-07-21 22:17:23.000000000 -0400
24936 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_state.c       2011-08-23 21:47:55.000000000 -0400
24937 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
24938  
24939  static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
24940  {
24941 -       if (atomic_read(&dev_priv->idle_count) == 0)
24942 +       if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
24943                 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
24944         else
24945 -               atomic_set(&dev_priv->idle_count, 0);
24946 +               atomic_set_unchecked(&dev_priv->idle_count, 0);
24947  }
24948  
24949  #endif
24950 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/atom.c linux-3.0.4/drivers/gpu/drm/radeon/atom.c
24951 --- linux-3.0.4/drivers/gpu/drm/radeon/atom.c   2011-07-21 22:17:23.000000000 -0400
24952 +++ linux-3.0.4/drivers/gpu/drm/radeon/atom.c   2011-08-23 21:48:14.000000000 -0400
24953 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
24954         char name[512];
24955         int i;
24956  
24957 +       pax_track_stack();
24958 +
24959         ctx->card = card;
24960         ctx->bios = bios;
24961  
24962 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c
24963 --- linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c     2011-07-21 22:17:23.000000000 -0400
24964 +++ linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c     2011-08-23 21:47:55.000000000 -0400
24965 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, 
24966         regex_t mask_rex;
24967         regmatch_t match[4];
24968         char buf[1024];
24969 -       size_t end;
24970 +       long end;
24971         int len;
24972         int done = 0;
24973         int r;
24974         unsigned o;
24975         struct offset *offset;
24976         char last_reg_s[10];
24977 -       int last_reg;
24978 +       unsigned long last_reg;
24979  
24980         if (regcomp
24981             (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
24982 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c
24983 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c        2011-07-21 22:17:23.000000000 -0400
24984 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c        2011-08-23 21:48:14.000000000 -0400
24985 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
24986         struct radeon_gpio_rec gpio;
24987         struct radeon_hpd hpd;
24988  
24989 +       pax_track_stack();
24990 +
24991         if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
24992                 return false;
24993  
24994 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c
24995 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c  2011-08-23 21:44:40.000000000 -0400
24996 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c  2011-08-23 21:47:55.000000000 -0400
24997 @@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
24998         bool can_switch;
24999  
25000         spin_lock(&dev->count_lock);
25001 -       can_switch = (dev->open_count == 0);
25002 +       can_switch = (local_read(&dev->open_count) == 0);
25003         spin_unlock(&dev->count_lock);
25004         return can_switch;
25005  }
25006 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c
25007 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:44:40.000000000 -0400
25008 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
25009 @@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
25010         uint32_t post_div;
25011         u32 pll_out_min, pll_out_max;
25012  
25013 +       pax_track_stack();
25014 +
25015         DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25016         freq = freq * 1000;
25017  
25018 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h
25019 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h     2011-07-21 22:17:23.000000000 -0400
25020 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h     2011-08-23 21:47:55.000000000 -0400
25021 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25022  
25023         /* SW interrupt */
25024         wait_queue_head_t swi_queue;
25025 -       atomic_t swi_emitted;
25026 +       atomic_unchecked_t swi_emitted;
25027         int vblank_crtc;
25028         uint32_t irq_enable_reg;
25029         uint32_t r500_disp_irq_reg;
25030 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c
25031 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c   2011-07-21 22:17:23.000000000 -0400
25032 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c   2011-08-23 21:47:55.000000000 -0400
25033 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
25034                 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25035                 return 0;
25036         }
25037 -       fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25038 +       fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25039         if (!rdev->cp.ready)
25040                 /* FIXME: cp is not running assume everythings is done right
25041                  * away
25042 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
25043                 return r;
25044         }
25045         radeon_fence_write(rdev, 0);
25046 -       atomic_set(&rdev->fence_drv.seq, 0);
25047 +       atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25048         INIT_LIST_HEAD(&rdev->fence_drv.created);
25049         INIT_LIST_HEAD(&rdev->fence_drv.emited);
25050         INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25051 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon.h linux-3.0.4/drivers/gpu/drm/radeon/radeon.h
25052 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
25053 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
25054 @@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
25055   */
25056  struct radeon_fence_driver {
25057         uint32_t                        scratch_reg;
25058 -       atomic_t                        seq;
25059 +       atomic_unchecked_t              seq;
25060         uint32_t                        last_seq;
25061         unsigned long                   last_jiffies;
25062         unsigned long                   last_timeout;
25063 @@ -960,7 +960,7 @@ struct radeon_asic {
25064         void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25065         u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25066         void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25067 -};
25068 +} __no_const;
25069  
25070  /*
25071   * Asic structures
25072 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25073 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c   2011-07-21 22:17:23.000000000 -0400
25074 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c   2011-08-23 21:47:55.000000000 -0400
25075 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25076         request = compat_alloc_user_space(sizeof(*request));
25077         if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25078             || __put_user(req32.param, &request->param)
25079 -           || __put_user((void __user *)(unsigned long)req32.value,
25080 +           || __put_user((unsigned long)req32.value,
25081                           &request->value))
25082                 return -EFAULT;
25083  
25084 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c
25085 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c     2011-07-21 22:17:23.000000000 -0400
25086 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c     2011-08-23 21:47:55.000000000 -0400
25087 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25088         unsigned int ret;
25089         RING_LOCALS;
25090  
25091 -       atomic_inc(&dev_priv->swi_emitted);
25092 -       ret = atomic_read(&dev_priv->swi_emitted);
25093 +       atomic_inc_unchecked(&dev_priv->swi_emitted);
25094 +       ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25095  
25096         BEGIN_RING(4);
25097         OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25098 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25099         drm_radeon_private_t *dev_priv =
25100             (drm_radeon_private_t *) dev->dev_private;
25101  
25102 -       atomic_set(&dev_priv->swi_emitted, 0);
25103 +       atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25104         DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25105  
25106         dev->max_vblank_count = 0x001fffff;
25107 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c
25108 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c   2011-07-21 22:17:23.000000000 -0400
25109 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c   2011-08-23 21:47:55.000000000 -0400
25110 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25111         if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25112                 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25113  
25114 -       if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25115 +       if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25116                                sarea_priv->nbox * sizeof(depth_boxes[0])))
25117                 return -EFAULT;
25118  
25119 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25120  {
25121         drm_radeon_private_t *dev_priv = dev->dev_private;
25122         drm_radeon_getparam_t *param = data;
25123 -       int value;
25124 +       int value = 0;
25125  
25126         DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25127  
25128 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c
25129 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c     2011-07-21 22:17:23.000000000 -0400
25130 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c     2011-08-23 21:47:55.000000000 -0400
25131 @@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25132         }
25133         if (unlikely(ttm_vm_ops == NULL)) {
25134                 ttm_vm_ops = vma->vm_ops;
25135 -               radeon_ttm_vm_ops = *ttm_vm_ops;
25136 -               radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25137 +               pax_open_kernel();
25138 +               memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25139 +               *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25140 +               pax_close_kernel();
25141         }
25142         vma->vm_ops = &radeon_ttm_vm_ops;
25143         return 0;
25144 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/rs690.c linux-3.0.4/drivers/gpu/drm/radeon/rs690.c
25145 --- linux-3.0.4/drivers/gpu/drm/radeon/rs690.c  2011-07-21 22:17:23.000000000 -0400
25146 +++ linux-3.0.4/drivers/gpu/drm/radeon/rs690.c  2011-08-23 21:47:55.000000000 -0400
25147 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25148                 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25149                         rdev->pm.sideport_bandwidth.full)
25150                         rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25151 -               read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25152 +               read_delay_latency.full = dfixed_const(800 * 1000);
25153                 read_delay_latency.full = dfixed_div(read_delay_latency,
25154                         rdev->pm.igp_sideport_mclk);
25155 +               a.full = dfixed_const(370);
25156 +               read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25157         } else {
25158                 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25159                         rdev->pm.k8_bandwidth.full)
25160 diff -urNp linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25161 --- linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c    2011-07-21 22:17:23.000000000 -0400
25162 +++ linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c    2011-08-23 21:47:55.000000000 -0400
25163 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
25164  static int ttm_pool_mm_shrink(struct shrinker *shrink,
25165                               struct shrink_control *sc)
25166  {
25167 -       static atomic_t start_pool = ATOMIC_INIT(0);
25168 +       static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25169         unsigned i;
25170 -       unsigned pool_offset = atomic_add_return(1, &start_pool);
25171 +       unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25172         struct ttm_page_pool *pool;
25173         int shrink_pages = sc->nr_to_scan;
25174  
25175 diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_drv.h linux-3.0.4/drivers/gpu/drm/via/via_drv.h
25176 --- linux-3.0.4/drivers/gpu/drm/via/via_drv.h   2011-07-21 22:17:23.000000000 -0400
25177 +++ linux-3.0.4/drivers/gpu/drm/via/via_drv.h   2011-08-23 21:47:55.000000000 -0400
25178 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25179  typedef uint32_t maskarray_t[5];
25180  
25181  typedef struct drm_via_irq {
25182 -       atomic_t irq_received;
25183 +       atomic_unchecked_t irq_received;
25184         uint32_t pending_mask;
25185         uint32_t enable_mask;
25186         wait_queue_head_t irq_queue;
25187 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
25188         struct timeval last_vblank;
25189         int last_vblank_valid;
25190         unsigned usec_per_vblank;
25191 -       atomic_t vbl_received;
25192 +       atomic_unchecked_t vbl_received;
25193         drm_via_state_t hc_state;
25194         char pci_buf[VIA_PCI_BUF_SIZE];
25195         const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25196 diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_irq.c linux-3.0.4/drivers/gpu/drm/via/via_irq.c
25197 --- linux-3.0.4/drivers/gpu/drm/via/via_irq.c   2011-07-21 22:17:23.000000000 -0400
25198 +++ linux-3.0.4/drivers/gpu/drm/via/via_irq.c   2011-08-23 21:47:55.000000000 -0400
25199 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25200         if (crtc != 0)
25201                 return 0;
25202  
25203 -       return atomic_read(&dev_priv->vbl_received);
25204 +       return atomic_read_unchecked(&dev_priv->vbl_received);
25205  }
25206  
25207  irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25208 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25209  
25210         status = VIA_READ(VIA_REG_INTERRUPT);
25211         if (status & VIA_IRQ_VBLANK_PENDING) {
25212 -               atomic_inc(&dev_priv->vbl_received);
25213 -               if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25214 +               atomic_inc_unchecked(&dev_priv->vbl_received);
25215 +               if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25216                         do_gettimeofday(&cur_vblank);
25217                         if (dev_priv->last_vblank_valid) {
25218                                 dev_priv->usec_per_vblank =
25219 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25220                         dev_priv->last_vblank = cur_vblank;
25221                         dev_priv->last_vblank_valid = 1;
25222                 }
25223 -               if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25224 +               if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25225                         DRM_DEBUG("US per vblank is: %u\n",
25226                                   dev_priv->usec_per_vblank);
25227                 }
25228 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25229  
25230         for (i = 0; i < dev_priv->num_irqs; ++i) {
25231                 if (status & cur_irq->pending_mask) {
25232 -                       atomic_inc(&cur_irq->irq_received);
25233 +                       atomic_inc_unchecked(&cur_irq->irq_received);
25234                         DRM_WAKEUP(&cur_irq->irq_queue);
25235                         handled = 1;
25236                         if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25237 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25238                 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25239                             ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25240                              masks[irq][4]));
25241 -               cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25242 +               cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25243         } else {
25244                 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25245                             (((cur_irq_sequence =
25246 -                              atomic_read(&cur_irq->irq_received)) -
25247 +                              atomic_read_unchecked(&cur_irq->irq_received)) -
25248                               *sequence) <= (1 << 23)));
25249         }
25250         *sequence = cur_irq_sequence;
25251 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25252                 }
25253  
25254                 for (i = 0; i < dev_priv->num_irqs; ++i) {
25255 -                       atomic_set(&cur_irq->irq_received, 0);
25256 +                       atomic_set_unchecked(&cur_irq->irq_received, 0);
25257                         cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25258                         cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25259                         DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25260 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25261         switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25262         case VIA_IRQ_RELATIVE:
25263                 irqwait->request.sequence +=
25264 -                       atomic_read(&cur_irq->irq_received);
25265 +                       atomic_read_unchecked(&cur_irq->irq_received);
25266                 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25267         case VIA_IRQ_ABSOLUTE:
25268                 break;
25269 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25270 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     2011-07-21 22:17:23.000000000 -0400
25271 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     2011-08-23 21:47:55.000000000 -0400
25272 @@ -240,7 +240,7 @@ struct vmw_private {
25273          * Fencing and IRQs.
25274          */
25275  
25276 -       atomic_t fence_seq;
25277 +       atomic_unchecked_t fence_seq;
25278         wait_queue_head_t fence_queue;
25279         wait_queue_head_t fifo_queue;
25280         atomic_t fence_queue_waiters;
25281 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25282 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c   2011-07-21 22:17:23.000000000 -0400
25283 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c   2011-08-23 21:47:55.000000000 -0400
25284 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25285         while (!vmw_lag_lt(queue, us)) {
25286                 spin_lock(&queue->lock);
25287                 if (list_empty(&queue->head))
25288 -                       sequence = atomic_read(&dev_priv->fence_seq);
25289 +                       sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25290                 else {
25291                         fence = list_first_entry(&queue->head,
25292                                                  struct vmw_fence, head);
25293 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25294 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c    2011-07-21 22:17:23.000000000 -0400
25295 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c    2011-08-23 21:47:55.000000000 -0400
25296 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25297                  (unsigned int) min,
25298                  (unsigned int) fifo->capabilities);
25299  
25300 -       atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25301 +       atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25302         iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25303         vmw_fence_queue_init(&fifo->fence_queue);
25304         return vmw_fifo_send_fence(dev_priv, &dummy);
25305 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25306  
25307         fm = vmw_fifo_reserve(dev_priv, bytes);
25308         if (unlikely(fm == NULL)) {
25309 -               *sequence = atomic_read(&dev_priv->fence_seq);
25310 +               *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25311                 ret = -ENOMEM;
25312                 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25313                                         false, 3*HZ);
25314 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25315         }
25316  
25317         do {
25318 -               *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25319 +               *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25320         } while (*sequence == 0);
25321  
25322         if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25323 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25324 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c     2011-07-21 22:17:23.000000000 -0400
25325 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c     2011-08-23 21:47:55.000000000 -0400
25326 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25327          * emitted. Then the fence is stale and signaled.
25328          */
25329  
25330 -       ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25331 +       ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25332                > VMW_FENCE_WRAP);
25333  
25334         return ret;
25335 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25336  
25337         if (fifo_idle)
25338                 down_read(&fifo_state->rwsem);
25339 -       signal_seq = atomic_read(&dev_priv->fence_seq);
25340 +       signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25341         ret = 0;
25342  
25343         for (;;) {
25344 diff -urNp linux-3.0.4/drivers/hid/hid-core.c linux-3.0.4/drivers/hid/hid-core.c
25345 --- linux-3.0.4/drivers/hid/hid-core.c  2011-07-21 22:17:23.000000000 -0400
25346 +++ linux-3.0.4/drivers/hid/hid-core.c  2011-08-23 21:47:55.000000000 -0400
25347 @@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
25348  
25349  int hid_add_device(struct hid_device *hdev)
25350  {
25351 -       static atomic_t id = ATOMIC_INIT(0);
25352 +       static atomic_unchecked_t id = ATOMIC_INIT(0);
25353         int ret;
25354  
25355         if (WARN_ON(hdev->status & HID_STAT_ADDED))
25356 @@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
25357         /* XXX hack, any other cleaner solution after the driver core
25358          * is converted to allow more than 20 bytes as the device name? */
25359         dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25360 -                    hdev->vendor, hdev->product, atomic_inc_return(&id));
25361 +                    hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25362  
25363         hid_debug_register(hdev, dev_name(&hdev->dev));
25364         ret = device_add(&hdev->dev);
25365 diff -urNp linux-3.0.4/drivers/hid/usbhid/hiddev.c linux-3.0.4/drivers/hid/usbhid/hiddev.c
25366 --- linux-3.0.4/drivers/hid/usbhid/hiddev.c     2011-07-21 22:17:23.000000000 -0400
25367 +++ linux-3.0.4/drivers/hid/usbhid/hiddev.c     2011-08-23 21:47:55.000000000 -0400
25368 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
25369                 break;
25370  
25371         case HIDIOCAPPLICATION:
25372 -               if (arg < 0 || arg >= hid->maxapplication)
25373 +               if (arg >= hid->maxapplication)
25374                         break;
25375  
25376                 for (i = 0; i < hid->maxcollection; i++)
25377 diff -urNp linux-3.0.4/drivers/hwmon/acpi_power_meter.c linux-3.0.4/drivers/hwmon/acpi_power_meter.c
25378 --- linux-3.0.4/drivers/hwmon/acpi_power_meter.c        2011-07-21 22:17:23.000000000 -0400
25379 +++ linux-3.0.4/drivers/hwmon/acpi_power_meter.c        2011-08-23 21:47:55.000000000 -0400
25380 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
25381                 return res;
25382  
25383         temp /= 1000;
25384 -       if (temp < 0)
25385 -               return -EINVAL;
25386  
25387         mutex_lock(&resource->lock);
25388         resource->trip[attr->index - 7] = temp;
25389 diff -urNp linux-3.0.4/drivers/hwmon/sht15.c linux-3.0.4/drivers/hwmon/sht15.c
25390 --- linux-3.0.4/drivers/hwmon/sht15.c   2011-07-21 22:17:23.000000000 -0400
25391 +++ linux-3.0.4/drivers/hwmon/sht15.c   2011-08-23 21:47:55.000000000 -0400
25392 @@ -166,7 +166,7 @@ struct sht15_data {
25393         int                             supply_uV;
25394         bool                            supply_uV_valid;
25395         struct work_struct              update_supply_work;
25396 -       atomic_t                        interrupt_handled;
25397 +       atomic_unchecked_t              interrupt_handled;
25398  };
25399  
25400  /**
25401 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
25402                 return ret;
25403  
25404         gpio_direction_input(data->pdata->gpio_data);
25405 -       atomic_set(&data->interrupt_handled, 0);
25406 +       atomic_set_unchecked(&data->interrupt_handled, 0);
25407  
25408         enable_irq(gpio_to_irq(data->pdata->gpio_data));
25409         if (gpio_get_value(data->pdata->gpio_data) == 0) {
25410                 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25411                 /* Only relevant if the interrupt hasn't occurred. */
25412 -               if (!atomic_read(&data->interrupt_handled))
25413 +               if (!atomic_read_unchecked(&data->interrupt_handled))
25414                         schedule_work(&data->read_work);
25415         }
25416         ret = wait_event_timeout(data->wait_queue,
25417 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
25418  
25419         /* First disable the interrupt */
25420         disable_irq_nosync(irq);
25421 -       atomic_inc(&data->interrupt_handled);
25422 +       atomic_inc_unchecked(&data->interrupt_handled);
25423         /* Then schedule a reading work struct */
25424         if (data->state != SHT15_READING_NOTHING)
25425                 schedule_work(&data->read_work);
25426 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
25427                  * If not, then start the interrupt again - care here as could
25428                  * have gone low in meantime so verify it hasn't!
25429                  */
25430 -               atomic_set(&data->interrupt_handled, 0);
25431 +               atomic_set_unchecked(&data->interrupt_handled, 0);
25432                 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25433                 /* If still not occurred or another handler has been scheduled */
25434                 if (gpio_get_value(data->pdata->gpio_data)
25435 -                   || atomic_read(&data->interrupt_handled))
25436 +                   || atomic_read_unchecked(&data->interrupt_handled))
25437                         return;
25438         }
25439  
25440 diff -urNp linux-3.0.4/drivers/hwmon/w83791d.c linux-3.0.4/drivers/hwmon/w83791d.c
25441 --- linux-3.0.4/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
25442 +++ linux-3.0.4/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
25443 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25444                           struct i2c_board_info *info);
25445  static int w83791d_remove(struct i2c_client *client);
25446  
25447 -static int w83791d_read(struct i2c_client *client, u8 register);
25448 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25449 +static int w83791d_read(struct i2c_client *client, u8 reg);
25450 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25451  static struct w83791d_data *w83791d_update_device(struct device *dev);
25452  
25453  #ifdef DEBUG
25454 diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c
25455 --- linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c   2011-07-21 22:17:23.000000000 -0400
25456 +++ linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c   2011-08-23 21:47:55.000000000 -0400
25457 @@ -43,7 +43,7 @@
25458  extern struct i2c_adapter amd756_smbus;
25459  
25460  static struct i2c_adapter *s4882_adapter;
25461 -static struct i2c_algorithm *s4882_algo;
25462 +static i2c_algorithm_no_const *s4882_algo;
25463  
25464  /* Wrapper access functions for multiplexed SMBus */
25465  static DEFINE_MUTEX(amd756_lock);
25466 diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c
25467 --- linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c  2011-07-21 22:17:23.000000000 -0400
25468 +++ linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c  2011-08-23 21:47:55.000000000 -0400
25469 @@ -41,7 +41,7 @@
25470  extern struct i2c_adapter *nforce2_smbus;
25471  
25472  static struct i2c_adapter *s4985_adapter;
25473 -static struct i2c_algorithm *s4985_algo;
25474 +static i2c_algorithm_no_const *s4985_algo;
25475  
25476  /* Wrapper access functions for multiplexed SMBus */
25477  static DEFINE_MUTEX(nforce2_lock);
25478 diff -urNp linux-3.0.4/drivers/i2c/i2c-mux.c linux-3.0.4/drivers/i2c/i2c-mux.c
25479 --- linux-3.0.4/drivers/i2c/i2c-mux.c   2011-07-21 22:17:23.000000000 -0400
25480 +++ linux-3.0.4/drivers/i2c/i2c-mux.c   2011-08-23 21:47:55.000000000 -0400
25481 @@ -28,7 +28,7 @@
25482  /* multiplexer per channel data */
25483  struct i2c_mux_priv {
25484         struct i2c_adapter adap;
25485 -       struct i2c_algorithm algo;
25486 +       i2c_algorithm_no_const algo;
25487  
25488         struct i2c_adapter *parent;
25489         void *mux_dev;  /* the mux chip/device */
25490 diff -urNp linux-3.0.4/drivers/ide/ide-cd.c linux-3.0.4/drivers/ide/ide-cd.c
25491 --- linux-3.0.4/drivers/ide/ide-cd.c    2011-07-21 22:17:23.000000000 -0400
25492 +++ linux-3.0.4/drivers/ide/ide-cd.c    2011-08-23 21:47:55.000000000 -0400
25493 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25494                 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25495                 if ((unsigned long)buf & alignment
25496                     || blk_rq_bytes(rq) & q->dma_pad_mask
25497 -                   || object_is_on_stack(buf))
25498 +                   || object_starts_on_stack(buf))
25499                         drive->dma = 0;
25500         }
25501  }
25502 diff -urNp linux-3.0.4/drivers/ide/ide-floppy.c linux-3.0.4/drivers/ide/ide-floppy.c
25503 --- linux-3.0.4/drivers/ide/ide-floppy.c        2011-07-21 22:17:23.000000000 -0400
25504 +++ linux-3.0.4/drivers/ide/ide-floppy.c        2011-08-23 21:48:14.000000000 -0400
25505 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25506         u8 pc_buf[256], header_len, desc_cnt;
25507         int i, rc = 1, blocks, length;
25508  
25509 +       pax_track_stack();
25510 +
25511         ide_debug_log(IDE_DBG_FUNC, "enter");
25512  
25513         drive->bios_cyl = 0;
25514 diff -urNp linux-3.0.4/drivers/ide/setup-pci.c linux-3.0.4/drivers/ide/setup-pci.c
25515 --- linux-3.0.4/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
25516 +++ linux-3.0.4/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
25517 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25518         int ret, i, n_ports = dev2 ? 4 : 2;
25519         struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25520  
25521 +       pax_track_stack();
25522 +
25523         for (i = 0; i < n_ports / 2; i++) {
25524                 ret = ide_setup_pci_controller(pdev[i], d, !i);
25525                 if (ret < 0)
25526 diff -urNp linux-3.0.4/drivers/infiniband/core/cm.c linux-3.0.4/drivers/infiniband/core/cm.c
25527 --- linux-3.0.4/drivers/infiniband/core/cm.c    2011-07-21 22:17:23.000000000 -0400
25528 +++ linux-3.0.4/drivers/infiniband/core/cm.c    2011-08-23 21:47:55.000000000 -0400
25529 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
25530  
25531  struct cm_counter_group {
25532         struct kobject obj;
25533 -       atomic_long_t counter[CM_ATTR_COUNT];
25534 +       atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25535  };
25536  
25537  struct cm_counter_attribute {
25538 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25539         struct ib_mad_send_buf *msg = NULL;
25540         int ret;
25541  
25542 -       atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25543 +       atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25544                         counter[CM_REQ_COUNTER]);
25545  
25546         /* Quick state check to discard duplicate REQs. */
25547 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25548         if (!cm_id_priv)
25549                 return;
25550  
25551 -       atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25552 +       atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25553                         counter[CM_REP_COUNTER]);
25554         ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25555         if (ret)
25556 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25557         if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25558             cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25559                 spin_unlock_irq(&cm_id_priv->lock);
25560 -               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25561 +               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25562                                 counter[CM_RTU_COUNTER]);
25563                 goto out;
25564         }
25565 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25566         cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25567                                    dreq_msg->local_comm_id);
25568         if (!cm_id_priv) {
25569 -               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25570 +               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25571                                 counter[CM_DREQ_COUNTER]);
25572                 cm_issue_drep(work->port, work->mad_recv_wc);
25573                 return -EINVAL;
25574 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25575         case IB_CM_MRA_REP_RCVD:
25576                 break;
25577         case IB_CM_TIMEWAIT:
25578 -               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25579 +               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25580                                 counter[CM_DREQ_COUNTER]);
25581                 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25582                         goto unlock;
25583 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25584                         cm_free_msg(msg);
25585                 goto deref;
25586         case IB_CM_DREQ_RCVD:
25587 -               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25588 +               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25589                                 counter[CM_DREQ_COUNTER]);
25590                 goto unlock;
25591         default:
25592 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25593                     ib_modify_mad(cm_id_priv->av.port->mad_agent,
25594                                   cm_id_priv->msg, timeout)) {
25595                         if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25596 -                               atomic_long_inc(&work->port->
25597 +                               atomic_long_inc_unchecked(&work->port->
25598                                                 counter_group[CM_RECV_DUPLICATES].
25599                                                 counter[CM_MRA_COUNTER]);
25600                         goto out;
25601 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25602                 break;
25603         case IB_CM_MRA_REQ_RCVD:
25604         case IB_CM_MRA_REP_RCVD:
25605 -               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25606 +               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25607                                 counter[CM_MRA_COUNTER]);
25608                 /* fall through */
25609         default:
25610 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25611         case IB_CM_LAP_IDLE:
25612                 break;
25613         case IB_CM_MRA_LAP_SENT:
25614 -               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25615 +               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25616                                 counter[CM_LAP_COUNTER]);
25617                 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25618                         goto unlock;
25619 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25620                         cm_free_msg(msg);
25621                 goto deref;
25622         case IB_CM_LAP_RCVD:
25623 -               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25624 +               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25625                                 counter[CM_LAP_COUNTER]);
25626                 goto unlock;
25627         default:
25628 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25629         cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25630         if (cur_cm_id_priv) {
25631                 spin_unlock_irq(&cm.lock);
25632 -               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25633 +               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25634                                 counter[CM_SIDR_REQ_COUNTER]);
25635                 goto out; /* Duplicate message. */
25636         }
25637 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25638         if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25639                 msg->retries = 1;
25640  
25641 -       atomic_long_add(1 + msg->retries,
25642 +       atomic_long_add_unchecked(1 + msg->retries,
25643                         &port->counter_group[CM_XMIT].counter[attr_index]);
25644         if (msg->retries)
25645 -               atomic_long_add(msg->retries,
25646 +               atomic_long_add_unchecked(msg->retries,
25647                                 &port->counter_group[CM_XMIT_RETRIES].
25648                                 counter[attr_index]);
25649  
25650 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25651         }
25652  
25653         attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25654 -       atomic_long_inc(&port->counter_group[CM_RECV].
25655 +       atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25656                         counter[attr_id - CM_ATTR_ID_OFFSET]);
25657  
25658         work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25659 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25660         cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25661  
25662         return sprintf(buf, "%ld\n",
25663 -                      atomic_long_read(&group->counter[cm_attr->index]));
25664 +                      atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25665  }
25666  
25667  static const struct sysfs_ops cm_counter_ops = {
25668 diff -urNp linux-3.0.4/drivers/infiniband/core/fmr_pool.c linux-3.0.4/drivers/infiniband/core/fmr_pool.c
25669 --- linux-3.0.4/drivers/infiniband/core/fmr_pool.c      2011-07-21 22:17:23.000000000 -0400
25670 +++ linux-3.0.4/drivers/infiniband/core/fmr_pool.c      2011-08-23 21:47:55.000000000 -0400
25671 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
25672  
25673         struct task_struct       *thread;
25674  
25675 -       atomic_t                  req_ser;
25676 -       atomic_t                  flush_ser;
25677 +       atomic_unchecked_t        req_ser;
25678 +       atomic_unchecked_t        flush_ser;
25679  
25680         wait_queue_head_t         force_wait;
25681  };
25682 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25683         struct ib_fmr_pool *pool = pool_ptr;
25684  
25685         do {
25686 -               if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25687 +               if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25688                         ib_fmr_batch_release(pool);
25689  
25690 -                       atomic_inc(&pool->flush_ser);
25691 +                       atomic_inc_unchecked(&pool->flush_ser);
25692                         wake_up_interruptible(&pool->force_wait);
25693  
25694                         if (pool->flush_function)
25695 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25696                 }
25697  
25698                 set_current_state(TASK_INTERRUPTIBLE);
25699 -               if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25700 +               if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25701                     !kthread_should_stop())
25702                         schedule();
25703                 __set_current_state(TASK_RUNNING);
25704 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25705         pool->dirty_watermark = params->dirty_watermark;
25706         pool->dirty_len       = 0;
25707         spin_lock_init(&pool->pool_lock);
25708 -       atomic_set(&pool->req_ser,   0);
25709 -       atomic_set(&pool->flush_ser, 0);
25710 +       atomic_set_unchecked(&pool->req_ser,   0);
25711 +       atomic_set_unchecked(&pool->flush_ser, 0);
25712         init_waitqueue_head(&pool->force_wait);
25713  
25714         pool->thread = kthread_run(ib_fmr_cleanup_thread,
25715 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25716         }
25717         spin_unlock_irq(&pool->pool_lock);
25718  
25719 -       serial = atomic_inc_return(&pool->req_ser);
25720 +       serial = atomic_inc_return_unchecked(&pool->req_ser);
25721         wake_up_process(pool->thread);
25722  
25723         if (wait_event_interruptible(pool->force_wait,
25724 -                                    atomic_read(&pool->flush_ser) - serial >= 0))
25725 +                                    atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25726                 return -EINTR;
25727  
25728         return 0;
25729 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25730                 } else {
25731                         list_add_tail(&fmr->list, &pool->dirty_list);
25732                         if (++pool->dirty_len >= pool->dirty_watermark) {
25733 -                               atomic_inc(&pool->req_ser);
25734 +                               atomic_inc_unchecked(&pool->req_ser);
25735                                 wake_up_process(pool->thread);
25736                         }
25737                 }
25738 diff -urNp linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c
25739 --- linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c       2011-07-21 22:17:23.000000000 -0400
25740 +++ linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c       2011-08-23 21:47:55.000000000 -0400
25741 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25742         int err;
25743         struct fw_ri_tpte tpt;
25744         u32 stag_idx;
25745 -       static atomic_t key;
25746 +       static atomic_unchecked_t key;
25747  
25748         if (c4iw_fatal_error(rdev))
25749                 return -EIO;
25750 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25751                                              &rdev->resource.tpt_fifo_lock);
25752                 if (!stag_idx)
25753                         return -ENOMEM;
25754 -               *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25755 +               *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25756         }
25757         PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25758              __func__, stag_state, type, pdid, stag_idx);
25759 diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c
25760 --- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c  2011-07-21 22:17:23.000000000 -0400
25761 +++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c  2011-08-23 21:48:14.000000000 -0400
25762 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25763         struct infinipath_counters counters;
25764         struct ipath_devdata *dd;
25765  
25766 +       pax_track_stack();
25767 +
25768         dd = file->f_path.dentry->d_inode->i_private;
25769         dd->ipath_f_read_counters(dd, &counters);
25770  
25771 diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c
25772 --- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c  2011-07-21 22:17:23.000000000 -0400
25773 +++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c  2011-08-23 21:47:55.000000000 -0400
25774 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25775                 struct ib_atomic_eth *ateth;
25776                 struct ipath_ack_entry *e;
25777                 u64 vaddr;
25778 -               atomic64_t *maddr;
25779 +               atomic64_unchecked_t *maddr;
25780                 u64 sdata;
25781                 u32 rkey;
25782                 u8 next;
25783 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25784                                             IB_ACCESS_REMOTE_ATOMIC)))
25785                         goto nack_acc_unlck;
25786                 /* Perform atomic OP and save result. */
25787 -               maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25788 +               maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25789                 sdata = be64_to_cpu(ateth->swap_data);
25790                 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25791                 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25792 -                       (u64) atomic64_add_return(sdata, maddr) - sdata :
25793 +                       (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25794                         (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25795                                       be64_to_cpu(ateth->compare_data),
25796                                       sdata);
25797 diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c
25798 --- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
25799 +++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
25800 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25801         unsigned long flags;
25802         struct ib_wc wc;
25803         u64 sdata;
25804 -       atomic64_t *maddr;
25805 +       atomic64_unchecked_t *maddr;
25806         enum ib_wc_status send_status;
25807  
25808         /*
25809 @@ -382,11 +382,11 @@ again:
25810                                             IB_ACCESS_REMOTE_ATOMIC)))
25811                         goto acc_err;
25812                 /* Perform atomic OP and save result. */
25813 -               maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25814 +               maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25815                 sdata = wqe->wr.wr.atomic.compare_add;
25816                 *(u64 *) sqp->s_sge.sge.vaddr =
25817                         (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25818 -                       (u64) atomic64_add_return(sdata, maddr) - sdata :
25819 +                       (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25820                         (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25821                                       sdata, wqe->wr.wr.atomic.swap);
25822                 goto send_comp;
25823 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.c linux-3.0.4/drivers/infiniband/hw/nes/nes.c
25824 --- linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
25825 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
25826 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25827  LIST_HEAD(nes_adapter_list);
25828  static LIST_HEAD(nes_dev_list);
25829  
25830 -atomic_t qps_destroyed;
25831 +atomic_unchecked_t qps_destroyed;
25832  
25833  static unsigned int ee_flsh_adapter;
25834  static unsigned int sysfs_nonidx_addr;
25835 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25836         struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25837         struct nes_adapter *nesadapter = nesdev->nesadapter;
25838  
25839 -       atomic_inc(&qps_destroyed);
25840 +       atomic_inc_unchecked(&qps_destroyed);
25841  
25842         /* Free the control structures */
25843  
25844 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c
25845 --- linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c      2011-07-21 22:17:23.000000000 -0400
25846 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c      2011-08-23 21:47:55.000000000 -0400
25847 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25848  u32 cm_packets_retrans;
25849  u32 cm_packets_created;
25850  u32 cm_packets_received;
25851 -atomic_t cm_listens_created;
25852 -atomic_t cm_listens_destroyed;
25853 +atomic_unchecked_t cm_listens_created;
25854 +atomic_unchecked_t cm_listens_destroyed;
25855  u32 cm_backlog_drops;
25856 -atomic_t cm_loopbacks;
25857 -atomic_t cm_nodes_created;
25858 -atomic_t cm_nodes_destroyed;
25859 -atomic_t cm_accel_dropped_pkts;
25860 -atomic_t cm_resets_recvd;
25861 +atomic_unchecked_t cm_loopbacks;
25862 +atomic_unchecked_t cm_nodes_created;
25863 +atomic_unchecked_t cm_nodes_destroyed;
25864 +atomic_unchecked_t cm_accel_dropped_pkts;
25865 +atomic_unchecked_t cm_resets_recvd;
25866  
25867  static inline int mini_cm_accelerated(struct nes_cm_core *,
25868         struct nes_cm_node *);
25869 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25870  
25871  static struct nes_cm_core *g_cm_core;
25872  
25873 -atomic_t cm_connects;
25874 -atomic_t cm_accepts;
25875 -atomic_t cm_disconnects;
25876 -atomic_t cm_closes;
25877 -atomic_t cm_connecteds;
25878 -atomic_t cm_connect_reqs;
25879 -atomic_t cm_rejects;
25880 +atomic_unchecked_t cm_connects;
25881 +atomic_unchecked_t cm_accepts;
25882 +atomic_unchecked_t cm_disconnects;
25883 +atomic_unchecked_t cm_closes;
25884 +atomic_unchecked_t cm_connecteds;
25885 +atomic_unchecked_t cm_connect_reqs;
25886 +atomic_unchecked_t cm_rejects;
25887  
25888  
25889  /**
25890 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25891                 kfree(listener);
25892                 listener = NULL;
25893                 ret = 0;
25894 -               atomic_inc(&cm_listens_destroyed);
25895 +               atomic_inc_unchecked(&cm_listens_destroyed);
25896         } else {
25897                 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25898         }
25899 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25900                   cm_node->rem_mac);
25901  
25902         add_hte_node(cm_core, cm_node);
25903 -       atomic_inc(&cm_nodes_created);
25904 +       atomic_inc_unchecked(&cm_nodes_created);
25905  
25906         return cm_node;
25907  }
25908 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25909         }
25910  
25911         atomic_dec(&cm_core->node_cnt);
25912 -       atomic_inc(&cm_nodes_destroyed);
25913 +       atomic_inc_unchecked(&cm_nodes_destroyed);
25914         nesqp = cm_node->nesqp;
25915         if (nesqp) {
25916                 nesqp->cm_node = NULL;
25917 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25918  
25919  static void drop_packet(struct sk_buff *skb)
25920  {
25921 -       atomic_inc(&cm_accel_dropped_pkts);
25922 +       atomic_inc_unchecked(&cm_accel_dropped_pkts);
25923         dev_kfree_skb_any(skb);
25924  }
25925  
25926 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25927  {
25928  
25929         int     reset = 0;      /* whether to send reset in case of err.. */
25930 -       atomic_inc(&cm_resets_recvd);
25931 +       atomic_inc_unchecked(&cm_resets_recvd);
25932         nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25933                         " refcnt=%d\n", cm_node, cm_node->state,
25934                         atomic_read(&cm_node->ref_count));
25935 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25936                                 rem_ref_cm_node(cm_node->cm_core, cm_node);
25937                                 return NULL;
25938                         }
25939 -                       atomic_inc(&cm_loopbacks);
25940 +                       atomic_inc_unchecked(&cm_loopbacks);
25941                         loopbackremotenode->loopbackpartner = cm_node;
25942                         loopbackremotenode->tcp_cntxt.rcv_wscale =
25943                                 NES_CM_DEFAULT_RCV_WND_SCALE;
25944 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
25945                         add_ref_cm_node(cm_node);
25946                 } else if (cm_node->state == NES_CM_STATE_TSA) {
25947                         rem_ref_cm_node(cm_core, cm_node);
25948 -                       atomic_inc(&cm_accel_dropped_pkts);
25949 +                       atomic_inc_unchecked(&cm_accel_dropped_pkts);
25950                         dev_kfree_skb_any(skb);
25951                         break;
25952                 }
25953 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
25954  
25955         if ((cm_id) && (cm_id->event_handler)) {
25956                 if (issue_disconn) {
25957 -                       atomic_inc(&cm_disconnects);
25958 +                       atomic_inc_unchecked(&cm_disconnects);
25959                         cm_event.event = IW_CM_EVENT_DISCONNECT;
25960                         cm_event.status = disconn_status;
25961                         cm_event.local_addr = cm_id->local_addr;
25962 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
25963                 }
25964  
25965                 if (issue_close) {
25966 -                       atomic_inc(&cm_closes);
25967 +                       atomic_inc_unchecked(&cm_closes);
25968                         nes_disconnect(nesqp, 1);
25969  
25970                         cm_id->provider_data = nesqp;
25971 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
25972  
25973         nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
25974                 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
25975 -       atomic_inc(&cm_accepts);
25976 +       atomic_inc_unchecked(&cm_accepts);
25977  
25978         nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
25979                         netdev_refcnt_read(nesvnic->netdev));
25980 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
25981  
25982         struct nes_cm_core *cm_core;
25983  
25984 -       atomic_inc(&cm_rejects);
25985 +       atomic_inc_unchecked(&cm_rejects);
25986         cm_node = (struct nes_cm_node *) cm_id->provider_data;
25987         loopback = cm_node->loopbackpartner;
25988         cm_core = cm_node->cm_core;
25989 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id, 
25990                 ntohl(cm_id->local_addr.sin_addr.s_addr),
25991                 ntohs(cm_id->local_addr.sin_port));
25992  
25993 -       atomic_inc(&cm_connects);
25994 +       atomic_inc_unchecked(&cm_connects);
25995         nesqp->active_conn = 1;
25996  
25997         /* cache the cm_id in the qp */
25998 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
25999                         g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26000                         return err;
26001                 }
26002 -               atomic_inc(&cm_listens_created);
26003 +               atomic_inc_unchecked(&cm_listens_created);
26004         }
26005  
26006         cm_id->add_ref(cm_id);
26007 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26008         if (nesqp->destroyed) {
26009                 return;
26010         }
26011 -       atomic_inc(&cm_connecteds);
26012 +       atomic_inc_unchecked(&cm_connecteds);
26013         nes_debug(NES_DBG_CM, "QP%u attempting to connect to  0x%08X:0x%04X on"
26014                         " local port 0x%04X. jiffies = %lu.\n",
26015                         nesqp->hwqp.qp_id,
26016 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26017  
26018         cm_id->add_ref(cm_id);
26019         ret = cm_id->event_handler(cm_id, &cm_event);
26020 -       atomic_inc(&cm_closes);
26021 +       atomic_inc_unchecked(&cm_closes);
26022         cm_event.event = IW_CM_EVENT_CLOSE;
26023         cm_event.status = 0;
26024         cm_event.provider_data = cm_id->provider_data;
26025 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26026                 return;
26027         cm_id = cm_node->cm_id;
26028  
26029 -       atomic_inc(&cm_connect_reqs);
26030 +       atomic_inc_unchecked(&cm_connect_reqs);
26031         nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26032                         cm_node, cm_id, jiffies);
26033  
26034 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26035                 return;
26036         cm_id = cm_node->cm_id;
26037  
26038 -       atomic_inc(&cm_connect_reqs);
26039 +       atomic_inc_unchecked(&cm_connect_reqs);
26040         nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26041                         cm_node, cm_id, jiffies);
26042  
26043 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.h linux-3.0.4/drivers/infiniband/hw/nes/nes.h
26044 --- linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
26045 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
26046 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26047  extern unsigned int wqm_quanta;
26048  extern struct list_head nes_adapter_list;
26049  
26050 -extern atomic_t cm_connects;
26051 -extern atomic_t cm_accepts;
26052 -extern atomic_t cm_disconnects;
26053 -extern atomic_t cm_closes;
26054 -extern atomic_t cm_connecteds;
26055 -extern atomic_t cm_connect_reqs;
26056 -extern atomic_t cm_rejects;
26057 -extern atomic_t mod_qp_timouts;
26058 -extern atomic_t qps_created;
26059 -extern atomic_t qps_destroyed;
26060 -extern atomic_t sw_qps_destroyed;
26061 +extern atomic_unchecked_t cm_connects;
26062 +extern atomic_unchecked_t cm_accepts;
26063 +extern atomic_unchecked_t cm_disconnects;
26064 +extern atomic_unchecked_t cm_closes;
26065 +extern atomic_unchecked_t cm_connecteds;
26066 +extern atomic_unchecked_t cm_connect_reqs;
26067 +extern atomic_unchecked_t cm_rejects;
26068 +extern atomic_unchecked_t mod_qp_timouts;
26069 +extern atomic_unchecked_t qps_created;
26070 +extern atomic_unchecked_t qps_destroyed;
26071 +extern atomic_unchecked_t sw_qps_destroyed;
26072  extern u32 mh_detected;
26073  extern u32 mh_pauses_sent;
26074  extern u32 cm_packets_sent;
26075 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26076  extern u32 cm_packets_received;
26077  extern u32 cm_packets_dropped;
26078  extern u32 cm_packets_retrans;
26079 -extern atomic_t cm_listens_created;
26080 -extern atomic_t cm_listens_destroyed;
26081 +extern atomic_unchecked_t cm_listens_created;
26082 +extern atomic_unchecked_t cm_listens_destroyed;
26083  extern u32 cm_backlog_drops;
26084 -extern atomic_t cm_loopbacks;
26085 -extern atomic_t cm_nodes_created;
26086 -extern atomic_t cm_nodes_destroyed;
26087 -extern atomic_t cm_accel_dropped_pkts;
26088 -extern atomic_t cm_resets_recvd;
26089 +extern atomic_unchecked_t cm_loopbacks;
26090 +extern atomic_unchecked_t cm_nodes_created;
26091 +extern atomic_unchecked_t cm_nodes_destroyed;
26092 +extern atomic_unchecked_t cm_accel_dropped_pkts;
26093 +extern atomic_unchecked_t cm_resets_recvd;
26094  
26095  extern u32 int_mod_timer_init;
26096  extern u32 int_mod_cq_depth_256;
26097 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c
26098 --- linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c     2011-07-21 22:17:23.000000000 -0400
26099 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c     2011-08-23 21:47:55.000000000 -0400
26100 @@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
26101         target_stat_values[++index] = mh_detected;
26102         target_stat_values[++index] = mh_pauses_sent;
26103         target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26104 -       target_stat_values[++index] = atomic_read(&cm_connects);
26105 -       target_stat_values[++index] = atomic_read(&cm_accepts);
26106 -       target_stat_values[++index] = atomic_read(&cm_disconnects);
26107 -       target_stat_values[++index] = atomic_read(&cm_connecteds);
26108 -       target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26109 -       target_stat_values[++index] = atomic_read(&cm_rejects);
26110 -       target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26111 -       target_stat_values[++index] = atomic_read(&qps_created);
26112 -       target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26113 -       target_stat_values[++index] = atomic_read(&qps_destroyed);
26114 -       target_stat_values[++index] = atomic_read(&cm_closes);
26115 +       target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26116 +       target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26117 +       target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26118 +       target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26119 +       target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26120 +       target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26121 +       target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26122 +       target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26123 +       target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26124 +       target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26125 +       target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26126         target_stat_values[++index] = cm_packets_sent;
26127         target_stat_values[++index] = cm_packets_bounced;
26128         target_stat_values[++index] = cm_packets_created;
26129         target_stat_values[++index] = cm_packets_received;
26130         target_stat_values[++index] = cm_packets_dropped;
26131         target_stat_values[++index] = cm_packets_retrans;
26132 -       target_stat_values[++index] = atomic_read(&cm_listens_created);
26133 -       target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26134 +       target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26135 +       target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26136         target_stat_values[++index] = cm_backlog_drops;
26137 -       target_stat_values[++index] = atomic_read(&cm_loopbacks);
26138 -       target_stat_values[++index] = atomic_read(&cm_nodes_created);
26139 -       target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26140 -       target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26141 -       target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26142 +       target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26143 +       target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26144 +       target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26145 +       target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26146 +       target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26147         target_stat_values[++index] = nesadapter->free_4kpbl;
26148         target_stat_values[++index] = nesadapter->free_256pbl;
26149         target_stat_values[++index] = int_mod_timer_init;
26150 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c
26151 --- linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c   2011-07-21 22:17:23.000000000 -0400
26152 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c   2011-08-23 21:47:55.000000000 -0400
26153 @@ -46,9 +46,9 @@
26154  
26155  #include <rdma/ib_umem.h>
26156  
26157 -atomic_t mod_qp_timouts;
26158 -atomic_t qps_created;
26159 -atomic_t sw_qps_destroyed;
26160 +atomic_unchecked_t mod_qp_timouts;
26161 +atomic_unchecked_t qps_created;
26162 +atomic_unchecked_t sw_qps_destroyed;
26163  
26164  static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26165  
26166 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26167         if (init_attr->create_flags)
26168                 return ERR_PTR(-EINVAL);
26169  
26170 -       atomic_inc(&qps_created);
26171 +       atomic_inc_unchecked(&qps_created);
26172         switch (init_attr->qp_type) {
26173                 case IB_QPT_RC:
26174                         if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26175 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26176         struct iw_cm_event cm_event;
26177         int ret;
26178  
26179 -       atomic_inc(&sw_qps_destroyed);
26180 +       atomic_inc_unchecked(&sw_qps_destroyed);
26181         nesqp->destroyed = 1;
26182  
26183         /* Blow away the connection if it exists. */
26184 diff -urNp linux-3.0.4/drivers/infiniband/hw/qib/qib.h linux-3.0.4/drivers/infiniband/hw/qib/qib.h
26185 --- linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
26186 +++ linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
26187 @@ -51,6 +51,7 @@
26188  #include <linux/completion.h>
26189  #include <linux/kref.h>
26190  #include <linux/sched.h>
26191 +#include <linux/slab.h>
26192  
26193  #include "qib_common.h"
26194  #include "qib_verbs.h"
26195 diff -urNp linux-3.0.4/drivers/input/gameport/gameport.c linux-3.0.4/drivers/input/gameport/gameport.c
26196 --- linux-3.0.4/drivers/input/gameport/gameport.c       2011-07-21 22:17:23.000000000 -0400
26197 +++ linux-3.0.4/drivers/input/gameport/gameport.c       2011-08-23 21:47:55.000000000 -0400
26198 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26199   */
26200  static void gameport_init_port(struct gameport *gameport)
26201  {
26202 -       static atomic_t gameport_no = ATOMIC_INIT(0);
26203 +       static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26204  
26205         __module_get(THIS_MODULE);
26206  
26207         mutex_init(&gameport->drv_mutex);
26208         device_initialize(&gameport->dev);
26209         dev_set_name(&gameport->dev, "gameport%lu",
26210 -                       (unsigned long)atomic_inc_return(&gameport_no) - 1);
26211 +                       (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26212         gameport->dev.bus = &gameport_bus;
26213         gameport->dev.release = gameport_release_port;
26214         if (gameport->parent)
26215 diff -urNp linux-3.0.4/drivers/input/input.c linux-3.0.4/drivers/input/input.c
26216 --- linux-3.0.4/drivers/input/input.c   2011-07-21 22:17:23.000000000 -0400
26217 +++ linux-3.0.4/drivers/input/input.c   2011-08-23 21:47:55.000000000 -0400
26218 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
26219   */
26220  int input_register_device(struct input_dev *dev)
26221  {
26222 -       static atomic_t input_no = ATOMIC_INIT(0);
26223 +       static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26224         struct input_handler *handler;
26225         const char *path;
26226         int error;
26227 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
26228                 dev->setkeycode = input_default_setkeycode;
26229  
26230         dev_set_name(&dev->dev, "input%ld",
26231 -                    (unsigned long) atomic_inc_return(&input_no) - 1);
26232 +                    (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26233  
26234         error = device_add(&dev->dev);
26235         if (error)
26236 diff -urNp linux-3.0.4/drivers/input/joystick/sidewinder.c linux-3.0.4/drivers/input/joystick/sidewinder.c
26237 --- linux-3.0.4/drivers/input/joystick/sidewinder.c     2011-07-21 22:17:23.000000000 -0400
26238 +++ linux-3.0.4/drivers/input/joystick/sidewinder.c     2011-08-23 21:48:14.000000000 -0400
26239 @@ -30,6 +30,7 @@
26240  #include <linux/kernel.h>
26241  #include <linux/module.h>
26242  #include <linux/slab.h>
26243 +#include <linux/sched.h>
26244  #include <linux/init.h>
26245  #include <linux/input.h>
26246  #include <linux/gameport.h>
26247 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26248         unsigned char buf[SW_LENGTH];
26249         int i;
26250  
26251 +       pax_track_stack();
26252 +
26253         i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26254  
26255         if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) {             /* Broken packet, try to fix */
26256 diff -urNp linux-3.0.4/drivers/input/joystick/xpad.c linux-3.0.4/drivers/input/joystick/xpad.c
26257 --- linux-3.0.4/drivers/input/joystick/xpad.c   2011-07-21 22:17:23.000000000 -0400
26258 +++ linux-3.0.4/drivers/input/joystick/xpad.c   2011-08-23 21:47:55.000000000 -0400
26259 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26260  
26261  static int xpad_led_probe(struct usb_xpad *xpad)
26262  {
26263 -       static atomic_t led_seq = ATOMIC_INIT(0);
26264 +       static atomic_unchecked_t led_seq       = ATOMIC_INIT(0);
26265         long led_no;
26266         struct xpad_led *led;
26267         struct led_classdev *led_cdev;
26268 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26269         if (!led)
26270                 return -ENOMEM;
26271  
26272 -       led_no = (long)atomic_inc_return(&led_seq) - 1;
26273 +       led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26274  
26275         snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26276         led->xpad = xpad;
26277 diff -urNp linux-3.0.4/drivers/input/mousedev.c linux-3.0.4/drivers/input/mousedev.c
26278 --- linux-3.0.4/drivers/input/mousedev.c        2011-07-21 22:17:23.000000000 -0400
26279 +++ linux-3.0.4/drivers/input/mousedev.c        2011-08-23 21:47:55.000000000 -0400
26280 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
26281  
26282         spin_unlock_irq(&client->packet_lock);
26283  
26284 -       if (copy_to_user(buffer, data, count))
26285 +       if (count > sizeof(data) || copy_to_user(buffer, data, count))
26286                 return -EFAULT;
26287  
26288         return count;
26289 diff -urNp linux-3.0.4/drivers/input/serio/serio.c linux-3.0.4/drivers/input/serio/serio.c
26290 --- linux-3.0.4/drivers/input/serio/serio.c     2011-07-21 22:17:23.000000000 -0400
26291 +++ linux-3.0.4/drivers/input/serio/serio.c     2011-08-23 21:47:55.000000000 -0400
26292 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
26293   */
26294  static void serio_init_port(struct serio *serio)
26295  {
26296 -       static atomic_t serio_no = ATOMIC_INIT(0);
26297 +       static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26298  
26299         __module_get(THIS_MODULE);
26300  
26301 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26302         mutex_init(&serio->drv_mutex);
26303         device_initialize(&serio->dev);
26304         dev_set_name(&serio->dev, "serio%ld",
26305 -                       (long)atomic_inc_return(&serio_no) - 1);
26306 +                       (long)atomic_inc_return_unchecked(&serio_no) - 1);
26307         serio->dev.bus = &serio_bus;
26308         serio->dev.release = serio_release_port;
26309         serio->dev.groups = serio_device_attr_groups;
26310 diff -urNp linux-3.0.4/drivers/isdn/capi/capi.c linux-3.0.4/drivers/isdn/capi/capi.c
26311 --- linux-3.0.4/drivers/isdn/capi/capi.c        2011-07-21 22:17:23.000000000 -0400
26312 +++ linux-3.0.4/drivers/isdn/capi/capi.c        2011-08-23 21:47:55.000000000 -0400
26313 @@ -83,8 +83,8 @@ struct capiminor {
26314  
26315         struct capi20_appl      *ap;
26316         u32                     ncci;
26317 -       atomic_t                datahandle;
26318 -       atomic_t                msgid;
26319 +       atomic_unchecked_t      datahandle;
26320 +       atomic_unchecked_t      msgid;
26321  
26322         struct tty_port port;
26323         int                ttyinstop;
26324 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
26325                 capimsg_setu16(s, 2, mp->ap->applid);
26326                 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26327                 capimsg_setu8 (s, 5, CAPI_RESP);
26328 -               capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26329 +               capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26330                 capimsg_setu32(s, 8, mp->ncci);
26331                 capimsg_setu16(s, 12, datahandle);
26332         }
26333 @@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
26334                 mp->outbytes -= len;
26335                 spin_unlock_bh(&mp->outlock);
26336  
26337 -               datahandle = atomic_inc_return(&mp->datahandle);
26338 +               datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26339                 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26340                 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26341                 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26342                 capimsg_setu16(skb->data, 2, mp->ap->applid);
26343                 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26344                 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26345 -               capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26346 +               capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26347                 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26348                 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26349                 capimsg_setu16(skb->data, 16, len);     /* Data length */
26350 diff -urNp linux-3.0.4/drivers/isdn/gigaset/common.c linux-3.0.4/drivers/isdn/gigaset/common.c
26351 --- linux-3.0.4/drivers/isdn/gigaset/common.c   2011-07-21 22:17:23.000000000 -0400
26352 +++ linux-3.0.4/drivers/isdn/gigaset/common.c   2011-08-23 21:47:55.000000000 -0400
26353 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct 
26354         cs->commands_pending = 0;
26355         cs->cur_at_seq = 0;
26356         cs->gotfwver = -1;
26357 -       cs->open_count = 0;
26358 +       local_set(&cs->open_count, 0);
26359         cs->dev = NULL;
26360         cs->tty = NULL;
26361         cs->tty_dev = NULL;
26362 diff -urNp linux-3.0.4/drivers/isdn/gigaset/gigaset.h linux-3.0.4/drivers/isdn/gigaset/gigaset.h
26363 --- linux-3.0.4/drivers/isdn/gigaset/gigaset.h  2011-07-21 22:17:23.000000000 -0400
26364 +++ linux-3.0.4/drivers/isdn/gigaset/gigaset.h  2011-08-23 21:47:55.000000000 -0400
26365 @@ -35,6 +35,7 @@
26366  #include <linux/tty_driver.h>
26367  #include <linux/list.h>
26368  #include <asm/atomic.h>
26369 +#include <asm/local.h>
26370  
26371  #define GIG_VERSION {0, 5, 0, 0}
26372  #define GIG_COMPAT  {0, 4, 0, 0}
26373 @@ -433,7 +434,7 @@ struct cardstate {
26374         spinlock_t cmdlock;
26375         unsigned curlen, cmdbytes;
26376  
26377 -       unsigned open_count;
26378 +       local_t open_count;
26379         struct tty_struct *tty;
26380         struct tasklet_struct if_wake_tasklet;
26381         unsigned control_state;
26382 diff -urNp linux-3.0.4/drivers/isdn/gigaset/interface.c linux-3.0.4/drivers/isdn/gigaset/interface.c
26383 --- linux-3.0.4/drivers/isdn/gigaset/interface.c        2011-07-21 22:17:23.000000000 -0400
26384 +++ linux-3.0.4/drivers/isdn/gigaset/interface.c        2011-08-23 21:47:55.000000000 -0400
26385 @@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
26386         }
26387         tty->driver_data = cs;
26388  
26389 -       ++cs->open_count;
26390 -
26391 -       if (cs->open_count == 1) {
26392 +       if (local_inc_return(&cs->open_count) == 1) {
26393                 spin_lock_irqsave(&cs->lock, flags);
26394                 cs->tty = tty;
26395                 spin_unlock_irqrestore(&cs->lock, flags);
26396 @@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
26397  
26398         if (!cs->connected)
26399                 gig_dbg(DEBUG_IF, "not connected");     /* nothing to do */
26400 -       else if (!cs->open_count)
26401 +       else if (!local_read(&cs->open_count))
26402                 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26403         else {
26404 -               if (!--cs->open_count) {
26405 +               if (!local_dec_return(&cs->open_count)) {
26406                         spin_lock_irqsave(&cs->lock, flags);
26407                         cs->tty = NULL;
26408                         spin_unlock_irqrestore(&cs->lock, flags);
26409 @@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
26410         if (!cs->connected) {
26411                 gig_dbg(DEBUG_IF, "not connected");
26412                 retval = -ENODEV;
26413 -       } else if (!cs->open_count)
26414 +       } else if (!local_read(&cs->open_count))
26415                 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26416         else {
26417                 retval = 0;
26418 @@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
26419                 retval = -ENODEV;
26420                 goto done;
26421         }
26422 -       if (!cs->open_count) {
26423 +       if (!local_read(&cs->open_count)) {
26424                 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26425                 retval = -ENODEV;
26426                 goto done;
26427 @@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
26428         if (!cs->connected) {
26429                 gig_dbg(DEBUG_IF, "not connected");
26430                 retval = -ENODEV;
26431 -       } else if (!cs->open_count)
26432 +       } else if (!local_read(&cs->open_count))
26433                 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26434         else if (cs->mstate != MS_LOCKED) {
26435                 dev_warn(cs->dev, "can't write to unlocked device\n");
26436 @@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
26437  
26438         if (!cs->connected)
26439                 gig_dbg(DEBUG_IF, "not connected");
26440 -       else if (!cs->open_count)
26441 +       else if (!local_read(&cs->open_count))
26442                 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26443         else if (cs->mstate != MS_LOCKED)
26444                 dev_warn(cs->dev, "can't write to unlocked device\n");
26445 @@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
26446  
26447         if (!cs->connected)
26448                 gig_dbg(DEBUG_IF, "not connected");     /* nothing to do */
26449 -       else if (!cs->open_count)
26450 +       else if (!local_read(&cs->open_count))
26451                 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26452         else
26453                 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26454 @@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
26455  
26456         if (!cs->connected)
26457                 gig_dbg(DEBUG_IF, "not connected");     /* nothing to do */
26458 -       else if (!cs->open_count)
26459 +       else if (!local_read(&cs->open_count))
26460                 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26461         else
26462                 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26463 @@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
26464                 goto out;
26465         }
26466  
26467 -       if (!cs->open_count) {
26468 +       if (!local_read(&cs->open_count)) {
26469                 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26470                 goto out;
26471         }
26472 diff -urNp linux-3.0.4/drivers/isdn/hardware/avm/b1.c linux-3.0.4/drivers/isdn/hardware/avm/b1.c
26473 --- linux-3.0.4/drivers/isdn/hardware/avm/b1.c  2011-07-21 22:17:23.000000000 -0400
26474 +++ linux-3.0.4/drivers/isdn/hardware/avm/b1.c  2011-08-23 21:47:55.000000000 -0400
26475 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26476         }
26477         if (left) {
26478                 if (t4file->user) {
26479 -                       if (copy_from_user(buf, dp, left))
26480 +                       if (left > sizeof buf || copy_from_user(buf, dp, left))
26481                                 return -EFAULT;
26482                 } else {
26483                         memcpy(buf, dp, left);
26484 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26485         }
26486         if (left) {
26487                 if (config->user) {
26488 -                       if (copy_from_user(buf, dp, left))
26489 +                       if (left > sizeof buf || copy_from_user(buf, dp, left))
26490                                 return -EFAULT;
26491                 } else {
26492                         memcpy(buf, dp, left);
26493 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c
26494 --- linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c  2011-07-21 22:17:23.000000000 -0400
26495 +++ linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c  2011-08-23 21:48:14.000000000 -0400
26496 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26497    byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26498      short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26499  
26500 +  pax_track_stack();
26501  
26502    if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26503    {
26504 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c
26505 --- linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c  2011-07-21 22:17:23.000000000 -0400
26506 +++ linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c  2011-08-23 21:48:14.000000000 -0400
26507 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26508         IDI_SYNC_REQ req;
26509         DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26510  
26511 +       pax_track_stack();
26512 +
26513         DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26514  
26515         for (x = 0; x < MAX_DESCRIPTORS; x++) {
26516 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c
26517 --- linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c  2011-07-21 22:17:23.000000000 -0400
26518 +++ linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c  2011-08-23 21:48:14.000000000 -0400
26519 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26520         IDI_SYNC_REQ req;
26521         DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26522  
26523 +       pax_track_stack();
26524 +
26525         DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26526  
26527         for (x = 0; x < MAX_DESCRIPTORS; x++) {
26528 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c
26529 --- linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
26530 +++ linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
26531 @@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
26532         IDI_SYNC_REQ req;
26533         DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26534  
26535 +       pax_track_stack();
26536 +
26537         DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26538  
26539         for (x = 0; x < MAX_DESCRIPTORS; x++) {
26540 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h
26541 --- linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h  2011-07-21 22:17:23.000000000 -0400
26542 +++ linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h  2011-08-23 21:47:55.000000000 -0400
26543 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26544  } diva_didd_add_adapter_t;
26545  typedef struct _diva_didd_remove_adapter {
26546   IDI_CALL p_request;
26547 -} diva_didd_remove_adapter_t;
26548 +} __no_const diva_didd_remove_adapter_t;
26549  typedef struct _diva_didd_read_adapter_array {
26550   void   * buffer;
26551   dword length;
26552 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c
26553 --- linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c   2011-07-21 22:17:23.000000000 -0400
26554 +++ linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c   2011-08-23 21:48:14.000000000 -0400
26555 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26556         IDI_SYNC_REQ req;
26557         DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26558  
26559 +       pax_track_stack();
26560 +
26561         DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26562  
26563         for (x = 0; x < MAX_DESCRIPTORS; x++) {
26564 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/message.c linux-3.0.4/drivers/isdn/hardware/eicon/message.c
26565 --- linux-3.0.4/drivers/isdn/hardware/eicon/message.c   2011-07-21 22:17:23.000000000 -0400
26566 +++ linux-3.0.4/drivers/isdn/hardware/eicon/message.c   2011-08-23 21:48:14.000000000 -0400
26567 @@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
26568    dword d;
26569    word w;
26570  
26571 +  pax_track_stack();
26572 +
26573    a = plci->adapter;
26574    Id = ((word)plci->Id<<8)|a->Id;
26575    PUT_WORD(&SS_Ind[4],0x0000);
26576 @@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
26577    word j, n, w;
26578    dword d;
26579  
26580 +  pax_track_stack();
26581 +
26582  
26583    for(i=0;i<8;i++) bp_parms[i].length = 0;
26584    for(i=0;i<2;i++) global_config[i].length = 0;
26585 @@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
26586    const byte llc3[] = {4,3,2,2,6,6,0};
26587    const byte header[] = {0,2,3,3,0,0,0};
26588  
26589 +  pax_track_stack();
26590 +
26591    for(i=0;i<8;i++) bp_parms[i].length = 0;
26592    for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26593    for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26594 @@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
26595    word appl_number_group_type[MAX_APPL];
26596    PLCI   *auxplci;
26597  
26598 +  pax_track_stack();
26599 +
26600    set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26601  
26602    if(!a->group_optimization_enabled)
26603 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c
26604 --- linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c   2011-07-21 22:17:23.000000000 -0400
26605 +++ linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c   2011-08-23 21:48:14.000000000 -0400
26606 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26607         IDI_SYNC_REQ req;
26608         DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26609  
26610 +       pax_track_stack();
26611 +
26612         DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26613  
26614         for (x = 0; x < MAX_DESCRIPTORS; x++) {
26615 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h
26616 --- linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h       2011-07-21 22:17:23.000000000 -0400
26617 +++ linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h       2011-08-23 21:47:55.000000000 -0400
26618 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26619  typedef struct _diva_os_idi_adapter_interface {
26620         diva_init_card_proc_t cleanup_adapter_proc;
26621         diva_cmd_card_proc_t cmd_proc;
26622 -} diva_os_idi_adapter_interface_t;
26623 +} __no_const diva_os_idi_adapter_interface_t;
26624  
26625  typedef struct _diva_os_xdi_adapter {
26626         struct list_head link;
26627 diff -urNp linux-3.0.4/drivers/isdn/i4l/isdn_common.c linux-3.0.4/drivers/isdn/i4l/isdn_common.c
26628 --- linux-3.0.4/drivers/isdn/i4l/isdn_common.c  2011-07-21 22:17:23.000000000 -0400
26629 +++ linux-3.0.4/drivers/isdn/i4l/isdn_common.c  2011-08-23 21:48:14.000000000 -0400
26630 @@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd, 
26631         } iocpar;
26632         void __user *argp = (void __user *)arg;
26633  
26634 +       pax_track_stack();
26635 +
26636  #define name  iocpar.name
26637  #define bname iocpar.bname
26638  #define iocts iocpar.iocts
26639 diff -urNp linux-3.0.4/drivers/isdn/icn/icn.c linux-3.0.4/drivers/isdn/icn/icn.c
26640 --- linux-3.0.4/drivers/isdn/icn/icn.c  2011-07-21 22:17:23.000000000 -0400
26641 +++ linux-3.0.4/drivers/isdn/icn/icn.c  2011-08-23 21:47:55.000000000 -0400
26642 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26643                 if (count > len)
26644                         count = len;
26645                 if (user) {
26646 -                       if (copy_from_user(msg, buf, count))
26647 +                       if (count > sizeof msg || copy_from_user(msg, buf, count))
26648                                 return -EFAULT;
26649                 } else
26650                         memcpy(msg, buf, count);
26651 diff -urNp linux-3.0.4/drivers/lguest/core.c linux-3.0.4/drivers/lguest/core.c
26652 --- linux-3.0.4/drivers/lguest/core.c   2011-07-21 22:17:23.000000000 -0400
26653 +++ linux-3.0.4/drivers/lguest/core.c   2011-08-23 21:47:55.000000000 -0400
26654 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
26655          * it's worked so far.  The end address needs +1 because __get_vm_area
26656          * allocates an extra guard page, so we need space for that.
26657          */
26658 +
26659 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26660 +       switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26661 +                                    VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26662 +                                    + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26663 +#else
26664         switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26665                                      VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26666                                      + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26667 +#endif
26668 +
26669         if (!switcher_vma) {
26670                 err = -ENOMEM;
26671                 printk("lguest: could not map switcher pages high\n");
26672 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
26673          * Now the Switcher is mapped at the right address, we can't fail!
26674          * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26675          */
26676 -       memcpy(switcher_vma->addr, start_switcher_text,
26677 +       memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26678                end_switcher_text - start_switcher_text);
26679  
26680         printk(KERN_INFO "lguest: mapped switcher at %p\n",
26681 diff -urNp linux-3.0.4/drivers/lguest/x86/core.c linux-3.0.4/drivers/lguest/x86/core.c
26682 --- linux-3.0.4/drivers/lguest/x86/core.c       2011-07-21 22:17:23.000000000 -0400
26683 +++ linux-3.0.4/drivers/lguest/x86/core.c       2011-08-23 21:47:55.000000000 -0400
26684 @@ -59,7 +59,7 @@ static struct {
26685  /* Offset from where switcher.S was compiled to where we've copied it */
26686  static unsigned long switcher_offset(void)
26687  {
26688 -       return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26689 +       return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26690  }
26691  
26692  /* This cpu's struct lguest_pages. */
26693 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26694          * These copies are pretty cheap, so we do them unconditionally: */
26695         /* Save the current Host top-level page directory.
26696          */
26697 +
26698 +#ifdef CONFIG_PAX_PER_CPU_PGD
26699 +       pages->state.host_cr3 = read_cr3();
26700 +#else
26701         pages->state.host_cr3 = __pa(current->mm->pgd);
26702 +#endif
26703 +
26704         /*
26705          * Set up the Guest's page tables to see this CPU's pages (and no
26706          * other CPU's pages).
26707 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26708          * compiled-in switcher code and the high-mapped copy we just made.
26709          */
26710         for (i = 0; i < IDT_ENTRIES; i++)
26711 -               default_idt_entries[i] += switcher_offset();
26712 +               default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26713  
26714         /*
26715          * Set up the Switcher's per-cpu areas.
26716 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26717          * it will be undisturbed when we switch.  To change %cs and jump we
26718          * need this structure to feed to Intel's "lcall" instruction.
26719          */
26720 -       lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26721 +       lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26722         lguest_entry.segment = LGUEST_CS;
26723  
26724         /*
26725 diff -urNp linux-3.0.4/drivers/lguest/x86/switcher_32.S linux-3.0.4/drivers/lguest/x86/switcher_32.S
26726 --- linux-3.0.4/drivers/lguest/x86/switcher_32.S        2011-07-21 22:17:23.000000000 -0400
26727 +++ linux-3.0.4/drivers/lguest/x86/switcher_32.S        2011-08-23 21:47:55.000000000 -0400
26728 @@ -87,6 +87,7 @@
26729  #include <asm/page.h>
26730  #include <asm/segment.h>
26731  #include <asm/lguest.h>
26732 +#include <asm/processor-flags.h>
26733  
26734  // We mark the start of the code to copy
26735  // It's placed in .text tho it's never run here
26736 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26737         // Changes type when we load it: damn Intel!
26738         // For after we switch over our page tables
26739         // That entry will be read-only: we'd crash.
26740 +
26741 +#ifdef CONFIG_PAX_KERNEXEC
26742 +       mov     %cr0, %edx
26743 +       xor     $X86_CR0_WP, %edx
26744 +       mov     %edx, %cr0
26745 +#endif
26746 +
26747         movl    $(GDT_ENTRY_TSS*8), %edx
26748         ltr     %dx
26749  
26750 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26751         // Let's clear it again for our return.
26752         // The GDT descriptor of the Host
26753         // Points to the table after two "size" bytes
26754 -       movl    (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26755 +       movl    (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26756         // Clear "used" from type field (byte 5, bit 2)
26757 -       andb    $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26758 +       andb    $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26759 +
26760 +#ifdef CONFIG_PAX_KERNEXEC
26761 +       mov     %cr0, %eax
26762 +       xor     $X86_CR0_WP, %eax
26763 +       mov     %eax, %cr0
26764 +#endif
26765  
26766         // Once our page table's switched, the Guest is live!
26767         // The Host fades as we run this final step.
26768 @@ -295,13 +309,12 @@ deliver_to_host:
26769         // I consulted gcc, and it gave
26770         // These instructions, which I gladly credit:
26771         leal    (%edx,%ebx,8), %eax
26772 -       movzwl  (%eax),%edx
26773 -       movl    4(%eax), %eax
26774 -       xorw    %ax, %ax
26775 -       orl     %eax, %edx
26776 +       movl    4(%eax), %edx
26777 +       movw    (%eax), %dx
26778         // Now the address of the handler's in %edx
26779         // We call it now: its "iret" drops us home.
26780 -       jmp     *%edx
26781 +       ljmp    $__KERNEL_CS, $1f
26782 +1:     jmp     *%edx
26783  
26784  // Every interrupt can come to us here
26785  // But we must truly tell each apart.
26786 diff -urNp linux-3.0.4/drivers/md/dm.c linux-3.0.4/drivers/md/dm.c
26787 --- linux-3.0.4/drivers/md/dm.c 2011-08-23 21:44:40.000000000 -0400
26788 +++ linux-3.0.4/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
26789 @@ -164,9 +164,9 @@ struct mapped_device {
26790         /*
26791          * Event handling.
26792          */
26793 -       atomic_t event_nr;
26794 +       atomic_unchecked_t event_nr;
26795         wait_queue_head_t eventq;
26796 -       atomic_t uevent_seq;
26797 +       atomic_unchecked_t uevent_seq;
26798         struct list_head uevent_list;
26799         spinlock_t uevent_lock; /* Protect access to uevent_list */
26800  
26801 @@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
26802         rwlock_init(&md->map_lock);
26803         atomic_set(&md->holders, 1);
26804         atomic_set(&md->open_count, 0);
26805 -       atomic_set(&md->event_nr, 0);
26806 -       atomic_set(&md->uevent_seq, 0);
26807 +       atomic_set_unchecked(&md->event_nr, 0);
26808 +       atomic_set_unchecked(&md->uevent_seq, 0);
26809         INIT_LIST_HEAD(&md->uevent_list);
26810         spin_lock_init(&md->uevent_lock);
26811  
26812 @@ -1977,7 +1977,7 @@ static void event_callback(void *context
26813  
26814         dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26815  
26816 -       atomic_inc(&md->event_nr);
26817 +       atomic_inc_unchecked(&md->event_nr);
26818         wake_up(&md->eventq);
26819  }
26820  
26821 @@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
26822  
26823  uint32_t dm_next_uevent_seq(struct mapped_device *md)
26824  {
26825 -       return atomic_add_return(1, &md->uevent_seq);
26826 +       return atomic_add_return_unchecked(1, &md->uevent_seq);
26827  }
26828  
26829  uint32_t dm_get_event_nr(struct mapped_device *md)
26830  {
26831 -       return atomic_read(&md->event_nr);
26832 +       return atomic_read_unchecked(&md->event_nr);
26833  }
26834  
26835  int dm_wait_event(struct mapped_device *md, int event_nr)
26836  {
26837         return wait_event_interruptible(md->eventq,
26838 -                       (event_nr != atomic_read(&md->event_nr)));
26839 +                       (event_nr != atomic_read_unchecked(&md->event_nr)));
26840  }
26841  
26842  void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26843 diff -urNp linux-3.0.4/drivers/md/dm-ioctl.c linux-3.0.4/drivers/md/dm-ioctl.c
26844 --- linux-3.0.4/drivers/md/dm-ioctl.c   2011-07-21 22:17:23.000000000 -0400
26845 +++ linux-3.0.4/drivers/md/dm-ioctl.c   2011-08-23 21:47:55.000000000 -0400
26846 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26847             cmd == DM_LIST_VERSIONS_CMD)
26848                 return 0;
26849  
26850 -       if ((cmd == DM_DEV_CREATE_CMD)) {
26851 +       if (cmd == DM_DEV_CREATE_CMD) {
26852                 if (!*param->name) {
26853                         DMWARN("name not supplied when creating device");
26854                         return -EINVAL;
26855 diff -urNp linux-3.0.4/drivers/md/dm-raid1.c linux-3.0.4/drivers/md/dm-raid1.c
26856 --- linux-3.0.4/drivers/md/dm-raid1.c   2011-07-21 22:17:23.000000000 -0400
26857 +++ linux-3.0.4/drivers/md/dm-raid1.c   2011-08-23 21:47:55.000000000 -0400
26858 @@ -40,7 +40,7 @@ enum dm_raid1_error {
26859  
26860  struct mirror {
26861         struct mirror_set *ms;
26862 -       atomic_t error_count;
26863 +       atomic_unchecked_t error_count;
26864         unsigned long error_type;
26865         struct dm_dev *dev;
26866         sector_t offset;
26867 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
26868         struct mirror *m;
26869  
26870         for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26871 -               if (!atomic_read(&m->error_count))
26872 +               if (!atomic_read_unchecked(&m->error_count))
26873                         return m;
26874  
26875         return NULL;
26876 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
26877          * simple way to tell if a device has encountered
26878          * errors.
26879          */
26880 -       atomic_inc(&m->error_count);
26881 +       atomic_inc_unchecked(&m->error_count);
26882  
26883         if (test_and_set_bit(error_type, &m->error_type))
26884                 return;
26885 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
26886         struct mirror *m = get_default_mirror(ms);
26887  
26888         do {
26889 -               if (likely(!atomic_read(&m->error_count)))
26890 +               if (likely(!atomic_read_unchecked(&m->error_count)))
26891                         return m;
26892  
26893                 if (m-- == ms->mirror)
26894 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
26895  {
26896         struct mirror *default_mirror = get_default_mirror(m->ms);
26897  
26898 -       return !atomic_read(&default_mirror->error_count);
26899 +       return !atomic_read_unchecked(&default_mirror->error_count);
26900  }
26901  
26902  static int mirror_available(struct mirror_set *ms, struct bio *bio)
26903 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
26904                  */
26905                 if (likely(region_in_sync(ms, region, 1)))
26906                         m = choose_mirror(ms, bio->bi_sector);
26907 -               else if (m && atomic_read(&m->error_count))
26908 +               else if (m && atomic_read_unchecked(&m->error_count))
26909                         m = NULL;
26910  
26911                 if (likely(m))
26912 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set 
26913         }
26914  
26915         ms->mirror[mirror].ms = ms;
26916 -       atomic_set(&(ms->mirror[mirror].error_count), 0);
26917 +       atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26918         ms->mirror[mirror].error_type = 0;
26919         ms->mirror[mirror].offset = offset;
26920  
26921 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26922   */
26923  static char device_status_char(struct mirror *m)
26924  {
26925 -       if (!atomic_read(&(m->error_count)))
26926 +       if (!atomic_read_unchecked(&(m->error_count)))
26927                 return 'A';
26928  
26929         return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26930 diff -urNp linux-3.0.4/drivers/md/dm-stripe.c linux-3.0.4/drivers/md/dm-stripe.c
26931 --- linux-3.0.4/drivers/md/dm-stripe.c  2011-07-21 22:17:23.000000000 -0400
26932 +++ linux-3.0.4/drivers/md/dm-stripe.c  2011-08-23 21:47:55.000000000 -0400
26933 @@ -20,7 +20,7 @@ struct stripe {
26934         struct dm_dev *dev;
26935         sector_t physical_start;
26936  
26937 -       atomic_t error_count;
26938 +       atomic_unchecked_t error_count;
26939  };
26940  
26941  struct stripe_c {
26942 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
26943                         kfree(sc);
26944                         return r;
26945                 }
26946 -               atomic_set(&(sc->stripe[i].error_count), 0);
26947 +               atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
26948         }
26949  
26950         ti->private = sc;
26951 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
26952                 DMEMIT("%d ", sc->stripes);
26953                 for (i = 0; i < sc->stripes; i++)  {
26954                         DMEMIT("%s ", sc->stripe[i].dev->name);
26955 -                       buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
26956 +                       buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
26957                                 'D' : 'A';
26958                 }
26959                 buffer[i] = '\0';
26960 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
26961          */
26962         for (i = 0; i < sc->stripes; i++)
26963                 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
26964 -                       atomic_inc(&(sc->stripe[i].error_count));
26965 -                       if (atomic_read(&(sc->stripe[i].error_count)) <
26966 +                       atomic_inc_unchecked(&(sc->stripe[i].error_count));
26967 +                       if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
26968                             DM_IO_ERROR_THRESHOLD)
26969                                 schedule_work(&sc->trigger_event);
26970                 }
26971 diff -urNp linux-3.0.4/drivers/md/dm-table.c linux-3.0.4/drivers/md/dm-table.c
26972 --- linux-3.0.4/drivers/md/dm-table.c   2011-07-21 22:17:23.000000000 -0400
26973 +++ linux-3.0.4/drivers/md/dm-table.c   2011-08-23 21:47:55.000000000 -0400
26974 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
26975         if (!dev_size)
26976                 return 0;
26977  
26978 -       if ((start >= dev_size) || (start + len > dev_size)) {
26979 +       if ((start >= dev_size) || (len > dev_size - start)) {
26980                 DMWARN("%s: %s too small for target: "
26981                        "start=%llu, len=%llu, dev_size=%llu",
26982                        dm_device_name(ti->table->md), bdevname(bdev, b),
26983 diff -urNp linux-3.0.4/drivers/md/md.c linux-3.0.4/drivers/md/md.c
26984 --- linux-3.0.4/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
26985 +++ linux-3.0.4/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
26986 @@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
26987   *  start build, activate spare
26988   */
26989  static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
26990 -static atomic_t md_event_count;
26991 +static atomic_unchecked_t md_event_count;
26992  void md_new_event(mddev_t *mddev)
26993  {
26994 -       atomic_inc(&md_event_count);
26995 +       atomic_inc_unchecked(&md_event_count);
26996         wake_up(&md_event_waiters);
26997  }
26998  EXPORT_SYMBOL_GPL(md_new_event);
26999 @@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27000   */
27001  static void md_new_event_inintr(mddev_t *mddev)
27002  {
27003 -       atomic_inc(&md_event_count);
27004 +       atomic_inc_unchecked(&md_event_count);
27005         wake_up(&md_event_waiters);
27006  }
27007  
27008 @@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
27009  
27010         rdev->preferred_minor = 0xffff;
27011         rdev->data_offset = le64_to_cpu(sb->data_offset);
27012 -       atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27013 +       atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27014  
27015         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27016         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27017 @@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
27018         else
27019                 sb->resync_offset = cpu_to_le64(0);
27020  
27021 -       sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27022 +       sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27023  
27024         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27025         sb->size = cpu_to_le64(mddev->dev_sectors);
27026 @@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27027  static ssize_t
27028  errors_show(mdk_rdev_t *rdev, char *page)
27029  {
27030 -       return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27031 +       return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27032  }
27033  
27034  static ssize_t
27035 @@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27036         char *e;
27037         unsigned long n = simple_strtoul(buf, &e, 10);
27038         if (*buf && (*e == 0 || *e == '\n')) {
27039 -               atomic_set(&rdev->corrected_errors, n);
27040 +               atomic_set_unchecked(&rdev->corrected_errors, n);
27041                 return len;
27042         }
27043         return -EINVAL;
27044 @@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27045         rdev->last_read_error.tv_sec  = 0;
27046         rdev->last_read_error.tv_nsec = 0;
27047         atomic_set(&rdev->nr_pending, 0);
27048 -       atomic_set(&rdev->read_errors, 0);
27049 -       atomic_set(&rdev->corrected_errors, 0);
27050 +       atomic_set_unchecked(&rdev->read_errors, 0);
27051 +       atomic_set_unchecked(&rdev->corrected_errors, 0);
27052  
27053         INIT_LIST_HEAD(&rdev->same_set);
27054         init_waitqueue_head(&rdev->blocked_wait);
27055 @@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
27056  
27057                 spin_unlock(&pers_lock);
27058                 seq_printf(seq, "\n");
27059 -               mi->event = atomic_read(&md_event_count);
27060 +               mi->event = atomic_read_unchecked(&md_event_count);
27061                 return 0;
27062         }
27063         if (v == (void*)2) {
27064 @@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
27065                                 chunk_kb ? "KB" : "B");
27066                         if (bitmap->file) {
27067                                 seq_printf(seq, ", file: ");
27068 -                               seq_path(seq, &bitmap->file->f_path, " \t\n");
27069 +                               seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27070                         }
27071  
27072                         seq_printf(seq, "\n");
27073 @@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
27074         else {
27075                 struct seq_file *p = file->private_data;
27076                 p->private = mi;
27077 -               mi->event = atomic_read(&md_event_count);
27078 +               mi->event = atomic_read_unchecked(&md_event_count);
27079         }
27080         return error;
27081  }
27082 @@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
27083         /* always allow read */
27084         mask = POLLIN | POLLRDNORM;
27085  
27086 -       if (mi->event != atomic_read(&md_event_count))
27087 +       if (mi->event != atomic_read_unchecked(&md_event_count))
27088                 mask |= POLLERR | POLLPRI;
27089         return mask;
27090  }
27091 @@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
27092                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27093                 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27094                               (int)part_stat_read(&disk->part0, sectors[1]) -
27095 -                             atomic_read(&disk->sync_io);
27096 +                             atomic_read_unchecked(&disk->sync_io);
27097                 /* sync IO will cause sync_io to increase before the disk_stats
27098                  * as sync_io is counted when a request starts, and
27099                  * disk_stats is counted when it completes.
27100 diff -urNp linux-3.0.4/drivers/md/md.h linux-3.0.4/drivers/md/md.h
27101 --- linux-3.0.4/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
27102 +++ linux-3.0.4/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
27103 @@ -97,13 +97,13 @@ struct mdk_rdev_s
27104                                          * only maintained for arrays that
27105                                          * support hot removal
27106                                          */
27107 -       atomic_t        read_errors;    /* number of consecutive read errors that
27108 +       atomic_unchecked_t      read_errors;    /* number of consecutive read errors that
27109                                          * we have tried to ignore.
27110                                          */
27111         struct timespec last_read_error;        /* monotonic time since our
27112                                                  * last read error
27113                                                  */
27114 -       atomic_t        corrected_errors; /* number of corrected read errors,
27115 +       atomic_unchecked_t      corrected_errors; /* number of corrected read errors,
27116                                            * for reporting to userspace and storing
27117                                            * in superblock.
27118                                            */
27119 @@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
27120  
27121  static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27122  {
27123 -        atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27124 +       atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27125  }
27126  
27127  struct mdk_personality
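
The md.c and md.h hunks above convert pure statistics counters (md_event_count, sync_io, read_errors, corrected_errors) from atomic_t to the PaX atomic_unchecked_t type: under the REFCOUNT hardening, ordinary atomic_t operations trap on signed overflow, and these counters are allowed to wrap. A minimal sketch of what such an unchecked wrapper can look like (illustrative assumption only, using GCC builtins; the real definitions live in the arch and asm-generic header hunks elsewhere in this patch):

/* Sketch: an atomic counter type whose operations never trap on overflow. */
typedef struct {
        int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return *(volatile const int *)&v->counter;      /* plain load, no overflow check */
}

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
        v->counter = i;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __sync_fetch_and_add(&v->counter, 1);           /* wraps silently */
}

static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
        __sync_fetch_and_add(&v->counter, i);
}

The raid1, raid10 and raid5 hunks that follow apply the same substitution to every reader and writer of those fields, since the two types are deliberately not interchangeable.
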
27128 diff -urNp linux-3.0.4/drivers/md/raid10.c linux-3.0.4/drivers/md/raid10.c
27129 --- linux-3.0.4/drivers/md/raid10.c     2011-07-21 22:17:23.000000000 -0400
27130 +++ linux-3.0.4/drivers/md/raid10.c     2011-08-23 21:47:55.000000000 -0400
27131 @@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
27132         if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27133                 set_bit(R10BIO_Uptodate, &r10_bio->state);
27134         else {
27135 -               atomic_add(r10_bio->sectors,
27136 +               atomic_add_unchecked(r10_bio->sectors,
27137                            &conf->mirrors[d].rdev->corrected_errors);
27138                 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27139                         md_error(r10_bio->mddev,
27140 @@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
27141  {
27142         struct timespec cur_time_mon;
27143         unsigned long hours_since_last;
27144 -       unsigned int read_errors = atomic_read(&rdev->read_errors);
27145 +       unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27146  
27147         ktime_get_ts(&cur_time_mon);
27148  
27149 @@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
27150          * overflowing the shift of read_errors by hours_since_last.
27151          */
27152         if (hours_since_last >= 8 * sizeof(read_errors))
27153 -               atomic_set(&rdev->read_errors, 0);
27154 +               atomic_set_unchecked(&rdev->read_errors, 0);
27155         else
27156 -               atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27157 +               atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27158  }
27159  
27160  /*
27161 @@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
27162                 return;
27163  
27164         check_decay_read_errors(mddev, rdev);
27165 -       atomic_inc(&rdev->read_errors);
27166 -       if (atomic_read(&rdev->read_errors) > max_read_errors) {
27167 +       atomic_inc_unchecked(&rdev->read_errors);
27168 +       if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
27169                 char b[BDEVNAME_SIZE];
27170                 bdevname(rdev->bdev, b);
27171  
27172 @@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
27173                        "md/raid10:%s: %s: Raid device exceeded "
27174                        "read_error threshold [cur %d:max %d]\n",
27175                        mdname(mddev), b,
27176 -                      atomic_read(&rdev->read_errors), max_read_errors);
27177 +                      atomic_read_unchecked(&rdev->read_errors), max_read_errors);
27178                 printk(KERN_NOTICE
27179                        "md/raid10:%s: %s: Failing raid device\n",
27180                        mdname(mddev), b);
27181 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
27182                             test_bit(In_sync, &rdev->flags)) {
27183                                 atomic_inc(&rdev->nr_pending);
27184                                 rcu_read_unlock();
27185 -                               atomic_add(s, &rdev->corrected_errors);
27186 +                               atomic_add_unchecked(s, &rdev->corrected_errors);
27187                                 if (sync_page_io(rdev,
27188                                                  r10_bio->devs[sl].addr +
27189                                                  sect,
27190 diff -urNp linux-3.0.4/drivers/md/raid1.c linux-3.0.4/drivers/md/raid1.c
27191 --- linux-3.0.4/drivers/md/raid1.c      2011-07-21 22:17:23.000000000 -0400
27192 +++ linux-3.0.4/drivers/md/raid1.c      2011-08-23 21:47:55.000000000 -0400
27193 @@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
27194                                 rdev_dec_pending(rdev, mddev);
27195                                 md_error(mddev, rdev);
27196                         } else
27197 -                               atomic_add(s, &rdev->corrected_errors);
27198 +                               atomic_add_unchecked(s, &rdev->corrected_errors);
27199                 }
27200                 d = start;
27201                 while (d != r1_bio->read_disk) {
27202 @@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
27203                                         /* Well, this device is dead */
27204                                         md_error(mddev, rdev);
27205                                 else {
27206 -                                       atomic_add(s, &rdev->corrected_errors);
27207 +                                       atomic_add_unchecked(s, &rdev->corrected_errors);
27208                                         printk(KERN_INFO
27209                                                "md/raid1:%s: read error corrected "
27210                                                "(%d sectors at %llu on %s)\n",
27211 diff -urNp linux-3.0.4/drivers/md/raid5.c linux-3.0.4/drivers/md/raid5.c
27212 --- linux-3.0.4/drivers/md/raid5.c      2011-07-21 22:17:23.000000000 -0400
27213 +++ linux-3.0.4/drivers/md/raid5.c      2011-08-23 21:48:14.000000000 -0400
27214 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27215                         bi->bi_next = NULL;
27216                         if ((rw & WRITE) &&
27217                             test_bit(R5_ReWrite, &sh->dev[i].flags))
27218 -                               atomic_add(STRIPE_SECTORS,
27219 +                               atomic_add_unchecked(STRIPE_SECTORS,
27220                                         &rdev->corrected_errors);
27221                         generic_make_request(bi);
27222                 } else {
27223 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27224                         clear_bit(R5_ReadError, &sh->dev[i].flags);
27225                         clear_bit(R5_ReWrite, &sh->dev[i].flags);
27226                 }
27227 -               if (atomic_read(&conf->disks[i].rdev->read_errors))
27228 -                       atomic_set(&conf->disks[i].rdev->read_errors, 0);
27229 +               if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27230 +                       atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27231         } else {
27232                 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27233                 int retry = 0;
27234                 rdev = conf->disks[i].rdev;
27235  
27236                 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27237 -               atomic_inc(&rdev->read_errors);
27238 +               atomic_inc_unchecked(&rdev->read_errors);
27239                 if (conf->mddev->degraded >= conf->max_degraded)
27240                         printk_rl(KERN_WARNING
27241                                   "md/raid:%s: read error not correctable "
27242 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27243                                   (unsigned long long)(sh->sector
27244                                                        + rdev->data_offset),
27245                                   bdn);
27246 -               else if (atomic_read(&rdev->read_errors)
27247 +               else if (atomic_read_unchecked(&rdev->read_errors)
27248                          > conf->max_nr_stripes)
27249                         printk(KERN_WARNING
27250                                "md/raid:%s: Too many read errors, failing device %s.\n",
27251 @@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
27252         sector_t r_sector;
27253         struct stripe_head sh2;
27254  
27255 +       pax_track_stack();
27256  
27257         chunk_offset = sector_div(new_sector, sectors_per_chunk);
27258         stripe = new_sector;
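
The pax_track_stack() call added to compute_blocknr() above, and to the many driver functions below that declare large on-stack buffers, feeds PaX's kernel stack tracking: each call notes how deep the stack has grown in the current syscall so that the used region can be sanitized before returning to userspace. A rough sketch of the idea (assumed shape only; it uses a hypothetical per-task lowest_stack field standing in for the bookkeeping the real patch adds, and the actual helper is arch-specific):

/* Sketch: remember the deepest kernel stack position seen in this syscall. */
#include <linux/sched.h>

static __always_inline void track_stack_sketch(void)
{
        unsigned long sp = (unsigned long)__builtin_frame_address(0);

        if (sp < current->lowest_stack &&
            sp >= (unsigned long)task_stack_page(current) + sizeof(unsigned long))
                current->lowest_stack = sp;     /* new low-water mark */
}

The functions that receive the manual call are exactly the ones with unusually large locals: multi-hundred-byte firmware and EEPROM buffers, register scratch arrays, and similar.
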
27259 diff -urNp linux-3.0.4/drivers/media/common/saa7146_hlp.c linux-3.0.4/drivers/media/common/saa7146_hlp.c
27260 --- linux-3.0.4/drivers/media/common/saa7146_hlp.c      2011-07-21 22:17:23.000000000 -0400
27261 +++ linux-3.0.4/drivers/media/common/saa7146_hlp.c      2011-08-23 21:48:14.000000000 -0400
27262 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
27263  
27264         int x[32], y[32], w[32], h[32];
27265  
27266 +       pax_track_stack();
27267 +
27268         /* clear out memory */
27269         memset(&line_list[0],  0x00, sizeof(u32)*32);
27270         memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27271 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27272 --- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c     2011-07-21 22:17:23.000000000 -0400
27273 +++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c     2011-08-23 21:48:14.000000000 -0400
27274 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27275         u8 buf[HOST_LINK_BUF_SIZE];
27276         int i;
27277  
27278 +       pax_track_stack();
27279 +
27280         dprintk("%s\n", __func__);
27281  
27282         /* check if we have space for a link buf in the rx_buffer */
27283 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27284         unsigned long timeout;
27285         int written;
27286  
27287 +       pax_track_stack();
27288 +
27289         dprintk("%s\n", __func__);
27290  
27291         /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27292 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h
27293 --- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h  2011-07-21 22:17:23.000000000 -0400
27294 +++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h  2011-08-24 18:24:40.000000000 -0400
27295 @@ -68,12 +68,12 @@ struct dvb_demux_feed {
27296         union {
27297                 struct dmx_ts_feed ts;
27298                 struct dmx_section_feed sec;
27299 -       } feed;
27300 +       } __no_const feed;
27301  
27302         union {
27303                 dmx_ts_cb ts;
27304                 dmx_section_cb sec;
27305 -       } cb;
27306 +       } __no_const cb;
27307  
27308         struct dvb_demux *demux;
27309         void *priv;
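
The __no_const annotations in this header, and in cxusb.c, dw2102.c, dib3000.h, abx500-core.c and sgi-xp/xp.h further down, together with *_no_const typedefs such as file_operations_no_const in dvbdev.c, exist for the constify gcc plugin: structures made up only of function pointers are normally forced const at compile time, and these particular instances must remain writable because they are filled in or swapped at runtime. A minimal sketch of how the marker is typically defined and used (illustrative assumption; the real definition sits in the compiler header hunks of this patch):

/* Sketch: opt one ops-style struct out of automatic constification. */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

struct sample_ops {                             /* hypothetical struct */
        int (*start)(void *priv);
        void (*stop)(void *priv);
} __no_const;                                   /* stays writable at runtime */

Where the shared struct definition cannot carry the attribute directly (dib_fe_xfer_ops, file_operations), the patch instead introduces a _no_const typedef and switches only the writable users over to it.
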
27310 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c
27311 --- linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c     2011-07-21 22:17:23.000000000 -0400
27312 +++ linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c     2011-08-24 18:24:19.000000000 -0400
27313 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27314                         const struct dvb_device *template, void *priv, int type)
27315  {
27316         struct dvb_device *dvbdev;
27317 -       struct file_operations *dvbdevfops;
27318 +       file_operations_no_const *dvbdevfops;
27319         struct device *clsdev;
27320         int minor;
27321         int id;
27322 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c
27323 --- linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c       2011-07-21 22:17:23.000000000 -0400
27324 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c       2011-08-24 18:26:33.000000000 -0400
27325 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27326  struct dib0700_adapter_state {
27327         int (*set_param_save) (struct dvb_frontend *,
27328                                struct dvb_frontend_parameters *);
27329 -};
27330 +} __no_const;
27331  
27332  static int dib7070_set_param_override(struct dvb_frontend *fe,
27333                                       struct dvb_frontend_parameters *fep)
27334 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27335 --- linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c        2011-07-21 22:17:23.000000000 -0400
27336 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c        2011-08-23 21:48:14.000000000 -0400
27337 @@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
27338         if (!buf)
27339                 return -ENOMEM;
27340  
27341 +       pax_track_stack();
27342 +
27343         while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27344                 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27345                                 hx.addr, hx.len, hx.chk);
27346 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h
27347 --- linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h      2011-07-21 22:17:23.000000000 -0400
27348 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h      2011-08-24 18:27:27.000000000 -0400
27349 @@ -97,7 +97,7 @@
27350  #define DIBUSB_IOCTL_CMD_DISABLE_STREAM        0x02
27351  
27352  struct dibusb_state {
27353 -       struct dib_fe_xfer_ops ops;
27354 +       dib_fe_xfer_ops_no_const ops;
27355         int mt2060_present;
27356         u8 tuner_addr;
27357  };
27358 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c
27359 --- linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c      2011-07-21 22:17:23.000000000 -0400
27360 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c      2011-08-24 18:27:45.000000000 -0400
27361 @@ -95,7 +95,7 @@ struct su3000_state {
27362  
27363  struct s6x0_state {
27364         int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27365 -};
27366 +} __no_const;
27367  
27368  /* debug */
27369  static int dvb_usb_dw2102_debug;
27370 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c
27371 --- linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c     2011-07-21 22:17:23.000000000 -0400
27372 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c     2011-08-23 21:48:14.000000000 -0400
27373 @@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
27374         usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
27375                         0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
27376  
27377 +       pax_track_stack();
27378  
27379         data[0] = 0x8a;
27380         len_in = 1;
27381 @@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
27382         int ret = 0, len_in;
27383         u8 data[512] = {0};
27384  
27385 +       pax_track_stack();
27386 +
27387         data[0] = 0x0a;
27388         len_in = 1;
27389         info("FRM Firmware Cold Reset");
27390 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000.h linux-3.0.4/drivers/media/dvb/frontends/dib3000.h
27391 --- linux-3.0.4/drivers/media/dvb/frontends/dib3000.h   2011-07-21 22:17:23.000000000 -0400
27392 +++ linux-3.0.4/drivers/media/dvb/frontends/dib3000.h   2011-08-24 18:28:18.000000000 -0400
27393 @@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
27394         int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27395         int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27396  };
27397 +typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
27398  
27399  #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27400  extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27401 -                                            struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
27402 +                                            struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
27403  #else
27404  static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27405                                              struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27406 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c
27407 --- linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
27408 +++ linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
27409 @@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
27410  static struct dvb_frontend_ops dib3000mb_ops;
27411  
27412  struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27413 -                                     struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27414 +                                     struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
27415  {
27416         struct dib3000_state* state = NULL;
27417  
27418 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c
27419 --- linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c   2011-07-21 22:17:23.000000000 -0400
27420 +++ linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c   2011-08-23 21:48:14.000000000 -0400
27421 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27422         int ret = -1;
27423         int sync;
27424  
27425 +       pax_track_stack();
27426 +
27427         dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27428  
27429         fcp = 3000;
27430 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/or51211.c linux-3.0.4/drivers/media/dvb/frontends/or51211.c
27431 --- linux-3.0.4/drivers/media/dvb/frontends/or51211.c   2011-07-21 22:17:23.000000000 -0400
27432 +++ linux-3.0.4/drivers/media/dvb/frontends/or51211.c   2011-08-23 21:48:14.000000000 -0400
27433 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27434         u8 tudata[585];
27435         int i;
27436  
27437 +       pax_track_stack();
27438 +
27439         dprintk("Firmware is %zd bytes\n",fw->size);
27440  
27441         /* Get eprom data */
27442 diff -urNp linux-3.0.4/drivers/media/video/cx18/cx18-driver.c linux-3.0.4/drivers/media/video/cx18/cx18-driver.c
27443 --- linux-3.0.4/drivers/media/video/cx18/cx18-driver.c  2011-07-21 22:17:23.000000000 -0400
27444 +++ linux-3.0.4/drivers/media/video/cx18/cx18-driver.c  2011-08-23 21:48:14.000000000 -0400
27445 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27446         struct i2c_client c;
27447         u8 eedata[256];
27448  
27449 +       pax_track_stack();
27450 +
27451         memset(&c, 0, sizeof(c));
27452         strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27453         c.adapter = &cx->i2c_adap[0];
27454 diff -urNp linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c
27455 --- linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c     2011-07-21 22:17:23.000000000 -0400
27456 +++ linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c     2011-08-23 21:48:14.000000000 -0400
27457 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27458         bool handle = false;
27459         struct ir_raw_event ir_core_event[64];
27460  
27461 +       pax_track_stack();
27462 +
27463         do {
27464                 num = 0;
27465                 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27466 diff -urNp linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27467 --- linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c    2011-07-21 22:17:23.000000000 -0400
27468 +++ linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c    2011-08-23 21:48:14.000000000 -0400
27469 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw 
27470         u8 *eeprom;
27471         struct tveeprom tvdata;
27472  
27473 +       pax_track_stack();
27474 +
27475         memset(&tvdata,0,sizeof(tvdata));
27476  
27477         eeprom = pvr2_eeprom_fetch(hdw);
27478 diff -urNp linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c
27479 --- linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
27480 +++ linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
27481 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27482         unsigned char localPAT[256];
27483         unsigned char localPMT[256];
27484  
27485 +       pax_track_stack();
27486 +
27487         /* Set video format - must be done first as it resets other settings */
27488         set_reg8(client, 0x41, h->video_format);
27489  
27490 diff -urNp linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c
27491 --- linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c       2011-07-21 22:17:23.000000000 -0400
27492 +++ linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c       2011-08-23 21:48:14.000000000 -0400
27493 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27494         u8 tmp[512];
27495         dprintk(DBGLVL_CMD, "%s()\n", __func__);
27496  
27497 +       pax_track_stack();
27498 +
27499         /* While any outstand message on the bus exists... */
27500         do {
27501  
27502 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27503         u8 tmp[512];
27504         dprintk(DBGLVL_CMD, "%s()\n", __func__);
27505  
27506 +       pax_track_stack();
27507 +
27508         while (loop) {
27509  
27510                 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27511 diff -urNp linux-3.0.4/drivers/media/video/timblogiw.c linux-3.0.4/drivers/media/video/timblogiw.c
27512 --- linux-3.0.4/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
27513 +++ linux-3.0.4/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
27514 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
27515  
27516  /* Platform device functions */
27517  
27518 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27519 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
27520         .vidioc_querycap                = timblogiw_querycap,
27521         .vidioc_enum_fmt_vid_cap        = timblogiw_enum_fmt,
27522         .vidioc_g_fmt_vid_cap           = timblogiw_g_fmt,
27523 diff -urNp linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c
27524 --- linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c  2011-07-21 22:17:23.000000000 -0400
27525 +++ linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c  2011-08-23 21:48:14.000000000 -0400
27526 @@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
27527         unsigned char rv, gv, bv;
27528         static unsigned char *Y, *U, *V;
27529  
27530 +       pax_track_stack();
27531 +
27532         frame = usbvision->cur_frame;
27533         image_size = frame->frmwidth * frame->frmheight;
27534         if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27535 diff -urNp linux-3.0.4/drivers/media/video/videobuf-dma-sg.c linux-3.0.4/drivers/media/video/videobuf-dma-sg.c
27536 --- linux-3.0.4/drivers/media/video/videobuf-dma-sg.c   2011-07-21 22:17:23.000000000 -0400
27537 +++ linux-3.0.4/drivers/media/video/videobuf-dma-sg.c   2011-08-23 21:48:14.000000000 -0400
27538 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27539  {
27540         struct videobuf_queue q;
27541  
27542 +       pax_track_stack();
27543 +
27544         /* Required to make generic handler to call __videobuf_alloc */
27545         q.int_ops = &sg_ops;
27546  
27547 diff -urNp linux-3.0.4/drivers/message/fusion/mptbase.c linux-3.0.4/drivers/message/fusion/mptbase.c
27548 --- linux-3.0.4/drivers/message/fusion/mptbase.c        2011-07-21 22:17:23.000000000 -0400
27549 +++ linux-3.0.4/drivers/message/fusion/mptbase.c        2011-08-23 21:48:14.000000000 -0400
27550 @@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct 
27551         seq_printf(m, "  MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27552         seq_printf(m, "  MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27553  
27554 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27555 +       seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27556 +#else
27557         seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27558                                         (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27559 +#endif
27560 +
27561         /*
27562          *  Rounding UP to nearest 4-kB boundary here...
27563          */
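
The GRKERNSEC_HIDESYM branch added above substitutes NULL for the request-frame virtual and DMA addresses in the /proc output, so a reader of this file cannot harvest kernel pointers from it. The same pattern, reduced to a standalone sketch (hypothetical names, same idea as the hunk):

/* Sketch: print a placeholder instead of a real kernel address when hiding symbols. */
#include <linux/types.h>
#include <linux/seq_file.h>

static void show_frames(struct seq_file *m, void *frames, dma_addr_t frames_dma)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
        seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
#else
        seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n",
                   frames, (void *)(unsigned long)frames_dma);
#endif
}
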
27564 diff -urNp linux-3.0.4/drivers/message/fusion/mptsas.c linux-3.0.4/drivers/message/fusion/mptsas.c
27565 --- linux-3.0.4/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
27566 +++ linux-3.0.4/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
27567 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27568                 return 0;
27569  }
27570  
27571 +static inline void
27572 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27573 +{
27574 +       if (phy_info->port_details) {
27575 +               phy_info->port_details->rphy = rphy;
27576 +               dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27577 +                   ioc->name, rphy));
27578 +       }
27579 +
27580 +       if (rphy) {
27581 +               dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27582 +                   &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27583 +               dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27584 +                   ioc->name, rphy, rphy->dev.release));
27585 +       }
27586 +}
27587 +
27588  /* no mutex */
27589  static void
27590  mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27591 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27592                 return NULL;
27593  }
27594  
27595 -static inline void
27596 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27597 -{
27598 -       if (phy_info->port_details) {
27599 -               phy_info->port_details->rphy = rphy;
27600 -               dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27601 -                   ioc->name, rphy));
27602 -       }
27603 -
27604 -       if (rphy) {
27605 -               dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27606 -                   &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27607 -               dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27608 -                   ioc->name, rphy, rphy->dev.release));
27609 -       }
27610 -}
27611 -
27612  static inline struct sas_port *
27613  mptsas_get_port(struct mptsas_phyinfo *phy_info)
27614  {
27615 diff -urNp linux-3.0.4/drivers/message/fusion/mptscsih.c linux-3.0.4/drivers/message/fusion/mptscsih.c
27616 --- linux-3.0.4/drivers/message/fusion/mptscsih.c       2011-07-21 22:17:23.000000000 -0400
27617 +++ linux-3.0.4/drivers/message/fusion/mptscsih.c       2011-08-23 21:47:55.000000000 -0400
27618 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27619  
27620         h = shost_priv(SChost);
27621  
27622 -       if (h) {
27623 -               if (h->info_kbuf == NULL)
27624 -                       if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27625 -                               return h->info_kbuf;
27626 -               h->info_kbuf[0] = '\0';
27627 +       if (!h)
27628 +               return NULL;
27629  
27630 -               mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27631 -               h->info_kbuf[size-1] = '\0';
27632 -       }
27633 +       if (h->info_kbuf == NULL)
27634 +               if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27635 +                       return h->info_kbuf;
27636 +       h->info_kbuf[0] = '\0';
27637 +
27638 +       mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27639 +       h->info_kbuf[size-1] = '\0';
27640  
27641         return h->info_kbuf;
27642  }
27643 diff -urNp linux-3.0.4/drivers/message/i2o/i2o_config.c linux-3.0.4/drivers/message/i2o/i2o_config.c
27644 --- linux-3.0.4/drivers/message/i2o/i2o_config.c        2011-07-21 22:17:23.000000000 -0400
27645 +++ linux-3.0.4/drivers/message/i2o/i2o_config.c        2011-08-23 21:48:14.000000000 -0400
27646 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27647         struct i2o_message *msg;
27648         unsigned int iop;
27649  
27650 +       pax_track_stack();
27651 +
27652         if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27653                 return -EFAULT;
27654  
27655 diff -urNp linux-3.0.4/drivers/message/i2o/i2o_proc.c linux-3.0.4/drivers/message/i2o/i2o_proc.c
27656 --- linux-3.0.4/drivers/message/i2o/i2o_proc.c  2011-07-21 22:17:23.000000000 -0400
27657 +++ linux-3.0.4/drivers/message/i2o/i2o_proc.c  2011-08-23 21:47:55.000000000 -0400
27658 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27659         "Array Controller Device"
27660  };
27661  
27662 -static char *chtostr(u8 * chars, int n)
27663 -{
27664 -       char tmp[256];
27665 -       tmp[0] = 0;
27666 -       return strncat(tmp, (char *)chars, n);
27667 -}
27668 -
27669  static int i2o_report_query_status(struct seq_file *seq, int block_status,
27670                                    char *group)
27671  {
27672 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27673  
27674                 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27675                 seq_printf(seq, "%-#8x", ddm_table.module_id);
27676 -               seq_printf(seq, "%-29s",
27677 -                          chtostr(ddm_table.module_name_version, 28));
27678 +               seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27679                 seq_printf(seq, "%9d  ", ddm_table.data_size);
27680                 seq_printf(seq, "%8d", ddm_table.code_size);
27681  
27682 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27683  
27684                 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27685                 seq_printf(seq, "%-#8x", dst->module_id);
27686 -               seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27687 -               seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27688 +               seq_printf(seq, "%-.28s", dst->module_name_version);
27689 +               seq_printf(seq, "%-.8s", dst->date);
27690                 seq_printf(seq, "%8d ", dst->module_size);
27691                 seq_printf(seq, "%8d ", dst->mpb_size);
27692                 seq_printf(seq, "0x%04x", dst->module_flags);
27693 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27694         seq_printf(seq, "Device Class  : %s\n", i2o_get_class_name(work16[0]));
27695         seq_printf(seq, "Owner TID     : %0#5x\n", work16[2]);
27696         seq_printf(seq, "Parent TID    : %0#5x\n", work16[3]);
27697 -       seq_printf(seq, "Vendor info   : %s\n",
27698 -                  chtostr((u8 *) (work32 + 2), 16));
27699 -       seq_printf(seq, "Product info  : %s\n",
27700 -                  chtostr((u8 *) (work32 + 6), 16));
27701 -       seq_printf(seq, "Description   : %s\n",
27702 -                  chtostr((u8 *) (work32 + 10), 16));
27703 -       seq_printf(seq, "Product rev.  : %s\n",
27704 -                  chtostr((u8 *) (work32 + 14), 8));
27705 +       seq_printf(seq, "Vendor info   : %.16s\n", (u8 *) (work32 + 2));
27706 +       seq_printf(seq, "Product info  : %.16s\n", (u8 *) (work32 + 6));
27707 +       seq_printf(seq, "Description   : %.16s\n", (u8 *) (work32 + 10));
27708 +       seq_printf(seq, "Product rev.  : %.8s\n", (u8 *) (work32 + 14));
27709  
27710         seq_printf(seq, "Serial number : ");
27711         print_serial_number(seq, (u8 *) (work32 + 16),
27712 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27713         }
27714  
27715         seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27716 -       seq_printf(seq, "Module name         : %s\n",
27717 -                  chtostr(result.module_name, 24));
27718 -       seq_printf(seq, "Module revision     : %s\n",
27719 -                  chtostr(result.module_rev, 8));
27720 +       seq_printf(seq, "Module name         : %.24s\n", result.module_name);
27721 +       seq_printf(seq, "Module revision     : %.8s\n", result.module_rev);
27722  
27723         seq_printf(seq, "Serial number       : ");
27724         print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27725 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27726                 return 0;
27727         }
27728  
27729 -       seq_printf(seq, "Device name     : %s\n",
27730 -                  chtostr(result.device_name, 64));
27731 -       seq_printf(seq, "Service name    : %s\n",
27732 -                  chtostr(result.service_name, 64));
27733 -       seq_printf(seq, "Physical name   : %s\n",
27734 -                  chtostr(result.physical_location, 64));
27735 -       seq_printf(seq, "Instance number : %s\n",
27736 -                  chtostr(result.instance_number, 4));
27737 +       seq_printf(seq, "Device name     : %.64s\n", result.device_name);
27738 +       seq_printf(seq, "Service name    : %.64s\n", result.service_name);
27739 +       seq_printf(seq, "Physical name   : %.64s\n", result.physical_location);
27740 +       seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27741  
27742         return 0;
27743  }
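
The removed chtostr() helper above returned the address of a 256-byte array that lived on its own stack frame, so every caller printed through a dangling pointer. The replacement formats ("%.28s", "%.8s", "%-.64s", ...) let seq_printf() read the fixed-width I2O fields in place with an explicit length cap and no temporary copy. The same technique in standalone form (hypothetical buffer, plain userspace C for clarity):

#include <stdio.h>

int main(void)
{
        /* Fixed-width field with no terminating NUL, as in the I2O tables. */
        char module_rev[8] = { '2', '.', '0', '.', '1', '2', '3', 'b' };

        /* "%.8s" consumes at most 8 bytes, so no NUL terminator is needed. */
        printf("Module revision     : %.8s\n", module_rev);
        return 0;
}
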
27744 diff -urNp linux-3.0.4/drivers/message/i2o/iop.c linux-3.0.4/drivers/message/i2o/iop.c
27745 --- linux-3.0.4/drivers/message/i2o/iop.c       2011-07-21 22:17:23.000000000 -0400
27746 +++ linux-3.0.4/drivers/message/i2o/iop.c       2011-08-23 21:47:55.000000000 -0400
27747 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27748  
27749         spin_lock_irqsave(&c->context_list_lock, flags);
27750  
27751 -       if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27752 -               atomic_inc(&c->context_list_counter);
27753 +       if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27754 +               atomic_inc_unchecked(&c->context_list_counter);
27755  
27756 -       entry->context = atomic_read(&c->context_list_counter);
27757 +       entry->context = atomic_read_unchecked(&c->context_list_counter);
27758  
27759         list_add(&entry->list, &c->context_list);
27760  
27761 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27762  
27763  #if BITS_PER_LONG == 64
27764         spin_lock_init(&c->context_list_lock);
27765 -       atomic_set(&c->context_list_counter, 0);
27766 +       atomic_set_unchecked(&c->context_list_counter, 0);
27767         INIT_LIST_HEAD(&c->context_list);
27768  #endif
27769  
27770 diff -urNp linux-3.0.4/drivers/mfd/abx500-core.c linux-3.0.4/drivers/mfd/abx500-core.c
27771 --- linux-3.0.4/drivers/mfd/abx500-core.c       2011-07-21 22:17:23.000000000 -0400
27772 +++ linux-3.0.4/drivers/mfd/abx500-core.c       2011-08-23 21:47:55.000000000 -0400
27773 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27774  
27775  struct abx500_device_entry {
27776         struct list_head list;
27777 -       struct abx500_ops ops;
27778 +       abx500_ops_no_const ops;
27779         struct device *dev;
27780  };
27781  
27782 diff -urNp linux-3.0.4/drivers/mfd/janz-cmodio.c linux-3.0.4/drivers/mfd/janz-cmodio.c
27783 --- linux-3.0.4/drivers/mfd/janz-cmodio.c       2011-07-21 22:17:23.000000000 -0400
27784 +++ linux-3.0.4/drivers/mfd/janz-cmodio.c       2011-08-23 21:47:55.000000000 -0400
27785 @@ -13,6 +13,7 @@
27786  
27787  #include <linux/kernel.h>
27788  #include <linux/module.h>
27789 +#include <linux/slab.h>
27790  #include <linux/init.h>
27791  #include <linux/pci.h>
27792  #include <linux/interrupt.h>
27793 diff -urNp linux-3.0.4/drivers/mfd/wm8350-i2c.c linux-3.0.4/drivers/mfd/wm8350-i2c.c
27794 --- linux-3.0.4/drivers/mfd/wm8350-i2c.c        2011-07-21 22:17:23.000000000 -0400
27795 +++ linux-3.0.4/drivers/mfd/wm8350-i2c.c        2011-08-23 21:48:14.000000000 -0400
27796 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27797         u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27798         int ret;
27799  
27800 +       pax_track_stack();
27801 +
27802         if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27803                 return -EINVAL;
27804  
27805 diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c
27806 --- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c      2011-07-21 22:17:23.000000000 -0400
27807 +++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c      2011-08-23 21:47:55.000000000 -0400
27808 @@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27809          * the lid is closed. This leads to interrupts as soon as a little move
27810          * is done.
27811          */
27812 -       atomic_inc(&lis3_dev.count);
27813 +       atomic_inc_unchecked(&lis3_dev.count);
27814  
27815         wake_up_interruptible(&lis3_dev.misc_wait);
27816         kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27817 @@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27818         if (lis3_dev.pm_dev)
27819                 pm_runtime_get_sync(lis3_dev.pm_dev);
27820  
27821 -       atomic_set(&lis3_dev.count, 0);
27822 +       atomic_set_unchecked(&lis3_dev.count, 0);
27823         return 0;
27824  }
27825  
27826 @@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27827         add_wait_queue(&lis3_dev.misc_wait, &wait);
27828         while (true) {
27829                 set_current_state(TASK_INTERRUPTIBLE);
27830 -               data = atomic_xchg(&lis3_dev.count, 0);
27831 +               data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27832                 if (data)
27833                         break;
27834  
27835 @@ -583,7 +583,7 @@ out:
27836  static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27837  {
27838         poll_wait(file, &lis3_dev.misc_wait, wait);
27839 -       if (atomic_read(&lis3_dev.count))
27840 +       if (atomic_read_unchecked(&lis3_dev.count))
27841                 return POLLIN | POLLRDNORM;
27842         return 0;
27843  }
27844 diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h
27845 --- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h      2011-07-21 22:17:23.000000000 -0400
27846 +++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h      2011-08-23 21:47:55.000000000 -0400
27847 @@ -265,7 +265,7 @@ struct lis3lv02d {
27848         struct input_polled_dev *idev;     /* input device */
27849         struct platform_device  *pdev;     /* platform device */
27850         struct regulator_bulk_data regulators[2];
27851 -       atomic_t                count;     /* interrupt count after last read */
27852 +       atomic_unchecked_t      count;     /* interrupt count after last read */
27853         union axis_conversion   ac;        /* hw -> logical axis */
27854         int                     mapped_btns[3];
27855  
27856 diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c
27857 --- linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c       2011-07-21 22:17:23.000000000 -0400
27858 +++ linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c       2011-08-23 21:47:55.000000000 -0400
27859 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27860         unsigned long nsec;
27861  
27862         nsec = CLKS2NSEC(clks);
27863 -       atomic_long_inc(&mcs_op_statistics[op].count);
27864 -       atomic_long_add(nsec, &mcs_op_statistics[op].total);
27865 +       atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
27866 +       atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
27867         if (mcs_op_statistics[op].max < nsec)
27868                 mcs_op_statistics[op].max = nsec;
27869  }
27870 diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c
27871 --- linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c        2011-07-21 22:17:23.000000000 -0400
27872 +++ linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c        2011-08-23 21:47:55.000000000 -0400
27873 @@ -32,9 +32,9 @@
27874  
27875  #define printstat(s, f)                printstat_val(s, &gru_stats.f, #f)
27876  
27877 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
27878 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
27879  {
27880 -       unsigned long val = atomic_long_read(v);
27881 +       unsigned long val = atomic_long_read_unchecked(v);
27882  
27883         seq_printf(s, "%16lu %s\n", val, id);
27884  }
27885 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
27886  
27887         seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
27888         for (op = 0; op < mcsop_last; op++) {
27889 -               count = atomic_long_read(&mcs_op_statistics[op].count);
27890 -               total = atomic_long_read(&mcs_op_statistics[op].total);
27891 +               count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
27892 +               total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
27893                 max = mcs_op_statistics[op].max;
27894                 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
27895                            count ? total / count : 0, max);
27896 diff -urNp linux-3.0.4/drivers/misc/sgi-gru/grutables.h linux-3.0.4/drivers/misc/sgi-gru/grutables.h
27897 --- linux-3.0.4/drivers/misc/sgi-gru/grutables.h        2011-07-21 22:17:23.000000000 -0400
27898 +++ linux-3.0.4/drivers/misc/sgi-gru/grutables.h        2011-08-23 21:47:55.000000000 -0400
27899 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
27900   * GRU statistics.
27901   */
27902  struct gru_stats_s {
27903 -       atomic_long_t vdata_alloc;
27904 -       atomic_long_t vdata_free;
27905 -       atomic_long_t gts_alloc;
27906 -       atomic_long_t gts_free;
27907 -       atomic_long_t gms_alloc;
27908 -       atomic_long_t gms_free;
27909 -       atomic_long_t gts_double_allocate;
27910 -       atomic_long_t assign_context;
27911 -       atomic_long_t assign_context_failed;
27912 -       atomic_long_t free_context;
27913 -       atomic_long_t load_user_context;
27914 -       atomic_long_t load_kernel_context;
27915 -       atomic_long_t lock_kernel_context;
27916 -       atomic_long_t unlock_kernel_context;
27917 -       atomic_long_t steal_user_context;
27918 -       atomic_long_t steal_kernel_context;
27919 -       atomic_long_t steal_context_failed;
27920 -       atomic_long_t nopfn;
27921 -       atomic_long_t asid_new;
27922 -       atomic_long_t asid_next;
27923 -       atomic_long_t asid_wrap;
27924 -       atomic_long_t asid_reuse;
27925 -       atomic_long_t intr;
27926 -       atomic_long_t intr_cbr;
27927 -       atomic_long_t intr_tfh;
27928 -       atomic_long_t intr_spurious;
27929 -       atomic_long_t intr_mm_lock_failed;
27930 -       atomic_long_t call_os;
27931 -       atomic_long_t call_os_wait_queue;
27932 -       atomic_long_t user_flush_tlb;
27933 -       atomic_long_t user_unload_context;
27934 -       atomic_long_t user_exception;
27935 -       atomic_long_t set_context_option;
27936 -       atomic_long_t check_context_retarget_intr;
27937 -       atomic_long_t check_context_unload;
27938 -       atomic_long_t tlb_dropin;
27939 -       atomic_long_t tlb_preload_page;
27940 -       atomic_long_t tlb_dropin_fail_no_asid;
27941 -       atomic_long_t tlb_dropin_fail_upm;
27942 -       atomic_long_t tlb_dropin_fail_invalid;
27943 -       atomic_long_t tlb_dropin_fail_range_active;
27944 -       atomic_long_t tlb_dropin_fail_idle;
27945 -       atomic_long_t tlb_dropin_fail_fmm;
27946 -       atomic_long_t tlb_dropin_fail_no_exception;
27947 -       atomic_long_t tfh_stale_on_fault;
27948 -       atomic_long_t mmu_invalidate_range;
27949 -       atomic_long_t mmu_invalidate_page;
27950 -       atomic_long_t flush_tlb;
27951 -       atomic_long_t flush_tlb_gru;
27952 -       atomic_long_t flush_tlb_gru_tgh;
27953 -       atomic_long_t flush_tlb_gru_zero_asid;
27954 -
27955 -       atomic_long_t copy_gpa;
27956 -       atomic_long_t read_gpa;
27957 -
27958 -       atomic_long_t mesq_receive;
27959 -       atomic_long_t mesq_receive_none;
27960 -       atomic_long_t mesq_send;
27961 -       atomic_long_t mesq_send_failed;
27962 -       atomic_long_t mesq_noop;
27963 -       atomic_long_t mesq_send_unexpected_error;
27964 -       atomic_long_t mesq_send_lb_overflow;
27965 -       atomic_long_t mesq_send_qlimit_reached;
27966 -       atomic_long_t mesq_send_amo_nacked;
27967 -       atomic_long_t mesq_send_put_nacked;
27968 -       atomic_long_t mesq_page_overflow;
27969 -       atomic_long_t mesq_qf_locked;
27970 -       atomic_long_t mesq_qf_noop_not_full;
27971 -       atomic_long_t mesq_qf_switch_head_failed;
27972 -       atomic_long_t mesq_qf_unexpected_error;
27973 -       atomic_long_t mesq_noop_unexpected_error;
27974 -       atomic_long_t mesq_noop_lb_overflow;
27975 -       atomic_long_t mesq_noop_qlimit_reached;
27976 -       atomic_long_t mesq_noop_amo_nacked;
27977 -       atomic_long_t mesq_noop_put_nacked;
27978 -       atomic_long_t mesq_noop_page_overflow;
27979 +       atomic_long_unchecked_t vdata_alloc;
27980 +       atomic_long_unchecked_t vdata_free;
27981 +       atomic_long_unchecked_t gts_alloc;
27982 +       atomic_long_unchecked_t gts_free;
27983 +       atomic_long_unchecked_t gms_alloc;
27984 +       atomic_long_unchecked_t gms_free;
27985 +       atomic_long_unchecked_t gts_double_allocate;
27986 +       atomic_long_unchecked_t assign_context;
27987 +       atomic_long_unchecked_t assign_context_failed;
27988 +       atomic_long_unchecked_t free_context;
27989 +       atomic_long_unchecked_t load_user_context;
27990 +       atomic_long_unchecked_t load_kernel_context;
27991 +       atomic_long_unchecked_t lock_kernel_context;
27992 +       atomic_long_unchecked_t unlock_kernel_context;
27993 +       atomic_long_unchecked_t steal_user_context;
27994 +       atomic_long_unchecked_t steal_kernel_context;
27995 +       atomic_long_unchecked_t steal_context_failed;
27996 +       atomic_long_unchecked_t nopfn;
27997 +       atomic_long_unchecked_t asid_new;
27998 +       atomic_long_unchecked_t asid_next;
27999 +       atomic_long_unchecked_t asid_wrap;
28000 +       atomic_long_unchecked_t asid_reuse;
28001 +       atomic_long_unchecked_t intr;
28002 +       atomic_long_unchecked_t intr_cbr;
28003 +       atomic_long_unchecked_t intr_tfh;
28004 +       atomic_long_unchecked_t intr_spurious;
28005 +       atomic_long_unchecked_t intr_mm_lock_failed;
28006 +       atomic_long_unchecked_t call_os;
28007 +       atomic_long_unchecked_t call_os_wait_queue;
28008 +       atomic_long_unchecked_t user_flush_tlb;
28009 +       atomic_long_unchecked_t user_unload_context;
28010 +       atomic_long_unchecked_t user_exception;
28011 +       atomic_long_unchecked_t set_context_option;
28012 +       atomic_long_unchecked_t check_context_retarget_intr;
28013 +       atomic_long_unchecked_t check_context_unload;
28014 +       atomic_long_unchecked_t tlb_dropin;
28015 +       atomic_long_unchecked_t tlb_preload_page;
28016 +       atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28017 +       atomic_long_unchecked_t tlb_dropin_fail_upm;
28018 +       atomic_long_unchecked_t tlb_dropin_fail_invalid;
28019 +       atomic_long_unchecked_t tlb_dropin_fail_range_active;
28020 +       atomic_long_unchecked_t tlb_dropin_fail_idle;
28021 +       atomic_long_unchecked_t tlb_dropin_fail_fmm;
28022 +       atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28023 +       atomic_long_unchecked_t tfh_stale_on_fault;
28024 +       atomic_long_unchecked_t mmu_invalidate_range;
28025 +       atomic_long_unchecked_t mmu_invalidate_page;
28026 +       atomic_long_unchecked_t flush_tlb;
28027 +       atomic_long_unchecked_t flush_tlb_gru;
28028 +       atomic_long_unchecked_t flush_tlb_gru_tgh;
28029 +       atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28030 +
28031 +       atomic_long_unchecked_t copy_gpa;
28032 +       atomic_long_unchecked_t read_gpa;
28033 +
28034 +       atomic_long_unchecked_t mesq_receive;
28035 +       atomic_long_unchecked_t mesq_receive_none;
28036 +       atomic_long_unchecked_t mesq_send;
28037 +       atomic_long_unchecked_t mesq_send_failed;
28038 +       atomic_long_unchecked_t mesq_noop;
28039 +       atomic_long_unchecked_t mesq_send_unexpected_error;
28040 +       atomic_long_unchecked_t mesq_send_lb_overflow;
28041 +       atomic_long_unchecked_t mesq_send_qlimit_reached;
28042 +       atomic_long_unchecked_t mesq_send_amo_nacked;
28043 +       atomic_long_unchecked_t mesq_send_put_nacked;
28044 +       atomic_long_unchecked_t mesq_page_overflow;
28045 +       atomic_long_unchecked_t mesq_qf_locked;
28046 +       atomic_long_unchecked_t mesq_qf_noop_not_full;
28047 +       atomic_long_unchecked_t mesq_qf_switch_head_failed;
28048 +       atomic_long_unchecked_t mesq_qf_unexpected_error;
28049 +       atomic_long_unchecked_t mesq_noop_unexpected_error;
28050 +       atomic_long_unchecked_t mesq_noop_lb_overflow;
28051 +       atomic_long_unchecked_t mesq_noop_qlimit_reached;
28052 +       atomic_long_unchecked_t mesq_noop_amo_nacked;
28053 +       atomic_long_unchecked_t mesq_noop_put_nacked;
28054 +       atomic_long_unchecked_t mesq_noop_page_overflow;
28055  
28056  };
28057  
28058 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28059         tghop_invalidate, mcsop_last};
28060  
28061  struct mcs_op_statistic {
28062 -       atomic_long_t   count;
28063 -       atomic_long_t   total;
28064 +       atomic_long_unchecked_t count;
28065 +       atomic_long_unchecked_t total;
28066         unsigned long   max;
28067  };
28068  
28069 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28070  
28071  #define STAT(id)       do {                                            \
28072                                 if (gru_options & OPT_STATS)            \
28073 -                                       atomic_long_inc(&gru_stats.id); \
28074 +                                       atomic_long_inc_unchecked(&gru_stats.id);       \
28075                         } while (0)
28076  
28077  #ifdef CONFIG_SGI_GRU_DEBUG
28078 diff -urNp linux-3.0.4/drivers/misc/sgi-xp/xp.h linux-3.0.4/drivers/misc/sgi-xp/xp.h
28079 --- linux-3.0.4/drivers/misc/sgi-xp/xp.h        2011-07-21 22:17:23.000000000 -0400
28080 +++ linux-3.0.4/drivers/misc/sgi-xp/xp.h        2011-08-23 21:47:55.000000000 -0400
28081 @@ -289,7 +289,7 @@ struct xpc_interface {
28082                                         xpc_notify_func, void *);
28083         void (*received) (short, int, void *);
28084         enum xp_retval (*partid_to_nasids) (short, void *);
28085 -};
28086 +} __no_const;
28087  
28088  extern struct xpc_interface xpc_interface;
28089  
28090 diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c
28091 --- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c     2011-07-21 22:17:23.000000000 -0400
28092 +++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c     2011-08-23 21:48:14.000000000 -0400
28093 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28094         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28095         unsigned long timeo = jiffies + HZ;
28096  
28097 +       pax_track_stack();
28098 +
28099         /* Prevent setting state FL_SYNCING for chip in suspended state. */
28100         if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28101                 goto sleep;
28102 @@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
28103         unsigned long initial_adr;
28104         int initial_len = len;
28105  
28106 +       pax_track_stack();
28107 +
28108         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28109         adr += chip->start;
28110         initial_adr = adr;
28111 @@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
28112         int retries = 3;
28113         int ret;
28114  
28115 +       pax_track_stack();
28116 +
28117         adr += chip->start;
28118  
28119   retry:
28120 diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c
28121 --- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c     2011-07-21 22:17:23.000000000 -0400
28122 +++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c     2011-08-23 21:48:14.000000000 -0400
28123 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28124         unsigned long cmd_addr;
28125         struct cfi_private *cfi = map->fldrv_priv;
28126  
28127 +       pax_track_stack();
28128 +
28129         adr += chip->start;
28130  
28131         /* Ensure cmd read/writes are aligned. */
28132 @@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
28133         DECLARE_WAITQUEUE(wait, current);
28134         int wbufsize, z;
28135  
28136 +       pax_track_stack();
28137 +
28138          /* M58LW064A requires bus alignment for buffer wriets -- saw */
28139          if (adr & (map_bankwidth(map)-1))
28140              return -EINVAL;
28141 @@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
28142         DECLARE_WAITQUEUE(wait, current);
28143         int ret = 0;
28144  
28145 +       pax_track_stack();
28146 +
28147         adr += chip->start;
28148  
28149         /* Let's determine this according to the interleave only once */
28150 @@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
28151         unsigned long timeo = jiffies + HZ;
28152         DECLARE_WAITQUEUE(wait, current);
28153  
28154 +       pax_track_stack();
28155 +
28156         adr += chip->start;
28157  
28158         /* Let's determine this according to the interleave only once */
28159 @@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
28160         unsigned long timeo = jiffies + HZ;
28161         DECLARE_WAITQUEUE(wait, current);
28162  
28163 +       pax_track_stack();
28164 +
28165         adr += chip->start;
28166  
28167         /* Let's determine this according to the interleave only once */
28168 diff -urNp linux-3.0.4/drivers/mtd/devices/doc2000.c linux-3.0.4/drivers/mtd/devices/doc2000.c
28169 --- linux-3.0.4/drivers/mtd/devices/doc2000.c   2011-07-21 22:17:23.000000000 -0400
28170 +++ linux-3.0.4/drivers/mtd/devices/doc2000.c   2011-08-23 21:47:55.000000000 -0400
28171 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28172  
28173                 /* The ECC will not be calculated correctly if less than 512 is written */
28174  /* DBB-
28175 -               if (len != 0x200 && eccbuf)
28176 +               if (len != 0x200)
28177                         printk(KERN_WARNING
28178                                "ECC needs a full sector write (adr: %lx size %lx)\n",
28179                                (long) to, (long) len);
28180 diff -urNp linux-3.0.4/drivers/mtd/devices/doc2001.c linux-3.0.4/drivers/mtd/devices/doc2001.c
28181 --- linux-3.0.4/drivers/mtd/devices/doc2001.c   2011-07-21 22:17:23.000000000 -0400
28182 +++ linux-3.0.4/drivers/mtd/devices/doc2001.c   2011-08-23 21:47:55.000000000 -0400
28183 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28184         struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28185  
28186         /* Don't allow read past end of device */
28187 -       if (from >= this->totlen)
28188 +       if (from >= this->totlen || !len)
28189                 return -EINVAL;
28190  
28191         /* Don't allow a single read to cross a 512-byte block boundary */
28192 diff -urNp linux-3.0.4/drivers/mtd/ftl.c linux-3.0.4/drivers/mtd/ftl.c
28193 --- linux-3.0.4/drivers/mtd/ftl.c       2011-07-21 22:17:23.000000000 -0400
28194 +++ linux-3.0.4/drivers/mtd/ftl.c       2011-08-23 21:48:14.000000000 -0400
28195 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28196      loff_t offset;
28197      uint16_t srcunitswap = cpu_to_le16(srcunit);
28198  
28199 +    pax_track_stack();
28200 +
28201      eun = &part->EUNInfo[srcunit];
28202      xfer = &part->XferInfo[xferunit];
28203      DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28204 diff -urNp linux-3.0.4/drivers/mtd/inftlcore.c linux-3.0.4/drivers/mtd/inftlcore.c
28205 --- linux-3.0.4/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
28206 +++ linux-3.0.4/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
28207 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28208         struct inftl_oob oob;
28209         size_t retlen;
28210  
28211 +       pax_track_stack();
28212 +
28213         DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28214                 "pending=%d)\n", inftl, thisVUC, pendingblock);
28215  
28216 diff -urNp linux-3.0.4/drivers/mtd/inftlmount.c linux-3.0.4/drivers/mtd/inftlmount.c
28217 --- linux-3.0.4/drivers/mtd/inftlmount.c        2011-07-21 22:17:23.000000000 -0400
28218 +++ linux-3.0.4/drivers/mtd/inftlmount.c        2011-08-23 21:48:14.000000000 -0400
28219 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28220         struct INFTLPartition *ip;
28221         size_t retlen;
28222  
28223 +       pax_track_stack();
28224 +
28225         DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28226  
28227          /*
28228 diff -urNp linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c
28229 --- linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
28230 +++ linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
28231 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28232  {
28233         map_word pfow_val[4];
28234  
28235 +       pax_track_stack();
28236 +
28237         /* Check identification string */
28238         pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28239         pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28240 diff -urNp linux-3.0.4/drivers/mtd/mtdchar.c linux-3.0.4/drivers/mtd/mtdchar.c
28241 --- linux-3.0.4/drivers/mtd/mtdchar.c   2011-07-21 22:17:23.000000000 -0400
28242 +++ linux-3.0.4/drivers/mtd/mtdchar.c   2011-08-23 21:48:14.000000000 -0400
28243 @@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file, 
28244         u_long size;
28245         struct mtd_info_user info;
28246  
28247 +       pax_track_stack();
28248 +
28249         DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28250  
28251         size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28252 diff -urNp linux-3.0.4/drivers/mtd/nand/denali.c linux-3.0.4/drivers/mtd/nand/denali.c
28253 --- linux-3.0.4/drivers/mtd/nand/denali.c       2011-07-21 22:17:23.000000000 -0400
28254 +++ linux-3.0.4/drivers/mtd/nand/denali.c       2011-08-23 21:47:55.000000000 -0400
28255 @@ -26,6 +26,7 @@
28256  #include <linux/pci.h>
28257  #include <linux/mtd/mtd.h>
28258  #include <linux/module.h>
28259 +#include <linux/slab.h>
28260  
28261  #include "denali.h"
28262  
28263 diff -urNp linux-3.0.4/drivers/mtd/nftlcore.c linux-3.0.4/drivers/mtd/nftlcore.c
28264 --- linux-3.0.4/drivers/mtd/nftlcore.c  2011-07-21 22:17:23.000000000 -0400
28265 +++ linux-3.0.4/drivers/mtd/nftlcore.c  2011-08-23 21:48:14.000000000 -0400
28266 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28267         int inplace = 1;
28268         size_t retlen;
28269  
28270 +       pax_track_stack();
28271 +
28272         memset(BlockMap, 0xff, sizeof(BlockMap));
28273         memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28274  
28275 diff -urNp linux-3.0.4/drivers/mtd/nftlmount.c linux-3.0.4/drivers/mtd/nftlmount.c
28276 --- linux-3.0.4/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
28277 +++ linux-3.0.4/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
28278 @@ -24,6 +24,7 @@
28279  #include <asm/errno.h>
28280  #include <linux/delay.h>
28281  #include <linux/slab.h>
28282 +#include <linux/sched.h>
28283  #include <linux/mtd/mtd.h>
28284  #include <linux/mtd/nand.h>
28285  #include <linux/mtd/nftl.h>
28286 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28287         struct mtd_info *mtd = nftl->mbd.mtd;
28288         unsigned int i;
28289  
28290 +       pax_track_stack();
28291 +
28292          /* Assume logical EraseSize == physical erasesize for starting the scan.
28293            We'll sort it out later if we find a MediaHeader which says otherwise */
28294         /* Actually, we won't.  The new DiskOnChip driver has already scanned
28295 diff -urNp linux-3.0.4/drivers/mtd/ubi/build.c linux-3.0.4/drivers/mtd/ubi/build.c
28296 --- linux-3.0.4/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
28297 +++ linux-3.0.4/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
28298 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28299  static int __init bytes_str_to_int(const char *str)
28300  {
28301         char *endp;
28302 -       unsigned long result;
28303 +       unsigned long result, scale = 1;
28304  
28305         result = simple_strtoul(str, &endp, 0);
28306         if (str == endp || result >= INT_MAX) {
28307 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28308  
28309         switch (*endp) {
28310         case 'G':
28311 -               result *= 1024;
28312 +               scale *= 1024;
28313         case 'M':
28314 -               result *= 1024;
28315 +               scale *= 1024;
28316         case 'K':
28317 -               result *= 1024;
28318 +               scale *= 1024;
28319                 if (endp[1] == 'i' && endp[2] == 'B')
28320                         endp += 2;
28321         case '\0':
28322 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28323                 return -EINVAL;
28324         }
28325  
28326 -       return result;
28327 +       if ((intoverflow_t)result*scale >= INT_MAX) {
28328 +               printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28329 +                      str);
28330 +               return -EINVAL;
28331 +       }
28332 +
28333 +       return result*scale;
28334  }
28335  
28336  /**
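
The ubi/build.c change stops multiplying result in place as the K/M/G cases fall through and instead accumulates a separate scale, then checks the product in a wider type before returning it, so a huge suffix can no longer overflow the int return value silently. A user-space sketch of the same logic (strtoul stands in for simple_strtoul, and the wider-type check plays the role of the patch's intoverflow_t cast):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a byte count with an optional K/M/G (or KiB/MiB/GiB) suffix,
 * refusing values that would overflow an int: the scale is accumulated
 * separately and the product is checked in a wider type first. */
static int bytes_str_to_int(const char *str)
{
        char *endp;
        unsigned long result, scale = 1;

        result = strtoul(str, &endp, 0);
        if (str == endp || result >= INT_MAX)
                return -1;

        switch (*endp) {
        case 'G':
                scale *= 1024;          /* fall through */
        case 'M':
                scale *= 1024;          /* fall through */
        case 'K':
                scale *= 1024;
                if (endp[1] == 'i' && endp[2] == 'B')
                        endp += 2;
                /* fall through */
        case '\0':
                break;
        default:
                return -1;              /* unrecognized suffix */
        }

        if ((unsigned long long)result * scale >= INT_MAX)
                return -1;              /* would overflow the int return value */

        return (int)(result * scale);
}

int main(void)
{
        printf("%d\n", bytes_str_to_int("4KiB"));   /* 4096 */
        printf("%d\n", bytes_str_to_int("16M"));    /* 16777216 */
        printf("%d\n", bytes_str_to_int("8G"));     /* -1: overflows int */
        return 0;
}
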
28337 diff -urNp linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c
28338 --- linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c    2011-07-21 22:17:23.000000000 -0400
28339 +++ linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c    2011-08-23 21:47:55.000000000 -0400
28340 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28341  static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28342  static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28343  
28344 -static struct bfa_ioc_hwif nw_hwif_ct;
28345 +static struct bfa_ioc_hwif nw_hwif_ct = {
28346 +       .ioc_pll_init = bfa_ioc_ct_pll_init,
28347 +       .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28348 +       .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28349 +       .ioc_reg_init = bfa_ioc_ct_reg_init,
28350 +       .ioc_map_port = bfa_ioc_ct_map_port,
28351 +       .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28352 +       .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28353 +       .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28354 +       .ioc_sync_start = bfa_ioc_ct_sync_start,
28355 +       .ioc_sync_join = bfa_ioc_ct_sync_join,
28356 +       .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28357 +       .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28358 +       .ioc_sync_complete = bfa_ioc_ct_sync_complete
28359 +};
28360  
28361  /**
28362   * Called from bfa_ioc_attach() to map asic specific calls.
28363 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28364  void
28365  bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28366  {
28367 -       nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28368 -       nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28369 -       nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28370 -       nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28371 -       nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28372 -       nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28373 -       nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28374 -       nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28375 -       nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28376 -       nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28377 -       nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28378 -       nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28379 -       nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28380 -
28381         ioc->ioc_hwif = &nw_hwif_ct;
28382  }
28383  
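
The bfa_ioc_ct.c hunk replaces field-by-field assignment at attach time with a designated-initializer definition, so the ops table is complete at compile time and the attach hook only has to publish a pointer to it. The pattern in isolation looks like the sketch below; the names are invented, and marking the table const is this example's own addition (in the patch the read-only treatment comes from the constify machinery rather than an explicit qualifier):

#include <stdio.h>

/* A function-pointer "ops" table in the designated-initializer style the
 * patch switches to.  Names are made up for the example. */
struct ioc_hwif {
        int  (*pll_init)(void);
        void (*map_port)(void);
};

static int  ct_pll_init(void) { puts("pll init"); return 0; }
static void ct_map_port(void) { puts("map port"); }

/* Filled in at compile time instead of field-by-field at runtime. */
static const struct ioc_hwif nw_hwif_ct = {
        .pll_init = ct_pll_init,
        .map_port = ct_map_port,
};

int main(void)
{
        const struct ioc_hwif *hwif = &nw_hwif_ct;   /* the "set_hwif" step */

        hwif->pll_init();
        hwif->map_port();
        return 0;
}
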
28384 diff -urNp linux-3.0.4/drivers/net/bna/bnad.c linux-3.0.4/drivers/net/bna/bnad.c
28385 --- linux-3.0.4/drivers/net/bna/bnad.c  2011-07-21 22:17:23.000000000 -0400
28386 +++ linux-3.0.4/drivers/net/bna/bnad.c  2011-08-23 21:47:55.000000000 -0400
28387 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28388         struct bna_intr_info *intr_info =
28389                         &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28390         struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28391 -       struct bna_tx_event_cbfn tx_cbfn;
28392 +       static struct bna_tx_event_cbfn tx_cbfn = {
28393 +               /* Initialize the tx event handlers */
28394 +               .tcb_setup_cbfn = bnad_cb_tcb_setup,
28395 +               .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28396 +               .tx_stall_cbfn = bnad_cb_tx_stall,
28397 +               .tx_resume_cbfn = bnad_cb_tx_resume,
28398 +               .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28399 +       };
28400         struct bna_tx *tx;
28401         unsigned long flags;
28402  
28403 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28404         tx_config->txq_depth = bnad->txq_depth;
28405         tx_config->tx_type = BNA_TX_T_REGULAR;
28406  
28407 -       /* Initialize the tx event handlers */
28408 -       tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28409 -       tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28410 -       tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28411 -       tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28412 -       tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28413 -
28414         /* Get BNA's resource requirement for one tx object */
28415         spin_lock_irqsave(&bnad->bna_lock, flags);
28416         bna_tx_res_req(bnad->num_txq_per_tx,
28417 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28418         struct bna_intr_info *intr_info =
28419                         &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28420         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28421 -       struct bna_rx_event_cbfn rx_cbfn;
28422 +       static struct bna_rx_event_cbfn rx_cbfn = {
28423 +               /* Initialize the Rx event handlers */
28424 +               .rcb_setup_cbfn = bnad_cb_rcb_setup,
28425 +               .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28426 +               .ccb_setup_cbfn = bnad_cb_ccb_setup,
28427 +               .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28428 +               .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28429 +               .rx_post_cbfn = bnad_cb_rx_post
28430 +       };
28431         struct bna_rx *rx;
28432         unsigned long flags;
28433  
28434         /* Initialize the Rx object configuration */
28435         bnad_init_rx_config(bnad, rx_config);
28436  
28437 -       /* Initialize the Rx event handlers */
28438 -       rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28439 -       rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28440 -       rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28441 -       rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28442 -       rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28443 -       rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28444 -
28445         /* Get BNA's resource requirement for one Rx object */
28446         spin_lock_irqsave(&bnad->bna_lock, flags);
28447         bna_rx_res_req(rx_config, res_info);
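
The bnad.c hunks apply the same idea to the tx/rx callback tables, with one twist: the tables move from per-call stack variables into function-scope statics. A static like that is shared by every caller and lives as long as the module, which is only safe because its contents are identical on every call, as they are here. A minimal illustration (hypothetical names, const again being the example's own choice):

#include <stdio.h>

struct event_cbfn {
        void (*setup)(void);
        void (*teardown)(void);
};

static void on_setup(void)    { puts("setup"); }
static void on_teardown(void) { puts("teardown"); }

static void register_cbs(const struct event_cbfn *cb)
{
        cb->setup();
        cb->teardown();
}

static void setup_path(void)
{
        /* One shared, compile-time-initialized table instead of a fresh
         * stack copy assigned field-by-field on every invocation. */
        static const struct event_cbfn cbfn = {
                .setup    = on_setup,
                .teardown = on_teardown,
        };

        register_cbs(&cbfn);
}

int main(void)
{
        setup_path();
        setup_path();   /* same table both times; no re-initialization */
        return 0;
}
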
28448 diff -urNp linux-3.0.4/drivers/net/bnx2.c linux-3.0.4/drivers/net/bnx2.c
28449 --- linux-3.0.4/drivers/net/bnx2.c      2011-07-21 22:17:23.000000000 -0400
28450 +++ linux-3.0.4/drivers/net/bnx2.c      2011-08-23 21:48:14.000000000 -0400
28451 @@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28452         int rc = 0;
28453         u32 magic, csum;
28454  
28455 +       pax_track_stack();
28456 +
28457         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28458                 goto test_nvram_done;
28459  
28460 diff -urNp linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c
28461 --- linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c       2011-07-21 22:17:23.000000000 -0400
28462 +++ linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c       2011-08-23 21:48:14.000000000 -0400
28463 @@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
28464         int i, rc;
28465         u32 magic, crc;
28466  
28467 +       pax_track_stack();
28468 +
28469         if (BP_NOMCP(bp))
28470                 return 0;
28471  
28472 diff -urNp linux-3.0.4/drivers/net/cxgb3/l2t.h linux-3.0.4/drivers/net/cxgb3/l2t.h
28473 --- linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
28474 +++ linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
28475 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28476   */
28477  struct l2t_skb_cb {
28478         arp_failure_handler_func arp_failure_handler;
28479 -};
28480 +} __no_const;
28481  
28482  #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28483  
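
The l2t.h change tags struct l2t_skb_cb with __no_const. Elsewhere in this patch, structures made up of function pointers are turned read-only by a compiler plugin; __no_const is the opt-out for structures that genuinely must stay writable, and this one qualifies because the ARP failure handler is stored per skb at runtime. A hedged sketch of how such a marker can be wired up (an illustrative stand-in, not the patch's own definition):

#include <stdio.h>

/* Hypothetical stand-in: with a constify plugin the attribute would be
 * interpreted by the compiler; without it, it expands to nothing. */
#ifdef CONSTIFY_PLUGIN
# define __no_const __attribute__((no_const))
#else
# define __no_const
#endif

typedef void (*failure_handler_t)(const char *why);

/* Stays writable even when all-function-pointer structs are constified,
 * because the handler is chosen per object at runtime. */
struct skb_cb {
        failure_handler_t failure_handler;
} __no_const;

static void log_failure(const char *why) { printf("failure: %s\n", why); }

int main(void)
{
        struct skb_cb cb;

        cb.failure_handler = log_failure;   /* runtime assignment is the point */
        cb.failure_handler("arp timeout");
        return 0;
}
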
28484 diff -urNp linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c
28485 --- linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c  2011-07-21 22:17:23.000000000 -0400
28486 +++ linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c  2011-08-23 21:48:14.000000000 -0400
28487 @@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct 
28488         unsigned int nchan = adap->params.nports;
28489         struct msix_entry entries[MAX_INGQ + 1];
28490  
28491 +       pax_track_stack();
28492 +
28493         for (i = 0; i < ARRAY_SIZE(entries); ++i)
28494                 entries[i].entry = i;
28495  
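
The cxgb4 hunk above is another pax_track_stack() site, triggered by the struct msix_entry array of MAX_INGQ + 1 elements living on the kernel stack. For completeness, a common alternative to instrumenting such a function is to move the array to the heap; that is not what this patch does (it only annotates the function), but the sketch below, with a simplified stand-in structure, shows the shape of that alternative:

#include <stdio.h>
#include <stdlib.h>

#define MAX_ENTRIES 65                  /* stand-in for MAX_INGQ + 1 */

/* Simplified stand-in, not the kernel's struct msix_entry. */
struct vec_entry { int entry; int vector; };

/* Instead of "struct vec_entry entries[MAX_ENTRIES];" in the frame,
 * allocate the table dynamically so the stack stays shallow. */
static int enable_vectors(void)
{
        struct vec_entry *entries;
        int i;

        entries = calloc(MAX_ENTRIES, sizeof(*entries));
        if (!entries)
                return -1;

        for (i = 0; i < MAX_ENTRIES; i++)
                entries[i].entry = i;

        printf("prepared %d vectors\n", MAX_ENTRIES);
        free(entries);
        return 0;
}

int main(void)
{
        return enable_vectors() ? 1 : 0;
}
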
28496 diff -urNp linux-3.0.4/drivers/net/cxgb4/t4_hw.c linux-3.0.4/drivers/net/cxgb4/t4_hw.c
28497 --- linux-3.0.4/drivers/net/cxgb4/t4_hw.c       2011-07-21 22:17:23.000000000 -0400
28498 +++ linux-3.0.4/drivers/net/cxgb4/t4_hw.c       2011-08-23 21:48:14.000000000 -0400
28499 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28500         u8 vpd[VPD_LEN], csum;
28501         unsigned int vpdr_len, kw_offset, id_len;
28502  
28503 +       pax_track_stack();
28504 +
28505         ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28506         if (ret < 0)
28507                 return ret;
28508 diff -urNp linux-3.0.4/drivers/net/e1000e/82571.c linux-3.0.4/drivers/net/e1000e/82571.c
28509 --- linux-3.0.4/drivers/net/e1000e/82571.c      2011-07-21 22:17:23.000000000 -0400
28510 +++ linux-3.0.4/drivers/net/e1000e/82571.c      2011-08-23 21:47:55.000000000 -0400
28511 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28512  {
28513         struct e1000_hw *hw = &adapter->hw;
28514         struct e1000_mac_info *mac = &hw->mac;
28515 -       struct e1000_mac_operations *func = &mac->ops;
28516 +       e1000_mac_operations_no_const *func = &mac->ops;
28517         u32 swsm = 0;
28518         u32 swsm2 = 0;
28519         bool force_clear_smbi = false;
28520 diff -urNp linux-3.0.4/drivers/net/e1000e/es2lan.c linux-3.0.4/drivers/net/e1000e/es2lan.c
28521 --- linux-3.0.4/drivers/net/e1000e/es2lan.c     2011-07-21 22:17:23.000000000 -0400
28522 +++ linux-3.0.4/drivers/net/e1000e/es2lan.c     2011-08-23 21:47:55.000000000 -0400
28523 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28524  {
28525         struct e1000_hw *hw = &adapter->hw;
28526         struct e1000_mac_info *mac = &hw->mac;
28527 -       struct e1000_mac_operations *func = &mac->ops;
28528 +       e1000_mac_operations_no_const *func = &mac->ops;
28529  
28530         /* Set media type */
28531         switch (adapter->pdev->device) {
28532 diff -urNp linux-3.0.4/drivers/net/e1000e/hw.h linux-3.0.4/drivers/net/e1000e/hw.h
28533 --- linux-3.0.4/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
28534 +++ linux-3.0.4/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
28535 @@ -776,6 +776,7 @@ struct e1000_mac_operations {
28536         void (*write_vfta)(struct e1000_hw *, u32, u32);
28537         s32  (*read_mac_addr)(struct e1000_hw *);
28538  };
28539 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28540  
28541  /* Function pointers for the PHY. */
28542  struct e1000_phy_operations {
28543 @@ -799,6 +800,7 @@ struct e1000_phy_operations {
28544         void (*power_up)(struct e1000_hw *);
28545         void (*power_down)(struct e1000_hw *);
28546  };
28547 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28548  
28549  /* Function pointers for the NVM. */
28550  struct e1000_nvm_operations {
28551 @@ -810,9 +812,10 @@ struct e1000_nvm_operations {
28552         s32  (*validate)(struct e1000_hw *);
28553         s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
28554  };
28555 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28556  
28557  struct e1000_mac_info {
28558 -       struct e1000_mac_operations ops;
28559 +       e1000_mac_operations_no_const ops;
28560         u8 addr[ETH_ALEN];
28561         u8 perm_addr[ETH_ALEN];
28562  
28563 @@ -853,7 +856,7 @@ struct e1000_mac_info {
28564  };
28565  
28566  struct e1000_phy_info {
28567 -       struct e1000_phy_operations ops;
28568 +       e1000_phy_operations_no_const ops;
28569  
28570         enum e1000_phy_type type;
28571  
28572 @@ -887,7 +890,7 @@ struct e1000_phy_info {
28573  };
28574  
28575  struct e1000_nvm_info {
28576 -       struct e1000_nvm_operations ops;
28577 +       e1000_nvm_operations_no_const ops;
28578  
28579         enum e1000_nvm_type type;
28580         enum e1000_nvm_override override;
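
The e1000e header hunks show the second half of the constification pattern: each ops structure gains a *_no_const typedef, and the copies embedded in per-device state (mac.ops, phy.ops, nvm.ops) switch to that alias. The standalone struct type can then be treated as read-only while the embedded members, which the driver fills in during init, stay writable. Reduced to a skeleton with invented names (the annotation is a no-op here, kept only to mirror the kernel shape):

#include <stdio.h>

/* Without the constify plugin the annotation does nothing; it is kept
 * only to mirror the shape of the kernel pattern. */
#define __no_const

struct mac_ops {
        int (*read_mac_addr)(void);
};
/* Writable alias used for members that must be assigned at runtime. */
typedef struct mac_ops __no_const mac_ops_no_const;

struct mac_info {
        mac_ops_no_const ops;           /* was: struct mac_ops ops; */
        unsigned char addr[6];
};

static int fake_read_mac_addr(void) { puts("read mac"); return 0; }

int main(void)
{
        struct mac_info mac = { .addr = { 0 } };

        mac.ops.read_mac_addr = fake_read_mac_addr;   /* per-device init */
        mac.ops.read_mac_addr();
        return 0;
}
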
28581 diff -urNp linux-3.0.4/drivers/net/hamradio/6pack.c linux-3.0.4/drivers/net/hamradio/6pack.c
28582 --- linux-3.0.4/drivers/net/hamradio/6pack.c    2011-07-21 22:17:23.000000000 -0400
28583 +++ linux-3.0.4/drivers/net/hamradio/6pack.c    2011-08-23 21:48:14.000000000 -0400
28584 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28585         unsigned char buf[512];
28586         int count1;
28587  
28588 +       pax_track_stack();
28589 +
28590         if (!count)
28591                 return;
28592  
28593 diff -urNp linux-3.0.4/drivers/net/igb/e1000_hw.h linux-3.0.4/drivers/net/igb/e1000_hw.h
28594 --- linux-3.0.4/drivers/net/igb/e1000_hw.h      2011-07-21 22:17:23.000000000 -0400
28595 +++ linux-3.0.4/drivers/net/igb/e1000_hw.h      2011-08-23 21:47:55.000000000 -0400
28596 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
28597         s32  (*read_mac_addr)(struct e1000_hw *);
28598         s32  (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28599  };
28600 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28601  
28602  struct e1000_phy_operations {
28603         s32  (*acquire)(struct e1000_hw *);
28604 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
28605         s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
28606         s32  (*write_reg)(struct e1000_hw *, u32, u16);
28607  };
28608 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28609  
28610  struct e1000_nvm_operations {
28611         s32  (*acquire)(struct e1000_hw *);
28612 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28613         s32  (*update)(struct e1000_hw *);
28614         s32  (*validate)(struct e1000_hw *);
28615  };
28616 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28617  
28618  struct e1000_info {
28619         s32 (*get_invariants)(struct e1000_hw *);
28620 @@ -350,7 +353,7 @@ struct e1000_info {
28621  extern const struct e1000_info e1000_82575_info;
28622  
28623  struct e1000_mac_info {
28624 -       struct e1000_mac_operations ops;
28625 +       e1000_mac_operations_no_const ops;
28626  
28627         u8 addr[6];
28628         u8 perm_addr[6];
28629 @@ -388,7 +391,7 @@ struct e1000_mac_info {
28630  };
28631  
28632  struct e1000_phy_info {
28633 -       struct e1000_phy_operations ops;
28634 +       e1000_phy_operations_no_const ops;
28635  
28636         enum e1000_phy_type type;
28637  
28638 @@ -423,7 +426,7 @@ struct e1000_phy_info {
28639  };
28640  
28641  struct e1000_nvm_info {
28642 -       struct e1000_nvm_operations ops;
28643 +       e1000_nvm_operations_no_const ops;
28644         enum e1000_nvm_type type;
28645         enum e1000_nvm_override override;
28646  
28647 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28648         s32 (*check_for_ack)(struct e1000_hw *, u16);
28649         s32 (*check_for_rst)(struct e1000_hw *, u16);
28650  };
28651 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28652  
28653  struct e1000_mbx_stats {
28654         u32 msgs_tx;
28655 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28656  };
28657  
28658  struct e1000_mbx_info {
28659 -       struct e1000_mbx_operations ops;
28660 +       e1000_mbx_operations_no_const ops;
28661         struct e1000_mbx_stats stats;
28662         u32 timeout;
28663         u32 usec_delay;
28664 diff -urNp linux-3.0.4/drivers/net/igbvf/vf.h linux-3.0.4/drivers/net/igbvf/vf.h
28665 --- linux-3.0.4/drivers/net/igbvf/vf.h  2011-07-21 22:17:23.000000000 -0400
28666 +++ linux-3.0.4/drivers/net/igbvf/vf.h  2011-08-23 21:47:55.000000000 -0400
28667 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
28668         s32  (*read_mac_addr)(struct e1000_hw *);
28669         s32  (*set_vfta)(struct e1000_hw *, u16, bool);
28670  };
28671 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28672  
28673  struct e1000_mac_info {
28674 -       struct e1000_mac_operations ops;
28675 +       e1000_mac_operations_no_const ops;
28676         u8 addr[6];
28677         u8 perm_addr[6];
28678  
28679 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28680         s32 (*check_for_ack)(struct e1000_hw *);
28681         s32 (*check_for_rst)(struct e1000_hw *);
28682  };
28683 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28684  
28685  struct e1000_mbx_stats {
28686         u32 msgs_tx;
28687 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28688  };
28689  
28690  struct e1000_mbx_info {
28691 -       struct e1000_mbx_operations ops;
28692 +       e1000_mbx_operations_no_const ops;
28693         struct e1000_mbx_stats stats;
28694         u32 timeout;
28695         u32 usec_delay;
28696 diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_main.c linux-3.0.4/drivers/net/ixgb/ixgb_main.c
28697 --- linux-3.0.4/drivers/net/ixgb/ixgb_main.c    2011-07-21 22:17:23.000000000 -0400
28698 +++ linux-3.0.4/drivers/net/ixgb/ixgb_main.c    2011-08-23 21:48:14.000000000 -0400
28699 @@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
28700         u32 rctl;
28701         int i;
28702  
28703 +       pax_track_stack();
28704 +
28705         /* Check for Promiscuous and All Multicast modes */
28706  
28707         rctl = IXGB_READ_REG(hw, RCTL);
28708 diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_param.c linux-3.0.4/drivers/net/ixgb/ixgb_param.c
28709 --- linux-3.0.4/drivers/net/ixgb/ixgb_param.c   2011-07-21 22:17:23.000000000 -0400
28710 +++ linux-3.0.4/drivers/net/ixgb/ixgb_param.c   2011-08-23 21:48:14.000000000 -0400
28711 @@ -261,6 +261,9 @@ void __devinit
28712  ixgb_check_options(struct ixgb_adapter *adapter)
28713  {
28714         int bd = adapter->bd_number;
28715 +
28716 +       pax_track_stack();
28717 +
28718         if (bd >= IXGB_MAX_NIC) {
28719                 pr_notice("Warning: no configuration for board #%i\n", bd);
28720                 pr_notice("Using defaults for all values\n");
28721 diff -urNp linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h
28722 --- linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h  2011-07-21 22:17:23.000000000 -0400
28723 +++ linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h  2011-08-23 21:47:55.000000000 -0400
28724 @@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
28725         s32 (*update_checksum)(struct ixgbe_hw *);
28726         u16 (*calc_checksum)(struct ixgbe_hw *);
28727  };
28728 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28729  
28730  struct ixgbe_mac_operations {
28731         s32 (*init_hw)(struct ixgbe_hw *);
28732 @@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
28733         /* Flow Control */
28734         s32 (*fc_enable)(struct ixgbe_hw *, s32);
28735  };
28736 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28737  
28738  struct ixgbe_phy_operations {
28739         s32 (*identify)(struct ixgbe_hw *);
28740 @@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
28741         s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28742         s32 (*check_overtemp)(struct ixgbe_hw *);
28743  };
28744 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28745  
28746  struct ixgbe_eeprom_info {
28747 -       struct ixgbe_eeprom_operations  ops;
28748 +       ixgbe_eeprom_operations_no_const ops;
28749         enum ixgbe_eeprom_type          type;
28750         u32                             semaphore_delay;
28751         u16                             word_size;
28752 @@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
28753  
28754  #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED      0x01
28755  struct ixgbe_mac_info {
28756 -       struct ixgbe_mac_operations     ops;
28757 +       ixgbe_mac_operations_no_const   ops;
28758         enum ixgbe_mac_type             type;
28759         u8                              addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28760         u8                              perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28761 @@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
28762  };
28763  
28764  struct ixgbe_phy_info {
28765 -       struct ixgbe_phy_operations     ops;
28766 +       ixgbe_phy_operations_no_const   ops;
28767         struct mdio_if_info             mdio;
28768         enum ixgbe_phy_type             type;
28769         u32                             id;
28770 @@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
28771         s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28772         s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28773  };
28774 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28775  
28776  struct ixgbe_mbx_stats {
28777         u32 msgs_tx;
28778 @@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
28779  };
28780  
28781  struct ixgbe_mbx_info {
28782 -       struct ixgbe_mbx_operations ops;
28783 +       ixgbe_mbx_operations_no_const ops;
28784         struct ixgbe_mbx_stats stats;
28785         u32 timeout;
28786         u32 usec_delay;
28787 diff -urNp linux-3.0.4/drivers/net/ixgbevf/vf.h linux-3.0.4/drivers/net/ixgbevf/vf.h
28788 --- linux-3.0.4/drivers/net/ixgbevf/vf.h        2011-07-21 22:17:23.000000000 -0400
28789 +++ linux-3.0.4/drivers/net/ixgbevf/vf.h        2011-08-23 21:47:55.000000000 -0400
28790 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
28791         s32 (*clear_vfta)(struct ixgbe_hw *);
28792         s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28793  };
28794 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28795  
28796  enum ixgbe_mac_type {
28797         ixgbe_mac_unknown = 0,
28798 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
28799  };
28800  
28801  struct ixgbe_mac_info {
28802 -       struct ixgbe_mac_operations ops;
28803 +       ixgbe_mac_operations_no_const ops;
28804         u8 addr[6];
28805         u8 perm_addr[6];
28806  
28807 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
28808         s32 (*check_for_ack)(struct ixgbe_hw *);
28809         s32 (*check_for_rst)(struct ixgbe_hw *);
28810  };
28811 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28812  
28813  struct ixgbe_mbx_stats {
28814         u32 msgs_tx;
28815 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
28816  };
28817  
28818  struct ixgbe_mbx_info {
28819 -       struct ixgbe_mbx_operations ops;
28820 +       ixgbe_mbx_operations_no_const ops;
28821         struct ixgbe_mbx_stats stats;
28822         u32 timeout;
28823         u32 udelay;
28824 diff -urNp linux-3.0.4/drivers/net/ksz884x.c linux-3.0.4/drivers/net/ksz884x.c
28825 --- linux-3.0.4/drivers/net/ksz884x.c   2011-07-21 22:17:23.000000000 -0400
28826 +++ linux-3.0.4/drivers/net/ksz884x.c   2011-08-23 21:48:14.000000000 -0400
28827 @@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
28828         int rc;
28829         u64 counter[TOTAL_PORT_COUNTER_NUM];
28830  
28831 +       pax_track_stack();
28832 +
28833         mutex_lock(&hw_priv->lock);
28834         n = SWITCH_PORT_NUM;
28835         for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28836 diff -urNp linux-3.0.4/drivers/net/mlx4/main.c linux-3.0.4/drivers/net/mlx4/main.c
28837 --- linux-3.0.4/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
28838 +++ linux-3.0.4/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
28839 @@ -40,6 +40,7 @@
28840  #include <linux/dma-mapping.h>
28841  #include <linux/slab.h>
28842  #include <linux/io-mapping.h>
28843 +#include <linux/sched.h>
28844  
28845  #include <linux/mlx4/device.h>
28846  #include <linux/mlx4/doorbell.h>
28847 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28848         u64 icm_size;
28849         int err;
28850  
28851 +       pax_track_stack();
28852 +
28853         err = mlx4_QUERY_FW(dev);
28854         if (err) {
28855                 if (err == -EACCES)
28856 diff -urNp linux-3.0.4/drivers/net/niu.c linux-3.0.4/drivers/net/niu.c
28857 --- linux-3.0.4/drivers/net/niu.c       2011-08-23 21:44:40.000000000 -0400
28858 +++ linux-3.0.4/drivers/net/niu.c       2011-08-23 21:48:14.000000000 -0400
28859 @@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
28860         int i, num_irqs, err;
28861         u8 first_ldg;
28862  
28863 +       pax_track_stack();
28864 +
28865         first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
28866         for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
28867                 ldg_num_map[i] = first_ldg + i;
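
The long pcnet32.c diff that follows converts struct pcnet32_access a, a by-value copy of the register-access ops held in each device's private data, into a pointer, rewrites every lp->a.read_csr() style call to lp->a->read_csr(), and has the probe routine store the pointer instead of duplicating the structure. Keeping one shared ops table rather than a writable copy per device is what makes it possible to place that table in read-only data later. A compact illustration of the resulting layout, using miniature invented structures:

#include <stdio.h>

struct access_ops {
        unsigned int (*read_csr)(unsigned long ioaddr, int reg);
};

static unsigned int demo_read_csr(unsigned long ioaddr, int reg)
{
        (void)ioaddr;
        return 0x8f00u | (unsigned int)reg;     /* dummy register value */
}

/* One table shared by every card; a candidate for read-only data. */
static const struct access_ops pcnet_ops = {
        .read_csr = demo_read_csr,
};

struct pcnet_private {
        const struct access_ops *a;     /* was an embedded copy of the ops */
        unsigned long ioaddr;
};

int main(void)
{
        struct pcnet_private lp = { .a = &pcnet_ops, .ioaddr = 0x300 };

        /* lp.a.read_csr(...) becomes lp.a->read_csr(...) once a is a pointer */
        printf("csr0 = %#x\n", lp.a->read_csr(lp.ioaddr, 0));
        return 0;
}
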
28868 diff -urNp linux-3.0.4/drivers/net/pcnet32.c linux-3.0.4/drivers/net/pcnet32.c
28869 --- linux-3.0.4/drivers/net/pcnet32.c   2011-07-21 22:17:23.000000000 -0400
28870 +++ linux-3.0.4/drivers/net/pcnet32.c   2011-08-23 21:47:55.000000000 -0400
28871 @@ -82,7 +82,7 @@ static int cards_found;
28872  /*
28873   * VLB I/O addresses
28874   */
28875 -static unsigned int pcnet32_portlist[] __initdata =
28876 +static unsigned int pcnet32_portlist[] __devinitdata =
28877      { 0x300, 0x320, 0x340, 0x360, 0 };
28878  
28879  static int pcnet32_debug;
28880 @@ -270,7 +270,7 @@ struct pcnet32_private {
28881         struct sk_buff          **rx_skbuff;
28882         dma_addr_t              *tx_dma_addr;
28883         dma_addr_t              *rx_dma_addr;
28884 -       struct pcnet32_access   a;
28885 +       struct pcnet32_access   *a;
28886         spinlock_t              lock;           /* Guard lock */
28887         unsigned int            cur_rx, cur_tx; /* The next free ring entry */
28888         unsigned int            rx_ring_size;   /* current rx ring size */
28889 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
28890         u16 val;
28891  
28892         netif_wake_queue(dev);
28893 -       val = lp->a.read_csr(ioaddr, CSR3);
28894 +       val = lp->a->read_csr(ioaddr, CSR3);
28895         val &= 0x00ff;
28896 -       lp->a.write_csr(ioaddr, CSR3, val);
28897 +       lp->a->write_csr(ioaddr, CSR3, val);
28898         napi_enable(&lp->napi);
28899  }
28900  
28901 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
28902                 r = mii_link_ok(&lp->mii_if);
28903         } else if (lp->chip_version >= PCNET32_79C970A) {
28904                 ulong ioaddr = dev->base_addr;  /* card base I/O address */
28905 -               r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
28906 +               r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
28907         } else {        /* can not detect link on really old chips */
28908                 r = 1;
28909         }
28910 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct 
28911                 pcnet32_netif_stop(dev);
28912  
28913         spin_lock_irqsave(&lp->lock, flags);
28914 -       lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* stop the chip */
28915 +       lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);      /* stop the chip */
28916  
28917         size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
28918  
28919 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct 
28920  static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
28921  {
28922         struct pcnet32_private *lp = netdev_priv(dev);
28923 -       struct pcnet32_access *a = &lp->a;      /* access to registers */
28924 +       struct pcnet32_access *a = lp->a;       /* access to registers */
28925         ulong ioaddr = dev->base_addr;  /* card base I/O address */
28926         struct sk_buff *skb;    /* sk buff */
28927         int x, i;               /* counters */
28928 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct 
28929                 pcnet32_netif_stop(dev);
28930  
28931         spin_lock_irqsave(&lp->lock, flags);
28932 -       lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* stop the chip */
28933 +       lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);      /* stop the chip */
28934  
28935         numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
28936  
28937         /* Reset the PCNET32 */
28938 -       lp->a.reset(ioaddr);
28939 -       lp->a.write_csr(ioaddr, CSR4, 0x0915);  /* auto tx pad */
28940 +       lp->a->reset(ioaddr);
28941 +       lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28942  
28943         /* switch pcnet32 to 32bit mode */
28944 -       lp->a.write_bcr(ioaddr, 20, 2);
28945 +       lp->a->write_bcr(ioaddr, 20, 2);
28946  
28947         /* purge & init rings but don't actually restart */
28948         pcnet32_restart(dev, 0x0000);
28949  
28950 -       lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* Set STOP bit */
28951 +       lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);      /* Set STOP bit */
28952  
28953         /* Initialize Transmit buffers. */
28954         size = data_len + 15;
28955 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct 
28956  
28957         /* set int loopback in CSR15 */
28958         x = a->read_csr(ioaddr, CSR15) & 0xfffc;
28959 -       lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
28960 +       lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
28961  
28962         teststatus = cpu_to_le16(0x8000);
28963 -       lp->a.write_csr(ioaddr, CSR0, CSR0_START);      /* Set STRT bit */
28964 +       lp->a->write_csr(ioaddr, CSR0, CSR0_START);     /* Set STRT bit */
28965  
28966         /* Check status of descriptors */
28967         for (x = 0; x < numbuffs; x++) {
28968 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct 
28969                 }
28970         }
28971  
28972 -       lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* Set STOP bit */
28973 +       lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);      /* Set STOP bit */
28974         wmb();
28975         if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
28976                 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
28977 @@ -1015,7 +1015,7 @@ clean_up:
28978                 pcnet32_restart(dev, CSR0_NORMAL);
28979         } else {
28980                 pcnet32_purge_rx_ring(dev);
28981 -               lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28982 +               lp->a->write_bcr(ioaddr, 20, 4);        /* return to 16bit mode */
28983         }
28984         spin_unlock_irqrestore(&lp->lock, flags);
28985  
28986 @@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
28987                                enum ethtool_phys_id_state state)
28988  {
28989         struct pcnet32_private *lp = netdev_priv(dev);
28990 -       struct pcnet32_access *a = &lp->a;
28991 +       struct pcnet32_access *a = lp->a;
28992         ulong ioaddr = dev->base_addr;
28993         unsigned long flags;
28994         int i;
28995 @@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
28996  {
28997         int csr5;
28998         struct pcnet32_private *lp = netdev_priv(dev);
28999 -       struct pcnet32_access *a = &lp->a;
29000 +       struct pcnet32_access *a = lp->a;
29001         ulong ioaddr = dev->base_addr;
29002         int ticks;
29003  
29004 @@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
29005         spin_lock_irqsave(&lp->lock, flags);
29006         if (pcnet32_tx(dev)) {
29007                 /* reset the chip to clear the error condition, then restart */
29008 -               lp->a.reset(ioaddr);
29009 -               lp->a.write_csr(ioaddr, CSR4, 0x0915);  /* auto tx pad */
29010 +               lp->a->reset(ioaddr);
29011 +               lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29012                 pcnet32_restart(dev, CSR0_START);
29013                 netif_wake_queue(dev);
29014         }
29015 @@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
29016                 __napi_complete(napi);
29017  
29018                 /* clear interrupt masks */
29019 -               val = lp->a.read_csr(ioaddr, CSR3);
29020 +               val = lp->a->read_csr(ioaddr, CSR3);
29021                 val &= 0x00ff;
29022 -               lp->a.write_csr(ioaddr, CSR3, val);
29023 +               lp->a->write_csr(ioaddr, CSR3, val);
29024  
29025                 /* Set interrupt enable. */
29026 -               lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29027 +               lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29028  
29029                 spin_unlock_irqrestore(&lp->lock, flags);
29030         }
29031 @@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
29032         int i, csr0;
29033         u16 *buff = ptr;
29034         struct pcnet32_private *lp = netdev_priv(dev);
29035 -       struct pcnet32_access *a = &lp->a;
29036 +       struct pcnet32_access *a = lp->a;
29037         ulong ioaddr = dev->base_addr;
29038         unsigned long flags;
29039  
29040 @@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
29041                 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29042                         if (lp->phymask & (1 << j)) {
29043                                 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29044 -                                       lp->a.write_bcr(ioaddr, 33,
29045 +                                       lp->a->write_bcr(ioaddr, 33,
29046                                                         (j << 5) | i);
29047 -                                       *buff++ = lp->a.read_bcr(ioaddr, 34);
29048 +                                       *buff++ = lp->a->read_bcr(ioaddr, 34);
29049                                 }
29050                         }
29051                 }
29052 @@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29053             ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29054                 lp->options |= PCNET32_PORT_FD;
29055  
29056 -       lp->a = *a;
29057 +       lp->a = a;
29058  
29059         /* prior to register_netdev, dev->name is not yet correct */
29060         if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29061 @@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29062         if (lp->mii) {
29063                 /* lp->phycount and lp->phymask are set to 0 by memset above */
29064  
29065 -               lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29066 +               lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29067                 /* scan for PHYs */
29068                 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29069                         unsigned short id1, id2;
29070 @@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29071                                 pr_info("Found PHY %04x:%04x at address %d\n",
29072                                         id1, id2, i);
29073                 }
29074 -               lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29075 +               lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29076                 if (lp->phycount > 1)
29077                         lp->options |= PCNET32_PORT_MII;
29078         }
29079 @@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
29080         }
29081  
29082         /* Reset the PCNET32 */
29083 -       lp->a.reset(ioaddr);
29084 +       lp->a->reset(ioaddr);
29085  
29086         /* switch pcnet32 to 32bit mode */
29087 -       lp->a.write_bcr(ioaddr, 20, 2);
29088 +       lp->a->write_bcr(ioaddr, 20, 2);
29089  
29090         netif_printk(lp, ifup, KERN_DEBUG, dev,
29091                      "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29092 @@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
29093                      (u32) (lp->init_dma_addr));
29094  
29095         /* set/reset autoselect bit */
29096 -       val = lp->a.read_bcr(ioaddr, 2) & ~2;
29097 +       val = lp->a->read_bcr(ioaddr, 2) & ~2;
29098         if (lp->options & PCNET32_PORT_ASEL)
29099                 val |= 2;
29100 -       lp->a.write_bcr(ioaddr, 2, val);
29101 +       lp->a->write_bcr(ioaddr, 2, val);
29102  
29103         /* handle full duplex setting */
29104         if (lp->mii_if.full_duplex) {
29105 -               val = lp->a.read_bcr(ioaddr, 9) & ~3;
29106 +               val = lp->a->read_bcr(ioaddr, 9) & ~3;
29107                 if (lp->options & PCNET32_PORT_FD) {
29108                         val |= 1;
29109                         if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29110 @@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
29111                         if (lp->chip_version == 0x2627)
29112                                 val |= 3;
29113                 }
29114 -               lp->a.write_bcr(ioaddr, 9, val);
29115 +               lp->a->write_bcr(ioaddr, 9, val);
29116         }
29117  
29118         /* set/reset GPSI bit in test register */
29119 -       val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29120 +       val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29121         if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29122                 val |= 0x10;
29123 -       lp->a.write_csr(ioaddr, 124, val);
29124 +       lp->a->write_csr(ioaddr, 124, val);
29125  
29126         /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29127         if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29128 @@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
29129                  * duplex, and/or enable auto negotiation, and clear DANAS
29130                  */
29131                 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29132 -                       lp->a.write_bcr(ioaddr, 32,
29133 -                                       lp->a.read_bcr(ioaddr, 32) | 0x0080);
29134 +                       lp->a->write_bcr(ioaddr, 32,
29135 +                                       lp->a->read_bcr(ioaddr, 32) | 0x0080);
29136                         /* disable Auto Negotiation, set 10Mpbs, HD */
29137 -                       val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29138 +                       val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29139                         if (lp->options & PCNET32_PORT_FD)
29140                                 val |= 0x10;
29141                         if (lp->options & PCNET32_PORT_100)
29142                                 val |= 0x08;
29143 -                       lp->a.write_bcr(ioaddr, 32, val);
29144 +                       lp->a->write_bcr(ioaddr, 32, val);
29145                 } else {
29146                         if (lp->options & PCNET32_PORT_ASEL) {
29147 -                               lp->a.write_bcr(ioaddr, 32,
29148 -                                               lp->a.read_bcr(ioaddr,
29149 +                               lp->a->write_bcr(ioaddr, 32,
29150 +                                               lp->a->read_bcr(ioaddr,
29151                                                                32) | 0x0080);
29152                                 /* enable auto negotiate, setup, disable fd */
29153 -                               val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29154 +                               val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29155                                 val |= 0x20;
29156 -                               lp->a.write_bcr(ioaddr, 32, val);
29157 +                               lp->a->write_bcr(ioaddr, 32, val);
29158                         }
29159                 }
29160         } else {
29161 @@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
29162                  * There is really no good other way to handle multiple PHYs
29163                  * other than turning off all automatics
29164                  */
29165 -               val = lp->a.read_bcr(ioaddr, 2);
29166 -               lp->a.write_bcr(ioaddr, 2, val & ~2);
29167 -               val = lp->a.read_bcr(ioaddr, 32);
29168 -               lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7));   /* stop MII manager */
29169 +               val = lp->a->read_bcr(ioaddr, 2);
29170 +               lp->a->write_bcr(ioaddr, 2, val & ~2);
29171 +               val = lp->a->read_bcr(ioaddr, 32);
29172 +               lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7));  /* stop MII manager */
29173  
29174                 if (!(lp->options & PCNET32_PORT_ASEL)) {
29175                         /* setup ecmd */
29176 @@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
29177                         ethtool_cmd_speed_set(&ecmd,
29178                                               (lp->options & PCNET32_PORT_100) ?
29179                                               SPEED_100 : SPEED_10);
29180 -                       bcr9 = lp->a.read_bcr(ioaddr, 9);
29181 +                       bcr9 = lp->a->read_bcr(ioaddr, 9);
29182  
29183                         if (lp->options & PCNET32_PORT_FD) {
29184                                 ecmd.duplex = DUPLEX_FULL;
29185 @@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
29186                                 ecmd.duplex = DUPLEX_HALF;
29187                                 bcr9 |= ~(1 << 0);
29188                         }
29189 -                       lp->a.write_bcr(ioaddr, 9, bcr9);
29190 +                       lp->a->write_bcr(ioaddr, 9, bcr9);
29191                 }
29192  
29193                 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29194 @@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
29195  
29196  #ifdef DO_DXSUFLO
29197         if (lp->dxsuflo) {      /* Disable transmit stop on underflow */
29198 -               val = lp->a.read_csr(ioaddr, CSR3);
29199 +               val = lp->a->read_csr(ioaddr, CSR3);
29200                 val |= 0x40;
29201 -               lp->a.write_csr(ioaddr, CSR3, val);
29202 +               lp->a->write_csr(ioaddr, CSR3, val);
29203         }
29204  #endif
29205  
29206 @@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
29207         napi_enable(&lp->napi);
29208  
29209         /* Re-initialize the PCNET32, and start it when done. */
29210 -       lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29211 -       lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29212 +       lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29213 +       lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29214  
29215 -       lp->a.write_csr(ioaddr, CSR4, 0x0915);  /* auto tx pad */
29216 -       lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29217 +       lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29218 +       lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29219  
29220         netif_start_queue(dev);
29221  
29222 @@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
29223  
29224         i = 0;
29225         while (i++ < 100)
29226 -               if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29227 +               if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29228                         break;
29229         /*
29230          * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29231          * reports that doing so triggers a bug in the '974.
29232          */
29233 -       lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29234 +       lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29235  
29236         netif_printk(lp, ifup, KERN_DEBUG, dev,
29237                      "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29238                      i,
29239                      (u32) (lp->init_dma_addr),
29240 -                    lp->a.read_csr(ioaddr, CSR0));
29241 +                    lp->a->read_csr(ioaddr, CSR0));
29242  
29243         spin_unlock_irqrestore(&lp->lock, flags);
29244  
29245 @@ -2218,7 +2218,7 @@ err_free_ring:
29246          * Switch back to 16bit mode to avoid problems with dumb
29247          * DOS packet driver after a warm reboot
29248          */
29249 -       lp->a.write_bcr(ioaddr, 20, 4);
29250 +       lp->a->write_bcr(ioaddr, 20, 4);
29251  
29252  err_free_irq:
29253         spin_unlock_irqrestore(&lp->lock, flags);
29254 @@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
29255  
29256         /* wait for stop */
29257         for (i = 0; i < 100; i++)
29258 -               if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29259 +               if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29260                         break;
29261  
29262         if (i >= 100)
29263 @@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
29264                 return;
29265  
29266         /* ReInit Ring */
29267 -       lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29268 +       lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29269         i = 0;
29270         while (i++ < 1000)
29271 -               if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29272 +               if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29273                         break;
29274  
29275 -       lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29276 +       lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29277  }
29278  
29279  static void pcnet32_tx_timeout(struct net_device *dev)
29280 @@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
29281         /* Transmitter timeout, serious problems. */
29282         if (pcnet32_debug & NETIF_MSG_DRV)
29283                 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29284 -                      dev->name, lp->a.read_csr(ioaddr, CSR0));
29285 -       lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29286 +                      dev->name, lp->a->read_csr(ioaddr, CSR0));
29287 +       lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29288         dev->stats.tx_errors++;
29289         if (netif_msg_tx_err(lp)) {
29290                 int i;
29291 @@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29292  
29293         netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29294                      "%s() called, csr0 %4.4x\n",
29295 -                    __func__, lp->a.read_csr(ioaddr, CSR0));
29296 +                    __func__, lp->a->read_csr(ioaddr, CSR0));
29297  
29298         /* Default status -- will not enable Successful-TxDone
29299          * interrupt when that option is available to us.
29300 @@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29301         dev->stats.tx_bytes += skb->len;
29302  
29303         /* Trigger an immediate send poll. */
29304 -       lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29305 +       lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29306  
29307         if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29308                 lp->tx_full = 1;
29309 @@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29310  
29311         spin_lock(&lp->lock);
29312  
29313 -       csr0 = lp->a.read_csr(ioaddr, CSR0);
29314 +       csr0 = lp->a->read_csr(ioaddr, CSR0);
29315         while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29316                 if (csr0 == 0xffff)
29317                         break;  /* PCMCIA remove happened */
29318                 /* Acknowledge all of the current interrupt sources ASAP. */
29319 -               lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29320 +               lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29321  
29322                 netif_printk(lp, intr, KERN_DEBUG, dev,
29323                              "interrupt  csr0=%#2.2x new csr=%#2.2x\n",
29324 -                            csr0, lp->a.read_csr(ioaddr, CSR0));
29325 +                            csr0, lp->a->read_csr(ioaddr, CSR0));
29326  
29327                 /* Log misc errors. */
29328                 if (csr0 & 0x4000)
29329 @@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29330                 if (napi_schedule_prep(&lp->napi)) {
29331                         u16 val;
29332                         /* set interrupt masks */
29333 -                       val = lp->a.read_csr(ioaddr, CSR3);
29334 +                       val = lp->a->read_csr(ioaddr, CSR3);
29335                         val |= 0x5f00;
29336 -                       lp->a.write_csr(ioaddr, CSR3, val);
29337 +                       lp->a->write_csr(ioaddr, CSR3, val);
29338  
29339                         __napi_schedule(&lp->napi);
29340                         break;
29341                 }
29342 -               csr0 = lp->a.read_csr(ioaddr, CSR0);
29343 +               csr0 = lp->a->read_csr(ioaddr, CSR0);
29344         }
29345  
29346         netif_printk(lp, intr, KERN_DEBUG, dev,
29347                      "exiting interrupt, csr0=%#4.4x\n",
29348 -                    lp->a.read_csr(ioaddr, CSR0));
29349 +                    lp->a->read_csr(ioaddr, CSR0));
29350  
29351         spin_unlock(&lp->lock);
29352  
29353 @@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
29354  
29355         spin_lock_irqsave(&lp->lock, flags);
29356  
29357 -       dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29358 +       dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29359  
29360         netif_printk(lp, ifdown, KERN_DEBUG, dev,
29361                      "Shutting down ethercard, status was %2.2x\n",
29362 -                    lp->a.read_csr(ioaddr, CSR0));
29363 +                    lp->a->read_csr(ioaddr, CSR0));
29364  
29365         /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29366 -       lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29367 +       lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29368  
29369         /*
29370          * Switch back to 16bit mode to avoid problems with dumb
29371          * DOS packet driver after a warm reboot
29372          */
29373 -       lp->a.write_bcr(ioaddr, 20, 4);
29374 +       lp->a->write_bcr(ioaddr, 20, 4);
29375  
29376         spin_unlock_irqrestore(&lp->lock, flags);
29377  
29378 @@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
29379         unsigned long flags;
29380  
29381         spin_lock_irqsave(&lp->lock, flags);
29382 -       dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29383 +       dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29384         spin_unlock_irqrestore(&lp->lock, flags);
29385  
29386         return &dev->stats;
29387 @@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
29388         if (dev->flags & IFF_ALLMULTI) {
29389                 ib->filter[0] = cpu_to_le32(~0U);
29390                 ib->filter[1] = cpu_to_le32(~0U);
29391 -               lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29392 -               lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29393 -               lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29394 -               lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29395 +               lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29396 +               lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29397 +               lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29398 +               lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29399                 return;
29400         }
29401         /* clear the multicast filter */
29402 @@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
29403                 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29404         }
29405         for (i = 0; i < 4; i++)
29406 -               lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29407 +               lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29408                                 le16_to_cpu(mcast_table[i]));
29409  }
29410  
29411 @@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
29412  
29413         spin_lock_irqsave(&lp->lock, flags);
29414         suspended = pcnet32_suspend(dev, &flags, 0);
29415 -       csr15 = lp->a.read_csr(ioaddr, CSR15);
29416 +       csr15 = lp->a->read_csr(ioaddr, CSR15);
29417         if (dev->flags & IFF_PROMISC) {
29418                 /* Log any net taps. */
29419                 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29420                 lp->init_block->mode =
29421                     cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29422                                 7);
29423 -               lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29424 +               lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29425         } else {
29426                 lp->init_block->mode =
29427                     cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29428 -               lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29429 +               lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29430                 pcnet32_load_multicast(dev);
29431         }
29432  
29433         if (suspended) {
29434                 int csr5;
29435                 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29436 -               csr5 = lp->a.read_csr(ioaddr, CSR5);
29437 -               lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29438 +               csr5 = lp->a->read_csr(ioaddr, CSR5);
29439 +               lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29440         } else {
29441 -               lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29442 +               lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29443                 pcnet32_restart(dev, CSR0_NORMAL);
29444                 netif_wake_queue(dev);
29445         }
29446 @@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
29447         if (!lp->mii)
29448                 return 0;
29449  
29450 -       lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29451 -       val_out = lp->a.read_bcr(ioaddr, 34);
29452 +       lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29453 +       val_out = lp->a->read_bcr(ioaddr, 34);
29454  
29455         return val_out;
29456  }
29457 @@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
29458         if (!lp->mii)
29459                 return;
29460  
29461 -       lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29462 -       lp->a.write_bcr(ioaddr, 34, val);
29463 +       lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29464 +       lp->a->write_bcr(ioaddr, 34, val);
29465  }
29466  
29467  static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29468 @@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
29469                 curr_link = mii_link_ok(&lp->mii_if);
29470         } else {
29471                 ulong ioaddr = dev->base_addr;  /* card base I/O address */
29472 -               curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29473 +               curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29474         }
29475         if (!curr_link) {
29476                 if (prev_link || verbose) {
29477 @@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
29478                                             (ecmd.duplex == DUPLEX_FULL)
29479                                             ? "full" : "half");
29480                         }
29481 -                       bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29482 +                       bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29483                         if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29484                                 if (lp->mii_if.full_duplex)
29485                                         bcr9 |= (1 << 0);
29486                                 else
29487                                         bcr9 &= ~(1 << 0);
29488 -                               lp->a.write_bcr(dev->base_addr, 9, bcr9);
29489 +                               lp->a->write_bcr(dev->base_addr, 9, bcr9);
29490                         }
29491                 } else {
29492                         netif_info(lp, link, dev, "link up\n");
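
The pcnet32 hunks above convert every `lp->a.read_csr(...)`-style call to `lp->a->read_csr(...)`: the `a` member stops being an ops structure embedded in the driver's private data and becomes a pointer to one, so a single function-pointer table can be shared between devices and kept out of writable per-device state. A minimal before/after sketch of the shape of that change -- the member names mirror the driver, the `const` on the pointed-to table is an assumption in line with the patch's general constification goal, and the surrounding code is illustrative only:

#include <linux/types.h>

struct pcnet32_access {
        u16  (*read_csr)(unsigned long ioaddr, int index);
        void (*write_csr)(unsigned long ioaddr, int index, u16 val);
        /* ... further register accessors ... */
};

struct pcnet32_private_old {
        struct pcnet32_access a;        /* embedded copy:   lp->a.read_csr(...)  */
};

struct pcnet32_private_new {
        const struct pcnet32_access *a; /* shared table:    lp->a->read_csr(...) */
};
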
29493 diff -urNp linux-3.0.4/drivers/net/ppp_generic.c linux-3.0.4/drivers/net/ppp_generic.c
29494 --- linux-3.0.4/drivers/net/ppp_generic.c       2011-07-21 22:17:23.000000000 -0400
29495 +++ linux-3.0.4/drivers/net/ppp_generic.c       2011-08-23 21:47:55.000000000 -0400
29496 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29497         void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29498         struct ppp_stats stats;
29499         struct ppp_comp_stats cstats;
29500 -       char *vers;
29501  
29502         switch (cmd) {
29503         case SIOCGPPPSTATS:
29504 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29505                 break;
29506  
29507         case SIOCGPPPVER:
29508 -               vers = PPP_VERSION;
29509 -               if (copy_to_user(addr, vers, strlen(vers) + 1))
29510 +               if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29511                         break;
29512                 err = 0;
29513                 break;
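
The ppp_generic hunk drops the intermediate `vers` pointer and copies the version string with `sizeof(PPP_VERSION)` instead of `strlen(vers) + 1`. For a string literal the two values are identical, but `sizeof` is computed at compile time and does not depend on a runtime pointer at all. A tiny userspace illustration of the equivalence (the macro name is a stand-in, not the kernel's):

#include <assert.h>
#include <string.h>

#define DEMO_VERSION "2.4.2"    /* stand-in for PPP_VERSION */

int main(void)
{
        /* sizeof of a string literal counts the terminating NUL byte too. */
        assert(sizeof(DEMO_VERSION) == strlen(DEMO_VERSION) + 1);
        return 0;
}
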
29514 diff -urNp linux-3.0.4/drivers/net/r8169.c linux-3.0.4/drivers/net/r8169.c
29515 --- linux-3.0.4/drivers/net/r8169.c     2011-08-23 21:44:40.000000000 -0400
29516 +++ linux-3.0.4/drivers/net/r8169.c     2011-08-23 21:47:55.000000000 -0400
29517 @@ -645,12 +645,12 @@ struct rtl8169_private {
29518         struct mdio_ops {
29519                 void (*write)(void __iomem *, int, int);
29520                 int (*read)(void __iomem *, int);
29521 -       } mdio_ops;
29522 +       } __no_const mdio_ops;
29523  
29524         struct pll_power_ops {
29525                 void (*down)(struct rtl8169_private *);
29526                 void (*up)(struct rtl8169_private *);
29527 -       } pll_power_ops;
29528 +       } __no_const pll_power_ops;
29529  
29530         int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29531         int (*get_settings)(struct net_device *, struct ethtool_cmd *);
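
The `__no_const` annotation added to `mdio_ops` and `pll_power_ops` is a PaX/grsecurity marker: the constify GCC plugin normally turns structures consisting only of function pointers into `const` objects, and `__no_const` opts a structure out when the driver has to fill it in at run time (here the ops vary per chip revision). A rough sketch of how the annotation is expected to degrade when the plugin is not in use -- the exact `#ifdef` symbol and attribute spelling are assumptions, the real definitions live in the patch's compiler headers:

/* Assumed fallback definition -- illustrative only. */
#ifdef CONSTIFY_PLUGIN
# define __no_const __attribute__((no_const))
#else
# define __no_const
#endif

struct mdio_ops {
        void (*write)(void __iomem *ioaddr, int reg, int value);
        int  (*read)(void __iomem *ioaddr, int reg);
} __no_const;   /* stays writable: the driver assigns these at probe time */

The same annotation recurs below for vxge, ath9k, wl1251 and the PCI hotplug controller ops for the same reason.
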
29532 diff -urNp linux-3.0.4/drivers/net/tg3.h linux-3.0.4/drivers/net/tg3.h
29533 --- linux-3.0.4/drivers/net/tg3.h       2011-07-21 22:17:23.000000000 -0400
29534 +++ linux-3.0.4/drivers/net/tg3.h       2011-08-23 21:47:55.000000000 -0400
29535 @@ -134,6 +134,7 @@
29536  #define  CHIPREV_ID_5750_A0             0x4000
29537  #define  CHIPREV_ID_5750_A1             0x4001
29538  #define  CHIPREV_ID_5750_A3             0x4003
29539 +#define  CHIPREV_ID_5750_C1             0x4201
29540  #define  CHIPREV_ID_5750_C2             0x4202
29541  #define  CHIPREV_ID_5752_A0_HW          0x5000
29542  #define  CHIPREV_ID_5752_A0             0x6000
29543 diff -urNp linux-3.0.4/drivers/net/tokenring/abyss.c linux-3.0.4/drivers/net/tokenring/abyss.c
29544 --- linux-3.0.4/drivers/net/tokenring/abyss.c   2011-07-21 22:17:23.000000000 -0400
29545 +++ linux-3.0.4/drivers/net/tokenring/abyss.c   2011-08-23 21:47:55.000000000 -0400
29546 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = 
29547  
29548  static int __init abyss_init (void)
29549  {
29550 -       abyss_netdev_ops = tms380tr_netdev_ops;
29551 +       pax_open_kernel();
29552 +       memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29553  
29554 -       abyss_netdev_ops.ndo_open = abyss_open;
29555 -       abyss_netdev_ops.ndo_stop = abyss_close;
29556 +       *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29557 +       *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29558 +       pax_close_kernel();
29559  
29560         return pci_register_driver(&abyss_driver);
29561  }
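
The tokenring drivers (abyss here, then madgemc, proteon and skisa below) all receive the same transformation: the `net_device_ops` template is no longer copied with a plain structure assignment, because under KERNEXEC/constification the destination may live in read-only memory. The write is instead bracketed by `pax_open_kernel()`/`pax_close_kernel()`, and individual callbacks are patched through a `*(void **)&` cast that sidesteps the const qualifier. A condensed sketch of the idiom, using the names from the abyss hunk -- treat it as an illustration of the pattern, not a drop-in:

static struct net_device_ops abyss_netdev_ops;  /* const under the constify plugin */

static int __init abyss_init(void)
{
        pax_open_kernel();              /* temporarily permit writes to protected data */
        memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops,
               sizeof(tms380tr_netdev_ops));
        *(void **)&abyss_netdev_ops.ndo_open = abyss_open;  /* cast bypasses const */
        *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
        pax_close_kernel();             /* restore write protection */

        return pci_register_driver(&abyss_driver);
}

The iwl3945 and mac80211_hwsim hunks later in this section apply the same open/write/close bracket when they NULL out or replace entries in their ops tables.
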
29562 diff -urNp linux-3.0.4/drivers/net/tokenring/madgemc.c linux-3.0.4/drivers/net/tokenring/madgemc.c
29563 --- linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
29564 +++ linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
29565 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver 
29566  
29567  static int __init madgemc_init (void)
29568  {
29569 -       madgemc_netdev_ops = tms380tr_netdev_ops;
29570 -       madgemc_netdev_ops.ndo_open = madgemc_open;
29571 -       madgemc_netdev_ops.ndo_stop = madgemc_close;
29572 +       pax_open_kernel();
29573 +       memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29574 +       *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29575 +       *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29576 +       pax_close_kernel();
29577  
29578         return mca_register_driver (&madgemc_driver);
29579  }
29580 diff -urNp linux-3.0.4/drivers/net/tokenring/proteon.c linux-3.0.4/drivers/net/tokenring/proteon.c
29581 --- linux-3.0.4/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
29582 +++ linux-3.0.4/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
29583 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
29584         struct platform_device *pdev;
29585         int i, num = 0, err = 0;
29586  
29587 -       proteon_netdev_ops = tms380tr_netdev_ops;
29588 -       proteon_netdev_ops.ndo_open = proteon_open;
29589 -       proteon_netdev_ops.ndo_stop = tms380tr_close;
29590 +       pax_open_kernel();
29591 +       memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29592 +       *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29593 +       *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29594 +       pax_close_kernel();
29595  
29596         err = platform_driver_register(&proteon_driver);
29597         if (err)
29598 diff -urNp linux-3.0.4/drivers/net/tokenring/skisa.c linux-3.0.4/drivers/net/tokenring/skisa.c
29599 --- linux-3.0.4/drivers/net/tokenring/skisa.c   2011-07-21 22:17:23.000000000 -0400
29600 +++ linux-3.0.4/drivers/net/tokenring/skisa.c   2011-08-23 21:47:55.000000000 -0400
29601 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29602         struct platform_device *pdev;
29603         int i, num = 0, err = 0;
29604  
29605 -       sk_isa_netdev_ops = tms380tr_netdev_ops;
29606 -       sk_isa_netdev_ops.ndo_open = sk_isa_open;
29607 -       sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29608 +       pax_open_kernel();
29609 +       memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29610 +       *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29611 +       *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29612 +       pax_close_kernel();
29613  
29614         err = platform_driver_register(&sk_isa_driver);
29615         if (err)
29616 diff -urNp linux-3.0.4/drivers/net/tulip/de2104x.c linux-3.0.4/drivers/net/tulip/de2104x.c
29617 --- linux-3.0.4/drivers/net/tulip/de2104x.c     2011-07-21 22:17:23.000000000 -0400
29618 +++ linux-3.0.4/drivers/net/tulip/de2104x.c     2011-08-23 21:48:14.000000000 -0400
29619 @@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
29620         struct de_srom_info_leaf *il;
29621         void *bufp;
29622  
29623 +       pax_track_stack();
29624 +
29625         /* download entire eeprom */
29626         for (i = 0; i < DE_EEPROM_WORDS; i++)
29627                 ((__le16 *)ee_data)[i] =
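
`pax_track_stack()` calls like the one above recur throughout the rest of this section. They are placed at the top of functions with unusually large stack frames (EEPROM/SROM images, debugfs text buffers, per-queue arrays) so that PaX's stack-tracking instrumentation can account for the deep usage before it occurs; with the feature disabled the call is a no-op. A schematic of where the call sits -- the function, buffer size and copy target below are arbitrary, only the placement of `pax_track_stack()` reflects the patch:

static ssize_t read_big_report(char *out, size_t outlen)
{
        char buf[2048];         /* deliberately large on-stack scratch buffer */
        unsigned int len = 0;

        pax_track_stack();      /* let PaX account for this deep frame up front */

        len += snprintf(buf + len, sizeof(buf) - len, "example line\n");
        if (len > outlen)
                len = outlen;
        memcpy(out, buf, len);
        return len;
}
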
29628 diff -urNp linux-3.0.4/drivers/net/tulip/de4x5.c linux-3.0.4/drivers/net/tulip/de4x5.c
29629 --- linux-3.0.4/drivers/net/tulip/de4x5.c       2011-07-21 22:17:23.000000000 -0400
29630 +++ linux-3.0.4/drivers/net/tulip/de4x5.c       2011-08-23 21:47:55.000000000 -0400
29631 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29632         for (i=0; i<ETH_ALEN; i++) {
29633             tmp.addr[i] = dev->dev_addr[i];
29634         }
29635 -       if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29636 +       if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29637         break;
29638  
29639      case DE4X5_SET_HWADDR:           /* Set the hardware address */
29640 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29641         spin_lock_irqsave(&lp->lock, flags);
29642         memcpy(&statbuf, &lp->pktStats, ioc->len);
29643         spin_unlock_irqrestore(&lp->lock, flags);
29644 -       if (copy_to_user(ioc->data, &statbuf, ioc->len))
29645 +       if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29646                 return -EFAULT;
29647         break;
29648      }
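
The two de4x5 ioctl hunks add a length check in front of `copy_to_user()`: `ioc->len` comes from userspace, and without the `ioc->len > sizeof ...` guard a caller could ask the kernel to copy past the end of the fixed-size `tmp.addr`/`statbuf` buffers, leaking adjacent stack memory. The same guard reappears later for `drivers/parport/procfs.c`. A minimal sketch of the idiom with hypothetical names:

#include <linux/uaccess.h>
#include <linux/string.h>

/* Sketch: never trust a user-supplied length against a fixed kernel buffer. */
static long demo_ioctl_get(void __user *dst, size_t user_len)
{
        unsigned char addr[6];                  /* fixed-size kernel-side data */

        memset(addr, 0, sizeof(addr));
        if (user_len > sizeof(addr) ||          /* clamp before...             */
            copy_to_user(dst, addr, user_len))  /* ...copying out              */
                return -EFAULT;
        return 0;
}
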
29649 diff -urNp linux-3.0.4/drivers/net/usb/hso.c linux-3.0.4/drivers/net/usb/hso.c
29650 --- linux-3.0.4/drivers/net/usb/hso.c   2011-07-21 22:17:23.000000000 -0400
29651 +++ linux-3.0.4/drivers/net/usb/hso.c   2011-08-23 21:47:55.000000000 -0400
29652 @@ -71,7 +71,7 @@
29653  #include <asm/byteorder.h>
29654  #include <linux/serial_core.h>
29655  #include <linux/serial.h>
29656 -
29657 +#include <asm/local.h>
29658  
29659  #define MOD_AUTHOR                     "Option Wireless"
29660  #define MOD_DESCRIPTION                        "USB High Speed Option driver"
29661 @@ -257,7 +257,7 @@ struct hso_serial {
29662  
29663         /* from usb_serial_port */
29664         struct tty_struct *tty;
29665 -       int open_count;
29666 +       local_t open_count;
29667         spinlock_t serial_lock;
29668  
29669         int (*write_data) (struct hso_serial *serial);
29670 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29671         struct urb *urb;
29672  
29673         urb = serial->rx_urb[0];
29674 -       if (serial->open_count > 0) {
29675 +       if (local_read(&serial->open_count) > 0) {
29676                 count = put_rxbuf_data(urb, serial);
29677                 if (count == -1)
29678                         return;
29679 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29680         DUMP1(urb->transfer_buffer, urb->actual_length);
29681  
29682         /* Anyone listening? */
29683 -       if (serial->open_count == 0)
29684 +       if (local_read(&serial->open_count) == 0)
29685                 return;
29686  
29687         if (status == 0) {
29688 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29689         spin_unlock_irq(&serial->serial_lock);
29690  
29691         /* check for port already opened, if not set the termios */
29692 -       serial->open_count++;
29693 -       if (serial->open_count == 1) {
29694 +       if (local_inc_return(&serial->open_count) == 1) {
29695                 serial->rx_state = RX_IDLE;
29696                 /* Force default termio settings */
29697                 _hso_serial_set_termios(tty, NULL);
29698 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29699                 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29700                 if (result) {
29701                         hso_stop_serial_device(serial->parent);
29702 -                       serial->open_count--;
29703 +                       local_dec(&serial->open_count);
29704                         kref_put(&serial->parent->ref, hso_serial_ref_free);
29705                 }
29706         } else {
29707 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29708  
29709         /* reset the rts and dtr */
29710         /* do the actual close */
29711 -       serial->open_count--;
29712 +       local_dec(&serial->open_count);
29713  
29714 -       if (serial->open_count <= 0) {
29715 -               serial->open_count = 0;
29716 +       if (local_read(&serial->open_count) <= 0) {
29717 +               local_set(&serial->open_count,  0);
29718                 spin_lock_irq(&serial->serial_lock);
29719                 if (serial->tty == tty) {
29720                         serial->tty->driver_data = NULL;
29721 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29722  
29723         /* the actual setup */
29724         spin_lock_irqsave(&serial->serial_lock, flags);
29725 -       if (serial->open_count)
29726 +       if (local_read(&serial->open_count))
29727                 _hso_serial_set_termios(tty, old);
29728         else
29729                 tty->termios = old;
29730 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29731                                 D1("Pending read interrupt on port %d\n", i);
29732                                 spin_lock(&serial->serial_lock);
29733                                 if (serial->rx_state == RX_IDLE &&
29734 -                                       serial->open_count > 0) {
29735 +                                       local_read(&serial->open_count) > 0) {
29736                                         /* Setup and send a ctrl req read on
29737                                          * port i */
29738                                         if (!serial->rx_urb_filled[0]) {
29739 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
29740         /* Start all serial ports */
29741         for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29742                 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29743 -                       if (dev2ser(serial_table[i])->open_count) {
29744 +                       if (local_read(&dev2ser(serial_table[i])->open_count)) {
29745                                 result =
29746                                     hso_start_serial_device(serial_table[i], GFP_NOIO);
29747                                 hso_kick_transmit(dev2ser(serial_table[i]));
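
The hso hunks replace the plain `int open_count` with a `local_t` and switch the call sites to `local_inc_return()`, `local_dec()`, `local_read()` and `local_set()` from `<asm/local.h>`, which the patch now includes. This appears tied to PaX's hardening of `atomic_t`: a simple open counter that is allowed to be decremented and clamped back to zero is better served by a cheap per-CPU-safe type outside that protection. A compressed sketch of the open/close bookkeeping, with error handling trimmed and a hypothetical structure name:

#include <asm/local.h>

struct demo_serial {
        local_t open_count;
};

static int demo_open(struct demo_serial *s)
{
        /* The first opener (atomically observed new value == 1) does the setup. */
        if (local_inc_return(&s->open_count) == 1) {
                /* ... bring the port up ... */
        }
        return 0;
}

static void demo_close(struct demo_serial *s)
{
        local_dec(&s->open_count);
        if (local_read(&s->open_count) <= 0)
                local_set(&s->open_count, 0);   /* clamp, as the driver does */
}
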
29748 diff -urNp linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
29749 --- linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c   2011-07-21 22:17:23.000000000 -0400
29750 +++ linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c   2011-08-23 21:47:55.000000000 -0400
29751 @@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device 
29752                  * Return with error code if any of the queue indices
29753                  * is out of range
29754                  */
29755 -               if (p->ring_index[i] < 0 ||
29756 -                   p->ring_index[i] >= adapter->num_rx_queues)
29757 +               if (p->ring_index[i] >= adapter->num_rx_queues)
29758                         return -EINVAL;
29759         }
29760  
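
The vmxnet3 hunk deletes the `p->ring_index[i] < 0` half of the range check. If `ring_index` holds an unsigned type, that comparison is always false (and GCC warns about it), so the remaining `>= adapter->num_rx_queues` test is the entire check; the rndis_wlan hunk further down removes an identical always-false `rts_threshold < 0` test. A two-line illustration of why the compiler flags it:

#include <stdio.h>

int main(void)
{
        unsigned int idx = 0u;

        if (idx < 0)            /* -Wtype-limits: comparison is always false */
                printf("never reached\n");
        return 0;
}
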
29761 diff -urNp linux-3.0.4/drivers/net/vxge/vxge-config.h linux-3.0.4/drivers/net/vxge/vxge-config.h
29762 --- linux-3.0.4/drivers/net/vxge/vxge-config.h  2011-07-21 22:17:23.000000000 -0400
29763 +++ linux-3.0.4/drivers/net/vxge/vxge-config.h  2011-08-23 21:47:55.000000000 -0400
29764 @@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
29765         void (*link_down)(struct __vxge_hw_device *devh);
29766         void (*crit_err)(struct __vxge_hw_device *devh,
29767                         enum vxge_hw_event type, u64 ext_data);
29768 -};
29769 +} __no_const;
29770  
29771  /*
29772   * struct __vxge_hw_blockpool_entry - Block private data structure
29773 diff -urNp linux-3.0.4/drivers/net/vxge/vxge-main.c linux-3.0.4/drivers/net/vxge/vxge-main.c
29774 --- linux-3.0.4/drivers/net/vxge/vxge-main.c    2011-07-21 22:17:23.000000000 -0400
29775 +++ linux-3.0.4/drivers/net/vxge/vxge-main.c    2011-08-23 21:48:14.000000000 -0400
29776 @@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29777         struct sk_buff *completed[NR_SKB_COMPLETED];
29778         int more;
29779  
29780 +       pax_track_stack();
29781 +
29782         do {
29783                 more = 0;
29784                 skb_ptr = completed;
29785 @@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
29786         u8 mtable[256] = {0}; /* CPU to vpath mapping  */
29787         int index;
29788  
29789 +       pax_track_stack();
29790 +
29791         /*
29792          * Filling
29793          *      - itable with bucket numbers
29794 diff -urNp linux-3.0.4/drivers/net/vxge/vxge-traffic.h linux-3.0.4/drivers/net/vxge/vxge-traffic.h
29795 --- linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
29796 +++ linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
29797 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29798                         struct vxge_hw_mempool_dma      *dma_object,
29799                         u32                     index,
29800                         u32                     is_last);
29801 -};
29802 +} __no_const;
29803  
29804  #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath)                             \
29805                 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29806 diff -urNp linux-3.0.4/drivers/net/wan/cycx_x25.c linux-3.0.4/drivers/net/wan/cycx_x25.c
29807 --- linux-3.0.4/drivers/net/wan/cycx_x25.c      2011-07-21 22:17:23.000000000 -0400
29808 +++ linux-3.0.4/drivers/net/wan/cycx_x25.c      2011-08-23 21:48:14.000000000 -0400
29809 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29810         unsigned char hex[1024],
29811                 * phex = hex;
29812  
29813 +       pax_track_stack();
29814 +
29815         if (len >= (sizeof(hex) / 2))
29816                 len = (sizeof(hex) / 2) - 1;
29817  
29818 diff -urNp linux-3.0.4/drivers/net/wan/hdlc_x25.c linux-3.0.4/drivers/net/wan/hdlc_x25.c
29819 --- linux-3.0.4/drivers/net/wan/hdlc_x25.c      2011-07-21 22:17:23.000000000 -0400
29820 +++ linux-3.0.4/drivers/net/wan/hdlc_x25.c      2011-08-23 21:47:55.000000000 -0400
29821 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29822  
29823  static int x25_open(struct net_device *dev)
29824  {
29825 -       struct lapb_register_struct cb;
29826 +       static struct lapb_register_struct cb = {
29827 +               .connect_confirmation = x25_connected,
29828 +               .connect_indication = x25_connected,
29829 +               .disconnect_confirmation = x25_disconnected,
29830 +               .disconnect_indication = x25_disconnected,
29831 +               .data_indication = x25_data_indication,
29832 +               .data_transmit = x25_data_transmit
29833 +       };
29834         int result;
29835  
29836 -       cb.connect_confirmation = x25_connected;
29837 -       cb.connect_indication = x25_connected;
29838 -       cb.disconnect_confirmation = x25_disconnected;
29839 -       cb.disconnect_indication = x25_disconnected;
29840 -       cb.data_indication = x25_data_indication;
29841 -       cb.data_transmit = x25_data_transmit;
29842 -
29843         result = lapb_register(dev, &cb);
29844         if (result != LAPB_OK)
29845                 return result;
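
The hdlc_x25 hunk turns the on-stack `lapb_register_struct`, previously filled in field by field on every call, into a `static` structure with a designated initializer. The callback table is identical on every `x25_open()`, so building it once removes the per-call stack writes and gives the constification machinery a single long-lived object to work with. The same shape in miniature, with a hypothetical callback table standing in for the real `cb`:

struct demo_callbacks {
        void (*on_connect)(int token);
        void (*on_disconnect)(int token);
};

static void demo_connected(int token)    { (void)token; }
static void demo_disconnected(int token) { (void)token; }

static int demo_open(void)
{
        /* Built once at load time; no per-call field assignments. */
        static struct demo_callbacks cb = {
                .on_connect    = demo_connected,
                .on_disconnect = demo_disconnected,
        };

        cb.on_connect(0);       /* stand-in for handing &cb to a registration API */
        return 0;
}
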
29846 diff -urNp linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c
29847 --- linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c       2011-07-21 22:17:23.000000000 -0400
29848 +++ linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c       2011-08-23 21:48:14.000000000 -0400
29849 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29850         int do_autopm = 1;
29851         DECLARE_COMPLETION_ONSTACK(notif_completion);
29852  
29853 +       pax_track_stack();
29854 +
29855         d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
29856                   i2400m, ack, ack_size);
29857         BUG_ON(_ack == i2400m->bm_ack_buf);
29858 diff -urNp linux-3.0.4/drivers/net/wireless/airo.c linux-3.0.4/drivers/net/wireless/airo.c
29859 --- linux-3.0.4/drivers/net/wireless/airo.c     2011-08-23 21:44:40.000000000 -0400
29860 +++ linux-3.0.4/drivers/net/wireless/airo.c     2011-08-23 21:48:14.000000000 -0400
29861 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
29862         BSSListElement * loop_net;
29863         BSSListElement * tmp_net;
29864  
29865 +       pax_track_stack();
29866 +
29867         /* Blow away current list of scan results */
29868         list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
29869                 list_move_tail (&loop_net->list, &ai->network_free_list);
29870 @@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
29871         WepKeyRid wkr;
29872         int rc;
29873  
29874 +       pax_track_stack();
29875 +
29876         memset( &mySsid, 0, sizeof( mySsid ) );
29877         kfree (ai->flash);
29878         ai->flash = NULL;
29879 @@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
29880         __le32 *vals = stats.vals;
29881         int len;
29882  
29883 +       pax_track_stack();
29884 +
29885         if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29886                 return -ENOMEM;
29887         data = file->private_data;
29888 @@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
29889         /* If doLoseSync is not 1, we won't do a Lose Sync */
29890         int doLoseSync = -1;
29891  
29892 +       pax_track_stack();
29893 +
29894         if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29895                 return -ENOMEM;
29896         data = file->private_data;
29897 @@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
29898         int i;
29899         int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
29900  
29901 +       pax_track_stack();
29902 +
29903         qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
29904         if (!qual)
29905                 return -ENOMEM;
29906 @@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
29907         CapabilityRid cap_rid;
29908         __le32 *vals = stats_rid.vals;
29909  
29910 +       pax_track_stack();
29911 +
29912         /* Get stats out of the card */
29913         clear_bit(JOB_WSTATS, &local->jobs);
29914         if (local->power.event) {
29915 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c
29916 --- linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c  2011-07-21 22:17:23.000000000 -0400
29917 +++ linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c  2011-08-23 21:48:14.000000000 -0400
29918 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
29919         unsigned int v;
29920         u64 tsf;
29921  
29922 +       pax_track_stack();
29923 +
29924         v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
29925         len += snprintf(buf+len, sizeof(buf)-len,
29926                 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
29927 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
29928         unsigned int len = 0;
29929         unsigned int i;
29930  
29931 +       pax_track_stack();
29932 +
29933         len += snprintf(buf+len, sizeof(buf)-len,
29934                 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
29935  
29936 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct 
29937         unsigned int i;
29938         unsigned int v;
29939  
29940 +       pax_track_stack();
29941 +
29942         len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
29943                 sc->ah->ah_ant_mode);
29944         len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
29945 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
29946         unsigned int len = 0;
29947         u32 filt = ath5k_hw_get_rx_filter(sc->ah);
29948  
29949 +       pax_track_stack();
29950 +
29951         len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
29952                         sc->bssidmask);
29953         len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
29954 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
29955         unsigned int len = 0;
29956         int i;
29957  
29958 +       pax_track_stack();
29959 +
29960         len += snprintf(buf+len, sizeof(buf)-len,
29961                         "RX\n---------------------\n");
29962         len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
29963 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
29964         char buf[700];
29965         unsigned int len = 0;
29966  
29967 +       pax_track_stack();
29968 +
29969         len += snprintf(buf+len, sizeof(buf)-len,
29970                         "HW has PHY error counters:\t%s\n",
29971                         sc->ah->ah_capabilities.cap_has_phyerr_counters ?
29972 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
29973         struct ath5k_buf *bf, *bf0;
29974         int i, n;
29975  
29976 +       pax_track_stack();
29977 +
29978         len += snprintf(buf+len, sizeof(buf)-len,
29979                         "available txbuffers: %d\n", sc->txbuf_len);
29980  
29981 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
29982 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c   2011-07-21 22:17:23.000000000 -0400
29983 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c   2011-08-23 21:48:14.000000000 -0400
29984 @@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
29985         int i, im, j;
29986         int nmeasurement;
29987  
29988 +       pax_track_stack();
29989 +
29990         for (i = 0; i < AR9300_MAX_CHAINS; i++) {
29991                 if (ah->txchainmask & (1 << i))
29992                         num_chains++;
29993 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
29994 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c   2011-07-21 22:17:23.000000000 -0400
29995 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c   2011-08-23 21:48:14.000000000 -0400
29996 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
29997         int theta_low_bin = 0;
29998         int i;
29999  
30000 +       pax_track_stack();
30001 +
30002         /* disregard any bin that contains <= 16 samples */
30003         thresh_accum_cnt = 16;
30004         scale_factor = 5;
30005 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c
30006 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c  2011-07-21 22:17:23.000000000 -0400
30007 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c  2011-08-23 21:48:14.000000000 -0400
30008 @@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
30009         char buf[512];
30010         unsigned int len = 0;
30011  
30012 +       pax_track_stack();
30013 +
30014         if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30015                 len += snprintf(buf + len, sizeof(buf) - len,
30016                         "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30017 @@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
30018         u8 addr[ETH_ALEN];
30019         u32 tmp;
30020  
30021 +       pax_track_stack();
30022 +
30023         len += snprintf(buf + len, sizeof(buf) - len,
30024                         "%s (chan=%d  center-freq: %d MHz  channel-type: %d (%s))\n",
30025                         wiphy_name(sc->hw->wiphy),
30026 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
30027 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c  2011-07-21 22:17:23.000000000 -0400
30028 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c  2011-08-23 21:48:14.000000000 -0400
30029 @@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
30030         unsigned int len = 0;
30031         int ret = 0;
30032  
30033 +       pax_track_stack();
30034 +
30035         memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30036  
30037         ath9k_htc_ps_wakeup(priv);
30038 @@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
30039         unsigned int len = 0;
30040         int ret = 0;
30041  
30042 +       pax_track_stack();
30043 +
30044         memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30045  
30046         ath9k_htc_ps_wakeup(priv);
30047 @@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
30048         unsigned int len = 0;
30049         int ret = 0;
30050  
30051 +       pax_track_stack();
30052 +
30053         memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30054  
30055         ath9k_htc_ps_wakeup(priv);
30056 @@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
30057         char buf[512];
30058         unsigned int len = 0;
30059  
30060 +       pax_track_stack();
30061 +
30062         len += snprintf(buf + len, sizeof(buf) - len,
30063                         "%20s : %10u\n", "Buffers queued",
30064                         priv->debug.tx_stats.buf_queued);
30065 @@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
30066         char buf[512];
30067         unsigned int len = 0;
30068  
30069 +       pax_track_stack();
30070 +
30071         spin_lock_bh(&priv->tx.tx_lock);
30072  
30073         len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
30074 @@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
30075         char buf[512];
30076         unsigned int len = 0;
30077  
30078 +       pax_track_stack();
30079 +
30080         len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
30081                         "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
30082  
30083 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h
30084 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h     2011-08-23 21:44:40.000000000 -0400
30085 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h     2011-08-23 21:47:55.000000000 -0400
30086 @@ -585,7 +585,7 @@ struct ath_hw_private_ops {
30087  
30088         /* ANI */
30089         void (*ani_cache_ini_regs)(struct ath_hw *ah);
30090 -};
30091 +} __no_const;
30092  
30093  /**
30094   * struct ath_hw_ops - callbacks used by hardware code and driver code
30095 @@ -637,7 +637,7 @@ struct ath_hw_ops {
30096         void (*antdiv_comb_conf_set)(struct ath_hw *ah,
30097                         struct ath_hw_antcomb_conf *antconf);
30098  
30099 -};
30100 +} __no_const;
30101  
30102  struct ath_nf_limits {
30103         s16 max;
30104 @@ -650,7 +650,7 @@ struct ath_nf_limits {
30105  #define AH_UNPLUGGED    0x2 /* The card has been physically removed. */
30106  
30107  struct ath_hw {
30108 -       struct ath_ops reg_ops;
30109 +       ath_ops_no_const reg_ops;
30110  
30111         struct ieee80211_hw *hw;
30112         struct ath_common common;
30113 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath.h linux-3.0.4/drivers/net/wireless/ath/ath.h
30114 --- linux-3.0.4/drivers/net/wireless/ath/ath.h  2011-07-21 22:17:23.000000000 -0400
30115 +++ linux-3.0.4/drivers/net/wireless/ath/ath.h  2011-08-23 21:47:55.000000000 -0400
30116 @@ -121,6 +121,7 @@ struct ath_ops {
30117         void (*write_flush) (void *);
30118         u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
30119  };
30120 +typedef struct ath_ops __no_const ath_ops_no_const;
30121  
30122  struct ath_common;
30123  struct ath_bus_ops;
30124 diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c
30125 --- linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c  2011-07-21 22:17:23.000000000 -0400
30126 +++ linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c  2011-08-23 21:48:14.000000000 -0400
30127 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30128         int err;
30129         DECLARE_SSID_BUF(ssid);
30130  
30131 +       pax_track_stack();
30132 +
30133         IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30134  
30135         if (ssid_len)
30136 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30137         struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30138         int err;
30139  
30140 +       pax_track_stack();
30141 +
30142         IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30143                      idx, keylen, len);
30144  
30145 diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30146 --- linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c        2011-07-21 22:17:23.000000000 -0400
30147 +++ linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c        2011-08-23 21:48:14.000000000 -0400
30148 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30149         unsigned long flags;
30150         DECLARE_SSID_BUF(ssid);
30151  
30152 +       pax_track_stack();
30153 +
30154         LIBIPW_DEBUG_SCAN("'%s' (%pM"
30155                      "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30156                      print_ssid(ssid, info_element->data, info_element->len),
30157 diff -urNp linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30158 --- linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c    2011-07-21 22:17:23.000000000 -0400
30159 +++ linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c    2011-08-23 21:47:55.000000000 -0400
30160 @@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
30161          */
30162         if (iwl3945_mod_params.disable_hw_scan) {
30163                 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30164 -               iwl3945_hw_ops.hw_scan = NULL;
30165 +               pax_open_kernel();
30166 +               *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30167 +               pax_close_kernel();
30168         }
30169  
30170         IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30171 diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30172 --- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c       2011-07-21 22:17:23.000000000 -0400
30173 +++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c       2011-08-23 21:48:14.000000000 -0400
30174 @@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
30175         struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30176         struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30177  
30178 +       pax_track_stack();
30179 +
30180         IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30181  
30182         /* Treat uninitialized rate scaling data same as non-existing. */
30183 @@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
30184                 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30185         struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30186  
30187 +       pax_track_stack();
30188 +
30189         /* Override starting rate (index 0) if needed for debug purposes */
30190         rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30191  
30192 diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30193 --- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c      2011-07-21 22:17:23.000000000 -0400
30194 +++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c      2011-08-23 21:48:14.000000000 -0400
30195 @@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
30196         int pos = 0;
30197         const size_t bufsz = sizeof(buf);
30198  
30199 +       pax_track_stack();
30200 +
30201         pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30202                 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30203         pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30204 @@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30205         char buf[256 * NUM_IWL_RXON_CTX];
30206         const size_t bufsz = sizeof(buf);
30207  
30208 +       pax_track_stack();
30209 +
30210         for_each_context(priv, ctx) {
30211                 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30212                                  ctx->ctxid);
30213 diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30214 --- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h        2011-07-21 22:17:23.000000000 -0400
30215 +++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h        2011-08-23 21:47:55.000000000 -0400
30216 @@ -68,8 +68,8 @@ do {                                    
30217  } while (0)
30218  
30219  #else
30220 -#define IWL_DEBUG(__priv, level, fmt, args...)
30221 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30222 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30223 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30224  static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30225                                       const void *p, u32 len)
30226  {}
30227 diff -urNp linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30228 --- linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c     2011-07-21 22:17:23.000000000 -0400
30229 +++ linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c     2011-08-23 21:48:14.000000000 -0400
30230 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30231         int buf_len = 512;
30232         size_t len = 0;
30233  
30234 +       pax_track_stack();
30235 +
30236         if (*ppos != 0)
30237                 return 0;
30238         if (count < sizeof(buf))
30239 diff -urNp linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c
30240 --- linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c   2011-07-21 22:17:23.000000000 -0400
30241 +++ linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c   2011-08-23 21:47:55.000000000 -0400
30242 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30243                 return -EINVAL;
30244  
30245         if (fake_hw_scan) {
30246 -               mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30247 -               mac80211_hwsim_ops.sw_scan_start = NULL;
30248 -               mac80211_hwsim_ops.sw_scan_complete = NULL;
30249 +               pax_open_kernel();
30250 +               *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30251 +               *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30252 +               *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30253 +               pax_close_kernel();
30254         }
30255  
30256         spin_lock_init(&hwsim_radio_lock);
30257 diff -urNp linux-3.0.4/drivers/net/wireless/rndis_wlan.c linux-3.0.4/drivers/net/wireless/rndis_wlan.c
30258 --- linux-3.0.4/drivers/net/wireless/rndis_wlan.c       2011-07-21 22:17:23.000000000 -0400
30259 +++ linux-3.0.4/drivers/net/wireless/rndis_wlan.c       2011-08-23 21:47:55.000000000 -0400
30260 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30261  
30262         netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30263  
30264 -       if (rts_threshold < 0 || rts_threshold > 2347)
30265 +       if (rts_threshold > 2347)
30266                 rts_threshold = 2347;
30267  
30268         tmp = cpu_to_le32(rts_threshold);
30269 diff -urNp linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30270 --- linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c      2011-07-21 22:17:23.000000000 -0400
30271 +++ linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c      2011-08-23 21:48:14.000000000 -0400
30272 @@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
30273         u8 rfpath;
30274         u8 num_total_rfpath = rtlphy->num_total_rfpath;
30275  
30276 +       pax_track_stack();
30277 +
30278         precommoncmdcnt = 0;
30279         _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30280                                          MAX_PRECMD_CNT,
30281 diff -urNp linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h
30282 --- linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h    2011-07-21 22:17:23.000000000 -0400
30283 +++ linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h    2011-08-23 21:47:55.000000000 -0400
30284 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
30285         void (*reset)(struct wl1251 *wl);
30286         void (*enable_irq)(struct wl1251 *wl);
30287         void (*disable_irq)(struct wl1251 *wl);
30288 -};
30289 +} __no_const;
30290  
30291  struct wl1251 {
30292         struct ieee80211_hw *hw;
30293 diff -urNp linux-3.0.4/drivers/net/wireless/wl12xx/spi.c linux-3.0.4/drivers/net/wireless/wl12xx/spi.c
30294 --- linux-3.0.4/drivers/net/wireless/wl12xx/spi.c       2011-07-21 22:17:23.000000000 -0400
30295 +++ linux-3.0.4/drivers/net/wireless/wl12xx/spi.c       2011-08-23 21:48:14.000000000 -0400
30296 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct 
30297         u32 chunk_len;
30298         int i;
30299  
30300 +       pax_track_stack();
30301 +
30302         WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30303  
30304         spi_message_init(&m);
30305 diff -urNp linux-3.0.4/drivers/oprofile/buffer_sync.c linux-3.0.4/drivers/oprofile/buffer_sync.c
30306 --- linux-3.0.4/drivers/oprofile/buffer_sync.c  2011-07-21 22:17:23.000000000 -0400
30307 +++ linux-3.0.4/drivers/oprofile/buffer_sync.c  2011-08-23 21:47:55.000000000 -0400
30308 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30309                 if (cookie == NO_COOKIE)
30310                         offset = pc;
30311                 if (cookie == INVALID_COOKIE) {
30312 -                       atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30313 +                       atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30314                         offset = pc;
30315                 }
30316                 if (cookie != last_cookie) {
30317 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct 
30318         /* add userspace sample */
30319  
30320         if (!mm) {
30321 -               atomic_inc(&oprofile_stats.sample_lost_no_mm);
30322 +               atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30323                 return 0;
30324         }
30325  
30326         cookie = lookup_dcookie(mm, s->eip, &offset);
30327  
30328         if (cookie == INVALID_COOKIE) {
30329 -               atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30330 +               atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30331                 return 0;
30332         }
30333  
30334 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30335                 /* ignore backtraces if failed to add a sample */
30336                 if (state == sb_bt_start) {
30337                         state = sb_bt_ignore;
30338 -                       atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30339 +                       atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30340                 }
30341         }
30342         release_mm(mm);
30343 diff -urNp linux-3.0.4/drivers/oprofile/event_buffer.c linux-3.0.4/drivers/oprofile/event_buffer.c
30344 --- linux-3.0.4/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
30345 +++ linux-3.0.4/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
30346 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30347         }
30348  
30349         if (buffer_pos == buffer_size) {
30350 -               atomic_inc(&oprofile_stats.event_lost_overflow);
30351 +               atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30352                 return;
30353         }
30354  
30355 diff -urNp linux-3.0.4/drivers/oprofile/oprof.c linux-3.0.4/drivers/oprofile/oprof.c
30356 --- linux-3.0.4/drivers/oprofile/oprof.c        2011-07-21 22:17:23.000000000 -0400
30357 +++ linux-3.0.4/drivers/oprofile/oprof.c        2011-08-23 21:47:55.000000000 -0400
30358 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30359         if (oprofile_ops.switch_events())
30360                 return;
30361  
30362 -       atomic_inc(&oprofile_stats.multiplex_counter);
30363 +       atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30364         start_switch_worker();
30365  }
30366  
30367 diff -urNp linux-3.0.4/drivers/oprofile/oprofilefs.c linux-3.0.4/drivers/oprofile/oprofilefs.c
30368 --- linux-3.0.4/drivers/oprofile/oprofilefs.c   2011-07-21 22:17:23.000000000 -0400
30369 +++ linux-3.0.4/drivers/oprofile/oprofilefs.c   2011-08-23 21:47:55.000000000 -0400
30370 @@ -186,7 +186,7 @@ static const struct file_operations atom
30371  
30372  
30373  int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30374 -       char const *name, atomic_t *val)
30375 +       char const *name, atomic_unchecked_t *val)
30376  {
30377         return __oprofilefs_create_file(sb, root, name,
30378                                         &atomic_ro_fops, 0444, val);
30379 diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.c linux-3.0.4/drivers/oprofile/oprofile_stats.c
30380 --- linux-3.0.4/drivers/oprofile/oprofile_stats.c       2011-07-21 22:17:23.000000000 -0400
30381 +++ linux-3.0.4/drivers/oprofile/oprofile_stats.c       2011-08-23 21:47:55.000000000 -0400
30382 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30383                 cpu_buf->sample_invalid_eip = 0;
30384         }
30385  
30386 -       atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30387 -       atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30388 -       atomic_set(&oprofile_stats.event_lost_overflow, 0);
30389 -       atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30390 -       atomic_set(&oprofile_stats.multiplex_counter, 0);
30391 +       atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30392 +       atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30393 +       atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30394 +       atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30395 +       atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30396  }
30397  
30398  
30399 diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.h linux-3.0.4/drivers/oprofile/oprofile_stats.h
30400 --- linux-3.0.4/drivers/oprofile/oprofile_stats.h       2011-07-21 22:17:23.000000000 -0400
30401 +++ linux-3.0.4/drivers/oprofile/oprofile_stats.h       2011-08-23 21:47:55.000000000 -0400
30402 @@ -13,11 +13,11 @@
30403  #include <asm/atomic.h>
30404  
30405  struct oprofile_stat_struct {
30406 -       atomic_t sample_lost_no_mm;
30407 -       atomic_t sample_lost_no_mapping;
30408 -       atomic_t bt_lost_no_mapping;
30409 -       atomic_t event_lost_overflow;
30410 -       atomic_t multiplex_counter;
30411 +       atomic_unchecked_t sample_lost_no_mm;
30412 +       atomic_unchecked_t sample_lost_no_mapping;
30413 +       atomic_unchecked_t bt_lost_no_mapping;
30414 +       atomic_unchecked_t event_lost_overflow;
30415 +       atomic_unchecked_t multiplex_counter;
30416  };
30417  
30418  extern struct oprofile_stat_struct oprofile_stats;
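
The oprofile hunks convert the statistics counters from `atomic_t` to `atomic_unchecked_t` and their call sites to `atomic_inc_unchecked()`/`atomic_set_unchecked()`. Under PaX's REFCOUNT feature ordinary `atomic_t` operations gain overflow detection; counters that are mere statistics (lost samples, overflow events, multiplex switches) may wrap harmlessly, so the patch moves them to the unchecked variant to avoid false positives and the per-operation check. A sketch of the split -- the unchecked API is supplied by the patch itself, and the demo names are hypothetical:

#include <asm/atomic.h>

static atomic_t demo_refcount;                  /* checked: overflow would be a bug  */
static atomic_unchecked_t demo_lost_events;     /* statistic: wrapping is harmless   */

static void demo_init(void)
{
        atomic_set(&demo_refcount, 1);
        atomic_set_unchecked(&demo_lost_events, 0);
}

static void demo_drop_event(void)
{
        atomic_inc_unchecked(&demo_lost_events);
}
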
30419 diff -urNp linux-3.0.4/drivers/parport/procfs.c linux-3.0.4/drivers/parport/procfs.c
30420 --- linux-3.0.4/drivers/parport/procfs.c        2011-07-21 22:17:23.000000000 -0400
30421 +++ linux-3.0.4/drivers/parport/procfs.c        2011-08-23 21:47:55.000000000 -0400
30422 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30423  
30424         *ppos += len;
30425  
30426 -       return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30427 +       return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30428  }
30429  
30430  #ifdef CONFIG_PARPORT_1284
30431 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30432  
30433         *ppos += len;
30434  
30435 -       return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30436 +       return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30437  }
30438  #endif /* IEEE1284.3 support. */
30439  
30440 diff -urNp linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h
30441 --- linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h      2011-07-21 22:17:23.000000000 -0400
30442 +++ linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h      2011-08-23 21:47:55.000000000 -0400
30443 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30444         int (*hardware_test) (struct slot* slot, u32 value);
30445         u8  (*get_power) (struct slot* slot);
30446         int (*set_power) (struct slot* slot, int value);
30447 -};
30448 +} __no_const;
30449  
30450  struct cpci_hp_controller {
30451         unsigned int irq;
30452 diff -urNp linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c
30453 --- linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c      2011-07-21 22:17:23.000000000 -0400
30454 +++ linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c      2011-08-23 21:47:55.000000000 -0400
30455 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30456  
30457  void compaq_nvram_init (void __iomem *rom_start)
30458  {
30459 +
30460 +#ifndef CONFIG_PAX_KERNEXEC
30461         if (rom_start) {
30462                 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30463         }
30464 +#endif
30465 +
30466         dbg("int15 entry  = %p\n", compaq_int15_entry_point);
30467  
30468         /* initialize our int15 lock */
30469 diff -urNp linux-3.0.4/drivers/pci/pcie/aspm.c linux-3.0.4/drivers/pci/pcie/aspm.c
30470 --- linux-3.0.4/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
30471 +++ linux-3.0.4/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
30472 @@ -27,9 +27,9 @@
30473  #define MODULE_PARAM_PREFIX "pcie_aspm."
30474  
30475  /* Note: those are not register definitions */
30476 -#define ASPM_STATE_L0S_UP      (1)     /* Upstream direction L0s state */
30477 -#define ASPM_STATE_L0S_DW      (2)     /* Downstream direction L0s state */
30478 -#define ASPM_STATE_L1          (4)     /* L1 state */
30479 +#define ASPM_STATE_L0S_UP      (1U)    /* Upstream direction L0s state */
30480 +#define ASPM_STATE_L0S_DW      (2U)    /* Downstream direction L0s state */
30481 +#define ASPM_STATE_L1          (4U)    /* L1 state */
30482  #define ASPM_STATE_L0S         (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30483  #define ASPM_STATE_ALL         (ASPM_STATE_L0S | ASPM_STATE_L1)
30484  
30485 diff -urNp linux-3.0.4/drivers/pci/probe.c linux-3.0.4/drivers/pci/probe.c
30486 --- linux-3.0.4/drivers/pci/probe.c     2011-07-21 22:17:23.000000000 -0400
30487 +++ linux-3.0.4/drivers/pci/probe.c     2011-08-23 21:47:55.000000000 -0400
30488 @@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
30489         u32 l, sz, mask;
30490         u16 orig_cmd;
30491  
30492 -       mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30493 +       mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30494  
30495         if (!dev->mmio_always_on) {
30496                 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30497 diff -urNp linux-3.0.4/drivers/pci/proc.c linux-3.0.4/drivers/pci/proc.c
30498 --- linux-3.0.4/drivers/pci/proc.c      2011-07-21 22:17:23.000000000 -0400
30499 +++ linux-3.0.4/drivers/pci/proc.c      2011-08-23 21:48:14.000000000 -0400
30500 @@ -476,7 +476,16 @@ static const struct file_operations proc
30501  static int __init pci_proc_init(void)
30502  {
30503         struct pci_dev *dev = NULL;
30504 +
30505 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
30506 +#ifdef CONFIG_GRKERNSEC_PROC_USER
30507 +       proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30508 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30509 +       proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30510 +#endif
30511 +#else
30512         proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30513 +#endif
30514         proc_create("devices", 0, proc_bus_pci_dir,
30515                     &proc_bus_pci_dev_operations);
30516         proc_initialized = 1;
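
The pci/proc.c hunk makes the permissions of `/proc/bus/pci` follow the grsecurity proc-restriction options instead of the stock default. Restated compactly with the mode bits spelled out (the config symbols are exactly those used in the hunk; the octal values in the comments are the interpretation, not text from the patch):

#ifdef CONFIG_GRKERNSEC_PROC_ADD
# ifdef CONFIG_GRKERNSEC_PROC_USER
        /* 0500: only root may list or enter /proc/bus/pci */
        proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
# elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
        /* 0550: root plus the configured proc group */
        proc_bus_pci_dir = proc_mkdir_mode("bus/pci",
                        S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
# endif
#else
        /* stock kernel behaviour */
        proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
#endif
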
30517 diff -urNp linux-3.0.4/drivers/pci/xen-pcifront.c linux-3.0.4/drivers/pci/xen-pcifront.c
30518 --- linux-3.0.4/drivers/pci/xen-pcifront.c      2011-07-21 22:17:23.000000000 -0400
30519 +++ linux-3.0.4/drivers/pci/xen-pcifront.c      2011-08-23 21:48:14.000000000 -0400
30520 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30521         struct pcifront_sd *sd = bus->sysdata;
30522         struct pcifront_device *pdev = pcifront_get_pdev(sd);
30523  
30524 +       pax_track_stack();
30525 +
30526         if (verbose_request)
30527                 dev_info(&pdev->xdev->dev,
30528                          "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30529 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30530         struct pcifront_sd *sd = bus->sysdata;
30531         struct pcifront_device *pdev = pcifront_get_pdev(sd);
30532  
30533 +       pax_track_stack();
30534 +
30535         if (verbose_request)
30536                 dev_info(&pdev->xdev->dev,
30537                          "write dev=%04x:%02x:%02x.%01x - "
30538 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30539         struct pcifront_device *pdev = pcifront_get_pdev(sd);
30540         struct msi_desc *entry;
30541  
30542 +       pax_track_stack();
30543 +
30544         if (nvec > SH_INFO_MAX_VEC) {
30545                 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30546                                    " Increase SH_INFO_MAX_VEC.\n", nvec);
30547 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30548         struct pcifront_sd *sd = dev->bus->sysdata;
30549         struct pcifront_device *pdev = pcifront_get_pdev(sd);
30550  
30551 +       pax_track_stack();
30552 +
30553         err = do_pci_op(pdev, &op);
30554  
30555         /* What should do for error ? */
30556 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30557         struct pcifront_sd *sd = dev->bus->sysdata;
30558         struct pcifront_device *pdev = pcifront_get_pdev(sd);
30559  
30560 +       pax_track_stack();
30561 +
30562         err = do_pci_op(pdev, &op);
30563         if (likely(!err)) {
30564                 vector[0] = op.value;
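pax_track_stack() appears at the top of many functions throughout this patch; it is a PaX hook tied to the stack-usage tracking in PaX builds, and the patch places it before the function touches its large on-stack objects. A hedged sketch of the shape of the pattern only, with track_stack() as a hypothetical stand-in for the real hook:

#include <string.h>

static void track_stack(void)
{
        /* placeholder for the kernel-side pax_track_stack() hook */
}

static int frontend_read(char *out, size_t len)
{
        char buf[1024];                 /* sizable stack frame */

        track_stack();                  /* hook runs before the buffer is used */
        memset(buf, 0, sizeof(buf));
        memcpy(out, buf, len < sizeof(buf) ? len : sizeof(buf));
        return 0;
}

int main(void)
{
        char out[16];
        return frontend_read(out, sizeof(out));
}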
30565 diff -urNp linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c
30566 --- linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c    2011-07-21 22:17:23.000000000 -0400
30567 +++ linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c    2011-08-23 21:47:55.000000000 -0400
30568 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
30569         return 0;
30570  }
30571  
30572 -void static hotkey_mask_warn_incomplete_mask(void)
30573 +static void hotkey_mask_warn_incomplete_mask(void)
30574  {
30575         /* log only what the user can fix... */
30576         const u32 wantedmask = hotkey_driver_mask &
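The thinkpad_acpi change just reorders declaration specifiers. Both spellings are accepted by C, but placing the storage-class specifier anywhere other than first is an obsolescent feature and compilers can warn about it; a two-line sketch:

void static old_style(void) { }    /* accepted, but obsolescent ordering */
static void new_style(void) { }    /* what the patch switches to */

int main(void)
{
        old_style();
        new_style();
        return 0;
}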
30577 diff -urNp linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c
30578 --- linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
30579 +++ linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
30580 @@ -59,7 +59,7 @@ do { \
30581         set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30582  } while(0)
30583  
30584 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30585 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30586                         (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30587  
30588  /*
30589 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30590  
30591         cpu = get_cpu();
30592         save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30593 +
30594 +       pax_open_kernel();
30595         get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30596 +       pax_close_kernel();
30597  
30598         /* On some boxes IRQ's during PnP BIOS calls are deadly.  */
30599         spin_lock_irqsave(&pnp_bios_lock, flags);
30600 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30601                              :"memory");
30602         spin_unlock_irqrestore(&pnp_bios_lock, flags);
30603  
30604 +       pax_open_kernel();
30605         get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30606 +       pax_close_kernel();
30607 +
30608         put_cpu();
30609  
30610         /* If we get here and this is set then the PnP BIOS faulted on us. */
30611 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30612         return status;
30613  }
30614  
30615 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
30616 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30617  {
30618         int i;
30619  
30620 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30621         pnp_bios_callpoint.offset = header->fields.pm16offset;
30622         pnp_bios_callpoint.segment = PNP_CS16;
30623  
30624 +       pax_open_kernel();
30625 +
30626         for_each_possible_cpu(i) {
30627                 struct desc_struct *gdt = get_cpu_gdt_table(i);
30628                 if (!gdt)
30629 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30630                 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30631                          (unsigned long)__va(header->fields.pm16dseg));
30632         }
30633 +
30634 +       pax_close_kernel();
30635  }
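The PnP BIOS hunks wrap every write to the per-CPU GDT in pax_open_kernel()/pax_close_kernel(), so data that is normally read-only is writable only for the duration of the update (bad_bios_desc itself also becomes const, and its type byte changes from 0x4092 to 0x4093, presumably so the CPU never needs to set the accessed bit on a now read-only descriptor). A user-space analogue of that open/write/close discipline, using mprotect() on an anonymous page rather than the real kernel mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *desc = mmap(NULL, (size_t)pagesz, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (desc == MAP_FAILED)
                return 1;

        strcpy(desc, "gdt entry");
        mprotect(desc, (size_t)pagesz, PROT_READ);                /* steady state: RO   */

        mprotect(desc, (size_t)pagesz, PROT_READ | PROT_WRITE);   /* "pax_open_kernel"  */
        desc[0] = 'G';                                            /* the permitted write */
        mprotect(desc, (size_t)pagesz, PROT_READ);                /* "pax_close_kernel" */

        puts(desc);
        munmap(desc, (size_t)pagesz);
        return 0;
}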
30636 diff -urNp linux-3.0.4/drivers/pnp/resource.c linux-3.0.4/drivers/pnp/resource.c
30637 --- linux-3.0.4/drivers/pnp/resource.c  2011-07-21 22:17:23.000000000 -0400
30638 +++ linux-3.0.4/drivers/pnp/resource.c  2011-08-23 21:47:55.000000000 -0400
30639 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30640                 return 1;
30641  
30642         /* check if the resource is valid */
30643 -       if (*irq < 0 || *irq > 15)
30644 +       if (*irq > 15)
30645                 return 0;
30646  
30647         /* check if the resource is reserved */
30648 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30649                 return 1;
30650  
30651         /* check if the resource is valid */
30652 -       if (*dma < 0 || *dma == 4 || *dma > 7)
30653 +       if (*dma == 4 || *dma > 7)
30654                 return 0;
30655  
30656         /* check if the resource is reserved */
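Both pnp_check_irq() and pnp_check_dma() receive the value through an unsigned pointer, so the dropped "*irq < 0" / "*dma < 0" half of each test could never be true; removing it silences the tautological-comparison warning without changing behaviour. A minimal sketch:

#include <stdio.h>

static int irq_is_invalid(unsigned int irq)
{
        return irq > 15;        /* same result as (irq < 0 || irq > 15) for unsigned irq */
}

static int dma_is_invalid(unsigned int dma)
{
        return dma == 4 || dma > 7;
}

int main(void)
{
        printf("%d %d %d\n", irq_is_invalid(0), irq_is_invalid(15), irq_is_invalid(16));
        printf("%d %d %d\n", dma_is_invalid(3), dma_is_invalid(4), dma_is_invalid(8));
        return 0;
}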
30657 diff -urNp linux-3.0.4/drivers/power/bq27x00_battery.c linux-3.0.4/drivers/power/bq27x00_battery.c
30658 --- linux-3.0.4/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
30659 +++ linux-3.0.4/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
30660 @@ -67,7 +67,7 @@
30661  struct bq27x00_device_info;
30662  struct bq27x00_access_methods {
30663         int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30664 -};
30665 +} __no_const;
30666  
30667  enum bq27x00_chip { BQ27000, BQ27500 };
30668  
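The __no_const annotation keeps recurring on structures that hold nothing but function pointers (bq27x00_access_methods here, and several SCSI ops tables below). Under the constification this patch applies, such structures are treated as read-only by default; __no_const marks the ones whose pointers genuinely get filled in at runtime. A plain-C sketch of the distinction, using ordinary const rather than the plugin:

struct access_methods {
        int (*read)(int reg);
};

static int read_impl(int reg)
{
        return reg * 2;
}

/* Fully known at build time: can live in read-only data. */
static const struct access_methods fixed_ops = { .read = read_impl };

/* Filled in later, e.g. once the chip variant is probed: must stay
 * writable, which is what __no_const expresses in the patched kernel. */
static struct access_methods probed_ops;

int main(void)
{
        probed_ops.read = read_impl;
        return fixed_ops.read(1) + probed_ops.read(2) == 6 ? 0 : 1;
}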
30669 diff -urNp linux-3.0.4/drivers/regulator/max8660.c linux-3.0.4/drivers/regulator/max8660.c
30670 --- linux-3.0.4/drivers/regulator/max8660.c     2011-07-21 22:17:23.000000000 -0400
30671 +++ linux-3.0.4/drivers/regulator/max8660.c     2011-08-23 21:47:55.000000000 -0400
30672 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30673                 max8660->shadow_regs[MAX8660_OVER1] = 5;
30674         } else {
30675                 /* Otherwise devices can be toggled via software */
30676 -               max8660_dcdc_ops.enable = max8660_dcdc_enable;
30677 -               max8660_dcdc_ops.disable = max8660_dcdc_disable;
30678 +               pax_open_kernel();
30679 +               *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30680 +               *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30681 +               pax_close_kernel();
30682         }
30683  
30684         /*
30685 diff -urNp linux-3.0.4/drivers/regulator/mc13892-regulator.c linux-3.0.4/drivers/regulator/mc13892-regulator.c
30686 --- linux-3.0.4/drivers/regulator/mc13892-regulator.c   2011-07-21 22:17:23.000000000 -0400
30687 +++ linux-3.0.4/drivers/regulator/mc13892-regulator.c   2011-08-23 21:47:55.000000000 -0400
30688 @@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
30689         }
30690         mc13xxx_unlock(mc13892);
30691  
30692 -       mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30693 +       pax_open_kernel();
30694 +       *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30695                 = mc13892_vcam_set_mode;
30696 -       mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30697 +       *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30698                 = mc13892_vcam_get_mode;
30699 +       pax_close_kernel();
30700         for (i = 0; i < pdata->num_regulators; i++) {
30701                 init_data = &pdata->regulators[i];
30702                 priv->regulators[i] = regulator_register(
30703 diff -urNp linux-3.0.4/drivers/rtc/rtc-dev.c linux-3.0.4/drivers/rtc/rtc-dev.c
30704 --- linux-3.0.4/drivers/rtc/rtc-dev.c   2011-07-21 22:17:23.000000000 -0400
30705 +++ linux-3.0.4/drivers/rtc/rtc-dev.c   2011-08-23 21:48:14.000000000 -0400
30706 @@ -14,6 +14,7 @@
30707  #include <linux/module.h>
30708  #include <linux/rtc.h>
30709  #include <linux/sched.h>
30710 +#include <linux/grsecurity.h>
30711  #include "rtc-core.h"
30712  
30713  static dev_t rtc_devt;
30714 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30715                 if (copy_from_user(&tm, uarg, sizeof(tm)))
30716                         return -EFAULT;
30717  
30718 +               gr_log_timechange();
30719 +
30720                 return rtc_set_time(rtc, &tm);
30721  
30722         case RTC_PIE_ON:
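The rtc-dev hunk adds a single grsecurity audit call, gr_log_timechange(), just before rtc_set_time() is invoked, so every attempt to change the hardware clock gets logged first. A sketch of that hook-before-action shape, with log_timechange() as a hypothetical stand-in for the grsecurity call:

#include <stdio.h>
#include <time.h>

static void log_timechange(void)
{
        fprintf(stderr, "audit: time change requested\n");
}

static int rtc_set(const struct tm *tm)
{
        log_timechange();            /* record the attempt before acting on it */
        (void)tm;                    /* a real driver would program the RTC here */
        return 0;
}

int main(void)
{
        struct tm tm = { .tm_year = 111, .tm_mon = 7, .tm_mday = 23 };
        return rtc_set(&tm);
}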
30723 diff -urNp linux-3.0.4/drivers/scsi/aacraid/aacraid.h linux-3.0.4/drivers/scsi/aacraid/aacraid.h
30724 --- linux-3.0.4/drivers/scsi/aacraid/aacraid.h  2011-07-21 22:17:23.000000000 -0400
30725 +++ linux-3.0.4/drivers/scsi/aacraid/aacraid.h  2011-08-23 21:47:55.000000000 -0400
30726 @@ -492,7 +492,7 @@ struct adapter_ops
30727         int  (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30728         /* Administrative operations */
30729         int  (*adapter_comm)(struct aac_dev * dev, int comm);
30730 -};
30731 +} __no_const;
30732  
30733  /*
30734   *     Define which interrupt handler needs to be installed
30735 diff -urNp linux-3.0.4/drivers/scsi/aacraid/commctrl.c linux-3.0.4/drivers/scsi/aacraid/commctrl.c
30736 --- linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
30737 +++ linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
30738 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30739         u32 actual_fibsize64, actual_fibsize = 0;
30740         int i;
30741  
30742 +       pax_track_stack();
30743  
30744         if (dev->in_reset) {
30745                 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30746 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfad.c linux-3.0.4/drivers/scsi/bfa/bfad.c
30747 --- linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
30748 +++ linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
30749 @@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30750         struct bfad_vport_s *vport, *vport_new;
30751         struct bfa_fcs_driver_info_s driver_info;
30752  
30753 +       pax_track_stack();
30754 +
30755         /* Fill the driver_info info to fcs*/
30756         memset(&driver_info, 0, sizeof(driver_info));
30757         strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30758 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c
30759 --- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c        2011-07-21 22:17:23.000000000 -0400
30760 +++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c        2011-08-23 21:48:14.000000000 -0400
30761 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30762         u16        len, count;
30763         u16     templen;
30764  
30765 +       pax_track_stack();
30766 +
30767         /*
30768          * get hba attributes
30769          */
30770 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30771         u8      count = 0;
30772         u16     templen;
30773  
30774 +       pax_track_stack();
30775 +
30776         /*
30777          * get port attributes
30778          */
30779 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c
30780 --- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c        2011-07-21 22:17:23.000000000 -0400
30781 +++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c        2011-08-23 21:48:14.000000000 -0400
30782 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30783         struct fc_rpsc_speed_info_s speeds;
30784         struct bfa_port_attr_s pport_attr;
30785  
30786 +       pax_track_stack();
30787 +
30788         bfa_trc(port->fcs, rx_fchs->s_id);
30789         bfa_trc(port->fcs, rx_fchs->d_id);
30790  
30791 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa.h linux-3.0.4/drivers/scsi/bfa/bfa.h
30792 --- linux-3.0.4/drivers/scsi/bfa/bfa.h  2011-07-21 22:17:23.000000000 -0400
30793 +++ linux-3.0.4/drivers/scsi/bfa/bfa.h  2011-08-23 21:47:55.000000000 -0400
30794 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
30795                                 u32 *nvecs, u32 *maxvec);
30796         void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30797                                        u32 *end);
30798 -};
30799 +} __no_const;
30800  typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30801  
30802  struct bfa_iocfc_s {
30803 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h
30804 --- linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h      2011-07-21 22:17:23.000000000 -0400
30805 +++ linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h      2011-08-23 21:47:55.000000000 -0400
30806 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30807         bfa_ioc_disable_cbfn_t  disable_cbfn;
30808         bfa_ioc_hbfail_cbfn_t   hbfail_cbfn;
30809         bfa_ioc_reset_cbfn_t    reset_cbfn;
30810 -};
30811 +} __no_const;
30812  
30813  /*
30814   * Heartbeat failure notification queue element.
30815 @@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
30816         void            (*ioc_sync_leave)       (struct bfa_ioc_s *ioc);
30817         void            (*ioc_sync_ack)         (struct bfa_ioc_s *ioc);
30818         bfa_boolean_t   (*ioc_sync_complete)    (struct bfa_ioc_s *ioc);
30819 -};
30820 +} __no_const;
30821  
30822  #define bfa_ioc_pcifn(__ioc)           ((__ioc)->pcidev.pci_func)
30823  #define bfa_ioc_devid(__ioc)           ((__ioc)->pcidev.device_id)
30824 diff -urNp linux-3.0.4/drivers/scsi/BusLogic.c linux-3.0.4/drivers/scsi/BusLogic.c
30825 --- linux-3.0.4/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
30826 +++ linux-3.0.4/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
30827 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30828  static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30829                                                     *PrototypeHostAdapter)
30830  {
30831 +       pax_track_stack();
30832 +
30833         /*
30834            If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30835            Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30836 diff -urNp linux-3.0.4/drivers/scsi/dpt_i2o.c linux-3.0.4/drivers/scsi/dpt_i2o.c
30837 --- linux-3.0.4/drivers/scsi/dpt_i2o.c  2011-07-21 22:17:23.000000000 -0400
30838 +++ linux-3.0.4/drivers/scsi/dpt_i2o.c  2011-08-23 21:48:14.000000000 -0400
30839 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30840         dma_addr_t addr;
30841         ulong flags = 0;
30842  
30843 +       pax_track_stack();
30844 +
30845         memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30846         // get user msg size in u32s 
30847         if(get_user(size, &user_msg[0])){
30848 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
30849         s32 rcode;
30850         dma_addr_t addr;
30851  
30852 +       pax_track_stack();
30853 +
30854         memset(msg, 0 , sizeof(msg));
30855         len = scsi_bufflen(cmd);
30856         direction = 0x00000000; 
30857 diff -urNp linux-3.0.4/drivers/scsi/eata.c linux-3.0.4/drivers/scsi/eata.c
30858 --- linux-3.0.4/drivers/scsi/eata.c     2011-07-21 22:17:23.000000000 -0400
30859 +++ linux-3.0.4/drivers/scsi/eata.c     2011-08-23 21:48:14.000000000 -0400
30860 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
30861         struct hostdata *ha;
30862         char name[16];
30863  
30864 +       pax_track_stack();
30865 +
30866         sprintf(name, "%s%d", driver_name, j);
30867  
30868         if (!request_region(port_base, REGION_SIZE, driver_name)) {
30869 diff -urNp linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c
30870 --- linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c   2011-07-21 22:17:23.000000000 -0400
30871 +++ linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c   2011-08-23 21:48:14.000000000 -0400
30872 @@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
30873         } buf;
30874         int rc;
30875  
30876 +       pax_track_stack();
30877 +
30878         fiph = (struct fip_header *)skb->data;
30879         sub = fiph->fip_subcode;
30880  
30881 diff -urNp linux-3.0.4/drivers/scsi/gdth.c linux-3.0.4/drivers/scsi/gdth.c
30882 --- linux-3.0.4/drivers/scsi/gdth.c     2011-07-21 22:17:23.000000000 -0400
30883 +++ linux-3.0.4/drivers/scsi/gdth.c     2011-08-23 21:48:14.000000000 -0400
30884 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
30885      unsigned long flags;
30886      gdth_ha_str *ha;
30887  
30888 +    pax_track_stack();
30889 +
30890      if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
30891          return -EFAULT;
30892      ha = gdth_find_ha(ldrv.ionode);
30893 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
30894      gdth_ha_str *ha;
30895      int rval;
30896  
30897 +    pax_track_stack();
30898 +
30899      if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
30900          res.number >= MAX_HDRIVES)
30901          return -EFAULT;
30902 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
30903      gdth_ha_str *ha;
30904      int rval;
30905  
30906 +    pax_track_stack();
30907 +
30908      if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
30909          return -EFAULT;
30910      ha = gdth_find_ha(gen.ionode);
30911 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
30912      int             i;
30913      gdth_cmd_str    gdtcmd;
30914      char            cmnd[MAX_COMMAND_SIZE];   
30915 +
30916 +    pax_track_stack();
30917 +
30918      memset(cmnd, 0xff, MAX_COMMAND_SIZE);
30919  
30920      TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
30921 diff -urNp linux-3.0.4/drivers/scsi/gdth_proc.c linux-3.0.4/drivers/scsi/gdth_proc.c
30922 --- linux-3.0.4/drivers/scsi/gdth_proc.c        2011-07-21 22:17:23.000000000 -0400
30923 +++ linux-3.0.4/drivers/scsi/gdth_proc.c        2011-08-23 21:48:14.000000000 -0400
30924 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
30925      u64         paddr;
30926  
30927      char            cmnd[MAX_COMMAND_SIZE];
30928 +
30929 +    pax_track_stack();
30930 +
30931      memset(cmnd, 0xff, 12);
30932      memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
30933  
30934 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
30935      gdth_hget_str *phg;
30936      char cmnd[MAX_COMMAND_SIZE];
30937  
30938 +    pax_track_stack();
30939 +
30940      gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
30941      estr = kmalloc(sizeof(*estr), GFP_KERNEL);
30942      if (!gdtcmd || !estr)
30943 diff -urNp linux-3.0.4/drivers/scsi/hosts.c linux-3.0.4/drivers/scsi/hosts.c
30944 --- linux-3.0.4/drivers/scsi/hosts.c    2011-07-21 22:17:23.000000000 -0400
30945 +++ linux-3.0.4/drivers/scsi/hosts.c    2011-08-23 21:47:55.000000000 -0400
30946 @@ -42,7 +42,7 @@
30947  #include "scsi_logging.h"
30948  
30949  
30950 -static atomic_t scsi_host_next_hn;     /* host_no for next new host */
30951 +static atomic_unchecked_t scsi_host_next_hn;   /* host_no for next new host */
30952  
30953  
30954  static void scsi_host_cls_release(struct device *dev)
30955 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
30956          * subtract one because we increment first then return, but we need to
30957          * know what the next host number was before increment
30958          */
30959 -       shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
30960 +       shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
30961         shost->dma_channel = 0xff;
30962  
30963         /* These three are default values which can be overridden */
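From here on, a large share of the hunks are the same mechanical conversion: counters that are allowed to wrap (the SCSI host number above, per-device I/O statistics, trace sequence numbers, FC event numbers) move from atomic_t to atomic_unchecked_t with matching *_unchecked accessors, so the overflow checking applied to ordinary atomic_t reference counts does not fire on them. A user-space sketch of the split, where checked_inc() stands in for the instrumented atomic and a plain wrapping unsigned stands in for the unchecked one:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int checked_inc(int *v)                 /* refuses to wrap, like a refcount */
{
        int next;

        if (__builtin_add_overflow(*v, 1, &next)) {
                fprintf(stderr, "counter overflow detected\n");
                abort();
        }
        return *v = next;
}

static unsigned int unchecked_inc(unsigned int *v)   /* wrapping is expected */
{
        return ++*v;
}

int main(void)
{
        int ref = INT_MAX - 1;
        unsigned int seq = UINT_MAX;

        printf("checked:   %d\n", checked_inc(&ref));   /* reaches INT_MAX; next call aborts */
        printf("unchecked: %u\n", unchecked_inc(&seq)); /* wraps cleanly to 0 */
        return 0;
}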
30964 diff -urNp linux-3.0.4/drivers/scsi/hpsa.c linux-3.0.4/drivers/scsi/hpsa.c
30965 --- linux-3.0.4/drivers/scsi/hpsa.c     2011-07-21 22:17:23.000000000 -0400
30966 +++ linux-3.0.4/drivers/scsi/hpsa.c     2011-08-23 21:47:55.000000000 -0400
30967 @@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
30968         u32 a;
30969  
30970         if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
30971 -               return h->access.command_completed(h);
30972 +               return h->access->command_completed(h);
30973  
30974         if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
30975                 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
30976 @@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
30977         while (!list_empty(&h->reqQ)) {
30978                 c = list_entry(h->reqQ.next, struct CommandList, list);
30979                 /* can't do anything if fifo is full */
30980 -               if ((h->access.fifo_full(h))) {
30981 +               if ((h->access->fifo_full(h))) {
30982                         dev_warn(&h->pdev->dev, "fifo full\n");
30983                         break;
30984                 }
30985 @@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
30986                 h->Qdepth--;
30987  
30988                 /* Tell the controller execute command */
30989 -               h->access.submit_command(h, c);
30990 +               h->access->submit_command(h, c);
30991  
30992                 /* Put job onto the completed Q */
30993                 addQ(&h->cmpQ, c);
30994 @@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
30995  
30996  static inline unsigned long get_next_completion(struct ctlr_info *h)
30997  {
30998 -       return h->access.command_completed(h);
30999 +       return h->access->command_completed(h);
31000  }
31001  
31002  static inline bool interrupt_pending(struct ctlr_info *h)
31003  {
31004 -       return h->access.intr_pending(h);
31005 +       return h->access->intr_pending(h);
31006  }
31007  
31008  static inline long interrupt_not_for_us(struct ctlr_info *h)
31009  {
31010 -       return (h->access.intr_pending(h) == 0) ||
31011 +       return (h->access->intr_pending(h) == 0) ||
31012                 (h->interrupts_enabled == 0);
31013  }
31014  
31015 @@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
31016         if (prod_index < 0)
31017                 return -ENODEV;
31018         h->product_name = products[prod_index].product_name;
31019 -       h->access = *(products[prod_index].access);
31020 +       h->access = products[prod_index].access;
31021  
31022         if (hpsa_board_disabled(h->pdev)) {
31023                 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31024 @@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
31025         }
31026  
31027         /* make sure the board interrupts are off */
31028 -       h->access.set_intr_mask(h, HPSA_INTR_OFF);
31029 +       h->access->set_intr_mask(h, HPSA_INTR_OFF);
31030  
31031         if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
31032                 goto clean2;
31033 @@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
31034                  * fake ones to scoop up any residual completions.
31035                  */
31036                 spin_lock_irqsave(&h->lock, flags);
31037 -               h->access.set_intr_mask(h, HPSA_INTR_OFF);
31038 +               h->access->set_intr_mask(h, HPSA_INTR_OFF);
31039                 spin_unlock_irqrestore(&h->lock, flags);
31040                 free_irq(h->intr[h->intr_mode], h);
31041                 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
31042 @@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
31043                 dev_info(&h->pdev->dev, "Board READY.\n");
31044                 dev_info(&h->pdev->dev,
31045                         "Waiting for stale completions to drain.\n");
31046 -               h->access.set_intr_mask(h, HPSA_INTR_ON);
31047 +               h->access->set_intr_mask(h, HPSA_INTR_ON);
31048                 msleep(10000);
31049 -               h->access.set_intr_mask(h, HPSA_INTR_OFF);
31050 +               h->access->set_intr_mask(h, HPSA_INTR_OFF);
31051  
31052                 rc = controller_reset_failed(h->cfgtable);
31053                 if (rc)
31054 @@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
31055         }
31056  
31057         /* Turn the interrupts on so we can service requests */
31058 -       h->access.set_intr_mask(h, HPSA_INTR_ON);
31059 +       h->access->set_intr_mask(h, HPSA_INTR_ON);
31060  
31061         hpsa_hba_inquiry(h);
31062         hpsa_register_scsi(h);  /* hook ourselves into SCSI subsystem */
31063 @@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
31064          * To write all data in the battery backed cache to disks
31065          */
31066         hpsa_flush_cache(h);
31067 -       h->access.set_intr_mask(h, HPSA_INTR_OFF);
31068 +       h->access->set_intr_mask(h, HPSA_INTR_OFF);
31069         free_irq(h->intr[h->intr_mode], h);
31070  #ifdef CONFIG_PCI_MSI
31071         if (h->msix_vector)
31072 @@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
31073                 return;
31074         }
31075         /* Change the access methods to the performant access methods */
31076 -       h->access = SA5_performant_access;
31077 +       h->access = &SA5_performant_access;
31078         h->transMethod = CFGTBL_Trans_Performant;
31079  }
31080  
31081 diff -urNp linux-3.0.4/drivers/scsi/hpsa.h linux-3.0.4/drivers/scsi/hpsa.h
31082 --- linux-3.0.4/drivers/scsi/hpsa.h     2011-08-23 21:44:40.000000000 -0400
31083 +++ linux-3.0.4/drivers/scsi/hpsa.h     2011-08-23 21:47:55.000000000 -0400
31084 @@ -73,7 +73,7 @@ struct ctlr_info {
31085         unsigned int msix_vector;
31086         unsigned int msi_vector;
31087         int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31088 -       struct access_method access;
31089 +       struct access_method *access;
31090  
31091         /* queue and queue Info */
31092         struct list_head reqQ;
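The hpsa changes form one coherent conversion: struct ctlr_info used to embed a writable copy of the access_method table (h->access = *(products[prod_index].access)), and now it just points at the shared table, so every h->access.fn() call site becomes h->access->fn(). That lets the method tables themselves stay read-only. A compact sketch of the before/after shape (the sketch also adds const on the pointer; the patch keeps it non-const):

#include <stdio.h>

struct access_method {
        void (*submit_command)(void);
};

static void sa5_submit(void)
{
        puts("submit");
}

/* Shared, read-only method table. */
static const struct access_method SA5_access = { .submit_command = sa5_submit };

struct ctlr_info {
        const struct access_method *access;    /* was: struct access_method access; */
};

int main(void)
{
        struct ctlr_info h = { .access = &SA5_access };

        h.access->submit_command();            /* call sites change from . to -> */
        return 0;
}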
31093 diff -urNp linux-3.0.4/drivers/scsi/ips.h linux-3.0.4/drivers/scsi/ips.h
31094 --- linux-3.0.4/drivers/scsi/ips.h      2011-07-21 22:17:23.000000000 -0400
31095 +++ linux-3.0.4/drivers/scsi/ips.h      2011-08-23 21:47:55.000000000 -0400
31096 @@ -1027,7 +1027,7 @@ typedef struct {
31097     int       (*intr)(struct ips_ha *);
31098     void      (*enableint)(struct ips_ha *);
31099     uint32_t (*statupd)(struct ips_ha *);
31100 -} ips_hw_func_t;
31101 +} __no_const ips_hw_func_t;
31102  
31103  typedef struct ips_ha {
31104     uint8_t            ha_id[IPS_MAX_CHANNELS+1];
31105 diff -urNp linux-3.0.4/drivers/scsi/libfc/fc_exch.c linux-3.0.4/drivers/scsi/libfc/fc_exch.c
31106 --- linux-3.0.4/drivers/scsi/libfc/fc_exch.c    2011-07-21 22:17:23.000000000 -0400
31107 +++ linux-3.0.4/drivers/scsi/libfc/fc_exch.c    2011-08-23 21:47:55.000000000 -0400
31108 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
31109          * all together if not used XXX
31110          */
31111         struct {
31112 -               atomic_t no_free_exch;
31113 -               atomic_t no_free_exch_xid;
31114 -               atomic_t xid_not_found;
31115 -               atomic_t xid_busy;
31116 -               atomic_t seq_not_found;
31117 -               atomic_t non_bls_resp;
31118 +               atomic_unchecked_t no_free_exch;
31119 +               atomic_unchecked_t no_free_exch_xid;
31120 +               atomic_unchecked_t xid_not_found;
31121 +               atomic_unchecked_t xid_busy;
31122 +               atomic_unchecked_t seq_not_found;
31123 +               atomic_unchecked_t non_bls_resp;
31124         } stats;
31125  };
31126  
31127 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31128         /* allocate memory for exchange */
31129         ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31130         if (!ep) {
31131 -               atomic_inc(&mp->stats.no_free_exch);
31132 +               atomic_inc_unchecked(&mp->stats.no_free_exch);
31133                 goto out;
31134         }
31135         memset(ep, 0, sizeof(*ep));
31136 @@ -761,7 +761,7 @@ out:
31137         return ep;
31138  err:
31139         spin_unlock_bh(&pool->lock);
31140 -       atomic_inc(&mp->stats.no_free_exch_xid);
31141 +       atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31142         mempool_free(ep, mp->ep_pool);
31143         return NULL;
31144  }
31145 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31146                 xid = ntohs(fh->fh_ox_id);      /* we originated exch */
31147                 ep = fc_exch_find(mp, xid);
31148                 if (!ep) {
31149 -                       atomic_inc(&mp->stats.xid_not_found);
31150 +                       atomic_inc_unchecked(&mp->stats.xid_not_found);
31151                         reject = FC_RJT_OX_ID;
31152                         goto out;
31153                 }
31154 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31155                 ep = fc_exch_find(mp, xid);
31156                 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31157                         if (ep) {
31158 -                               atomic_inc(&mp->stats.xid_busy);
31159 +                               atomic_inc_unchecked(&mp->stats.xid_busy);
31160                                 reject = FC_RJT_RX_ID;
31161                                 goto rel;
31162                         }
31163 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31164                         }
31165                         xid = ep->xid;  /* get our XID */
31166                 } else if (!ep) {
31167 -                       atomic_inc(&mp->stats.xid_not_found);
31168 +                       atomic_inc_unchecked(&mp->stats.xid_not_found);
31169                         reject = FC_RJT_RX_ID;  /* XID not found */
31170                         goto out;
31171                 }
31172 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31173         } else {
31174                 sp = &ep->seq;
31175                 if (sp->id != fh->fh_seq_id) {
31176 -                       atomic_inc(&mp->stats.seq_not_found);
31177 +                       atomic_inc_unchecked(&mp->stats.seq_not_found);
31178                         reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31179                         goto rel;
31180                 }
31181 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31182  
31183         ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31184         if (!ep) {
31185 -               atomic_inc(&mp->stats.xid_not_found);
31186 +               atomic_inc_unchecked(&mp->stats.xid_not_found);
31187                 goto out;
31188         }
31189         if (ep->esb_stat & ESB_ST_COMPLETE) {
31190 -               atomic_inc(&mp->stats.xid_not_found);
31191 +               atomic_inc_unchecked(&mp->stats.xid_not_found);
31192                 goto rel;
31193         }
31194         if (ep->rxid == FC_XID_UNKNOWN)
31195                 ep->rxid = ntohs(fh->fh_rx_id);
31196         if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31197 -               atomic_inc(&mp->stats.xid_not_found);
31198 +               atomic_inc_unchecked(&mp->stats.xid_not_found);
31199                 goto rel;
31200         }
31201         if (ep->did != ntoh24(fh->fh_s_id) &&
31202             ep->did != FC_FID_FLOGI) {
31203 -               atomic_inc(&mp->stats.xid_not_found);
31204 +               atomic_inc_unchecked(&mp->stats.xid_not_found);
31205                 goto rel;
31206         }
31207         sof = fr_sof(fp);
31208 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31209                 sp->ssb_stat |= SSB_ST_RESP;
31210                 sp->id = fh->fh_seq_id;
31211         } else if (sp->id != fh->fh_seq_id) {
31212 -               atomic_inc(&mp->stats.seq_not_found);
31213 +               atomic_inc_unchecked(&mp->stats.seq_not_found);
31214                 goto rel;
31215         }
31216  
31217 @@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
31218         sp = fc_seq_lookup_orig(mp, fp);        /* doesn't hold sequence */
31219  
31220         if (!sp)
31221 -               atomic_inc(&mp->stats.xid_not_found);
31222 +               atomic_inc_unchecked(&mp->stats.xid_not_found);
31223         else
31224 -               atomic_inc(&mp->stats.non_bls_resp);
31225 +               atomic_inc_unchecked(&mp->stats.non_bls_resp);
31226  
31227         fc_frame_free(fp);
31228  }
31229 diff -urNp linux-3.0.4/drivers/scsi/libsas/sas_ata.c linux-3.0.4/drivers/scsi/libsas/sas_ata.c
31230 --- linux-3.0.4/drivers/scsi/libsas/sas_ata.c   2011-07-21 22:17:23.000000000 -0400
31231 +++ linux-3.0.4/drivers/scsi/libsas/sas_ata.c   2011-08-23 21:47:55.000000000 -0400
31232 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
31233         .postreset              = ata_std_postreset,
31234         .error_handler          = ata_std_error_handler,
31235         .post_internal_cmd      = sas_ata_post_internal,
31236 -       .qc_defer               = ata_std_qc_defer,
31237 +       .qc_defer               = ata_std_qc_defer,
31238         .qc_prep                = ata_noop_qc_prep,
31239         .qc_issue               = sas_ata_qc_issue,
31240         .qc_fill_rtf            = sas_ata_qc_fill_rtf,
31241 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c
31242 --- linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c        2011-07-21 22:17:23.000000000 -0400
31243 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c        2011-08-23 21:48:14.000000000 -0400
31244 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31245  
31246  #include <linux/debugfs.h>
31247  
31248 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31249 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31250  static unsigned long lpfc_debugfs_start_time = 0L;
31251  
31252  /* iDiag */
31253 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31254         lpfc_debugfs_enable = 0;
31255  
31256         len = 0;
31257 -       index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31258 +       index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31259                 (lpfc_debugfs_max_disc_trc - 1);
31260         for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31261                 dtp = vport->disc_trc + i;
31262 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31263         lpfc_debugfs_enable = 0;
31264  
31265         len = 0;
31266 -       index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31267 +       index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31268                 (lpfc_debugfs_max_slow_ring_trc - 1);
31269         for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31270                 dtp = phba->slow_ring_trc + i;
31271 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31272         uint32_t *ptr;
31273         char buffer[1024];
31274  
31275 +       pax_track_stack();
31276 +
31277         off = 0;
31278         spin_lock_irq(&phba->hbalock);
31279  
31280 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport 
31281                 !vport || !vport->disc_trc)
31282                 return;
31283  
31284 -       index = atomic_inc_return(&vport->disc_trc_cnt) &
31285 +       index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31286                 (lpfc_debugfs_max_disc_trc - 1);
31287         dtp = vport->disc_trc + index;
31288         dtp->fmt = fmt;
31289         dtp->data1 = data1;
31290         dtp->data2 = data2;
31291         dtp->data3 = data3;
31292 -       dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31293 +       dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31294         dtp->jif = jiffies;
31295  #endif
31296         return;
31297 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31298                 !phba || !phba->slow_ring_trc)
31299                 return;
31300  
31301 -       index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31302 +       index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31303                 (lpfc_debugfs_max_slow_ring_trc - 1);
31304         dtp = phba->slow_ring_trc + index;
31305         dtp->fmt = fmt;
31306         dtp->data1 = data1;
31307         dtp->data2 = data2;
31308         dtp->data3 = data3;
31309 -       dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31310 +       dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31311         dtp->jif = jiffies;
31312  #endif
31313         return;
31314 @@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31315                                                  "slow_ring buffer\n");
31316                                 goto debug_failed;
31317                         }
31318 -                       atomic_set(&phba->slow_ring_trc_cnt, 0);
31319 +                       atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31320                         memset(phba->slow_ring_trc, 0,
31321                                 (sizeof(struct lpfc_debugfs_trc) *
31322                                 lpfc_debugfs_max_slow_ring_trc));
31323 @@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31324                                  "buffer\n");
31325                 goto debug_failed;
31326         }
31327 -       atomic_set(&vport->disc_trc_cnt, 0);
31328 +       atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31329  
31330         snprintf(name, sizeof(name), "discovery_trace");
31331         vport->debug_disc_trc =
31332 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc.h linux-3.0.4/drivers/scsi/lpfc/lpfc.h
31333 --- linux-3.0.4/drivers/scsi/lpfc/lpfc.h        2011-07-21 22:17:23.000000000 -0400
31334 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc.h        2011-08-23 21:47:55.000000000 -0400
31335 @@ -420,7 +420,7 @@ struct lpfc_vport {
31336         struct dentry *debug_nodelist;
31337         struct dentry *vport_debugfs_root;
31338         struct lpfc_debugfs_trc *disc_trc;
31339 -       atomic_t disc_trc_cnt;
31340 +       atomic_unchecked_t disc_trc_cnt;
31341  #endif
31342         uint8_t stat_data_enabled;
31343         uint8_t stat_data_blocked;
31344 @@ -826,8 +826,8 @@ struct lpfc_hba {
31345         struct timer_list fabric_block_timer;
31346         unsigned long bit_flags;
31347  #define        FABRIC_COMANDS_BLOCKED  0
31348 -       atomic_t num_rsrc_err;
31349 -       atomic_t num_cmd_success;
31350 +       atomic_unchecked_t num_rsrc_err;
31351 +       atomic_unchecked_t num_cmd_success;
31352         unsigned long last_rsrc_error_time;
31353         unsigned long last_ramp_down_time;
31354         unsigned long last_ramp_up_time;
31355 @@ -841,7 +841,7 @@ struct lpfc_hba {
31356         struct dentry *debug_dumpDif;    /* BlockGuard BPL*/
31357         struct dentry *debug_slow_ring_trc;
31358         struct lpfc_debugfs_trc *slow_ring_trc;
31359 -       atomic_t slow_ring_trc_cnt;
31360 +       atomic_unchecked_t slow_ring_trc_cnt;
31361         /* iDiag debugfs sub-directory */
31362         struct dentry *idiag_root;
31363         struct dentry *idiag_pci_cfg;
31364 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c
31365 --- linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c   2011-07-21 22:17:23.000000000 -0400
31366 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c   2011-08-23 21:47:56.000000000 -0400
31367 @@ -9923,8 +9923,10 @@ lpfc_init(void)
31368         printk(LPFC_COPYRIGHT "\n");
31369  
31370         if (lpfc_enable_npiv) {
31371 -               lpfc_transport_functions.vport_create = lpfc_vport_create;
31372 -               lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31373 +               pax_open_kernel();
31374 +               *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31375 +               *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31376 +               pax_close_kernel();
31377         }
31378         lpfc_transport_template =
31379                                 fc_attach_transport(&lpfc_transport_functions);
31380 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c
31381 --- linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c   2011-07-21 22:17:23.000000000 -0400
31382 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c   2011-08-23 21:47:56.000000000 -0400
31383 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31384         uint32_t evt_posted;
31385  
31386         spin_lock_irqsave(&phba->hbalock, flags);
31387 -       atomic_inc(&phba->num_rsrc_err);
31388 +       atomic_inc_unchecked(&phba->num_rsrc_err);
31389         phba->last_rsrc_error_time = jiffies;
31390  
31391         if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31392 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31393         unsigned long flags;
31394         struct lpfc_hba *phba = vport->phba;
31395         uint32_t evt_posted;
31396 -       atomic_inc(&phba->num_cmd_success);
31397 +       atomic_inc_unchecked(&phba->num_cmd_success);
31398  
31399         if (vport->cfg_lun_queue_depth <= queue_depth)
31400                 return;
31401 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31402         unsigned long num_rsrc_err, num_cmd_success;
31403         int i;
31404  
31405 -       num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31406 -       num_cmd_success = atomic_read(&phba->num_cmd_success);
31407 +       num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31408 +       num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31409  
31410         vports = lpfc_create_vport_work_array(phba);
31411         if (vports != NULL)
31412 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31413                         }
31414                 }
31415         lpfc_destroy_vport_work_array(phba, vports);
31416 -       atomic_set(&phba->num_rsrc_err, 0);
31417 -       atomic_set(&phba->num_cmd_success, 0);
31418 +       atomic_set_unchecked(&phba->num_rsrc_err, 0);
31419 +       atomic_set_unchecked(&phba->num_cmd_success, 0);
31420  }
31421  
31422  /**
31423 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31424                         }
31425                 }
31426         lpfc_destroy_vport_work_array(phba, vports);
31427 -       atomic_set(&phba->num_rsrc_err, 0);
31428 -       atomic_set(&phba->num_cmd_success, 0);
31429 +       atomic_set_unchecked(&phba->num_rsrc_err, 0);
31430 +       atomic_set_unchecked(&phba->num_cmd_success, 0);
31431  }
31432  
31433  /**
31434 diff -urNp linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c
31435 --- linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c   2011-07-21 22:17:23.000000000 -0400
31436 +++ linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c   2011-08-23 21:48:14.000000000 -0400
31437 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
31438         int             rval;
31439         int             i;
31440  
31441 +       pax_track_stack();
31442 +
31443         // Allocate memory for the base list of scb for management module.
31444         adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31445  
31446 diff -urNp linux-3.0.4/drivers/scsi/osd/osd_initiator.c linux-3.0.4/drivers/scsi/osd/osd_initiator.c
31447 --- linux-3.0.4/drivers/scsi/osd/osd_initiator.c        2011-07-21 22:17:23.000000000 -0400
31448 +++ linux-3.0.4/drivers/scsi/osd/osd_initiator.c        2011-08-23 21:48:14.000000000 -0400
31449 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31450         int nelem = ARRAY_SIZE(get_attrs), a = 0;
31451         int ret;
31452  
31453 +       pax_track_stack();
31454 +
31455         or = osd_start_request(od, GFP_KERNEL);
31456         if (!or)
31457                 return -ENOMEM;
31458 diff -urNp linux-3.0.4/drivers/scsi/pmcraid.c linux-3.0.4/drivers/scsi/pmcraid.c
31459 --- linux-3.0.4/drivers/scsi/pmcraid.c  2011-08-23 21:44:40.000000000 -0400
31460 +++ linux-3.0.4/drivers/scsi/pmcraid.c  2011-08-23 21:47:56.000000000 -0400
31461 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31462                 res->scsi_dev = scsi_dev;
31463                 scsi_dev->hostdata = res;
31464                 res->change_detected = 0;
31465 -               atomic_set(&res->read_failures, 0);
31466 -               atomic_set(&res->write_failures, 0);
31467 +               atomic_set_unchecked(&res->read_failures, 0);
31468 +               atomic_set_unchecked(&res->write_failures, 0);
31469                 rc = 0;
31470         }
31471         spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31472 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct 
31473  
31474         /* If this was a SCSI read/write command keep count of errors */
31475         if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31476 -               atomic_inc(&res->read_failures);
31477 +               atomic_inc_unchecked(&res->read_failures);
31478         else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31479 -               atomic_inc(&res->write_failures);
31480 +               atomic_inc_unchecked(&res->write_failures);
31481  
31482         if (!RES_IS_GSCSI(res->cfg_entry) &&
31483                 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31484 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31485          * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31486          * hrrq_id assigned here in queuecommand
31487          */
31488 -       ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31489 +       ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31490                           pinstance->num_hrrq;
31491         cmd->cmd_done = pmcraid_io_done;
31492  
31493 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31494          * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31495          * hrrq_id assigned here in queuecommand
31496          */
31497 -       ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31498 +       ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31499                           pinstance->num_hrrq;
31500  
31501         if (request_size) {
31502 @@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
31503  
31504         pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31505         /* add resources only after host is added into system */
31506 -       if (!atomic_read(&pinstance->expose_resources))
31507 +       if (!atomic_read_unchecked(&pinstance->expose_resources))
31508                 return;
31509  
31510         fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31511 @@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
31512         init_waitqueue_head(&pinstance->reset_wait_q);
31513  
31514         atomic_set(&pinstance->outstanding_cmds, 0);
31515 -       atomic_set(&pinstance->last_message_id, 0);
31516 -       atomic_set(&pinstance->expose_resources, 0);
31517 +       atomic_set_unchecked(&pinstance->last_message_id, 0);
31518 +       atomic_set_unchecked(&pinstance->expose_resources, 0);
31519  
31520         INIT_LIST_HEAD(&pinstance->free_res_q);
31521         INIT_LIST_HEAD(&pinstance->used_res_q);
31522 @@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
31523         /* Schedule worker thread to handle CCN and take care of adding and
31524          * removing devices to OS
31525          */
31526 -       atomic_set(&pinstance->expose_resources, 1);
31527 +       atomic_set_unchecked(&pinstance->expose_resources, 1);
31528         schedule_work(&pinstance->worker_q);
31529         return rc;
31530  
31531 diff -urNp linux-3.0.4/drivers/scsi/pmcraid.h linux-3.0.4/drivers/scsi/pmcraid.h
31532 --- linux-3.0.4/drivers/scsi/pmcraid.h  2011-07-21 22:17:23.000000000 -0400
31533 +++ linux-3.0.4/drivers/scsi/pmcraid.h  2011-08-23 21:47:56.000000000 -0400
31534 @@ -749,7 +749,7 @@ struct pmcraid_instance {
31535         struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31536  
31537         /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31538 -       atomic_t last_message_id;
31539 +       atomic_unchecked_t last_message_id;
31540  
31541         /* configuration table */
31542         struct pmcraid_config_table *cfg_table;
31543 @@ -778,7 +778,7 @@ struct pmcraid_instance {
31544         atomic_t outstanding_cmds;
31545  
31546         /* should add/delete resources to mid-layer now ?*/
31547 -       atomic_t expose_resources;
31548 +       atomic_unchecked_t expose_resources;
31549  
31550  
31551  
31552 @@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
31553                 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31554         };
31555         struct scsi_device *scsi_dev;   /* Link scsi_device structure */
31556 -       atomic_t read_failures;         /* count of failed READ commands */
31557 -       atomic_t write_failures;        /* count of failed WRITE commands */
31558 +       atomic_unchecked_t read_failures;       /* count of failed READ commands */
31559 +       atomic_unchecked_t write_failures;      /* count of failed WRITE commands */
31560  
31561         /* To indicate add/delete/modify during CCN */
31562         u8 change_detected;
31563 diff -urNp linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h
31564 --- linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h  2011-07-21 22:17:23.000000000 -0400
31565 +++ linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h  2011-08-23 21:47:56.000000000 -0400
31566 @@ -2244,7 +2244,7 @@ struct isp_operations {
31567         int (*get_flash_version) (struct scsi_qla_host *, void *);
31568         int (*start_scsi) (srb_t *);
31569         int (*abort_isp) (struct scsi_qla_host *);
31570 -};
31571 +} __no_const;
31572  
31573  /* MSI-X Support *************************************************************/
31574  
31575 diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h
31576 --- linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h  2011-07-21 22:17:23.000000000 -0400
31577 +++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h  2011-08-23 21:47:56.000000000 -0400
31578 @@ -256,7 +256,7 @@ struct ddb_entry {
31579         atomic_t retry_relogin_timer; /* Min Time between relogins
31580                                        * (4000 only) */
31581         atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31582 -       atomic_t relogin_retry_count; /* Num of times relogin has been
31583 +       atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31584                                        * retried */
31585  
31586         uint16_t port;
31587 diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c
31588 --- linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
31589 +++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
31590 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31591         ddb_entry->fw_ddb_index = fw_ddb_index;
31592         atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31593         atomic_set(&ddb_entry->relogin_timer, 0);
31594 -       atomic_set(&ddb_entry->relogin_retry_count, 0);
31595 +       atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31596         atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31597         list_add_tail(&ddb_entry->list, &ha->ddb_list);
31598         ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31599 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31600         if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31601            (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31602                 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31603 -               atomic_set(&ddb_entry->relogin_retry_count, 0);
31604 +               atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31605                 atomic_set(&ddb_entry->relogin_timer, 0);
31606                 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31607                 iscsi_unblock_session(ddb_entry->sess);
31608 diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c
31609 --- linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c   2011-07-21 22:17:23.000000000 -0400
31610 +++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c   2011-08-23 21:47:56.000000000 -0400
31611 @@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
31612                             ddb_entry->fw_ddb_device_state ==
31613                             DDB_DS_SESSION_FAILED) {
31614                                 /* Reset retry relogin timer */
31615 -                               atomic_inc(&ddb_entry->relogin_retry_count);
31616 +                               atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31617                                 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31618                                               " timed out-retrying"
31619                                               " relogin (%d)\n",
31620                                               ha->host_no,
31621                                               ddb_entry->fw_ddb_index,
31622 -                                             atomic_read(&ddb_entry->
31623 +                                             atomic_read_unchecked(&ddb_entry->
31624                                                           relogin_retry_count))
31625                                         );
31626                                 start_dpc++;
31627 diff -urNp linux-3.0.4/drivers/scsi/scsi.c linux-3.0.4/drivers/scsi/scsi.c
31628 --- linux-3.0.4/drivers/scsi/scsi.c     2011-07-21 22:17:23.000000000 -0400
31629 +++ linux-3.0.4/drivers/scsi/scsi.c     2011-08-23 21:47:56.000000000 -0400
31630 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31631         unsigned long timeout;
31632         int rtn = 0;
31633  
31634 -       atomic_inc(&cmd->device->iorequest_cnt);
31635 +       atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31636  
31637         /* check if the device is still usable */
31638         if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31639 diff -urNp linux-3.0.4/drivers/scsi/scsi_debug.c linux-3.0.4/drivers/scsi/scsi_debug.c
31640 --- linux-3.0.4/drivers/scsi/scsi_debug.c       2011-07-21 22:17:23.000000000 -0400
31641 +++ linux-3.0.4/drivers/scsi/scsi_debug.c       2011-08-23 21:48:14.000000000 -0400
31642 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31643         unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31644         unsigned char *cmd = (unsigned char *)scp->cmnd;
31645  
31646 +       pax_track_stack();
31647 +
31648         if ((errsts = check_readiness(scp, 1, devip)))
31649                 return errsts;
31650         memset(arr, 0, sizeof(arr));
31651 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31652         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31653         unsigned char *cmd = (unsigned char *)scp->cmnd;
31654  
31655 +       pax_track_stack();
31656 +
31657         if ((errsts = check_readiness(scp, 1, devip)))
31658                 return errsts;
31659         memset(arr, 0, sizeof(arr));
31660 diff -urNp linux-3.0.4/drivers/scsi/scsi_lib.c linux-3.0.4/drivers/scsi/scsi_lib.c
31661 --- linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:44:40.000000000 -0400
31662 +++ linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
31663 @@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
31664         shost = sdev->host;
31665         scsi_init_cmd_errh(cmd);
31666         cmd->result = DID_NO_CONNECT << 16;
31667 -       atomic_inc(&cmd->device->iorequest_cnt);
31668 +       atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31669  
31670         /*
31671          * SCSI request completion path will do scsi_device_unbusy(),
31672 @@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
31673  
31674         INIT_LIST_HEAD(&cmd->eh_entry);
31675  
31676 -       atomic_inc(&cmd->device->iodone_cnt);
31677 +       atomic_inc_unchecked(&cmd->device->iodone_cnt);
31678         if (cmd->result)
31679 -               atomic_inc(&cmd->device->ioerr_cnt);
31680 +               atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31681  
31682         disposition = scsi_decide_disposition(cmd);
31683         if (disposition != SUCCESS &&
31684 diff -urNp linux-3.0.4/drivers/scsi/scsi_sysfs.c linux-3.0.4/drivers/scsi/scsi_sysfs.c
31685 --- linux-3.0.4/drivers/scsi/scsi_sysfs.c       2011-07-21 22:17:23.000000000 -0400
31686 +++ linux-3.0.4/drivers/scsi/scsi_sysfs.c       2011-08-23 21:47:56.000000000 -0400
31687 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev, 
31688                     char *buf)                                          \
31689  {                                                                      \
31690         struct scsi_device *sdev = to_scsi_device(dev);                 \
31691 -       unsigned long long count = atomic_read(&sdev->field);           \
31692 +       unsigned long long count = atomic_read_unchecked(&sdev->field); \
31693         return snprintf(buf, 20, "0x%llx\n", count);                    \
31694  }                                                                      \
31695  static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31696 diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_fc.c linux-3.0.4/drivers/scsi/scsi_transport_fc.c
31697 --- linux-3.0.4/drivers/scsi/scsi_transport_fc.c        2011-07-21 22:17:23.000000000 -0400
31698 +++ linux-3.0.4/drivers/scsi/scsi_transport_fc.c        2011-08-23 21:47:56.000000000 -0400
31699 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31700   * Netlink Infrastructure
31701   */
31702  
31703 -static atomic_t fc_event_seq;
31704 +static atomic_unchecked_t fc_event_seq;
31705  
31706  /**
31707   * fc_get_event_number - Obtain the next sequential FC event number
31708 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
31709  u32
31710  fc_get_event_number(void)
31711  {
31712 -       return atomic_add_return(1, &fc_event_seq);
31713 +       return atomic_add_return_unchecked(1, &fc_event_seq);
31714  }
31715  EXPORT_SYMBOL(fc_get_event_number);
31716  
31717 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void
31718  {
31719         int error;
31720  
31721 -       atomic_set(&fc_event_seq, 0);
31722 +       atomic_set_unchecked(&fc_event_seq, 0);
31723  
31724         error = transport_class_register(&fc_host_class);
31725         if (error)
31726 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
31727         char *cp;
31728  
31729         *val = simple_strtoul(buf, &cp, 0);
31730 -       if ((*cp && (*cp != '\n')) || (*val < 0))
31731 +       if (*cp && (*cp != '\n'))
31732                 return -EINVAL;
31733         /*
31734          * Check for overflow; dev_loss_tmo is u32
31735 diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c
31736 --- linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c     2011-07-21 22:17:23.000000000 -0400
31737 +++ linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c     2011-08-23 21:47:56.000000000 -0400
31738 @@ -83,7 +83,7 @@ struct iscsi_internal {
31739         struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31740  };
31741  
31742 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31743 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31744  static struct workqueue_struct *iscsi_eh_timer_workq;
31745  
31746  /*
31747 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31748         int err;
31749  
31750         ihost = shost->shost_data;
31751 -       session->sid = atomic_add_return(1, &iscsi_session_nr);
31752 +       session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31753  
31754         if (id == ISCSI_MAX_TARGET) {
31755                 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31756 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31757         printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31758                 ISCSI_TRANSPORT_VERSION);
31759  
31760 -       atomic_set(&iscsi_session_nr, 0);
31761 +       atomic_set_unchecked(&iscsi_session_nr, 0);
31762  
31763         err = class_register(&iscsi_transport_class);
31764         if (err)
31765 diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_srp.c linux-3.0.4/drivers/scsi/scsi_transport_srp.c
31766 --- linux-3.0.4/drivers/scsi/scsi_transport_srp.c       2011-07-21 22:17:23.000000000 -0400
31767 +++ linux-3.0.4/drivers/scsi/scsi_transport_srp.c       2011-08-23 21:47:56.000000000 -0400
31768 @@ -33,7 +33,7 @@
31769  #include "scsi_transport_srp_internal.h"
31770  
31771  struct srp_host_attrs {
31772 -       atomic_t next_port_id;
31773 +       atomic_unchecked_t next_port_id;
31774  };
31775  #define to_srp_host_attrs(host)        ((struct srp_host_attrs *)(host)->shost_data)
31776  
31777 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31778         struct Scsi_Host *shost = dev_to_shost(dev);
31779         struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31780  
31781 -       atomic_set(&srp_host->next_port_id, 0);
31782 +       atomic_set_unchecked(&srp_host->next_port_id, 0);
31783         return 0;
31784  }
31785  
31786 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31787         memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31788         rport->roles = ids->roles;
31789  
31790 -       id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31791 +       id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31792         dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31793  
31794         transport_setup_device(&rport->dev);
31795 diff -urNp linux-3.0.4/drivers/scsi/sg.c linux-3.0.4/drivers/scsi/sg.c
31796 --- linux-3.0.4/drivers/scsi/sg.c       2011-07-21 22:17:23.000000000 -0400
31797 +++ linux-3.0.4/drivers/scsi/sg.c       2011-08-23 21:47:56.000000000 -0400
31798 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31799         const struct file_operations * fops;
31800  };
31801  
31802 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31803 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31804         {"allow_dio", &adio_fops},
31805         {"debug", &debug_fops},
31806         {"def_reserved_size", &dressz_fops},
31807 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
31808  {
31809         int k, mask;
31810         int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31811 -       struct sg_proc_leaf * leaf;
31812 +       const struct sg_proc_leaf * leaf;
31813  
31814         sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31815         if (!sg_proc_sgp)
31816 diff -urNp linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c
31817 --- linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c     2011-07-21 22:17:23.000000000 -0400
31818 +++ linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c     2011-08-23 21:48:14.000000000 -0400
31819 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31820         int do_iounmap = 0;
31821         int do_disable_device = 1;
31822  
31823 +       pax_track_stack();
31824 +
31825         memset(&sym_dev, 0, sizeof(sym_dev));
31826         memset(&nvram, 0, sizeof(nvram));
31827         sym_dev.pdev = pdev;
31828 diff -urNp linux-3.0.4/drivers/scsi/vmw_pvscsi.c linux-3.0.4/drivers/scsi/vmw_pvscsi.c
31829 --- linux-3.0.4/drivers/scsi/vmw_pvscsi.c       2011-07-21 22:17:23.000000000 -0400
31830 +++ linux-3.0.4/drivers/scsi/vmw_pvscsi.c       2011-08-23 21:48:14.000000000 -0400
31831 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31832         dma_addr_t base;
31833         unsigned i;
31834  
31835 +       pax_track_stack();
31836 +
31837         cmd.ringsStatePPN   = adapter->ringStatePA >> PAGE_SHIFT;
31838         cmd.reqRingNumPages = adapter->req_pages;
31839         cmd.cmpRingNumPages = adapter->cmp_pages;
31840 diff -urNp linux-3.0.4/drivers/spi/spi.c linux-3.0.4/drivers/spi/spi.c
31841 --- linux-3.0.4/drivers/spi/spi.c       2011-07-21 22:17:23.000000000 -0400
31842 +++ linux-3.0.4/drivers/spi/spi.c       2011-08-23 21:47:56.000000000 -0400
31843 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31844  EXPORT_SYMBOL_GPL(spi_bus_unlock);
31845  
31846  /* portable code must never pass more than 32 bytes */
31847 -#define        SPI_BUFSIZ      max(32,SMP_CACHE_BYTES)
31848 +#define        SPI_BUFSIZ      max(32UL,SMP_CACHE_BYTES)
31849  
31850  static u8      *buf;
31851  
31852 diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
31853 --- linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c    2011-08-23 21:44:40.000000000 -0400
31854 +++ linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c    2011-08-23 21:48:14.000000000 -0400
31855 @@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
31856          (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
31857  
31858  
31859 -static struct net_device_ops ar6000_netdev_ops = {
31860 +static net_device_ops_no_const ar6000_netdev_ops = {
31861      .ndo_init               = NULL,
31862      .ndo_open               = ar6000_open,
31863      .ndo_stop               = ar6000_close,
31864 diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
31865 --- linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h      2011-07-21 22:17:23.000000000 -0400
31866 +++ linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h      2011-08-23 21:47:56.000000000 -0400
31867 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
31868  typedef struct ar6k_pal_config_s
31869  {
31870         ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
31871 -}ar6k_pal_config_t;
31872 +} __no_const ar6k_pal_config_t;
31873  
31874  void register_pal_cb(ar6k_pal_config_t *palConfig_p);
31875  #endif /* _AR6K_PAL_H_ */
31876 diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
31877 --- linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c  2011-07-21 22:17:23.000000000 -0400
31878 +++ linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c  2011-08-23 21:47:56.000000000 -0400
31879 @@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
31880                         free_netdev(ifp->net);
31881                 }
31882                 /* Allocate etherdev, including space for private structure */
31883 -               ifp->net = alloc_etherdev(sizeof(dhd));
31884 +               ifp->net = alloc_etherdev(sizeof(*dhd));
31885                 if (!ifp->net) {
31886                         DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31887                         ret = -ENOMEM;
31888                 }
31889                 if (ret == 0) {
31890                         strcpy(ifp->net->name, ifp->name);
31891 -                       memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
31892 +                       memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
31893                         err = dhd_net_attach(&dhd->pub, ifp->idx);
31894                         if (err != 0) {
31895                                 DHD_ERROR(("%s: dhd_net_attach failed, "
31896 @@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31897                 strcpy(nv_path, nvram_path);
31898  
31899         /* Allocate etherdev, including space for private structure */
31900 -       net = alloc_etherdev(sizeof(dhd));
31901 +       net = alloc_etherdev(sizeof(*dhd));
31902         if (!net) {
31903                 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31904                 goto fail;
31905 @@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31906         /*
31907          * Save the dhd_info into the priv
31908          */
31909 -       memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31910 +       memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31911  
31912         /* Set network interface name if it was provided as module parameter */
31913         if (iface_name[0]) {
31914 @@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31915         /*
31916          * Save the dhd_info into the priv
31917          */
31918 -       memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31919 +       memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31920  
31921  #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
31922         g_bus = bus;
31923 diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
31924 --- linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h    2011-07-21 22:17:23.000000000 -0400
31925 +++ linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h    2011-08-23 21:47:56.000000000 -0400
31926 @@ -593,7 +593,7 @@ struct phy_func_ptr {
31927         initfn_t carrsuppr;
31928         rxsigpwrfn_t rxsigpwr;
31929         detachfn_t detach;
31930 -};
31931 +} __no_const;
31932  typedef struct phy_func_ptr phy_func_ptr_t;
31933  
31934  struct phy_info {
31935 diff -urNp linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h
31936 --- linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h      2011-07-21 22:17:23.000000000 -0400
31937 +++ linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h      2011-08-23 21:47:56.000000000 -0400
31938 @@ -185,7 +185,7 @@ typedef struct {
31939                          u16 func, uint bustype, void *regsva, void *param);
31940         /* detach from device */
31941         void (*detach) (void *ch);
31942 -} bcmsdh_driver_t;
31943 +} __no_const bcmsdh_driver_t;
31944  
31945  /* platform specific/high level functions */
31946  extern int bcmsdh_register(bcmsdh_driver_t *driver);
31947 diff -urNp linux-3.0.4/drivers/staging/et131x/et1310_tx.c linux-3.0.4/drivers/staging/et131x/et1310_tx.c
31948 --- linux-3.0.4/drivers/staging/et131x/et1310_tx.c      2011-07-21 22:17:23.000000000 -0400
31949 +++ linux-3.0.4/drivers/staging/et131x/et1310_tx.c      2011-08-23 21:47:56.000000000 -0400
31950 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
31951         struct net_device_stats *stats = &etdev->net_stats;
31952  
31953         if (tcb->flags & fMP_DEST_BROAD)
31954 -               atomic_inc(&etdev->Stats.brdcstxmt);
31955 +               atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
31956         else if (tcb->flags & fMP_DEST_MULTI)
31957 -               atomic_inc(&etdev->Stats.multixmt);
31958 +               atomic_inc_unchecked(&etdev->Stats.multixmt);
31959         else
31960 -               atomic_inc(&etdev->Stats.unixmt);
31961 +               atomic_inc_unchecked(&etdev->Stats.unixmt);
31962  
31963         if (tcb->skb) {
31964                 stats->tx_bytes += tcb->skb->len;
31965 diff -urNp linux-3.0.4/drivers/staging/et131x/et131x_adapter.h linux-3.0.4/drivers/staging/et131x/et131x_adapter.h
31966 --- linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
31967 +++ linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
31968 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
31969          * operations
31970          */
31971         u32 unircv;     /* # multicast packets received */
31972 -       atomic_t unixmt;        /* # multicast packets for Tx */
31973 +       atomic_unchecked_t unixmt;      /* # multicast packets for Tx */
31974         u32 multircv;   /* # multicast packets received */
31975 -       atomic_t multixmt;      /* # multicast packets for Tx */
31976 +       atomic_unchecked_t multixmt;    /* # multicast packets for Tx */
31977         u32 brdcstrcv;  /* # broadcast packets received */
31978 -       atomic_t brdcstxmt;     /* # broadcast packets for Tx */
31979 +       atomic_unchecked_t brdcstxmt;   /* # broadcast packets for Tx */
31980         u32 norcvbuf;   /* # Rx packets discarded */
31981         u32 noxmtbuf;   /* # Tx packets discarded */
31982  
31983 diff -urNp linux-3.0.4/drivers/staging/hv/channel.c linux-3.0.4/drivers/staging/hv/channel.c
31984 --- linux-3.0.4/drivers/staging/hv/channel.c    2011-08-23 21:44:40.000000000 -0400
31985 +++ linux-3.0.4/drivers/staging/hv/channel.c    2011-08-23 21:47:56.000000000 -0400
31986 @@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
31987         int ret = 0;
31988         int t;
31989  
31990 -       next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31991 -       atomic_inc(&vmbus_connection.next_gpadl_handle);
31992 +       next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31993 +       atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31994  
31995         ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31996         if (ret)
31997 diff -urNp linux-3.0.4/drivers/staging/hv/hv.c linux-3.0.4/drivers/staging/hv/hv.c
31998 --- linux-3.0.4/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
31999 +++ linux-3.0.4/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
32000 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
32001         u64 output_address = (output) ? virt_to_phys(output) : 0;
32002         u32 output_address_hi = output_address >> 32;
32003         u32 output_address_lo = output_address & 0xFFFFFFFF;
32004 -       volatile void *hypercall_page = hv_context.hypercall_page;
32005 +       volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32006  
32007         __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
32008                               "=a"(hv_status_lo) : "d" (control_hi),
32009 diff -urNp linux-3.0.4/drivers/staging/hv/hv_mouse.c linux-3.0.4/drivers/staging/hv/hv_mouse.c
32010 --- linux-3.0.4/drivers/staging/hv/hv_mouse.c   2011-07-21 22:17:23.000000000 -0400
32011 +++ linux-3.0.4/drivers/staging/hv/hv_mouse.c   2011-08-23 21:47:56.000000000 -0400
32012 @@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
32013         if (hid_dev) {
32014                 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32015  
32016 -               hid_dev->ll_driver->open  = mousevsc_hid_open;
32017 -               hid_dev->ll_driver->close = mousevsc_hid_close;
32018 +               pax_open_kernel();
32019 +               *(void **)&hid_dev->ll_driver->open  = mousevsc_hid_open;
32020 +               *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32021 +               pax_close_kernel();
32022  
32023                 hid_dev->bus = BUS_VIRTUAL;
32024                 hid_dev->vendor = input_device_ctx->device_info.vendor;
32025 diff -urNp linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h
32026 --- linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h       2011-07-21 22:17:23.000000000 -0400
32027 +++ linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h       2011-08-23 21:47:56.000000000 -0400
32028 @@ -559,7 +559,7 @@ enum vmbus_connect_state {
32029  struct vmbus_connection {
32030         enum vmbus_connect_state conn_state;
32031  
32032 -       atomic_t next_gpadl_handle;
32033 +       atomic_unchecked_t next_gpadl_handle;
32034  
32035         /*
32036          * Represents channel interrupts. Each bit position represents a
32037 diff -urNp linux-3.0.4/drivers/staging/hv/rndis_filter.c linux-3.0.4/drivers/staging/hv/rndis_filter.c
32038 --- linux-3.0.4/drivers/staging/hv/rndis_filter.c       2011-08-23 21:44:40.000000000 -0400
32039 +++ linux-3.0.4/drivers/staging/hv/rndis_filter.c       2011-08-23 21:47:56.000000000 -0400
32040 @@ -43,7 +43,7 @@ struct rndis_device {
32041  
32042         enum rndis_device_state state;
32043         u32 link_stat;
32044 -       atomic_t new_req_id;
32045 +       atomic_unchecked_t new_req_id;
32046  
32047         spinlock_t request_lock;
32048         struct list_head req_list;
32049 @@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
32050          * template
32051          */
32052         set = &rndis_msg->msg.set_req;
32053 -       set->req_id = atomic_inc_return(&dev->new_req_id);
32054 +       set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32055  
32056         /* Add to the request list */
32057         spin_lock_irqsave(&dev->request_lock, flags);
32058 @@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
32059  
32060         /* Setup the rndis set */
32061         halt = &request->request_msg.msg.halt_req;
32062 -       halt->req_id = atomic_inc_return(&dev->new_req_id);
32063 +       halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32064  
32065         /* Ignore return since this msg is optional. */
32066         rndis_filter_send_request(dev, request);
32067 diff -urNp linux-3.0.4/drivers/staging/hv/vmbus_drv.c linux-3.0.4/drivers/staging/hv/vmbus_drv.c
32068 --- linux-3.0.4/drivers/staging/hv/vmbus_drv.c  2011-07-21 22:17:23.000000000 -0400
32069 +++ linux-3.0.4/drivers/staging/hv/vmbus_drv.c  2011-08-23 21:47:56.000000000 -0400
32070 @@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
32071  {
32072         int ret = 0;
32073  
32074 -       static atomic_t device_num = ATOMIC_INIT(0);
32075 +       static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32076  
32077         /* Set the device name. Otherwise, device_register() will fail. */
32078         dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32079 -                    atomic_inc_return(&device_num));
32080 +                    atomic_inc_return_unchecked(&device_num));
32081  
32082         /* The new device belongs to this bus */
32083         child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
32084 diff -urNp linux-3.0.4/drivers/staging/iio/ring_generic.h linux-3.0.4/drivers/staging/iio/ring_generic.h
32085 --- linux-3.0.4/drivers/staging/iio/ring_generic.h      2011-07-21 22:17:23.000000000 -0400
32086 +++ linux-3.0.4/drivers/staging/iio/ring_generic.h      2011-08-23 21:47:56.000000000 -0400
32087 @@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
32088  
32089         int (*is_enabled)(struct iio_ring_buffer *ring);
32090         int (*enable)(struct iio_ring_buffer *ring);
32091 -};
32092 +} __no_const;
32093  
32094  struct iio_ring_setup_ops {
32095         int                             (*preenable)(struct iio_dev *);
32096 diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet.c linux-3.0.4/drivers/staging/octeon/ethernet.c
32097 --- linux-3.0.4/drivers/staging/octeon/ethernet.c       2011-07-21 22:17:23.000000000 -0400
32098 +++ linux-3.0.4/drivers/staging/octeon/ethernet.c       2011-08-23 21:47:56.000000000 -0400
32099 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32100                  * since the RX tasklet also increments it.
32101                  */
32102  #ifdef CONFIG_64BIT
32103 -               atomic64_add(rx_status.dropped_packets,
32104 -                            (atomic64_t *)&priv->stats.rx_dropped);
32105 +               atomic64_add_unchecked(rx_status.dropped_packets,
32106 +                            (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32107  #else
32108 -               atomic_add(rx_status.dropped_packets,
32109 -                            (atomic_t *)&priv->stats.rx_dropped);
32110 +               atomic_add_unchecked(rx_status.dropped_packets,
32111 +                            (atomic_unchecked_t *)&priv->stats.rx_dropped);
32112  #endif
32113         }
32114  
32115 diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet-rx.c linux-3.0.4/drivers/staging/octeon/ethernet-rx.c
32116 --- linux-3.0.4/drivers/staging/octeon/ethernet-rx.c    2011-07-21 22:17:23.000000000 -0400
32117 +++ linux-3.0.4/drivers/staging/octeon/ethernet-rx.c    2011-08-23 21:47:56.000000000 -0400
32118 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32119                                 /* Increment RX stats for virtual ports */
32120                                 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32121  #ifdef CONFIG_64BIT
32122 -                                       atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32123 -                                       atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32124 +                                       atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32125 +                                       atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32126  #else
32127 -                                       atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32128 -                                       atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32129 +                                       atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32130 +                                       atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32131  #endif
32132                                 }
32133                                 netif_receive_skb(skb);
32134 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32135                                            dev->name);
32136                                 */
32137  #ifdef CONFIG_64BIT
32138 -                               atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32139 +                               atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32140  #else
32141 -                               atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32142 +                               atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32143  #endif
32144                                 dev_kfree_skb_irq(skb);
32145                         }
32146 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/inode.c linux-3.0.4/drivers/staging/pohmelfs/inode.c
32147 --- linux-3.0.4/drivers/staging/pohmelfs/inode.c        2011-07-21 22:17:23.000000000 -0400
32148 +++ linux-3.0.4/drivers/staging/pohmelfs/inode.c        2011-08-23 21:47:56.000000000 -0400
32149 @@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
32150         mutex_init(&psb->mcache_lock);
32151         psb->mcache_root = RB_ROOT;
32152         psb->mcache_timeout = msecs_to_jiffies(5000);
32153 -       atomic_long_set(&psb->mcache_gen, 0);
32154 +       atomic_long_set_unchecked(&psb->mcache_gen, 0);
32155  
32156         psb->trans_max_pages = 100;
32157  
32158 @@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
32159         INIT_LIST_HEAD(&psb->crypto_ready_list);
32160         INIT_LIST_HEAD(&psb->crypto_active_list);
32161  
32162 -       atomic_set(&psb->trans_gen, 1);
32163 +       atomic_set_unchecked(&psb->trans_gen, 1);
32164         atomic_long_set(&psb->total_inodes, 0);
32165  
32166         mutex_init(&psb->state_lock);
32167 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/mcache.c linux-3.0.4/drivers/staging/pohmelfs/mcache.c
32168 --- linux-3.0.4/drivers/staging/pohmelfs/mcache.c       2011-07-21 22:17:23.000000000 -0400
32169 +++ linux-3.0.4/drivers/staging/pohmelfs/mcache.c       2011-08-23 21:47:56.000000000 -0400
32170 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32171         m->data = data;
32172         m->start = start;
32173         m->size = size;
32174 -       m->gen = atomic_long_inc_return(&psb->mcache_gen);
32175 +       m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32176  
32177         mutex_lock(&psb->mcache_lock);
32178         err = pohmelfs_mcache_insert(psb, m);
32179 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/netfs.h linux-3.0.4/drivers/staging/pohmelfs/netfs.h
32180 --- linux-3.0.4/drivers/staging/pohmelfs/netfs.h        2011-07-21 22:17:23.000000000 -0400
32181 +++ linux-3.0.4/drivers/staging/pohmelfs/netfs.h        2011-08-23 21:47:56.000000000 -0400
32182 @@ -571,14 +571,14 @@ struct pohmelfs_config;
32183  struct pohmelfs_sb {
32184         struct rb_root          mcache_root;
32185         struct mutex            mcache_lock;
32186 -       atomic_long_t           mcache_gen;
32187 +       atomic_long_unchecked_t mcache_gen;
32188         unsigned long           mcache_timeout;
32189  
32190         unsigned int            idx;
32191  
32192         unsigned int            trans_retries;
32193  
32194 -       atomic_t                trans_gen;
32195 +       atomic_unchecked_t      trans_gen;
32196  
32197         unsigned int            crypto_attached_size;
32198         unsigned int            crypto_align_size;
32199 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/trans.c linux-3.0.4/drivers/staging/pohmelfs/trans.c
32200 --- linux-3.0.4/drivers/staging/pohmelfs/trans.c        2011-07-21 22:17:23.000000000 -0400
32201 +++ linux-3.0.4/drivers/staging/pohmelfs/trans.c        2011-08-23 21:47:56.000000000 -0400
32202 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32203         int err;
32204         struct netfs_cmd *cmd = t->iovec.iov_base;
32205  
32206 -       t->gen = atomic_inc_return(&psb->trans_gen);
32207 +       t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32208  
32209         cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32210                 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32211 diff -urNp linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h
32212 --- linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h    2011-07-21 22:17:23.000000000 -0400
32213 +++ linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h    2011-08-23 21:47:56.000000000 -0400
32214 @@ -83,7 +83,7 @@ struct        _io_ops {
32215                           u8 *pmem);
32216         u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32217                            u8 *pmem);
32218 -};
32219 +} __no_const;
32220  
32221  struct io_req {
32222         struct list_head list;
32223 diff -urNp linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c
32224 --- linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c      2011-07-21 22:17:23.000000000 -0400
32225 +++ linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c      2011-08-24 18:21:41.000000000 -0400
32226 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32227         t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32228  
32229         if (rlen)
32230 -               if (copy_to_user(data, &resp, rlen))
32231 +               if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32232                         return -EFAULT;
32233  
32234         return 0;
32235 diff -urNp linux-3.0.4/drivers/staging/tty/stallion.c linux-3.0.4/drivers/staging/tty/stallion.c
32236 --- linux-3.0.4/drivers/staging/tty/stallion.c  2011-07-21 22:17:23.000000000 -0400
32237 +++ linux-3.0.4/drivers/staging/tty/stallion.c  2011-08-23 21:48:14.000000000 -0400
32238 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32239         struct stlport  stl_dummyport;
32240         struct stlport  *portp;
32241  
32242 +       pax_track_stack();
32243 +
32244         if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32245                 return -EFAULT;
32246         portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32247 diff -urNp linux-3.0.4/drivers/staging/usbip/usbip_common.h linux-3.0.4/drivers/staging/usbip/usbip_common.h
32248 --- linux-3.0.4/drivers/staging/usbip/usbip_common.h    2011-07-21 22:17:23.000000000 -0400
32249 +++ linux-3.0.4/drivers/staging/usbip/usbip_common.h    2011-08-23 21:47:56.000000000 -0400
32250 @@ -315,7 +315,7 @@ struct usbip_device {
32251                 void (*shutdown)(struct usbip_device *);
32252                 void (*reset)(struct usbip_device *);
32253                 void (*unusable)(struct usbip_device *);
32254 -       } eh_ops;
32255 +       } __no_const eh_ops;
32256  };
32257  
32258  void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
32259 diff -urNp linux-3.0.4/drivers/staging/usbip/vhci.h linux-3.0.4/drivers/staging/usbip/vhci.h
32260 --- linux-3.0.4/drivers/staging/usbip/vhci.h    2011-07-21 22:17:23.000000000 -0400
32261 +++ linux-3.0.4/drivers/staging/usbip/vhci.h    2011-08-23 21:47:56.000000000 -0400
32262 @@ -94,7 +94,7 @@ struct vhci_hcd {
32263         unsigned resuming:1;
32264         unsigned long re_timeout;
32265  
32266 -       atomic_t seqnum;
32267 +       atomic_unchecked_t seqnum;
32268  
32269         /*
32270          * NOTE:
32271 diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_hcd.c linux-3.0.4/drivers/staging/usbip/vhci_hcd.c
32272 --- linux-3.0.4/drivers/staging/usbip/vhci_hcd.c        2011-08-23 21:44:40.000000000 -0400
32273 +++ linux-3.0.4/drivers/staging/usbip/vhci_hcd.c        2011-08-23 21:47:56.000000000 -0400
32274 @@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
32275                 return;
32276         }
32277  
32278 -       priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32279 +       priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32280         if (priv->seqnum == 0xffff)
32281                 dev_info(&urb->dev->dev, "seqnum max\n");
32282  
32283 @@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
32284                         return -ENOMEM;
32285                 }
32286  
32287 -               unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32288 +               unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32289                 if (unlink->seqnum == 0xffff)
32290                         pr_info("seqnum max\n");
32291  
32292 @@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
32293                 vdev->rhport = rhport;
32294         }
32295  
32296 -       atomic_set(&vhci->seqnum, 0);
32297 +       atomic_set_unchecked(&vhci->seqnum, 0);
32298         spin_lock_init(&vhci->lock);
32299  
32300         hcd->power_budget = 0; /* no limit */
32301 diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_rx.c linux-3.0.4/drivers/staging/usbip/vhci_rx.c
32302 --- linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
32303 +++ linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
32304 @@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct 
32305         if (!urb) {
32306                 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
32307                 pr_info("max seqnum %d\n",
32308 -                       atomic_read(&the_controller->seqnum));
32309 +                       atomic_read_unchecked(&the_controller->seqnum));
32310                 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32311                 return;
32312         }
32313 diff -urNp linux-3.0.4/drivers/staging/vt6655/hostap.c linux-3.0.4/drivers/staging/vt6655/hostap.c
32314 --- linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
32315 +++ linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
32316 @@ -79,14 +79,13 @@ static int          msglevel            
32317   *
32318   */
32319  
32320 +static net_device_ops_no_const apdev_netdev_ops;
32321 +
32322  static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32323  {
32324      PSDevice apdev_priv;
32325         struct net_device *dev = pDevice->dev;
32326         int ret;
32327 -       const struct net_device_ops apdev_netdev_ops = {
32328 -               .ndo_start_xmit         = pDevice->tx_80211,
32329 -       };
32330  
32331      DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32332  
32333 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
32334      *apdev_priv = *pDevice;
32335         memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32336  
32337 +       /* only half broken now */
32338 +       apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32339         pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32340  
32341         pDevice->apdev->type = ARPHRD_IEEE80211;
32342 diff -urNp linux-3.0.4/drivers/staging/vt6656/hostap.c linux-3.0.4/drivers/staging/vt6656/hostap.c
32343 --- linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
32344 +++ linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
32345 @@ -80,14 +80,13 @@ static int          msglevel            
32346   *
32347   */
32348  
32349 +static net_device_ops_no_const apdev_netdev_ops;
32350 +
32351  static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32352  {
32353      PSDevice apdev_priv;
32354         struct net_device *dev = pDevice->dev;
32355         int ret;
32356 -       const struct net_device_ops apdev_netdev_ops = {
32357 -               .ndo_start_xmit         = pDevice->tx_80211,
32358 -       };
32359  
32360      DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32361  
32362 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
32363      *apdev_priv = *pDevice;
32364         memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32365  
32366 +       /* only half broken now */
32367 +       apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32368         pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32369  
32370         pDevice->apdev->type = ARPHRD_IEEE80211;
32371 diff -urNp linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c
32372 --- linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c   2011-07-21 22:17:23.000000000 -0400
32373 +++ linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c   2011-08-23 21:47:56.000000000 -0400
32374 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32375  
32376  struct usbctlx_completor {
32377         int (*complete) (struct usbctlx_completor *);
32378 -};
32379 +} __no_const;
32380  
32381  static int
32382  hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32383 diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.c linux-3.0.4/drivers/staging/zcache/tmem.c
32384 --- linux-3.0.4/drivers/staging/zcache/tmem.c   2011-07-21 22:17:23.000000000 -0400
32385 +++ linux-3.0.4/drivers/staging/zcache/tmem.c   2011-08-23 21:47:56.000000000 -0400
32386 @@ -39,7 +39,7 @@
32387   * A tmem host implementation must use this function to register callbacks
32388   * for memory allocation.
32389   */
32390 -static struct tmem_hostops tmem_hostops;
32391 +static tmem_hostops_no_const tmem_hostops;
32392  
32393  static void tmem_objnode_tree_init(void);
32394  
32395 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
32396   * A tmem host implementation must use this function to register
32397   * callbacks for a page-accessible memory (PAM) implementation
32398   */
32399 -static struct tmem_pamops tmem_pamops;
32400 +static tmem_pamops_no_const tmem_pamops;
32401  
32402  void tmem_register_pamops(struct tmem_pamops *m)
32403  {
32404 diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.h linux-3.0.4/drivers/staging/zcache/tmem.h
32405 --- linux-3.0.4/drivers/staging/zcache/tmem.h   2011-07-21 22:17:23.000000000 -0400
32406 +++ linux-3.0.4/drivers/staging/zcache/tmem.h   2011-08-23 21:47:56.000000000 -0400
32407 @@ -171,6 +171,7 @@ struct tmem_pamops {
32408         int (*get_data)(struct page *, void *, struct tmem_pool *);
32409         void (*free)(void *, struct tmem_pool *);
32410  };
32411 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
32412  extern void tmem_register_pamops(struct tmem_pamops *m);
32413  
32414  /* memory allocation methods provided by the host implementation */
32415 @@ -180,6 +181,7 @@ struct tmem_hostops {
32416         struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
32417         void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
32418  };
32419 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
32420  extern void tmem_register_hostops(struct tmem_hostops *m);
32421  
32422  /* core tmem accessor functions */
32423 diff -urNp linux-3.0.4/drivers/target/target_core_alua.c linux-3.0.4/drivers/target/target_core_alua.c
32424 --- linux-3.0.4/drivers/target/target_core_alua.c       2011-07-21 22:17:23.000000000 -0400
32425 +++ linux-3.0.4/drivers/target/target_core_alua.c       2011-08-23 21:48:14.000000000 -0400
32426 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32427         char path[ALUA_METADATA_PATH_LEN];
32428         int len;
32429  
32430 +       pax_track_stack();
32431 +
32432         memset(path, 0, ALUA_METADATA_PATH_LEN);
32433  
32434         len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32435 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32436         char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32437         int len;
32438  
32439 +       pax_track_stack();
32440 +
32441         memset(path, 0, ALUA_METADATA_PATH_LEN);
32442         memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32443  
32444 diff -urNp linux-3.0.4/drivers/target/target_core_cdb.c linux-3.0.4/drivers/target/target_core_cdb.c
32445 --- linux-3.0.4/drivers/target/target_core_cdb.c        2011-07-21 22:17:23.000000000 -0400
32446 +++ linux-3.0.4/drivers/target/target_core_cdb.c        2011-08-23 21:48:14.000000000 -0400
32447 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32448         int length = 0;
32449         unsigned char buf[SE_MODE_PAGE_BUF];
32450  
32451 +       pax_track_stack();
32452 +
32453         memset(buf, 0, SE_MODE_PAGE_BUF);
32454  
32455         switch (cdb[2] & 0x3f) {
32456 diff -urNp linux-3.0.4/drivers/target/target_core_configfs.c linux-3.0.4/drivers/target/target_core_configfs.c
32457 --- linux-3.0.4/drivers/target/target_core_configfs.c   2011-07-21 22:17:23.000000000 -0400
32458 +++ linux-3.0.4/drivers/target/target_core_configfs.c   2011-08-23 21:48:14.000000000 -0400
32459 @@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
32460         ssize_t len = 0;
32461         int reg_count = 0, prf_isid;
32462  
32463 +       pax_track_stack();
32464 +
32465         if (!(su_dev->se_dev_ptr))
32466                 return -ENODEV;
32467  
32468 diff -urNp linux-3.0.4/drivers/target/target_core_pr.c linux-3.0.4/drivers/target/target_core_pr.c
32469 --- linux-3.0.4/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
32470 +++ linux-3.0.4/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
32471 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32472         unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32473         u16 tpgt;
32474  
32475 +       pax_track_stack();
32476 +
32477         memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32478         memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32479         /*
32480 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32481         ssize_t len = 0;
32482         int reg_count = 0;
32483  
32484 +       pax_track_stack();
32485 +
32486         memset(buf, 0, pr_aptpl_buf_len);
32487         /*
32488          * Called to clear metadata once APTPL has been deactivated.
32489 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32490         char path[512];
32491         int ret;
32492  
32493 +       pax_track_stack();
32494 +
32495         memset(iov, 0, sizeof(struct iovec));
32496         memset(path, 0, 512);
32497  
32498 diff -urNp linux-3.0.4/drivers/target/target_core_tmr.c linux-3.0.4/drivers/target/target_core_tmr.c
32499 --- linux-3.0.4/drivers/target/target_core_tmr.c        2011-07-21 22:17:23.000000000 -0400
32500 +++ linux-3.0.4/drivers/target/target_core_tmr.c        2011-08-23 21:47:56.000000000 -0400
32501 @@ -269,7 +269,7 @@ int core_tmr_lun_reset(
32502                         CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32503                         T_TASK(cmd)->t_task_cdbs,
32504                         atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32505 -                       atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32506 +                       atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32507                         atomic_read(&T_TASK(cmd)->t_transport_active),
32508                         atomic_read(&T_TASK(cmd)->t_transport_stop),
32509                         atomic_read(&T_TASK(cmd)->t_transport_sent));
32510 @@ -311,7 +311,7 @@ int core_tmr_lun_reset(
32511                         DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32512                                 " task: %p, t_fe_count: %d dev: %p\n", task,
32513                                 fe_count, dev);
32514 -                       atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32515 +                       atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32516                         spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32517                                                 flags);
32518                         core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32519 @@ -321,7 +321,7 @@ int core_tmr_lun_reset(
32520                 }
32521                 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32522                         " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32523 -               atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32524 +               atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32525                 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32526                 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32527  
32528 diff -urNp linux-3.0.4/drivers/target/target_core_transport.c linux-3.0.4/drivers/target/target_core_transport.c
32529 --- linux-3.0.4/drivers/target/target_core_transport.c  2011-07-21 22:17:23.000000000 -0400
32530 +++ linux-3.0.4/drivers/target/target_core_transport.c  2011-08-23 21:47:56.000000000 -0400
32531 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32532  
32533         dev->queue_depth        = dev_limits->queue_depth;
32534         atomic_set(&dev->depth_left, dev->queue_depth);
32535 -       atomic_set(&dev->dev_ordered_id, 0);
32536 +       atomic_set_unchecked(&dev->dev_ordered_id, 0);
32537  
32538         se_dev_set_default_attribs(dev, dev_limits);
32539  
32540 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32541          * Used to determine when ORDERED commands should go from
32542          * Dormant to Active status.
32543          */
32544 -       cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32545 +       cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32546         smp_mb__after_atomic_inc();
32547         DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32548                         cmd->se_ordered_id, cmd->sam_task_attr,
32549 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32550                 " t_transport_active: %d t_transport_stop: %d"
32551                 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32552                 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32553 -               atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32554 +               atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32555                 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32556                 atomic_read(&T_TASK(cmd)->t_transport_active),
32557                 atomic_read(&T_TASK(cmd)->t_transport_stop),
32558 @@ -2673,9 +2673,9 @@ check_depth:
32559         spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32560         atomic_set(&task->task_active, 1);
32561         atomic_set(&task->task_sent, 1);
32562 -       atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32563 +       atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32564  
32565 -       if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32566 +       if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32567             T_TASK(cmd)->t_task_cdbs)
32568                 atomic_set(&cmd->transport_sent, 1);
32569  
32570 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32571                 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32572         }
32573         if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32574 -            atomic_read(&T_TASK(cmd)->t_transport_aborted))
32575 +            atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32576                 goto remove;
32577  
32578         atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32579 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32580  {
32581         int ret = 0;
32582  
32583 -       if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32584 +       if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32585                 if (!(send_status) ||
32586                      (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32587                         return 1;
32588 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32589          */
32590         if (cmd->data_direction == DMA_TO_DEVICE) {
32591                 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32592 -                       atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32593 +                       atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32594                         smp_mb__after_atomic_inc();
32595                         cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32596                         transport_new_cmd_failure(cmd);
32597 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32598                         CMD_TFO(cmd)->get_task_tag(cmd),
32599                         T_TASK(cmd)->t_task_cdbs,
32600                         atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32601 -                       atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32602 +                       atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32603                         atomic_read(&T_TASK(cmd)->t_transport_active),
32604                         atomic_read(&T_TASK(cmd)->t_transport_stop),
32605                         atomic_read(&T_TASK(cmd)->t_transport_sent));
32606 diff -urNp linux-3.0.4/drivers/telephony/ixj.c linux-3.0.4/drivers/telephony/ixj.c
32607 --- linux-3.0.4/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
32608 +++ linux-3.0.4/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
32609 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32610         bool mContinue;
32611         char *pIn, *pOut;
32612  
32613 +       pax_track_stack();
32614 +
32615         if (!SCI_Prepare(j))
32616                 return 0;
32617  
32618 diff -urNp linux-3.0.4/drivers/tty/hvc/hvcs.c linux-3.0.4/drivers/tty/hvc/hvcs.c
32619 --- linux-3.0.4/drivers/tty/hvc/hvcs.c  2011-07-21 22:17:23.000000000 -0400
32620 +++ linux-3.0.4/drivers/tty/hvc/hvcs.c  2011-08-23 21:47:56.000000000 -0400
32621 @@ -83,6 +83,7 @@
32622  #include <asm/hvcserver.h>
32623  #include <asm/uaccess.h>
32624  #include <asm/vio.h>
32625 +#include <asm/local.h>
32626  
32627  /*
32628   * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32629 @@ -270,7 +271,7 @@ struct hvcs_struct {
32630         unsigned int index;
32631  
32632         struct tty_struct *tty;
32633 -       int open_count;
32634 +       local_t open_count;
32635  
32636         /*
32637          * Used to tell the driver kernel_thread what operations need to take
32638 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32639  
32640         spin_lock_irqsave(&hvcsd->lock, flags);
32641  
32642 -       if (hvcsd->open_count > 0) {
32643 +       if (local_read(&hvcsd->open_count) > 0) {
32644                 spin_unlock_irqrestore(&hvcsd->lock, flags);
32645                 printk(KERN_INFO "HVCS: vterm state unchanged.  "
32646                                 "The hvcs device node is still in use.\n");
32647 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32648                 if ((retval = hvcs_partner_connect(hvcsd)))
32649                         goto error_release;
32650  
32651 -       hvcsd->open_count = 1;
32652 +       local_set(&hvcsd->open_count, 1);
32653         hvcsd->tty = tty;
32654         tty->driver_data = hvcsd;
32655  
32656 @@ -1179,7 +1180,7 @@ fast_open:
32657  
32658         spin_lock_irqsave(&hvcsd->lock, flags);
32659         kref_get(&hvcsd->kref);
32660 -       hvcsd->open_count++;
32661 +       local_inc(&hvcsd->open_count);
32662         hvcsd->todo_mask |= HVCS_SCHED_READ;
32663         spin_unlock_irqrestore(&hvcsd->lock, flags);
32664  
32665 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32666         hvcsd = tty->driver_data;
32667  
32668         spin_lock_irqsave(&hvcsd->lock, flags);
32669 -       if (--hvcsd->open_count == 0) {
32670 +       if (local_dec_and_test(&hvcsd->open_count)) {
32671  
32672                 vio_disable_interrupts(hvcsd->vdev);
32673  
32674 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32675                 free_irq(irq, hvcsd);
32676                 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32677                 return;
32678 -       } else if (hvcsd->open_count < 0) {
32679 +       } else if (local_read(&hvcsd->open_count) < 0) {
32680                 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32681                                 " is missmanaged.\n",
32682 -               hvcsd->vdev->unit_address, hvcsd->open_count);
32683 +               hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32684         }
32685  
32686         spin_unlock_irqrestore(&hvcsd->lock, flags);
32687 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32688  
32689         spin_lock_irqsave(&hvcsd->lock, flags);
32690         /* Preserve this so that we know how many kref refs to put */
32691 -       temp_open_count = hvcsd->open_count;
32692 +       temp_open_count = local_read(&hvcsd->open_count);
32693  
32694         /*
32695          * Don't kref put inside the spinlock because the destruction
32696 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32697         hvcsd->tty->driver_data = NULL;
32698         hvcsd->tty = NULL;
32699  
32700 -       hvcsd->open_count = 0;
32701 +       local_set(&hvcsd->open_count, 0);
32702  
32703         /* This will drop any buffered data on the floor which is OK in a hangup
32704          * scenario. */
32705 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct 
32706          * the middle of a write operation?  This is a crummy place to do this
32707          * but we want to keep it all in the spinlock.
32708          */
32709 -       if (hvcsd->open_count <= 0) {
32710 +       if (local_read(&hvcsd->open_count) <= 0) {
32711                 spin_unlock_irqrestore(&hvcsd->lock, flags);
32712                 return -ENODEV;
32713         }
32714 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32715  {
32716         struct hvcs_struct *hvcsd = tty->driver_data;
32717  
32718 -       if (!hvcsd || hvcsd->open_count <= 0)
32719 +       if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32720                 return 0;
32721  
32722         return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32723 diff -urNp linux-3.0.4/drivers/tty/ipwireless/tty.c linux-3.0.4/drivers/tty/ipwireless/tty.c
32724 --- linux-3.0.4/drivers/tty/ipwireless/tty.c    2011-07-21 22:17:23.000000000 -0400
32725 +++ linux-3.0.4/drivers/tty/ipwireless/tty.c    2011-08-23 21:47:56.000000000 -0400
32726 @@ -29,6 +29,7 @@
32727  #include <linux/tty_driver.h>
32728  #include <linux/tty_flip.h>
32729  #include <linux/uaccess.h>
32730 +#include <asm/local.h>
32731  
32732  #include "tty.h"
32733  #include "network.h"
32734 @@ -51,7 +52,7 @@ struct ipw_tty {
32735         int tty_type;
32736         struct ipw_network *network;
32737         struct tty_struct *linux_tty;
32738 -       int open_count;
32739 +       local_t open_count;
32740         unsigned int control_lines;
32741         struct mutex ipw_tty_mutex;
32742         int tx_bytes_queued;
32743 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32744                 mutex_unlock(&tty->ipw_tty_mutex);
32745                 return -ENODEV;
32746         }
32747 -       if (tty->open_count == 0)
32748 +       if (local_read(&tty->open_count) == 0)
32749                 tty->tx_bytes_queued = 0;
32750  
32751 -       tty->open_count++;
32752 +       local_inc(&tty->open_count);
32753  
32754         tty->linux_tty = linux_tty;
32755         linux_tty->driver_data = tty;
32756 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32757  
32758  static void do_ipw_close(struct ipw_tty *tty)
32759  {
32760 -       tty->open_count--;
32761 -
32762 -       if (tty->open_count == 0) {
32763 +       if (local_dec_return(&tty->open_count) == 0) {
32764                 struct tty_struct *linux_tty = tty->linux_tty;
32765  
32766                 if (linux_tty != NULL) {
32767 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32768                 return;
32769  
32770         mutex_lock(&tty->ipw_tty_mutex);
32771 -       if (tty->open_count == 0) {
32772 +       if (local_read(&tty->open_count) == 0) {
32773                 mutex_unlock(&tty->ipw_tty_mutex);
32774                 return;
32775         }
32776 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32777                 return;
32778         }
32779  
32780 -       if (!tty->open_count) {
32781 +       if (!local_read(&tty->open_count)) {
32782                 mutex_unlock(&tty->ipw_tty_mutex);
32783                 return;
32784         }
32785 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32786                 return -ENODEV;
32787  
32788         mutex_lock(&tty->ipw_tty_mutex);
32789 -       if (!tty->open_count) {
32790 +       if (!local_read(&tty->open_count)) {
32791                 mutex_unlock(&tty->ipw_tty_mutex);
32792                 return -EINVAL;
32793         }
32794 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32795         if (!tty)
32796                 return -ENODEV;
32797  
32798 -       if (!tty->open_count)
32799 +       if (!local_read(&tty->open_count))
32800                 return -EINVAL;
32801  
32802         room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32803 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32804         if (!tty)
32805                 return 0;
32806  
32807 -       if (!tty->open_count)
32808 +       if (!local_read(&tty->open_count))
32809                 return 0;
32810  
32811         return tty->tx_bytes_queued;
32812 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32813         if (!tty)
32814                 return -ENODEV;
32815  
32816 -       if (!tty->open_count)
32817 +       if (!local_read(&tty->open_count))
32818                 return -EINVAL;
32819  
32820         return get_control_lines(tty);
32821 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32822         if (!tty)
32823                 return -ENODEV;
32824  
32825 -       if (!tty->open_count)
32826 +       if (!local_read(&tty->open_count))
32827                 return -EINVAL;
32828  
32829         return set_control_lines(tty, set, clear);
32830 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32831         if (!tty)
32832                 return -ENODEV;
32833  
32834 -       if (!tty->open_count)
32835 +       if (!local_read(&tty->open_count))
32836                 return -EINVAL;
32837  
32838         /* FIXME: Exactly how is the tty object locked here .. */
32839 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty 
32840                                    against a parallel ioctl etc */
32841                                 mutex_lock(&ttyj->ipw_tty_mutex);
32842                         }
32843 -                       while (ttyj->open_count)
32844 +                       while (local_read(&ttyj->open_count))
32845                                 do_ipw_close(ttyj);
32846                         ipwireless_disassociate_network_ttys(network,
32847                                                              ttyj->channel_idx);
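The hvcs and ipwireless hunks above apply one recurring conversion: an int open_count that is only manipulated under the device's own lock becomes a local_t, and every access goes through local_read()/local_inc()/local_dec_return(). A minimal sketch of the resulting shape, using a hypothetical my_tty structure rather than either real driver:

#include <asm/local.h>

struct my_tty {
	local_t open_count;		/* was: int open_count */
	int tx_bytes_queued;
};

static void my_open(struct my_tty *t)
{
	if (local_read(&t->open_count) == 0)	/* first opener */
		t->tx_bytes_queued = 0;
	local_inc(&t->open_count);
}

static void my_close(struct my_tty *t)
{
	/* the combined decrement-and-test replaces the old "--" followed
	 * by a separate "== 0" check, as in do_ipw_close() above */
	if (local_dec_return(&t->open_count) == 0)
		t->tx_bytes_queued = 0;		/* last close: drop queued state */
}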
32848 diff -urNp linux-3.0.4/drivers/tty/n_gsm.c linux-3.0.4/drivers/tty/n_gsm.c
32849 --- linux-3.0.4/drivers/tty/n_gsm.c     2011-08-23 21:44:40.000000000 -0400
32850 +++ linux-3.0.4/drivers/tty/n_gsm.c     2011-08-23 21:47:56.000000000 -0400
32851 @@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32852                 return NULL;
32853         spin_lock_init(&dlci->lock);
32854         dlci->fifo = &dlci->_fifo;
32855 -       if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32856 +       if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32857                 kfree(dlci);
32858                 return NULL;
32859         }
32860 diff -urNp linux-3.0.4/drivers/tty/n_tty.c linux-3.0.4/drivers/tty/n_tty.c
32861 --- linux-3.0.4/drivers/tty/n_tty.c     2011-07-21 22:17:23.000000000 -0400
32862 +++ linux-3.0.4/drivers/tty/n_tty.c     2011-08-23 21:47:56.000000000 -0400
32863 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32864  {
32865         *ops = tty_ldisc_N_TTY;
32866         ops->owner = NULL;
32867 -       ops->refcount = ops->flags = 0;
32868 +       atomic_set(&ops->refcount, 0);
32869 +       ops->flags = 0;
32870  }
32871  EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32872 diff -urNp linux-3.0.4/drivers/tty/pty.c linux-3.0.4/drivers/tty/pty.c
32873 --- linux-3.0.4/drivers/tty/pty.c       2011-07-21 22:17:23.000000000 -0400
32874 +++ linux-3.0.4/drivers/tty/pty.c       2011-08-23 21:47:56.000000000 -0400
32875 @@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
32876         register_sysctl_table(pty_root_table);
32877  
32878         /* Now create the /dev/ptmx special device */
32879 +       pax_open_kernel();
32880         tty_default_fops(&ptmx_fops);
32881 -       ptmx_fops.open = ptmx_open;
32882 +       *(void **)&ptmx_fops.open = ptmx_open;
32883 +       pax_close_kernel();
32884  
32885         cdev_init(&ptmx_cdev, &ptmx_fops);
32886         if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
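The pty hunk above shows the PaX constification idiom: pax_open_kernel() and pax_close_kernel() bracket a write to a file_operations object that is otherwise kept read-only, and the store goes through a void ** cast so the const treatment of the structure does not reject it. A sketch only, assuming those PaX helpers and a hypothetical my_fops/my_open pair:

static int my_open(struct inode *inode, struct file *file);
static struct file_operations my_fops;	/* kept read-only at runtime under PaX */

static void __init my_late_init(void)
{
	pax_open_kernel();			/* temporarily permit the write */
	*(void **)&my_fops.open = my_open;	/* cast sidesteps constification */
	pax_close_kernel();			/* restore read-only protection */
}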
32887 diff -urNp linux-3.0.4/drivers/tty/rocket.c linux-3.0.4/drivers/tty/rocket.c
32888 --- linux-3.0.4/drivers/tty/rocket.c    2011-07-21 22:17:23.000000000 -0400
32889 +++ linux-3.0.4/drivers/tty/rocket.c    2011-08-23 21:48:14.000000000 -0400
32890 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32891         struct rocket_ports tmp;
32892         int board;
32893  
32894 +       pax_track_stack();
32895 +
32896         if (!retports)
32897                 return -EFAULT;
32898         memset(&tmp, 0, sizeof (tmp));
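The rocket.c hunk above, like the mrst_max3110, xhci-mem and fbmem hunks later on, only adds a pax_track_stack() call at the top of a function that keeps a large object on the stack. A sketch, assuming pax_track_stack() is the PaX stack-tracking hook that the patch places ahead of big stack frames (its exact semantics live in the PaX support code, not in these hunks), with a hypothetical my_receive():

static void my_receive(void)
{
	u8 recv_buf[512];		/* large on-stack buffer */

	pax_track_stack();		/* announce the big frame to PaX */

	memset(recv_buf, 0, sizeof(recv_buf));
	/* ... normal processing of recv_buf ... */
}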
32899 diff -urNp linux-3.0.4/drivers/tty/serial/kgdboc.c linux-3.0.4/drivers/tty/serial/kgdboc.c
32900 --- linux-3.0.4/drivers/tty/serial/kgdboc.c     2011-07-21 22:17:23.000000000 -0400
32901 +++ linux-3.0.4/drivers/tty/serial/kgdboc.c     2011-08-23 21:47:56.000000000 -0400
32902 @@ -23,8 +23,9 @@
32903  #define MAX_CONFIG_LEN         40
32904  
32905  static struct kgdb_io          kgdboc_io_ops;
32906 +static struct kgdb_io          kgdboc_io_ops_console;
32907  
32908 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
32909 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
32910  static int configured          = -1;
32911  
32912  static char config[MAX_CONFIG_LEN];
32913 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
32914         kgdboc_unregister_kbd();
32915         if (configured == 1)
32916                 kgdb_unregister_io_module(&kgdboc_io_ops);
32917 +       else if (configured == 2)
32918 +               kgdb_unregister_io_module(&kgdboc_io_ops_console);
32919  }
32920  
32921  static int configure_kgdboc(void)
32922 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
32923         int err;
32924         char *cptr = config;
32925         struct console *cons;
32926 +       int is_console = 0;
32927  
32928         err = kgdboc_option_setup(config);
32929         if (err || !strlen(config) || isspace(config[0]))
32930                 goto noconfig;
32931  
32932         err = -ENODEV;
32933 -       kgdboc_io_ops.is_console = 0;
32934         kgdb_tty_driver = NULL;
32935  
32936         kgdboc_use_kms = 0;
32937 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
32938                 int idx;
32939                 if (cons->device && cons->device(cons, &idx) == p &&
32940                     idx == tty_line) {
32941 -                       kgdboc_io_ops.is_console = 1;
32942 +                       is_console = 1;
32943                         break;
32944                 }
32945                 cons = cons->next;
32946 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
32947         kgdb_tty_line = tty_line;
32948  
32949  do_register:
32950 -       err = kgdb_register_io_module(&kgdboc_io_ops);
32951 +       if (is_console) {
32952 +               err = kgdb_register_io_module(&kgdboc_io_ops_console);
32953 +               configured = 2;
32954 +       } else {
32955 +               err = kgdb_register_io_module(&kgdboc_io_ops);
32956 +               configured = 1;
32957 +       }
32958         if (err)
32959                 goto noconfig;
32960  
32961 -       configured = 1;
32962 -
32963         return 0;
32964  
32965  noconfig:
32966 @@ -212,7 +219,7 @@ noconfig:
32967  static int __init init_kgdboc(void)
32968  {
32969         /* Already configured? */
32970 -       if (configured == 1)
32971 +       if (configured >= 1)
32972                 return 0;
32973  
32974         return configure_kgdboc();
32975 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
32976         if (config[len - 1] == '\n')
32977                 config[len - 1] = '\0';
32978  
32979 -       if (configured == 1)
32980 +       if (configured >= 1)
32981                 cleanup_kgdboc();
32982  
32983         /* Go and configure with the new params. */
32984 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
32985         .post_exception         = kgdboc_post_exp_handler,
32986  };
32987  
32988 +static struct kgdb_io kgdboc_io_ops_console = {
32989 +       .name                   = "kgdboc",
32990 +       .read_char              = kgdboc_get_char,
32991 +       .write_char             = kgdboc_put_char,
32992 +       .pre_exception          = kgdboc_pre_exp_handler,
32993 +       .post_exception         = kgdboc_post_exp_handler,
32994 +       .is_console             = 1
32995 +};
32996 +
32997  #ifdef CONFIG_KGDB_SERIAL_CONSOLE
32998  /* This is only available if kgdboc is a built in for early debugging */
32999  static int __init kgdboc_early_init(char *opt)
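Rather than toggling a writable is_console field inside a single kgdb_io at runtime, the kgdboc hunks (and the ehci-dbgp ones further down) define two statically initialized kgdb_io structures and register whichever one matches, recording the choice in configured (1 or 2) so cleanup unregisters the right instance. Condensed from the hunks above, with hypothetical my_register()/my_cleanup() wrappers standing in for configure_kgdboc()/cleanup_kgdboc():

static int configured = -1;	/* -1 = not run, 0 = unconfigured, 1/2 = configured */

static int my_register(int is_console)
{
	int err;

	if (is_console) {
		err = kgdb_register_io_module(&kgdboc_io_ops_console);
		configured = 2;
	} else {
		err = kgdb_register_io_module(&kgdboc_io_ops);
		configured = 1;
	}
	return err;
}

static void my_cleanup(void)
{
	if (configured == 1)
		kgdb_unregister_io_module(&kgdboc_io_ops);
	else if (configured == 2)
		kgdb_unregister_io_module(&kgdboc_io_ops_console);
}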
33000 diff -urNp linux-3.0.4/drivers/tty/serial/mrst_max3110.c linux-3.0.4/drivers/tty/serial/mrst_max3110.c
33001 --- linux-3.0.4/drivers/tty/serial/mrst_max3110.c       2011-07-21 22:17:23.000000000 -0400
33002 +++ linux-3.0.4/drivers/tty/serial/mrst_max3110.c       2011-08-23 21:48:14.000000000 -0400
33003 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33004         int loop = 1, num, total = 0;
33005         u8 recv_buf[512], *pbuf;
33006  
33007 +       pax_track_stack();
33008 +
33009         pbuf = recv_buf;
33010         do {
33011                 num = max3110_read_multi(max, pbuf);
33012 diff -urNp linux-3.0.4/drivers/tty/tty_io.c linux-3.0.4/drivers/tty/tty_io.c
33013 --- linux-3.0.4/drivers/tty/tty_io.c    2011-07-21 22:17:23.000000000 -0400
33014 +++ linux-3.0.4/drivers/tty/tty_io.c    2011-08-23 21:47:56.000000000 -0400
33015 @@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33016  
33017  void tty_default_fops(struct file_operations *fops)
33018  {
33019 -       *fops = tty_fops;
33020 +       memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33021  }
33022  
33023  /*
33024 diff -urNp linux-3.0.4/drivers/tty/tty_ldisc.c linux-3.0.4/drivers/tty/tty_ldisc.c
33025 --- linux-3.0.4/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
33026 +++ linux-3.0.4/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
33027 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33028         if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33029                 struct tty_ldisc_ops *ldo = ld->ops;
33030  
33031 -               ldo->refcount--;
33032 +               atomic_dec(&ldo->refcount);
33033                 module_put(ldo->owner);
33034                 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33035  
33036 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct 
33037         spin_lock_irqsave(&tty_ldisc_lock, flags);
33038         tty_ldiscs[disc] = new_ldisc;
33039         new_ldisc->num = disc;
33040 -       new_ldisc->refcount = 0;
33041 +       atomic_set(&new_ldisc->refcount, 0);
33042         spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33043  
33044         return ret;
33045 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33046                 return -EINVAL;
33047  
33048         spin_lock_irqsave(&tty_ldisc_lock, flags);
33049 -       if (tty_ldiscs[disc]->refcount)
33050 +       if (atomic_read(&tty_ldiscs[disc]->refcount))
33051                 ret = -EBUSY;
33052         else
33053                 tty_ldiscs[disc] = NULL;
33054 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33055         if (ldops) {
33056                 ret = ERR_PTR(-EAGAIN);
33057                 if (try_module_get(ldops->owner)) {
33058 -                       ldops->refcount++;
33059 +                       atomic_inc(&ldops->refcount);
33060                         ret = ldops;
33061                 }
33062         }
33063 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33064         unsigned long flags;
33065  
33066         spin_lock_irqsave(&tty_ldisc_lock, flags);
33067 -       ldops->refcount--;
33068 +       atomic_dec(&ldops->refcount);
33069         module_put(ldops->owner);
33070         spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33071  }
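The n_tty and tty_ldisc hunks convert tty_ldisc_ops.refcount from a plain int, bumped and dropped under tty_ldisc_lock, into an atomic_t accessed only through atomic_set()/atomic_inc()/atomic_dec()/atomic_read(). A minimal sketch of the get/put pair with hypothetical names:

#include <linux/atomic.h>
#include <linux/module.h>

struct my_ldisc_ops {
	atomic_t refcount;		/* was: int refcount */
	struct module *owner;
};

static struct my_ldisc_ops *my_get_ldops(struct my_ldisc_ops *ldops)
{
	if (!try_module_get(ldops->owner))
		return NULL;
	atomic_inc(&ldops->refcount);	/* was: ldops->refcount++ */
	return ldops;
}

static void my_put_ldops(struct my_ldisc_ops *ldops)
{
	atomic_dec(&ldops->refcount);	/* was: ldops->refcount-- */
	module_put(ldops->owner);
}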
33072 diff -urNp linux-3.0.4/drivers/tty/vt/keyboard.c linux-3.0.4/drivers/tty/vt/keyboard.c
33073 --- linux-3.0.4/drivers/tty/vt/keyboard.c       2011-07-21 22:17:23.000000000 -0400
33074 +++ linux-3.0.4/drivers/tty/vt/keyboard.c       2011-08-23 21:48:14.000000000 -0400
33075 @@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
33076              kbd->kbdmode == VC_OFF) &&
33077              value != KVAL(K_SAK))
33078                 return;         /* SAK is allowed even in raw mode */
33079 +
33080 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33081 +       {
33082 +               void *func = fn_handler[value];
33083 +               if (func == fn_show_state || func == fn_show_ptregs ||
33084 +                   func == fn_show_mem)
33085 +                       return;
33086 +       }
33087 +#endif
33088 +
33089         fn_handler[value](vc);
33090  }
33091  
33092 diff -urNp linux-3.0.4/drivers/tty/vt/vt.c linux-3.0.4/drivers/tty/vt/vt.c
33093 --- linux-3.0.4/drivers/tty/vt/vt.c     2011-07-21 22:17:23.000000000 -0400
33094 +++ linux-3.0.4/drivers/tty/vt/vt.c     2011-08-23 21:47:56.000000000 -0400
33095 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33096  
33097  static void notify_write(struct vc_data *vc, unsigned int unicode)
33098  {
33099 -       struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33100 +       struct vt_notifier_param param = { .vc = vc, .c = unicode };
33101         atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33102  }
33103  
33104 diff -urNp linux-3.0.4/drivers/tty/vt/vt_ioctl.c linux-3.0.4/drivers/tty/vt/vt_ioctl.c
33105 --- linux-3.0.4/drivers/tty/vt/vt_ioctl.c       2011-07-21 22:17:23.000000000 -0400
33106 +++ linux-3.0.4/drivers/tty/vt/vt_ioctl.c       2011-08-23 21:48:14.000000000 -0400
33107 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33108         if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33109                 return -EFAULT;
33110  
33111 -       if (!capable(CAP_SYS_TTY_CONFIG))
33112 -               perm = 0;
33113 -
33114         switch (cmd) {
33115         case KDGKBENT:
33116                 key_map = key_maps[s];
33117 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33118                     val = (i ? K_HOLE : K_NOSUCHMAP);
33119                 return put_user(val, &user_kbe->kb_value);
33120         case KDSKBENT:
33121 +               if (!capable(CAP_SYS_TTY_CONFIG))
33122 +                       perm = 0;
33123 +
33124                 if (!perm)
33125                         return -EPERM;
33126                 if (!i && v == K_NOSUCHMAP) {
33127 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry 
33128         int i, j, k;
33129         int ret;
33130  
33131 -       if (!capable(CAP_SYS_TTY_CONFIG))
33132 -               perm = 0;
33133 -
33134         kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33135         if (!kbs) {
33136                 ret = -ENOMEM;
33137 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry 
33138                 kfree(kbs);
33139                 return ((p && *p) ? -EOVERFLOW : 0);
33140         case KDSKBSENT:
33141 +               if (!capable(CAP_SYS_TTY_CONFIG))
33142 +                       perm = 0;
33143 +
33144                 if (!perm) {
33145                         ret = -EPERM;
33146                         goto reterr;
33147 diff -urNp linux-3.0.4/drivers/uio/uio.c linux-3.0.4/drivers/uio/uio.c
33148 --- linux-3.0.4/drivers/uio/uio.c       2011-07-21 22:17:23.000000000 -0400
33149 +++ linux-3.0.4/drivers/uio/uio.c       2011-08-23 21:47:56.000000000 -0400
33150 @@ -25,6 +25,7 @@
33151  #include <linux/kobject.h>
33152  #include <linux/cdev.h>
33153  #include <linux/uio_driver.h>
33154 +#include <asm/local.h>
33155  
33156  #define UIO_MAX_DEVICES                (1U << MINORBITS)
33157  
33158 @@ -32,10 +33,10 @@ struct uio_device {
33159         struct module           *owner;
33160         struct device           *dev;
33161         int                     minor;
33162 -       atomic_t                event;
33163 +       atomic_unchecked_t      event;
33164         struct fasync_struct    *async_queue;
33165         wait_queue_head_t       wait;
33166 -       int                     vma_count;
33167 +       local_t                 vma_count;
33168         struct uio_info         *info;
33169         struct kobject          *map_dir;
33170         struct kobject          *portio_dir;
33171 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device 
33172                           struct device_attribute *attr, char *buf)
33173  {
33174         struct uio_device *idev = dev_get_drvdata(dev);
33175 -       return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33176 +       return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33177  }
33178  
33179  static struct device_attribute uio_class_attributes[] = {
33180 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
33181  {
33182         struct uio_device *idev = info->uio_dev;
33183  
33184 -       atomic_inc(&idev->event);
33185 +       atomic_inc_unchecked(&idev->event);
33186         wake_up_interruptible(&idev->wait);
33187         kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33188  }
33189 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
33190         }
33191  
33192         listener->dev = idev;
33193 -       listener->event_count = atomic_read(&idev->event);
33194 +       listener->event_count = atomic_read_unchecked(&idev->event);
33195         filep->private_data = listener;
33196  
33197         if (idev->info->open) {
33198 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
33199                 return -EIO;
33200  
33201         poll_wait(filep, &idev->wait, wait);
33202 -       if (listener->event_count != atomic_read(&idev->event))
33203 +       if (listener->event_count != atomic_read_unchecked(&idev->event))
33204                 return POLLIN | POLLRDNORM;
33205         return 0;
33206  }
33207 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
33208         do {
33209                 set_current_state(TASK_INTERRUPTIBLE);
33210  
33211 -               event_count = atomic_read(&idev->event);
33212 +               event_count = atomic_read_unchecked(&idev->event);
33213                 if (event_count != listener->event_count) {
33214                         if (copy_to_user(buf, &event_count, count))
33215                                 retval = -EFAULT;
33216 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
33217  static void uio_vma_open(struct vm_area_struct *vma)
33218  {
33219         struct uio_device *idev = vma->vm_private_data;
33220 -       idev->vma_count++;
33221 +       local_inc(&idev->vma_count);
33222  }
33223  
33224  static void uio_vma_close(struct vm_area_struct *vma)
33225  {
33226         struct uio_device *idev = vma->vm_private_data;
33227 -       idev->vma_count--;
33228 +       local_dec(&idev->vma_count);
33229  }
33230  
33231  static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33232 @@ -823,7 +824,7 @@ int __uio_register_device(struct module 
33233         idev->owner = owner;
33234         idev->info = info;
33235         init_waitqueue_head(&idev->wait);
33236 -       atomic_set(&idev->event, 0);
33237 +       atomic_set_unchecked(&idev->event, 0);
33238  
33239         ret = uio_get_minor(idev);
33240         if (ret)
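The uio hunks, and the usbatm/usb-core ones that follow, switch pure event and statistics counters from atomic_t to atomic_unchecked_t with the matching *_unchecked accessors. These are PaX/grsecurity primitives (an assumption worth flagging: they mark counters that may legitimately wrap, so the overflow protection applied to ordinary atomic_t does not fire on them). A sketch under that assumption, with a hypothetical my_stats structure:

struct my_stats {
	atomic_unchecked_t event;	/* was: atomic_t event */
};

static void my_notify(struct my_stats *s)
{
	atomic_inc_unchecked(&s->event);	/* wrap-around is harmless here */
}

static unsigned int my_event_count(struct my_stats *s)
{
	return (unsigned int)atomic_read_unchecked(&s->event);
}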
33241 diff -urNp linux-3.0.4/drivers/usb/atm/cxacru.c linux-3.0.4/drivers/usb/atm/cxacru.c
33242 --- linux-3.0.4/drivers/usb/atm/cxacru.c        2011-07-21 22:17:23.000000000 -0400
33243 +++ linux-3.0.4/drivers/usb/atm/cxacru.c        2011-08-23 21:47:56.000000000 -0400
33244 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33245                 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33246                 if (ret < 2)
33247                         return -EINVAL;
33248 -               if (index < 0 || index > 0x7f)
33249 +               if (index > 0x7f)
33250                         return -EINVAL;
33251                 pos += tmp;
33252  
33253 diff -urNp linux-3.0.4/drivers/usb/atm/usbatm.c linux-3.0.4/drivers/usb/atm/usbatm.c
33254 --- linux-3.0.4/drivers/usb/atm/usbatm.c        2011-07-21 22:17:23.000000000 -0400
33255 +++ linux-3.0.4/drivers/usb/atm/usbatm.c        2011-08-23 21:47:56.000000000 -0400
33256 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33257                 if (printk_ratelimit())
33258                         atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33259                                 __func__, vpi, vci);
33260 -               atomic_inc(&vcc->stats->rx_err);
33261 +               atomic_inc_unchecked(&vcc->stats->rx_err);
33262                 return;
33263         }
33264  
33265 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33266                 if (length > ATM_MAX_AAL5_PDU) {
33267                         atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33268                                   __func__, length, vcc);
33269 -                       atomic_inc(&vcc->stats->rx_err);
33270 +                       atomic_inc_unchecked(&vcc->stats->rx_err);
33271                         goto out;
33272                 }
33273  
33274 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33275                 if (sarb->len < pdu_length) {
33276                         atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33277                                   __func__, pdu_length, sarb->len, vcc);
33278 -                       atomic_inc(&vcc->stats->rx_err);
33279 +                       atomic_inc_unchecked(&vcc->stats->rx_err);
33280                         goto out;
33281                 }
33282  
33283                 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33284                         atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33285                                   __func__, vcc);
33286 -                       atomic_inc(&vcc->stats->rx_err);
33287 +                       atomic_inc_unchecked(&vcc->stats->rx_err);
33288                         goto out;
33289                 }
33290  
33291 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33292                         if (printk_ratelimit())
33293                                 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33294                                         __func__, length);
33295 -                       atomic_inc(&vcc->stats->rx_drop);
33296 +                       atomic_inc_unchecked(&vcc->stats->rx_drop);
33297                         goto out;
33298                 }
33299  
33300 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33301  
33302                 vcc->push(vcc, skb);
33303  
33304 -               atomic_inc(&vcc->stats->rx);
33305 +               atomic_inc_unchecked(&vcc->stats->rx);
33306         out:
33307                 skb_trim(sarb, 0);
33308         }
33309 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33310                         struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33311  
33312                         usbatm_pop(vcc, skb);
33313 -                       atomic_inc(&vcc->stats->tx);
33314 +                       atomic_inc_unchecked(&vcc->stats->tx);
33315  
33316                         skb = skb_dequeue(&instance->sndqueue);
33317                 }
33318 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33319         if (!left--)
33320                 return sprintf(page,
33321                                "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33322 -                              atomic_read(&atm_dev->stats.aal5.tx),
33323 -                              atomic_read(&atm_dev->stats.aal5.tx_err),
33324 -                              atomic_read(&atm_dev->stats.aal5.rx),
33325 -                              atomic_read(&atm_dev->stats.aal5.rx_err),
33326 -                              atomic_read(&atm_dev->stats.aal5.rx_drop));
33327 +                              atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33328 +                              atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33329 +                              atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33330 +                              atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33331 +                              atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33332  
33333         if (!left--) {
33334                 if (instance->disconnected)
33335 diff -urNp linux-3.0.4/drivers/usb/core/devices.c linux-3.0.4/drivers/usb/core/devices.c
33336 --- linux-3.0.4/drivers/usb/core/devices.c      2011-07-21 22:17:23.000000000 -0400
33337 +++ linux-3.0.4/drivers/usb/core/devices.c      2011-08-23 21:47:56.000000000 -0400
33338 @@ -126,7 +126,7 @@ static const char format_endpt[] =
33339   * time it gets called.
33340   */
33341  static struct device_connect_event {
33342 -       atomic_t count;
33343 +       atomic_unchecked_t count;
33344         wait_queue_head_t wait;
33345  } device_event = {
33346         .count = ATOMIC_INIT(1),
33347 @@ -164,7 +164,7 @@ static const struct class_info clas_info
33348  
33349  void usbfs_conn_disc_event(void)
33350  {
33351 -       atomic_add(2, &device_event.count);
33352 +       atomic_add_unchecked(2, &device_event.count);
33353         wake_up(&device_event.wait);
33354  }
33355  
33356 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33357  
33358         poll_wait(file, &device_event.wait, wait);
33359  
33360 -       event_count = atomic_read(&device_event.count);
33361 +       event_count = atomic_read_unchecked(&device_event.count);
33362         if (file->f_version != event_count) {
33363                 file->f_version = event_count;
33364                 return POLLIN | POLLRDNORM;
33365 diff -urNp linux-3.0.4/drivers/usb/core/message.c linux-3.0.4/drivers/usb/core/message.c
33366 --- linux-3.0.4/drivers/usb/core/message.c      2011-07-21 22:17:23.000000000 -0400
33367 +++ linux-3.0.4/drivers/usb/core/message.c      2011-08-23 21:47:56.000000000 -0400
33368 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33369         buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33370         if (buf) {
33371                 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33372 -               if (len > 0) {
33373 -                       smallbuf = kmalloc(++len, GFP_NOIO);
33374 +               if (len++ > 0) {
33375 +                       smallbuf = kmalloc(len, GFP_NOIO);
33376                         if (!smallbuf)
33377                                 return buf;
33378                         memcpy(smallbuf, buf, len);
33379 diff -urNp linux-3.0.4/drivers/usb/early/ehci-dbgp.c linux-3.0.4/drivers/usb/early/ehci-dbgp.c
33380 --- linux-3.0.4/drivers/usb/early/ehci-dbgp.c   2011-07-21 22:17:23.000000000 -0400
33381 +++ linux-3.0.4/drivers/usb/early/ehci-dbgp.c   2011-08-23 21:47:56.000000000 -0400
33382 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33383  
33384  #ifdef CONFIG_KGDB
33385  static struct kgdb_io kgdbdbgp_io_ops;
33386 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33387 +static struct kgdb_io kgdbdbgp_io_ops_console;
33388 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33389  #else
33390  #define dbgp_kgdb_mode (0)
33391  #endif
33392 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = 
33393         .write_char = kgdbdbgp_write_char,
33394  };
33395  
33396 +static struct kgdb_io kgdbdbgp_io_ops_console = {
33397 +       .name = "kgdbdbgp",
33398 +       .read_char = kgdbdbgp_read_char,
33399 +       .write_char = kgdbdbgp_write_char,
33400 +       .is_console = 1
33401 +};
33402 +
33403  static int kgdbdbgp_wait_time;
33404  
33405  static int __init kgdbdbgp_parse_config(char *str)
33406 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
33407                 ptr++;
33408                 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33409         }
33410 -       kgdb_register_io_module(&kgdbdbgp_io_ops);
33411 -       kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33412 +       if (early_dbgp_console.index != -1)
33413 +               kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33414 +       else
33415 +               kgdb_register_io_module(&kgdbdbgp_io_ops);
33416  
33417         return 0;
33418  }
33419 diff -urNp linux-3.0.4/drivers/usb/host/xhci-mem.c linux-3.0.4/drivers/usb/host/xhci-mem.c
33420 --- linux-3.0.4/drivers/usb/host/xhci-mem.c     2011-07-21 22:17:23.000000000 -0400
33421 +++ linux-3.0.4/drivers/usb/host/xhci-mem.c     2011-08-23 21:48:14.000000000 -0400
33422 @@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
33423         unsigned int num_tests;
33424         int i, ret;
33425  
33426 +       pax_track_stack();
33427 +
33428         num_tests = ARRAY_SIZE(simple_test_vector);
33429         for (i = 0; i < num_tests; i++) {
33430                 ret = xhci_test_trb_in_td(xhci,
33431 diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-hc.h linux-3.0.4/drivers/usb/wusbcore/wa-hc.h
33432 --- linux-3.0.4/drivers/usb/wusbcore/wa-hc.h    2011-07-21 22:17:23.000000000 -0400
33433 +++ linux-3.0.4/drivers/usb/wusbcore/wa-hc.h    2011-08-23 21:47:56.000000000 -0400
33434 @@ -192,7 +192,7 @@ struct wahc {
33435         struct list_head xfer_delayed_list;
33436         spinlock_t xfer_list_lock;
33437         struct work_struct xfer_work;
33438 -       atomic_t xfer_id_count;
33439 +       atomic_unchecked_t xfer_id_count;
33440  };
33441  
33442  
33443 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33444         INIT_LIST_HEAD(&wa->xfer_delayed_list);
33445         spin_lock_init(&wa->xfer_list_lock);
33446         INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33447 -       atomic_set(&wa->xfer_id_count, 1);
33448 +       atomic_set_unchecked(&wa->xfer_id_count, 1);
33449  }
33450  
33451  /**
33452 diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c
33453 --- linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c  2011-07-21 22:17:23.000000000 -0400
33454 +++ linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c  2011-08-23 21:47:56.000000000 -0400
33455 @@ -294,7 +294,7 @@ out:
33456   */
33457  static void wa_xfer_id_init(struct wa_xfer *xfer)
33458  {
33459 -       xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33460 +       xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33461  }
33462  
33463  /*
33464 diff -urNp linux-3.0.4/drivers/vhost/vhost.c linux-3.0.4/drivers/vhost/vhost.c
33465 --- linux-3.0.4/drivers/vhost/vhost.c   2011-07-21 22:17:23.000000000 -0400
33466 +++ linux-3.0.4/drivers/vhost/vhost.c   2011-08-23 21:47:56.000000000 -0400
33467 @@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
33468         return get_user(vq->last_used_idx, &used->idx);
33469  }
33470  
33471 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33472 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33473  {
33474         struct file *eventfp, *filep = NULL,
33475                     *pollstart = NULL, *pollstop = NULL;
33476 diff -urNp linux-3.0.4/drivers/video/fbcmap.c linux-3.0.4/drivers/video/fbcmap.c
33477 --- linux-3.0.4/drivers/video/fbcmap.c  2011-07-21 22:17:23.000000000 -0400
33478 +++ linux-3.0.4/drivers/video/fbcmap.c  2011-08-23 21:47:56.000000000 -0400
33479 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33480                 rc = -ENODEV;
33481                 goto out;
33482         }
33483 -       if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33484 -                               !info->fbops->fb_setcmap)) {
33485 +       if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33486                 rc = -EINVAL;
33487                 goto out1;
33488         }
33489 diff -urNp linux-3.0.4/drivers/video/fbmem.c linux-3.0.4/drivers/video/fbmem.c
33490 --- linux-3.0.4/drivers/video/fbmem.c   2011-07-21 22:17:23.000000000 -0400
33491 +++ linux-3.0.4/drivers/video/fbmem.c   2011-08-23 21:48:14.000000000 -0400
33492 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33493                         image->dx += image->width + 8;
33494                 }
33495         } else if (rotate == FB_ROTATE_UD) {
33496 -               for (x = 0; x < num && image->dx >= 0; x++) {
33497 +               for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33498                         info->fbops->fb_imageblit(info, image);
33499                         image->dx -= image->width + 8;
33500                 }
33501 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33502                         image->dy += image->height + 8;
33503                 }
33504         } else if (rotate == FB_ROTATE_CCW) {
33505 -               for (x = 0; x < num && image->dy >= 0; x++) {
33506 +               for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33507                         info->fbops->fb_imageblit(info, image);
33508                         image->dy -= image->height + 8;
33509                 }
33510 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct 
33511         int flags = info->flags;
33512         int ret = 0;
33513  
33514 +       pax_track_stack();
33515 +
33516         if (var->activate & FB_ACTIVATE_INV_MODE) {
33517                 struct fb_videomode mode1, mode2;
33518  
33519 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33520         void __user *argp = (void __user *)arg;
33521         long ret = 0;
33522  
33523 +       pax_track_stack();
33524 +
33525         switch (cmd) {
33526         case FBIOGET_VSCREENINFO:
33527                 if (!lock_fb_info(info))
33528 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33529                         return -EFAULT;
33530                 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33531                         return -EINVAL;
33532 -               if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33533 +               if (con2fb.framebuffer >= FB_MAX)
33534                         return -EINVAL;
33535                 if (!registered_fb[con2fb.framebuffer])
33536                         request_module("fb%d", con2fb.framebuffer);
33537 diff -urNp linux-3.0.4/drivers/video/i810/i810_accel.c linux-3.0.4/drivers/video/i810/i810_accel.c
33538 --- linux-3.0.4/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
33539 +++ linux-3.0.4/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
33540 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct 
33541                 }
33542         }
33543         printk("ringbuffer lockup!!!\n");
33544 +       printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33545         i810_report_error(mmio); 
33546         par->dev_flags |= LOCKUP;
33547         info->pixmap.scan_align = 1;
33548 diff -urNp linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm
33549 --- linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm       2011-07-21 22:17:23.000000000 -0400
33550 +++ linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm       2011-08-29 23:49:40.000000000 -0400
33551 @@ -1,1604 +1,1123 @@
33552  P3
33553 -# Standard 224-color Linux logo
33554  80 80
33555  255
  [remainder of hunk omitted: the rest of this hunk is raw PPM pixel data, deleting the rows of the standard 224-color 80x80 Linux boot logo and substituting the patch's replacement logo image; the colour triplets carry no further information]
33982 -  0   0   0   0   0   0   0   0   0  10  10  10
33983 - 38  38  38  86  86  86  14  14  14  10  10  10
33984 -195 195 195 188 164 115 192 133   9 225 175  15
33985 -239 182  13 234 190  10 232 195  16 232 200  30
33986 -245 207  45 241 208  19 232 195  16 184 144  12
33987 -218 194 134 211 206 186  42  42  42   2   2   6
33988 -  2   2   6   2   2   6   2   2   6   2   2   6
33989 - 50  50  50  74  74  74  30  30  30   6   6   6
33990 -  0   0   0   0   0   0   0   0   0   0   0   0
33991 -  0   0   0   0   0   0   0   0   0   0   0   0
33992 -  0   0   0   0   0   0   0   0   0   0   0   0
33993 -  0   0   0   0   0   0   0   0   0   0   0   0
33994 -  0   0   0   0   0   0   0   0   0   0   0   0
33995 -  0   0   0   0   0   0   0   0   0   0   0   0
33996 -  0   0   0   0   0   0   0   0   0   0   0   0
33997 -  0   0   0   0   0   0   0   0   0   0   0   0
33998 -  0   0   0   0   0   0   0   0   0   0   0   0
33999 -  0   0   0   0   0   0   0   0   0   0   0   0
34000 -  0   0   0   0   0   0   0   0   0   0   0   0
34001 -  0   0   0   0   0   0   0   0   0   0   0   0
34002 -  0   0   0   0   0   0   0   0   0  10  10  10
34003 - 34  34  34  86  86  86  14  14  14   2   2   6
34004 -121  87  25 192 133   9 219 162  10 239 182  13
34005 -236 186  11 232 195  16 241 208  19 244 214  54
34006 -246 218  60 246 218  38 246 215  20 241 208  19
34007 -241 208  19 226 184  13 121  87  25   2   2   6
34008 -  2   2   6   2   2   6   2   2   6   2   2   6
34009 - 50  50  50  82  82  82  34  34  34  10  10  10
34010 -  0   0   0   0   0   0   0   0   0   0   0   0
34011 -  0   0   0   0   0   0   0   0   0   0   0   0
34012 -  0   0   0   0   0   0   0   0   0   0   0   0
34013 -  0   0   0   0   0   0   0   0   0   0   0   0
34014 -  0   0   0   0   0   0   0   0   0   0   0   0
34015 -  0   0   0   0   0   0   0   0   0   0   0   0
34016 -  0   0   0   0   0   0   0   0   0   0   0   0
34017 -  0   0   0   0   0   0   0   0   0   0   0   0
34018 -  0   0   0   0   0   0   0   0   0   0   0   0
34019 -  0   0   0   0   0   0   0   0   0   0   0   0
34020 -  0   0   0   0   0   0   0   0   0   0   0   0
34021 -  0   0   0   0   0   0   0   0   0   0   0   0
34022 -  0   0   0   0   0   0   0   0   0  10  10  10
34023 - 34  34  34  82  82  82  30  30  30  61  42   6
34024 -180 123   7 206 145  10 230 174  11 239 182  13
34025 -234 190  10 238 202  15 241 208  19 246 218  74
34026 -246 218  38 246 215  20 246 215  20 246 215  20
34027 -226 184  13 215 174  15 184 144  12   6   6   6
34028 -  2   2   6   2   2   6   2   2   6   2   2   6
34029 - 26  26  26  94  94  94  42  42  42  14  14  14
34030 -  0   0   0   0   0   0   0   0   0   0   0   0
34031 -  0   0   0   0   0   0   0   0   0   0   0   0
34032 -  0   0   0   0   0   0   0   0   0   0   0   0
34033 -  0   0   0   0   0   0   0   0   0   0   0   0
34034 -  0   0   0   0   0   0   0   0   0   0   0   0
34035 -  0   0   0   0   0   0   0   0   0   0   0   0
34036 -  0   0   0   0   0   0   0   0   0   0   0   0
34037 -  0   0   0   0   0   0   0   0   0   0   0   0
34038 -  0   0   0   0   0   0   0   0   0   0   0   0
34039 -  0   0   0   0   0   0   0   0   0   0   0   0
34040 -  0   0   0   0   0   0   0   0   0   0   0   0
34041 -  0   0   0   0   0   0   0   0   0   0   0   0
34042 -  0   0   0   0   0   0   0   0   0  10  10  10
34043 - 30  30  30  78  78  78  50  50  50 104  69   6
34044 -192 133   9 216 158  10 236 178  12 236 186  11
34045 -232 195  16 241 208  19 244 214  54 245 215  43
34046 -246 215  20 246 215  20 241 208  19 198 155  10
34047 -200 144  11 216 158  10 156 118  10   2   2   6
34048 -  2   2   6   2   2   6   2   2   6   2   2   6
34049 -  6   6   6  90  90  90  54  54  54  18  18  18
34050 -  6   6   6   0   0   0   0   0   0   0   0   0
34051 -  0   0   0   0   0   0   0   0   0   0   0   0
34052 -  0   0   0   0   0   0   0   0   0   0   0   0
34053 -  0   0   0   0   0   0   0   0   0   0   0   0
34054 -  0   0   0   0   0   0   0   0   0   0   0   0
34055 -  0   0   0   0   0   0   0   0   0   0   0   0
34056 -  0   0   0   0   0   0   0   0   0   0   0   0
34057 -  0   0   0   0   0   0   0   0   0   0   0   0
34058 -  0   0   0   0   0   0   0   0   0   0   0   0
34059 -  0   0   0   0   0   0   0   0   0   0   0   0
34060 -  0   0   0   0   0   0   0   0   0   0   0   0
34061 -  0   0   0   0   0   0   0   0   0   0   0   0
34062 -  0   0   0   0   0   0   0   0   0  10  10  10
34063 - 30  30  30  78  78  78  46  46  46  22  22  22
34064 -137  92   6 210 162  10 239 182  13 238 190  10
34065 -238 202  15 241 208  19 246 215  20 246 215  20
34066 -241 208  19 203 166  17 185 133  11 210 150  10
34067 -216 158  10 210 150  10 102  78  10   2   2   6
34068 -  6   6   6  54  54  54  14  14  14   2   2   6
34069 -  2   2   6  62  62  62  74  74  74  30  30  30
34070 - 10  10  10   0   0   0   0   0   0   0   0   0
34071 -  0   0   0   0   0   0   0   0   0   0   0   0
34072 -  0   0   0   0   0   0   0   0   0   0   0   0
34073 -  0   0   0   0   0   0   0   0   0   0   0   0
34074 -  0   0   0   0   0   0   0   0   0   0   0   0
34075 -  0   0   0   0   0   0   0   0   0   0   0   0
34076 -  0   0   0   0   0   0   0   0   0   0   0   0
34077 -  0   0   0   0   0   0   0   0   0   0   0   0
34078 -  0   0   0   0   0   0   0   0   0   0   0   0
34079 -  0   0   0   0   0   0   0   0   0   0   0   0
34080 -  0   0   0   0   0   0   0   0   0   0   0   0
34081 -  0   0   0   0   0   0   0   0   0   0   0   0
34082 -  0   0   0   0   0   0   0   0   0  10  10  10
34083 - 34  34  34  78  78  78  50  50  50   6   6   6
34084 - 94  70  30 139 102  15 190 146  13 226 184  13
34085 -232 200  30 232 195  16 215 174  15 190 146  13
34086 -168 122  10 192 133   9 210 150  10 213 154  11
34087 -202 150  34 182 157 106 101  98  89   2   2   6
34088 -  2   2   6  78  78  78 116 116 116  58  58  58
34089 -  2   2   6  22  22  22  90  90  90  46  46  46
34090 - 18  18  18   6   6   6   0   0   0   0   0   0
34091 -  0   0   0   0   0   0   0   0   0   0   0   0
34092 -  0   0   0   0   0   0   0   0   0   0   0   0
34093 -  0   0   0   0   0   0   0   0   0   0   0   0
34094 -  0   0   0   0   0   0   0   0   0   0   0   0
34095 -  0   0   0   0   0   0   0   0   0   0   0   0
34096 -  0   0   0   0   0   0   0   0   0   0   0   0
34097 -  0   0   0   0   0   0   0   0   0   0   0   0
34098 -  0   0   0   0   0   0   0   0   0   0   0   0
34099 -  0   0   0   0   0   0   0   0   0   0   0   0
34100 -  0   0   0   0   0   0   0   0   0   0   0   0
34101 -  0   0   0   0   0   0   0   0   0   0   0   0
34102 -  0   0   0   0   0   0   0   0   0  10  10  10
34103 - 38  38  38  86  86  86  50  50  50   6   6   6
34104 -128 128 128 174 154 114 156 107  11 168 122  10
34105 -198 155  10 184 144  12 197 138  11 200 144  11
34106 -206 145  10 206 145  10 197 138  11 188 164 115
34107 -195 195 195 198 198 198 174 174 174  14  14  14
34108 -  2   2   6  22  22  22 116 116 116 116 116 116
34109 - 22  22  22   2   2   6  74  74  74  70  70  70
34110 - 30  30  30  10  10  10   0   0   0   0   0   0
34111 -  0   0   0   0   0   0   0   0   0   0   0   0
34112 -  0   0   0   0   0   0   0   0   0   0   0   0
34113 -  0   0   0   0   0   0   0   0   0   0   0   0
34114 -  0   0   0   0   0   0   0   0   0   0   0   0
34115 -  0   0   0   0   0   0   0   0   0   0   0   0
34116 -  0   0   0   0   0   0   0   0   0   0   0   0
34117 -  0   0   0   0   0   0   0   0   0   0   0   0
34118 -  0   0   0   0   0   0   0   0   0   0   0   0
34119 -  0   0   0   0   0   0   0   0   0   0   0   0
34120 -  0   0   0   0   0   0   0   0   0   0   0   0
34121 -  0   0   0   0   0   0   0   0   0   0   0   0
34122 -  0   0   0   0   0   0   6   6   6  18  18  18
34123 - 50  50  50 101 101 101  26  26  26  10  10  10
34124 -138 138 138 190 190 190 174 154 114 156 107  11
34125 -197 138  11 200 144  11 197 138  11 192 133   9
34126 -180 123   7 190 142  34 190 178 144 187 187 187
34127 -202 202 202 221 221 221 214 214 214  66  66  66
34128 -  2   2   6   2   2   6  50  50  50  62  62  62
34129 -  6   6   6   2   2   6  10  10  10  90  90  90
34130 - 50  50  50  18  18  18   6   6   6   0   0   0
34131 -  0   0   0   0   0   0   0   0   0   0   0   0
34132 -  0   0   0   0   0   0   0   0   0   0   0   0
34133 -  0   0   0   0   0   0   0   0   0   0   0   0
34134 -  0   0   0   0   0   0   0   0   0   0   0   0
34135 -  0   0   0   0   0   0   0   0   0   0   0   0
34136 -  0   0   0   0   0   0   0   0   0   0   0   0
34137 -  0   0   0   0   0   0   0   0   0   0   0   0
34138 -  0   0   0   0   0   0   0   0   0   0   0   0
34139 -  0   0   0   0   0   0   0   0   0   0   0   0
34140 -  0   0   0   0   0   0   0   0   0   0   0   0
34141 -  0   0   0   0   0   0   0   0   0   0   0   0
34142 -  0   0   0   0   0   0  10  10  10  34  34  34
34143 - 74  74  74  74  74  74   2   2   6   6   6   6
34144 -144 144 144 198 198 198 190 190 190 178 166 146
34145 -154 121  60 156 107  11 156 107  11 168 124  44
34146 -174 154 114 187 187 187 190 190 190 210 210 210
34147 -246 246 246 253 253 253 253 253 253 182 182 182
34148 -  6   6   6   2   2   6   2   2   6   2   2   6
34149 -  2   2   6   2   2   6   2   2   6  62  62  62
34150 - 74  74  74  34  34  34  14  14  14   0   0   0
34151 -  0   0   0   0   0   0   0   0   0   0   0   0
34152 -  0   0   0   0   0   0   0   0   0   0   0   0
34153 -  0   0   0   0   0   0   0   0   0   0   0   0
34154 -  0   0   0   0   0   0   0   0   0   0   0   0
34155 -  0   0   0   0   0   0   0   0   0   0   0   0
34156 -  0   0   0   0   0   0   0   0   0   0   0   0
34157 -  0   0   0   0   0   0   0   0   0   0   0   0
34158 -  0   0   0   0   0   0   0   0   0   0   0   0
34159 -  0   0   0   0   0   0   0   0   0   0   0   0
34160 -  0   0   0   0   0   0   0   0   0   0   0   0
34161 -  0   0   0   0   0   0   0   0   0   0   0   0
34162 -  0   0   0  10  10  10  22  22  22  54  54  54
34163 - 94  94  94  18  18  18   2   2   6  46  46  46
34164 -234 234 234 221 221 221 190 190 190 190 190 190
34165 -190 190 190 187 187 187 187 187 187 190 190 190
34166 -190 190 190 195 195 195 214 214 214 242 242 242
34167 -253 253 253 253 253 253 253 253 253 253 253 253
34168 - 82  82  82   2   2   6   2   2   6   2   2   6
34169 -  2   2   6   2   2   6   2   2   6  14  14  14
34170 - 86  86  86  54  54  54  22  22  22   6   6   6
34171 -  0   0   0   0   0   0   0   0   0   0   0   0
34172 -  0   0   0   0   0   0   0   0   0   0   0   0
34173 -  0   0   0   0   0   0   0   0   0   0   0   0
34174 -  0   0   0   0   0   0   0   0   0   0   0   0
34175 -  0   0   0   0   0   0   0   0   0   0   0   0
34176 -  0   0   0   0   0   0   0   0   0   0   0   0
34177 -  0   0   0   0   0   0   0   0   0   0   0   0
34178 -  0   0   0   0   0   0   0   0   0   0   0   0
34179 -  0   0   0   0   0   0   0   0   0   0   0   0
34180 -  0   0   0   0   0   0   0   0   0   0   0   0
34181 -  0   0   0   0   0   0   0   0   0   0   0   0
34182 -  6   6   6  18  18  18  46  46  46  90  90  90
34183 - 46  46  46  18  18  18   6   6   6 182 182 182
34184 -253 253 253 246 246 246 206 206 206 190 190 190
34185 -190 190 190 190 190 190 190 190 190 190 190 190
34186 -206 206 206 231 231 231 250 250 250 253 253 253
34187 -253 253 253 253 253 253 253 253 253 253 253 253
34188 -202 202 202  14  14  14   2   2   6   2   2   6
34189 -  2   2   6   2   2   6   2   2   6   2   2   6
34190 - 42  42  42  86  86  86  42  42  42  18  18  18
34191 -  6   6   6   0   0   0   0   0   0   0   0   0
34192 -  0   0   0   0   0   0   0   0   0   0   0   0
34193 -  0   0   0   0   0   0   0   0   0   0   0   0
34194 -  0   0   0   0   0   0   0   0   0   0   0   0
34195 -  0   0   0   0   0   0   0   0   0   0   0   0
34196 -  0   0   0   0   0   0   0   0   0   0   0   0
34197 -  0   0   0   0   0   0   0   0   0   0   0   0
34198 -  0   0   0   0   0   0   0   0   0   0   0   0
34199 -  0   0   0   0   0   0   0   0   0   0   0   0
34200 -  0   0   0   0   0   0   0   0   0   0   0   0
34201 -  0   0   0   0   0   0   0   0   0   6   6   6
34202 - 14  14  14  38  38  38  74  74  74  66  66  66
34203 -  2   2   6   6   6   6  90  90  90 250 250 250
34204 -253 253 253 253 253 253 238 238 238 198 198 198
34205 -190 190 190 190 190 190 195 195 195 221 221 221
34206 -246 246 246 253 253 253 253 253 253 253 253 253
34207 -253 253 253 253 253 253 253 253 253 253 253 253
34208 -253 253 253  82  82  82   2   2   6   2   2   6
34209 -  2   2   6   2   2   6   2   2   6   2   2   6
34210 -  2   2   6  78  78  78  70  70  70  34  34  34
34211 - 14  14  14   6   6   6   0   0   0   0   0   0
34212 -  0   0   0   0   0   0   0   0   0   0   0   0
34213 -  0   0   0   0   0   0   0   0   0   0   0   0
34214 -  0   0   0   0   0   0   0   0   0   0   0   0
34215 -  0   0   0   0   0   0   0   0   0   0   0   0
34216 -  0   0   0   0   0   0   0   0   0   0   0   0
34217 -  0   0   0   0   0   0   0   0   0   0   0   0
34218 -  0   0   0   0   0   0   0   0   0   0   0   0
34219 -  0   0   0   0   0   0   0   0   0   0   0   0
34220 -  0   0   0   0   0   0   0   0   0   0   0   0
34221 -  0   0   0   0   0   0   0   0   0  14  14  14
34222 - 34  34  34  66  66  66  78  78  78   6   6   6
34223 -  2   2   6  18  18  18 218 218 218 253 253 253
34224 -253 253 253 253 253 253 253 253 253 246 246 246
34225 -226 226 226 231 231 231 246 246 246 253 253 253
34226 -253 253 253 253 253 253 253 253 253 253 253 253
34227 -253 253 253 253 253 253 253 253 253 253 253 253
34228 -253 253 253 178 178 178   2   2   6   2   2   6
34229 -  2   2   6   2   2   6   2   2   6   2   2   6
34230 -  2   2   6  18  18  18  90  90  90  62  62  62
34231 - 30  30  30  10  10  10   0   0   0   0   0   0
34232 -  0   0   0   0   0   0   0   0   0   0   0   0
34233 -  0   0   0   0   0   0   0   0   0   0   0   0
34234 -  0   0   0   0   0   0   0   0   0   0   0   0
34235 -  0   0   0   0   0   0   0   0   0   0   0   0
34236 -  0   0   0   0   0   0   0   0   0   0   0   0
34237 -  0   0   0   0   0   0   0   0   0   0   0   0
34238 -  0   0   0   0   0   0   0   0   0   0   0   0
34239 -  0   0   0   0   0   0   0   0   0   0   0   0
34240 -  0   0   0   0   0   0   0   0   0   0   0   0
34241 -  0   0   0   0   0   0  10  10  10  26  26  26
34242 - 58  58  58  90  90  90  18  18  18   2   2   6
34243 -  2   2   6 110 110 110 253 253 253 253 253 253
34244 -253 253 253 253 253 253 253 253 253 253 253 253
34245 -250 250 250 253 253 253 253 253 253 253 253 253
34246 -253 253 253 253 253 253 253 253 253 253 253 253
34247 -253 253 253 253 253 253 253 253 253 253 253 253
34248 -253 253 253 231 231 231  18  18  18   2   2   6
34249 -  2   2   6   2   2   6   2   2   6   2   2   6
34250 -  2   2   6   2   2   6  18  18  18  94  94  94
34251 - 54  54  54  26  26  26  10  10  10   0   0   0
34252 -  0   0   0   0   0   0   0   0   0   0   0   0
34253 -  0   0   0   0   0   0   0   0   0   0   0   0
34254 -  0   0   0   0   0   0   0   0   0   0   0   0
34255 -  0   0   0   0   0   0   0   0   0   0   0   0
34256 -  0   0   0   0   0   0   0   0   0   0   0   0
34257 -  0   0   0   0   0   0   0   0   0   0   0   0
34258 -  0   0   0   0   0   0   0   0   0   0   0   0
34259 -  0   0   0   0   0   0   0   0   0   0   0   0
34260 -  0   0   0   0   0   0   0   0   0   0   0   0
34261 -  0   0   0   6   6   6  22  22  22  50  50  50
34262 - 90  90  90  26  26  26   2   2   6   2   2   6
34263 - 14  14  14 195 195 195 250 250 250 253 253 253
34264 -253 253 253 253 253 253 253 253 253 253 253 253
34265 -253 253 253 253 253 253 253 253 253 253 253 253
34266 -253 253 253 253 253 253 253 253 253 253 253 253
34267 -253 253 253 253 253 253 253 253 253 253 253 253
34268 -250 250 250 242 242 242  54  54  54   2   2   6
34269 -  2   2   6   2   2   6   2   2   6   2   2   6
34270 -  2   2   6   2   2   6   2   2   6  38  38  38
34271 - 86  86  86  50  50  50  22  22  22   6   6   6
34272 -  0   0   0   0   0   0   0   0   0   0   0   0
34273 -  0   0   0   0   0   0   0   0   0   0   0   0
34274 -  0   0   0   0   0   0   0   0   0   0   0   0
34275 -  0   0   0   0   0   0   0   0   0   0   0   0
34276 -  0   0   0   0   0   0   0   0   0   0   0   0
34277 -  0   0   0   0   0   0   0   0   0   0   0   0
34278 -  0   0   0   0   0   0   0   0   0   0   0   0
34279 -  0   0   0   0   0   0   0   0   0   0   0   0
34280 -  0   0   0   0   0   0   0   0   0   0   0   0
34281 -  6   6   6  14  14  14  38  38  38  82  82  82
34282 - 34  34  34   2   2   6   2   2   6   2   2   6
34283 - 42  42  42 195 195 195 246 246 246 253 253 253
34284 -253 253 253 253 253 253 253 253 253 250 250 250
34285 -242 242 242 242 242 242 250 250 250 253 253 253
34286 -253 253 253 253 253 253 253 253 253 253 253 253
34287 -253 253 253 250 250 250 246 246 246 238 238 238
34288 -226 226 226 231 231 231 101 101 101   6   6   6
34289 -  2   2   6   2   2   6   2   2   6   2   2   6
34290 -  2   2   6   2   2   6   2   2   6   2   2   6
34291 - 38  38  38  82  82  82  42  42  42  14  14  14
34292 -  6   6   6   0   0   0   0   0   0   0   0   0
34293 -  0   0   0   0   0   0   0   0   0   0   0   0
34294 -  0   0   0   0   0   0   0   0   0   0   0   0
34295 -  0   0   0   0   0   0   0   0   0   0   0   0
34296 -  0   0   0   0   0   0   0   0   0   0   0   0
34297 -  0   0   0   0   0   0   0   0   0   0   0   0
34298 -  0   0   0   0   0   0   0   0   0   0   0   0
34299 -  0   0   0   0   0   0   0   0   0   0   0   0
34300 -  0   0   0   0   0   0   0   0   0   0   0   0
34301 - 10  10  10  26  26  26  62  62  62  66  66  66
34302 -  2   2   6   2   2   6   2   2   6   6   6   6
34303 - 70  70  70 170 170 170 206 206 206 234 234 234
34304 -246 246 246 250 250 250 250 250 250 238 238 238
34305 -226 226 226 231 231 231 238 238 238 250 250 250
34306 -250 250 250 250 250 250 246 246 246 231 231 231
34307 -214 214 214 206 206 206 202 202 202 202 202 202
34308 -198 198 198 202 202 202 182 182 182  18  18  18
34309 -  2   2   6   2   2   6   2   2   6   2   2   6
34310 -  2   2   6   2   2   6   2   2   6   2   2   6
34311 -  2   2   6  62  62  62  66  66  66  30  30  30
34312 - 10  10  10   0   0   0   0   0   0   0   0   0
34313 -  0   0   0   0   0   0   0   0   0   0   0   0
34314 -  0   0   0   0   0   0   0   0   0   0   0   0
34315 -  0   0   0   0   0   0   0   0   0   0   0   0
34316 -  0   0   0   0   0   0   0   0   0   0   0   0
34317 -  0   0   0   0   0   0   0   0   0   0   0   0
34318 -  0   0   0   0   0   0   0   0   0   0   0   0
34319 -  0   0   0   0   0   0   0   0   0   0   0   0
34320 -  0   0   0   0   0   0   0   0   0   0   0   0
34321 - 14  14  14  42  42  42  82  82  82  18  18  18
34322 -  2   2   6   2   2   6   2   2   6  10  10  10
34323 - 94  94  94 182 182 182 218 218 218 242 242 242
34324 -250 250 250 253 253 253 253 253 253 250 250 250
34325 -234 234 234 253 253 253 253 253 253 253 253 253
34326 -253 253 253 253 253 253 253 253 253 246 246 246
34327 -238 238 238 226 226 226 210 210 210 202 202 202
34328 -195 195 195 195 195 195 210 210 210 158 158 158
34329 -  6   6   6  14  14  14  50  50  50  14  14  14
34330 -  2   2   6   2   2   6   2   2   6   2   2   6
34331 -  2   2   6   6   6   6  86  86  86  46  46  46
34332 - 18  18  18   6   6   6   0   0   0   0   0   0
34333 -  0   0   0   0   0   0   0   0   0   0   0   0
34334 -  0   0   0   0   0   0   0   0   0   0   0   0
34335 -  0   0   0   0   0   0   0   0   0   0   0   0
34336 -  0   0   0   0   0   0   0   0   0   0   0   0
34337 -  0   0   0   0   0   0   0   0   0   0   0   0
34338 -  0   0   0   0   0   0   0   0   0   0   0   0
34339 -  0   0   0   0   0   0   0   0   0   0   0   0
34340 -  0   0   0   0   0   0   0   0   0   6   6   6
34341 - 22  22  22  54  54  54  70  70  70   2   2   6
34342 -  2   2   6  10  10  10   2   2   6  22  22  22
34343 -166 166 166 231 231 231 250 250 250 253 253 253
34344 -253 253 253 253 253 253 253 253 253 250 250 250
34345 -242 242 242 253 253 253 253 253 253 253 253 253
34346 -253 253 253 253 253 253 253 253 253 253 253 253
34347 -253 253 253 253 253 253 253 253 253 246 246 246
34348 -231 231 231 206 206 206 198 198 198 226 226 226
34349 - 94  94  94   2   2   6   6   6   6  38  38  38
34350 - 30  30  30   2   2   6   2   2   6   2   2   6
34351 -  2   2   6   2   2   6  62  62  62  66  66  66
34352 - 26  26  26  10  10  10   0   0   0   0   0   0
34353 -  0   0   0   0   0   0   0   0   0   0   0   0
34354 -  0   0   0   0   0   0   0   0   0   0   0   0
34355 -  0   0   0   0   0   0   0   0   0   0   0   0
34356 -  0   0   0   0   0   0   0   0   0   0   0   0
34357 -  0   0   0   0   0   0   0   0   0   0   0   0
34358 -  0   0   0   0   0   0   0   0   0   0   0   0
34359 -  0   0   0   0   0   0   0   0   0   0   0   0
34360 -  0   0   0   0   0   0   0   0   0  10  10  10
34361 - 30  30  30  74  74  74  50  50  50   2   2   6
34362 - 26  26  26  26  26  26   2   2   6 106 106 106
34363 -238 238 238 253 253 253 253 253 253 253 253 253
34364 -253 253 253 253 253 253 253 253 253 253 253 253
34365 -253 253 253 253 253 253 253 253 253 253 253 253
34366 -253 253 253 253 253 253 253 253 253 253 253 253
34367 -253 253 253 253 253 253 253 253 253 253 253 253
34368 -253 253 253 246 246 246 218 218 218 202 202 202
34369 -210 210 210  14  14  14   2   2   6   2   2   6
34370 - 30  30  30  22  22  22   2   2   6   2   2   6
34371 -  2   2   6   2   2   6  18  18  18  86  86  86
34372 - 42  42  42  14  14  14   0   0   0   0   0   0
34373 -  0   0   0   0   0   0   0   0   0   0   0   0
34374 -  0   0   0   0   0   0   0   0   0   0   0   0
34375 -  0   0   0   0   0   0   0   0   0   0   0   0
34376 -  0   0   0   0   0   0   0   0   0   0   0   0
34377 -  0   0   0   0   0   0   0   0   0   0   0   0
34378 -  0   0   0   0   0   0   0   0   0   0   0   0
34379 -  0   0   0   0   0   0   0   0   0   0   0   0
34380 -  0   0   0   0   0   0   0   0   0  14  14  14
34381 - 42  42  42  90  90  90  22  22  22   2   2   6
34382 - 42  42  42   2   2   6  18  18  18 218 218 218
34383 -253 253 253 253 253 253 253 253 253 253 253 253
34384 -253 253 253 253 253 253 253 253 253 253 253 253
34385 -253 253 253 253 253 253 253 253 253 253 253 253
34386 -253 253 253 253 253 253 253 253 253 253 253 253
34387 -253 253 253 253 253 253 253 253 253 253 253 253
34388 -253 253 253 253 253 253 250 250 250 221 221 221
34389 -218 218 218 101 101 101   2   2   6  14  14  14
34390 - 18  18  18  38  38  38  10  10  10   2   2   6
34391 -  2   2   6   2   2   6   2   2   6  78  78  78
34392 - 58  58  58  22  22  22   6   6   6   0   0   0
34393 -  0   0   0   0   0   0   0   0   0   0   0   0
34394 -  0   0   0   0   0   0   0   0   0   0   0   0
34395 -  0   0   0   0   0   0   0   0   0   0   0   0
34396 -  0   0   0   0   0   0   0   0   0   0   0   0
34397 -  0   0   0   0   0   0   0   0   0   0   0   0
34398 -  0   0   0   0   0   0   0   0   0   0   0   0
34399 -  0   0   0   0   0   0   0   0   0   0   0   0
34400 -  0   0   0   0   0   0   6   6   6  18  18  18
34401 - 54  54  54  82  82  82   2   2   6  26  26  26
34402 - 22  22  22   2   2   6 123 123 123 253 253 253
34403 -253 253 253 253 253 253 253 253 253 253 253 253
34404 -253 253 253 253 253 253 253 253 253 253 253 253
34405 -253 253 253 253 253 253 253 253 253 253 253 253
34406 -253 253 253 253 253 253 253 253 253 253 253 253
34407 -253 253 253 253 253 253 253 253 253 253 253 253
34408 -253 253 253 253 253 253 253 253 253 250 250 250
34409 -238 238 238 198 198 198   6   6   6  38  38  38
34410 - 58  58  58  26  26  26  38  38  38   2   2   6
34411 -  2   2   6   2   2   6   2   2   6  46  46  46
34412 - 78  78  78  30  30  30  10  10  10   0   0   0
34413 -  0   0   0   0   0   0   0   0   0   0   0   0
34414 -  0   0   0   0   0   0   0   0   0   0   0   0
34415 -  0   0   0   0   0   0   0   0   0   0   0   0
34416 -  0   0   0   0   0   0   0   0   0   0   0   0
34417 -  0   0   0   0   0   0   0   0   0   0   0   0
34418 -  0   0   0   0   0   0   0   0   0   0   0   0
34419 -  0   0   0   0   0   0   0   0   0   0   0   0
34420 -  0   0   0   0   0   0  10  10  10  30  30  30
34421 - 74  74  74  58  58  58   2   2   6  42  42  42
34422 -  2   2   6  22  22  22 231 231 231 253 253 253
34423 -253 253 253 253 253 253 253 253 253 253 253 253
34424 -253 253 253 253 253 253 253 253 253 250 250 250
34425 -253 253 253 253 253 253 253 253 253 253 253 253
34426 -253 253 253 253 253 253 253 253 253 253 253 253
34427 -253 253 253 253 253 253 253 253 253 253 253 253
34428 -253 253 253 253 253 253 253 253 253 253 253 253
34429 -253 253 253 246 246 246  46  46  46  38  38  38
34430 - 42  42  42  14  14  14  38  38  38  14  14  14
34431 -  2   2   6   2   2   6   2   2   6   6   6   6
34432 - 86  86  86  46  46  46  14  14  14   0   0   0
34433 -  0   0   0   0   0   0   0   0   0   0   0   0
34434 -  0   0   0   0   0   0   0   0   0   0   0   0
34435 -  0   0   0   0   0   0   0   0   0   0   0   0
34436 -  0   0   0   0   0   0   0   0   0   0   0   0
34437 -  0   0   0   0   0   0   0   0   0   0   0   0
34438 -  0   0   0   0   0   0   0   0   0   0   0   0
34439 -  0   0   0   0   0   0   0   0   0   0   0   0
34440 -  0   0   0   6   6   6  14  14  14  42  42  42
34441 - 90  90  90  18  18  18  18  18  18  26  26  26
34442 -  2   2   6 116 116 116 253 253 253 253 253 253
34443 -253 253 253 253 253 253 253 253 253 253 253 253
34444 -253 253 253 253 253 253 250 250 250 238 238 238
34445 -253 253 253 253 253 253 253 253 253 253 253 253
34446 -253 253 253 253 253 253 253 253 253 253 253 253
34447 -253 253 253 253 253 253 253 253 253 253 253 253
34448 -253 253 253 253 253 253 253 253 253 253 253 253
34449 -253 253 253 253 253 253  94  94  94   6   6   6
34450 -  2   2   6   2   2   6  10  10  10  34  34  34
34451 -  2   2   6   2   2   6   2   2   6   2   2   6
34452 - 74  74  74  58  58  58  22  22  22   6   6   6
34453 -  0   0   0   0   0   0   0   0   0   0   0   0
34454 -  0   0   0   0   0   0   0   0   0   0   0   0
34455 -  0   0   0   0   0   0   0   0   0   0   0   0
34456 -  0   0   0   0   0   0   0   0   0   0   0   0
34457 -  0   0   0   0   0   0   0   0   0   0   0   0
34458 -  0   0   0   0   0   0   0   0   0   0   0   0
34459 -  0   0   0   0   0   0   0   0   0   0   0   0
34460 -  0   0   0  10  10  10  26  26  26  66  66  66
34461 - 82  82  82   2   2   6  38  38  38   6   6   6
34462 - 14  14  14 210 210 210 253 253 253 253 253 253
34463 -253 253 253 253 253 253 253 253 253 253 253 253
34464 -253 253 253 253 253 253 246 246 246 242 242 242
34465 -253 253 253 253 253 253 253 253 253 253 253 253
34466 -253 253 253 253 253 253 253 253 253 253 253 253
34467 -253 253 253 253 253 253 253 253 253 253 253 253
34468 -253 253 253 253 253 253 253 253 253 253 253 253
34469 -253 253 253 253 253 253 144 144 144   2   2   6
34470 -  2   2   6   2   2   6   2   2   6  46  46  46
34471 -  2   2   6   2   2   6   2   2   6   2   2   6
34472 - 42  42  42  74  74  74  30  30  30  10  10  10
34473 -  0   0   0   0   0   0   0   0   0   0   0   0
34474 -  0   0   0   0   0   0   0   0   0   0   0   0
34475 -  0   0   0   0   0   0   0   0   0   0   0   0
34476 -  0   0   0   0   0   0   0   0   0   0   0   0
34477 -  0   0   0   0   0   0   0   0   0   0   0   0
34478 -  0   0   0   0   0   0   0   0   0   0   0   0
34479 -  0   0   0   0   0   0   0   0   0   0   0   0
34480 -  6   6   6  14  14  14  42  42  42  90  90  90
34481 - 26  26  26   6   6   6  42  42  42   2   2   6
34482 - 74  74  74 250 250 250 253 253 253 253 253 253
34483 -253 253 253 253 253 253 253 253 253 253 253 253
34484 -253 253 253 253 253 253 242 242 242 242 242 242
34485 -253 253 253 253 253 253 253 253 253 253 253 253
34486 -253 253 253 253 253 253 253 253 253 253 253 253
34487 -253 253 253 253 253 253 253 253 253 253 253 253
34488 -253 253 253 253 253 253 253 253 253 253 253 253
34489 -253 253 253 253 253 253 182 182 182   2   2   6
34490 -  2   2   6   2   2   6   2   2   6  46  46  46
34491 -  2   2   6   2   2   6   2   2   6   2   2   6
34492 - 10  10  10  86  86  86  38  38  38  10  10  10
34493 -  0   0   0   0   0   0   0   0   0   0   0   0
34494 -  0   0   0   0   0   0   0   0   0   0   0   0
34495 -  0   0   0   0   0   0   0   0   0   0   0   0
34496 -  0   0   0   0   0   0   0   0   0   0   0   0
34497 -  0   0   0   0   0   0   0   0   0   0   0   0
34498 -  0   0   0   0   0   0   0   0   0   0   0   0
34499 -  0   0   0   0   0   0   0   0   0   0   0   0
34500 - 10  10  10  26  26  26  66  66  66  82  82  82
34501 -  2   2   6  22  22  22  18  18  18   2   2   6
34502 -149 149 149 253 253 253 253 253 253 253 253 253
34503 -253 253 253 253 253 253 253 253 253 253 253 253
34504 -253 253 253 253 253 253 234 234 234 242 242 242
34505 -253 253 253 253 253 253 253 253 253 253 253 253
34506 -253 253 253 253 253 253 253 253 253 253 253 253
34507 -253 253 253 253 253 253 253 253 253 253 253 253
34508 -253 253 253 253 253 253 253 253 253 253 253 253
34509 -253 253 253 253 253 253 206 206 206   2   2   6
34510 -  2   2   6   2   2   6   2   2   6  38  38  38
34511 -  2   2   6   2   2   6   2   2   6   2   2   6
34512 -  6   6   6  86  86  86  46  46  46  14  14  14
34513 -  0   0   0   0   0   0   0   0   0   0   0   0
34514 -  0   0   0   0   0   0   0   0   0   0   0   0
34515 -  0   0   0   0   0   0   0   0   0   0   0   0
34516 -  0   0   0   0   0   0   0   0   0   0   0   0
34517 -  0   0   0   0   0   0   0   0   0   0   0   0
34518 -  0   0   0   0   0   0   0   0   0   0   0   0
34519 -  0   0   0   0   0   0   0   0   0   6   6   6
34520 - 18  18  18  46  46  46  86  86  86  18  18  18
34521 -  2   2   6  34  34  34  10  10  10   6   6   6
34522 -210 210 210 253 253 253 253 253 253 253 253 253
34523 -253 253 253 253 253 253 253 253 253 253 253 253
34524 -253 253 253 253 253 253 234 234 234 242 242 242
34525 -253 253 253 253 253 253 253 253 253 253 253 253
34526 -253 253 253 253 253 253 253 253 253 253 253 253
34527 -253 253 253 253 253 253 253 253 253 253 253 253
34528 -253 253 253 253 253 253 253 253 253 253 253 253
34529 -253 253 253 253 253 253 221 221 221   6   6   6
34530 -  2   2   6   2   2   6   6   6   6  30  30  30
34531 -  2   2   6   2   2   6   2   2   6   2   2   6
34532 -  2   2   6  82  82  82  54  54  54  18  18  18
34533 -  6   6   6   0   0   0   0   0   0   0   0   0
34534 -  0   0   0   0   0   0   0   0   0   0   0   0
34535 -  0   0   0   0   0   0   0   0   0   0   0   0
34536 -  0   0   0   0   0   0   0   0   0   0   0   0
34537 -  0   0   0   0   0   0   0   0   0   0   0   0
34538 -  0   0   0   0   0   0   0   0   0   0   0   0
34539 -  0   0   0   0   0   0   0   0   0  10  10  10
34540 - 26  26  26  66  66  66  62  62  62   2   2   6
34541 -  2   2   6  38  38  38  10  10  10  26  26  26
34542 -238 238 238 253 253 253 253 253 253 253 253 253
34543 -253 253 253 253 253 253 253 253 253 253 253 253
34544 -253 253 253 253 253 253 231 231 231 238 238 238
34545 -253 253 253 253 253 253 253 253 253 253 253 253
34546 -253 253 253 253 253 253 253 253 253 253 253 253
34547 -253 253 253 253 253 253 253 253 253 253 253 253
34548 -253 253 253 253 253 253 253 253 253 253 253 253
34549 -253 253 253 253 253 253 231 231 231   6   6   6
34550 -  2   2   6   2   2   6  10  10  10  30  30  30
34551 -  2   2   6   2   2   6   2   2   6   2   2   6
34552 -  2   2   6  66  66  66  58  58  58  22  22  22
34553 -  6   6   6   0   0   0   0   0   0   0   0   0
34554 -  0   0   0   0   0   0   0   0   0   0   0   0
34555 -  0   0   0   0   0   0   0   0   0   0   0   0
34556 -  0   0   0   0   0   0   0   0   0   0   0   0
34557 -  0   0   0   0   0   0   0   0   0   0   0   0
34558 -  0   0   0   0   0   0   0   0   0   0   0   0
34559 -  0   0   0   0   0   0   0   0   0  10  10  10
34560 - 38  38  38  78  78  78   6   6   6   2   2   6
34561 -  2   2   6  46  46  46  14  14  14  42  42  42
34562 -246 246 246 253 253 253 253 253 253 253 253 253
34563 -253 253 253 253 253 253 253 253 253 253 253 253
34564 -253 253 253 253 253 253 231 231 231 242 242 242
34565 -253 253 253 253 253 253 253 253 253 253 253 253
34566 -253 253 253 253 253 253 253 253 253 253 253 253
34567 -253 253 253 253 253 253 253 253 253 253 253 253
34568 -253 253 253 253 253 253 253 253 253 253 253 253
34569 -253 253 253 253 253 253 234 234 234  10  10  10
34570 -  2   2   6   2   2   6  22  22  22  14  14  14
34571 -  2   2   6   2   2   6   2   2   6   2   2   6
34572 -  2   2   6  66  66  66  62  62  62  22  22  22
34573 -  6   6   6   0   0   0   0   0   0   0   0   0
34574 -  0   0   0   0   0   0   0   0   0   0   0   0
34575 -  0   0   0   0   0   0   0   0   0   0   0   0
34576 -  0   0   0   0   0   0   0   0   0   0   0   0
34577 -  0   0   0   0   0   0   0   0   0   0   0   0
34578 -  0   0   0   0   0   0   0   0   0   0   0   0
34579 -  0   0   0   0   0   0   6   6   6  18  18  18
34580 - 50  50  50  74  74  74   2   2   6   2   2   6
34581 - 14  14  14  70  70  70  34  34  34  62  62  62
34582 -250 250 250 253 253 253 253 253 253 253 253 253
34583 -253 253 253 253 253 253 253 253 253 253 253 253
34584 -253 253 253 253 253 253 231 231 231 246 246 246
34585 -253 253 253 253 253 253 253 253 253 253 253 253
34586 -253 253 253 253 253 253 253 253 253 253 253 253
34587 -253 253 253 253 253 253 253 253 253 253 253 253
34588 -253 253 253 253 253 253 253 253 253 253 253 253
34589 -253 253 253 253 253 253 234 234 234  14  14  14
34590 -  2   2   6   2   2   6  30  30  30   2   2   6
34591 -  2   2   6   2   2   6   2   2   6   2   2   6
34592 -  2   2   6  66  66  66  62  62  62  22  22  22
34593 -  6   6   6   0   0   0   0   0   0   0   0   0
34594 -  0   0   0   0   0   0   0   0   0   0   0   0
34595 -  0   0   0   0   0   0   0   0   0   0   0   0
34596 -  0   0   0   0   0   0   0   0   0   0   0   0
34597 -  0   0   0   0   0   0   0   0   0   0   0   0
34598 -  0   0   0   0   0   0   0   0   0   0   0   0
34599 -  0   0   0   0   0   0   6   6   6  18  18  18
34600 - 54  54  54  62  62  62   2   2   6   2   2   6
34601 -  2   2   6  30  30  30  46  46  46  70  70  70
34602 -250 250 250 253 253 253 253 253 253 253 253 253
34603 -253 253 253 253 253 253 253 253 253 253 253 253
34604 -253 253 253 253 253 253 231 231 231 246 246 246
34605 -253 253 253 253 253 253 253 253 253 253 253 253
34606 -253 253 253 253 253 253 253 253 253 253 253 253
34607 -253 253 253 253 253 253 253 253 253 253 253 253
34608 -253 253 253 253 253 253 253 253 253 253 253 253
34609 -253 253 253 253 253 253 226 226 226  10  10  10
34610 -  2   2   6   6   6   6  30  30  30   2   2   6
34611 -  2   2   6   2   2   6   2   2   6   2   2   6
34612 -  2   2   6  66  66  66  58  58  58  22  22  22
34613 -  6   6   6   0   0   0   0   0   0   0   0   0
34614 -  0   0   0   0   0   0   0   0   0   0   0   0
34615 -  0   0   0   0   0   0   0   0   0   0   0   0
34616 -  0   0   0   0   0   0   0   0   0   0   0   0
34617 -  0   0   0   0   0   0   0   0   0   0   0   0
34618 -  0   0   0   0   0   0   0   0   0   0   0   0
34619 -  0   0   0   0   0   0   6   6   6  22  22  22
34620 - 58  58  58  62  62  62   2   2   6   2   2   6
34621 -  2   2   6   2   2   6  30  30  30  78  78  78
34622 -250 250 250 253 253 253 253 253 253 253 253 253
34623 -253 253 253 253 253 253 253 253 253 253 253 253
34624 -253 253 253 253 253 253 231 231 231 246 246 246
34625 -253 253 253 253 253 253 253 253 253 253 253 253
34626 -253 253 253 253 253 253 253 253 253 253 253 253
34627 -253 253 253 253 253 253 253 253 253 253 253 253
34628 -253 253 253 253 253 253 253 253 253 253 253 253
34629 -253 253 253 253 253 253 206 206 206   2   2   6
34630 - 22  22  22  34  34  34  18  14   6  22  22  22
34631 - 26  26  26  18  18  18   6   6   6   2   2   6
34632 -  2   2   6  82  82  82  54  54  54  18  18  18
34633 -  6   6   6   0   0   0   0   0   0   0   0   0
34634 -  0   0   0   0   0   0   0   0   0   0   0   0
34635 -  0   0   0   0   0   0   0   0   0   0   0   0
34636 -  0   0   0   0   0   0   0   0   0   0   0   0
34637 -  0   0   0   0   0   0   0   0   0   0   0   0
34638 -  0   0   0   0   0   0   0   0   0   0   0   0
34639 -  0   0   0   0   0   0   6   6   6  26  26  26
34640 - 62  62  62 106 106 106  74  54  14 185 133  11
34641 -210 162  10 121  92   8   6   6   6  62  62  62
34642 -238 238 238 253 253 253 253 253 253 253 253 253
34643 -253 253 253 253 253 253 253 253 253 253 253 253
34644 -253 253 253 253 253 253 231 231 231 246 246 246
34645 -253 253 253 253 253 253 253 253 253 253 253 253
34646 -253 253 253 253 253 253 253 253 253 253 253 253
34647 -253 253 253 253 253 253 253 253 253 253 253 253
34648 -253 253 253 253 253 253 253 253 253 253 253 253
34649 -253 253 253 253 253 253 158 158 158  18  18  18
34650 - 14  14  14   2   2   6   2   2   6   2   2   6
34651 -  6   6   6  18  18  18  66  66  66  38  38  38
34652 -  6   6   6  94  94  94  50  50  50  18  18  18
34653 -  6   6   6   0   0   0   0   0   0   0   0   0
34654 -  0   0   0   0   0   0   0   0   0   0   0   0
34655 -  0   0   0   0   0   0   0   0   0   0   0   0
34656 -  0   0   0   0   0   0   0   0   0   0   0   0
34657 -  0   0   0   0   0   0   0   0   0   0   0   0
34658 -  0   0   0   0   0   0   0   0   0   6   6   6
34659 - 10  10  10  10  10  10  18  18  18  38  38  38
34660 - 78  78  78 142 134 106 216 158  10 242 186  14
34661 -246 190  14 246 190  14 156 118  10  10  10  10
34662 - 90  90  90 238 238 238 253 253 253 253 253 253
34663 -253 253 253 253 253 253 253 253 253 253 253 253
34664 -253 253 253 253 253 253 231 231 231 250 250 250
34665 -253 253 253 253 253 253 253 253 253 253 253 253
34666 -253 253 253 253 253 253 253 253 253 253 253 253
34667 -253 253 253 253 253 253 253 253 253 253 253 253
34668 -253 253 253 253 253 253 253 253 253 246 230 190
34669 -238 204  91 238 204  91 181 142  44  37  26   9
34670 -  2   2   6   2   2   6   2   2   6   2   2   6
34671 -  2   2   6   2   2   6  38  38  38  46  46  46
34672 - 26  26  26 106 106 106  54  54  54  18  18  18
34673 -  6   6   6   0   0   0   0   0   0   0   0   0
34674 -  0   0   0   0   0   0   0   0   0   0   0   0
34675 -  0   0   0   0   0   0   0   0   0   0   0   0
34676 -  0   0   0   0   0   0   0   0   0   0   0   0
34677 -  0   0   0   0   0   0   0   0   0   0   0   0
34678 -  0   0   0   6   6   6  14  14  14  22  22  22
34679 - 30  30  30  38  38  38  50  50  50  70  70  70
34680 -106 106 106 190 142  34 226 170  11 242 186  14
34681 -246 190  14 246 190  14 246 190  14 154 114  10
34682 -  6   6   6  74  74  74 226 226 226 253 253 253
34683 -253 253 253 253 253 253 253 253 253 253 253 253
34684 -253 253 253 253 253 253 231 231 231 250 250 250
34685 -253 253 253 253 253 253 253 253 253 253 253 253
34686 -253 253 253 253 253 253 253 253 253 253 253 253
34687 -253 253 253 253 253 253 253 253 253 253 253 253
34688 -253 253 253 253 253 253 253 253 253 228 184  62
34689 -241 196  14 241 208  19 232 195  16  38  30  10
34690 -  2   2   6   2   2   6   2   2   6   2   2   6
34691 -  2   2   6   6   6   6  30  30  30  26  26  26
34692 -203 166  17 154 142  90  66  66  66  26  26  26
34693 -  6   6   6   0   0   0   0   0   0   0   0   0
34694 -  0   0   0   0   0   0   0   0   0   0   0   0
34695 -  0   0   0   0   0   0   0   0   0   0   0   0
34696 -  0   0   0   0   0   0   0   0   0   0   0   0
34697 -  0   0   0   0   0   0   0   0   0   0   0   0
34698 -  6   6   6  18  18  18  38  38  38  58  58  58
34699 - 78  78  78  86  86  86 101 101 101 123 123 123
34700 -175 146  61 210 150  10 234 174  13 246 186  14
34701 -246 190  14 246 190  14 246 190  14 238 190  10
34702 -102  78  10   2   2   6  46  46  46 198 198 198
34703 -253 253 253 253 253 253 253 253 253 253 253 253
34704 -253 253 253 253 253 253 234 234 234 242 242 242
34705 -253 253 253 253 253 253 253 253 253 253 253 253
34706 -253 253 253 253 253 253 253 253 253 253 253 253
34707 -253 253 253 253 253 253 253 253 253 253 253 253
34708 -253 253 253 253 253 253 253 253 253 224 178  62
34709 -242 186  14 241 196  14 210 166  10  22  18   6
34710 -  2   2   6   2   2   6   2   2   6   2   2   6
34711 -  2   2   6   2   2   6   6   6   6 121  92   8
34712 -238 202  15 232 195  16  82  82  82  34  34  34
34713 - 10  10  10   0   0   0   0   0   0   0   0   0
34714 -  0   0   0   0   0   0   0   0   0   0   0   0
34715 -  0   0   0   0   0   0   0   0   0   0   0   0
34716 -  0   0   0   0   0   0   0   0   0   0   0   0
34717 -  0   0   0   0   0   0   0   0   0   0   0   0
34718 - 14  14  14  38  38  38  70  70  70 154 122  46
34719 -190 142  34 200 144  11 197 138  11 197 138  11
34720 -213 154  11 226 170  11 242 186  14 246 190  14
34721 -246 190  14 246 190  14 246 190  14 246 190  14
34722 -225 175  15  46  32   6   2   2   6  22  22  22
34723 -158 158 158 250 250 250 253 253 253 253 253 253
34724 -253 253 253 253 253 253 253 253 253 253 253 253
34725 -253 253 253 253 253 253 253 253 253 253 253 253
34726 -253 253 253 253 253 253 253 253 253 253 253 253
34727 -253 253 253 253 253 253 253 253 253 253 253 253
34728 -253 253 253 250 250 250 242 242 242 224 178  62
34729 -239 182  13 236 186  11 213 154  11  46  32   6
34730 -  2   2   6   2   2   6   2   2   6   2   2   6
34731 -  2   2   6   2   2   6  61  42   6 225 175  15
34732 -238 190  10 236 186  11 112 100  78  42  42  42
34733 - 14  14  14   0   0   0   0   0   0   0   0   0
34734 -  0   0   0   0   0   0   0   0   0   0   0   0
34735 -  0   0   0   0   0   0   0   0   0   0   0   0
34736 -  0   0   0   0   0   0   0   0   0   0   0   0
34737 -  0   0   0   0   0   0   0   0   0   6   6   6
34738 - 22  22  22  54  54  54 154 122  46 213 154  11
34739 -226 170  11 230 174  11 226 170  11 226 170  11
34740 -236 178  12 242 186  14 246 190  14 246 190  14
34741 -246 190  14 246 190  14 246 190  14 246 190  14
34742 -241 196  14 184 144  12  10  10  10   2   2   6
34743 -  6   6   6 116 116 116 242 242 242 253 253 253
34744 -253 253 253 253 253 253 253 253 253 253 253 253
34745 -253 253 253 253 253 253 253 253 253 253 253 253
34746 -253 253 253 253 253 253 253 253 253 253 253 253
34747 -253 253 253 253 253 253 253 253 253 253 253 253
34748 -253 253 253 231 231 231 198 198 198 214 170  54
34749 -236 178  12 236 178  12 210 150  10 137  92   6
34750 - 18  14   6   2   2   6   2   2   6   2   2   6
34751 -  6   6   6  70  47   6 200 144  11 236 178  12
34752 -239 182  13 239 182  13 124 112  88  58  58  58
34753 - 22  22  22   6   6   6   0   0   0   0   0   0
34754 -  0   0   0   0   0   0   0   0   0   0   0   0
34755 -  0   0   0   0   0   0   0   0   0   0   0   0
34756 -  0   0   0   0   0   0   0   0   0   0   0   0
34757 -  0   0   0   0   0   0   0   0   0  10  10  10
34758 - 30  30  30  70  70  70 180 133  36 226 170  11
34759 -239 182  13 242 186  14 242 186  14 246 186  14
34760 -246 190  14 246 190  14 246 190  14 246 190  14
34761 -246 190  14 246 190  14 246 190  14 246 190  14
34762 -246 190  14 232 195  16  98  70   6   2   2   6
34763 -  2   2   6   2   2   6  66  66  66 221 221 221
34764 -253 253 253 253 253 253 253 253 253 253 253 253
34765 -253 253 253 253 253 253 253 253 253 253 253 253
34766 -253 253 253 253 253 253 253 253 253 253 253 253
34767 -253 253 253 253 253 253 253 253 253 253 253 253
34768 -253 253 253 206 206 206 198 198 198 214 166  58
34769 -230 174  11 230 174  11 216 158  10 192 133   9
34770 -163 110   8 116  81   8 102  78  10 116  81   8
34771 -167 114   7 197 138  11 226 170  11 239 182  13
34772 -242 186  14 242 186  14 162 146  94  78  78  78
34773 - 34  34  34  14  14  14   6   6   6   0   0   0
34774 -  0   0   0   0   0   0   0   0   0   0   0   0
34775 -  0   0   0   0   0   0   0   0   0   0   0   0
34776 -  0   0   0   0   0   0   0   0   0   0   0   0
34777 -  0   0   0   0   0   0   0   0   0   6   6   6
34778 - 30  30  30  78  78  78 190 142  34 226 170  11
34779 -239 182  13 246 190  14 246 190  14 246 190  14
34780 -246 190  14 246 190  14 246 190  14 246 190  14
34781 -246 190  14 246 190  14 246 190  14 246 190  14
34782 -246 190  14 241 196  14 203 166  17  22  18   6
34783 -  2   2   6   2   2   6   2   2   6  38  38  38
34784 -218 218 218 253 253 253 253 253 253 253 253 253
34785 -253 253 253 253 253 253 253 253 253 253 253 253
34786 -253 253 253 253 253 253 253 253 253 253 253 253
34787 -253 253 253 253 253 253 253 253 253 253 253 253
34788 -250 250 250 206 206 206 198 198 198 202 162  69
34789 -226 170  11 236 178  12 224 166  10 210 150  10
34790 -200 144  11 197 138  11 192 133   9 197 138  11
34791 -210 150  10 226 170  11 242 186  14 246 190  14
34792 -246 190  14 246 186  14 225 175  15 124 112  88
34793 - 62  62  62  30  30  30  14  14  14   6   6   6
34794 -  0   0   0   0   0   0   0   0   0   0   0   0
34795 -  0   0   0   0   0   0   0   0   0   0   0   0
34796 -  0   0   0   0   0   0   0   0   0   0   0   0
34797 -  0   0   0   0   0   0   0   0   0  10  10  10
34798 - 30  30  30  78  78  78 174 135  50 224 166  10
34799 -239 182  13 246 190  14 246 190  14 246 190  14
34800 -246 190  14 246 190  14 246 190  14 246 190  14
34801 -246 190  14 246 190  14 246 190  14 246 190  14
34802 -246 190  14 246 190  14 241 196  14 139 102  15
34803 -  2   2   6   2   2   6   2   2   6   2   2   6
34804 - 78  78  78 250 250 250 253 253 253 253 253 253
34805 -253 253 253 253 253 253 253 253 253 253 253 253
34806 -253 253 253 253 253 253 253 253 253 253 253 253
34807 -253 253 253 253 253 253 253 253 253 253 253 253
34808 -250 250 250 214 214 214 198 198 198 190 150  46
34809 -219 162  10 236 178  12 234 174  13 224 166  10
34810 -216 158  10 213 154  11 213 154  11 216 158  10
34811 -226 170  11 239 182  13 246 190  14 246 190  14
34812 -246 190  14 246 190  14 242 186  14 206 162  42
34813 -101 101 101  58  58  58  30  30  30  14  14  14
34814 -  6   6   6   0   0   0   0   0   0   0   0   0
34815 -  0   0   0   0   0   0   0   0   0   0   0   0
34816 -  0   0   0   0   0   0   0   0   0   0   0   0
34817 -  0   0   0   0   0   0   0   0   0  10  10  10
34818 - 30  30  30  74  74  74 174 135  50 216 158  10
34819 -236 178  12 246 190  14 246 190  14 246 190  14
34820 -246 190  14 246 190  14 246 190  14 246 190  14
34821 -246 190  14 246 190  14 246 190  14 246 190  14
34822 -246 190  14 246 190  14 241 196  14 226 184  13
34823 - 61  42   6   2   2   6   2   2   6   2   2   6
34824 - 22  22  22 238 238 238 253 253 253 253 253 253
34825 -253 253 253 253 253 253 253 253 253 253 253 253
34826 -253 253 253 253 253 253 253 253 253 253 253 253
34827 -253 253 253 253 253 253 253 253 253 253 253 253
34828 -253 253 253 226 226 226 187 187 187 180 133  36
34829 -216 158  10 236 178  12 239 182  13 236 178  12
34830 -230 174  11 226 170  11 226 170  11 230 174  11
34831 -236 178  12 242 186  14 246 190  14 246 190  14
34832 -246 190  14 246 190  14 246 186  14 239 182  13
34833 -206 162  42 106 106 106  66  66  66  34  34  34
34834 - 14  14  14   6   6   6   0   0   0   0   0   0
34835 -  0   0   0   0   0   0   0   0   0   0   0   0
34836 -  0   0   0   0   0   0   0   0   0   0   0   0
34837 -  0   0   0   0   0   0   0   0   0   6   6   6
34838 - 26  26  26  70  70  70 163 133  67 213 154  11
34839 -236 178  12 246 190  14 246 190  14 246 190  14
34840 -246 190  14 246 190  14 246 190  14 246 190  14
34841 -246 190  14 246 190  14 246 190  14 246 190  14
34842 -246 190  14 246 190  14 246 190  14 241 196  14
34843 -190 146  13  18  14   6   2   2   6   2   2   6
34844 - 46  46  46 246 246 246 253 253 253 253 253 253
34845 -253 253 253 253 253 253 253 253 253 253 253 253
34846 -253 253 253 253 253 253 253 253 253 253 253 253
34847 -253 253 253 253 253 253 253 253 253 253 253 253
34848 -253 253 253 221 221 221  86  86  86 156 107  11
34849 -216 158  10 236 178  12 242 186  14 246 186  14
34850 -242 186  14 239 182  13 239 182  13 242 186  14
34851 -242 186  14 246 186  14 246 190  14 246 190  14
34852 -246 190  14 246 190  14 246 190  14 246 190  14
34853 -242 186  14 225 175  15 142 122  72  66  66  66
34854 - 30  30  30  10  10  10   0   0   0   0   0   0
34855 -  0   0   0   0   0   0   0   0   0   0   0   0
34856 -  0   0   0   0   0   0   0   0   0   0   0   0
34857 -  0   0   0   0   0   0   0   0   0   6   6   6
34858 - 26  26  26  70  70  70 163 133  67 210 150  10
34859 -236 178  12 246 190  14 246 190  14 246 190  14
34860 -246 190  14 246 190  14 246 190  14 246 190  14
34861 -246 190  14 246 190  14 246 190  14 246 190  14
34862 -246 190  14 246 190  14 246 190  14 246 190  14
34863 -232 195  16 121  92   8  34  34  34 106 106 106
34864 -221 221 221 253 253 253 253 253 253 253 253 253
34865 -253 253 253 253 253 253 253 253 253 253 253 253
34866 -253 253 253 253 253 253 253 253 253 253 253 253
34867 -253 253 253 253 253 253 253 253 253 253 253 253
34868 -242 242 242  82  82  82  18  14   6 163 110   8
34869 -216 158  10 236 178  12 242 186  14 246 190  14
34870 -246 190  14 246 190  14 246 190  14 246 190  14
34871 -246 190  14 246 190  14 246 190  14 246 190  14
34872 -246 190  14 246 190  14 246 190  14 246 190  14
34873 -246 190  14 246 190  14 242 186  14 163 133  67
34874 - 46  46  46  18  18  18   6   6   6   0   0   0
34875 -  0   0   0   0   0   0   0   0   0   0   0   0
34876 -  0   0   0   0   0   0   0   0   0   0   0   0
34877 -  0   0   0   0   0   0   0   0   0  10  10  10
34878 - 30  30  30  78  78  78 163 133  67 210 150  10
34879 -236 178  12 246 186  14 246 190  14 246 190  14
34880 -246 190  14 246 190  14 246 190  14 246 190  14
34881 -246 190  14 246 190  14 246 190  14 246 190  14
34882 -246 190  14 246 190  14 246 190  14 246 190  14
34883 -241 196  14 215 174  15 190 178 144 253 253 253
34884 -253 253 253 253 253 253 253 253 253 253 253 253
34885 -253 253 253 253 253 253 253 253 253 253 253 253
34886 -253 253 253 253 253 253 253 253 253 253 253 253
34887 -253 253 253 253 253 253 253 253 253 218 218 218
34888 - 58  58  58   2   2   6  22  18   6 167 114   7
34889 -216 158  10 236 178  12 246 186  14 246 190  14
34890 -246 190  14 246 190  14 246 190  14 246 190  14
34891 -246 190  14 246 190  14 246 190  14 246 190  14
34892 -246 190  14 246 190  14 246 190  14 246 190  14
34893 -246 190  14 246 186  14 242 186  14 190 150  46
34894 - 54  54  54  22  22  22   6   6   6   0   0   0
34895 -  0   0   0   0   0   0   0   0   0   0   0   0
34896 -  0   0   0   0   0   0   0   0   0   0   0   0
34897 -  0   0   0   0   0   0   0   0   0  14  14  14
34898 - 38  38  38  86  86  86 180 133  36 213 154  11
34899 -236 178  12 246 186  14 246 190  14 246 190  14
34900 -246 190  14 246 190  14 246 190  14 246 190  14
34901 -246 190  14 246 190  14 246 190  14 246 190  14
34902 -246 190  14 246 190  14 246 190  14 246 190  14
34903 -246 190  14 232 195  16 190 146  13 214 214 214
34904 -253 253 253 253 253 253 253 253 253 253 253 253
34905 -253 253 253 253 253 253 253 253 253 253 253 253
34906 -253 253 253 253 253 253 253 253 253 253 253 253
34907 -253 253 253 250 250 250 170 170 170  26  26  26
34908 -  2   2   6   2   2   6  37  26   9 163 110   8
34909 -219 162  10 239 182  13 246 186  14 246 190  14
34910 -246 190  14 246 190  14 246 190  14 246 190  14
34911 -246 190  14 246 190  14 246 190  14 246 190  14
34912 -246 190  14 246 190  14 246 190  14 246 190  14
34913 -246 186  14 236 178  12 224 166  10 142 122  72
34914 - 46  46  46  18  18  18   6   6   6   0   0   0
34915 -  0   0   0   0   0   0   0   0   0   0   0   0
34916 -  0   0   0   0   0   0   0   0   0   0   0   0
34917 -  0   0   0   0   0   0   6   6   6  18  18  18
34918 - 50  50  50 109 106  95 192 133   9 224 166  10
34919 -242 186  14 246 190  14 246 190  14 246 190  14
34920 -246 190  14 246 190  14 246 190  14 246 190  14
34921 -246 190  14 246 190  14 246 190  14 246 190  14
34922 -246 190  14 246 190  14 246 190  14 246 190  14
34923 -242 186  14 226 184  13 210 162  10 142 110  46
34924 -226 226 226 253 253 253 253 253 253 253 253 253
34925 -253 253 253 253 253 253 253 253 253 253 253 253
34926 -253 253 253 253 253 253 253 253 253 253 253 253
34927 -198 198 198  66  66  66   2   2   6   2   2   6
34928 -  2   2   6   2   2   6  50  34   6 156 107  11
34929 -219 162  10 239 182  13 246 186  14 246 190  14
34930 -246 190  14 246 190  14 246 190  14 246 190  14
34931 -246 190  14 246 190  14 246 190  14 246 190  14
34932 -246 190  14 246 190  14 246 190  14 242 186  14
34933 -234 174  13 213 154  11 154 122  46  66  66  66
34934 - 30  30  30  10  10  10   0   0   0   0   0   0
34935 -  0   0   0   0   0   0   0   0   0   0   0   0
34936 -  0   0   0   0   0   0   0   0   0   0   0   0
34937 -  0   0   0   0   0   0   6   6   6  22  22  22
34938 - 58  58  58 154 121  60 206 145  10 234 174  13
34939 -242 186  14 246 186  14 246 190  14 246 190  14
34940 -246 190  14 246 190  14 246 190  14 246 190  14
34941 -246 190  14 246 190  14 246 190  14 246 190  14
34942 -246 190  14 246 190  14 246 190  14 246 190  14
34943 -246 186  14 236 178  12 210 162  10 163 110   8
34944 - 61  42   6 138 138 138 218 218 218 250 250 250
34945 -253 253 253 253 253 253 253 253 253 250 250 250
34946 -242 242 242 210 210 210 144 144 144  66  66  66
34947 -  6   6   6   2   2   6   2   2   6   2   2   6
34948 -  2   2   6   2   2   6  61  42   6 163 110   8
34949 -216 158  10 236 178  12 246 190  14 246 190  14
34950 -246 190  14 246 190  14 246 190  14 246 190  14
34951 -246 190  14 246 190  14 246 190  14 246 190  14
34952 -246 190  14 239 182  13 230 174  11 216 158  10
34953 -190 142  34 124 112  88  70  70  70  38  38  38
34954 - 18  18  18   6   6   6   0   0   0   0   0   0
34955 -  0   0   0   0   0   0   0   0   0   0   0   0
34956 -  0   0   0   0   0   0   0   0   0   0   0   0
34957 -  0   0   0   0   0   0   6   6   6  22  22  22
34958 - 62  62  62 168 124  44 206 145  10 224 166  10
34959 -236 178  12 239 182  13 242 186  14 242 186  14
34960 -246 186  14 246 190  14 246 190  14 246 190  14
34961 -246 190  14 246 190  14 246 190  14 246 190  14
34962 -246 190  14 246 190  14 246 190  14 246 190  14
34963 -246 190  14 236 178  12 216 158  10 175 118   6
34964 - 80  54   7   2   2   6   6   6   6  30  30  30
34965 - 54  54  54  62  62  62  50  50  50  38  38  38
34966 - 14  14  14   2   2   6   2   2   6   2   2   6
34967 -  2   2   6   2   2   6   2   2   6   2   2   6
34968 -  2   2   6   6   6   6  80  54   7 167 114   7
34969 -213 154  11 236 178  12 246 190  14 246 190  14
34970 -246 190  14 246 190  14 246 190  14 246 190  14
34971 -246 190  14 242 186  14 239 182  13 239 182  13
34972 -230 174  11 210 150  10 174 135  50 124 112  88
34973 - 82  82  82  54  54  54  34  34  34  18  18  18
34974 -  6   6   6   0   0   0   0   0   0   0   0   0
34975 -  0   0   0   0   0   0   0   0   0   0   0   0
34976 -  0   0   0   0   0   0   0   0   0   0   0   0
34977 -  0   0   0   0   0   0   6   6   6  18  18  18
34978 - 50  50  50 158 118  36 192 133   9 200 144  11
34979 -216 158  10 219 162  10 224 166  10 226 170  11
34980 -230 174  11 236 178  12 239 182  13 239 182  13
34981 -242 186  14 246 186  14 246 190  14 246 190  14
34982 -246 190  14 246 190  14 246 190  14 246 190  14
34983 -246 186  14 230 174  11 210 150  10 163 110   8
34984 -104  69   6  10  10  10   2   2   6   2   2   6
34985 -  2   2   6   2   2   6   2   2   6   2   2   6
34986 -  2   2   6   2   2   6   2   2   6   2   2   6
34987 -  2   2   6   2   2   6   2   2   6   2   2   6
34988 -  2   2   6   6   6   6  91  60   6 167 114   7
34989 -206 145  10 230 174  11 242 186  14 246 190  14
34990 -246 190  14 246 190  14 246 186  14 242 186  14
34991 -239 182  13 230 174  11 224 166  10 213 154  11
34992 -180 133  36 124 112  88  86  86  86  58  58  58
34993 - 38  38  38  22  22  22  10  10  10   6   6   6
34994 -  0   0   0   0   0   0   0   0   0   0   0   0
34995 -  0   0   0   0   0   0   0   0   0   0   0   0
34996 -  0   0   0   0   0   0   0   0   0   0   0   0
34997 -  0   0   0   0   0   0   0   0   0  14  14  14
34998 - 34  34  34  70  70  70 138 110  50 158 118  36
34999 -167 114   7 180 123   7 192 133   9 197 138  11
35000 -200 144  11 206 145  10 213 154  11 219 162  10
35001 -224 166  10 230 174  11 239 182  13 242 186  14
35002 -246 186  14 246 186  14 246 186  14 246 186  14
35003 -239 182  13 216 158  10 185 133  11 152  99   6
35004 -104  69   6  18  14   6   2   2   6   2   2   6
35005 -  2   2   6   2   2   6   2   2   6   2   2   6
35006 -  2   2   6   2   2   6   2   2   6   2   2   6
35007 -  2   2   6   2   2   6   2   2   6   2   2   6
35008 -  2   2   6   6   6   6  80  54   7 152  99   6
35009 -192 133   9 219 162  10 236 178  12 239 182  13
35010 -246 186  14 242 186  14 239 182  13 236 178  12
35011 -224 166  10 206 145  10 192 133   9 154 121  60
35012 - 94  94  94  62  62  62  42  42  42  22  22  22
35013 - 14  14  14   6   6   6   0   0   0   0   0   0
35014 -  0   0   0   0   0   0   0   0   0   0   0   0
35015 -  0   0   0   0   0   0   0   0   0   0   0   0
35016 -  0   0   0   0   0   0   0   0   0   0   0   0
35017 -  0   0   0   0   0   0   0   0   0   6   6   6
35018 - 18  18  18  34  34  34  58  58  58  78  78  78
35019 -101  98  89 124 112  88 142 110  46 156 107  11
35020 -163 110   8 167 114   7 175 118   6 180 123   7
35021 -185 133  11 197 138  11 210 150  10 219 162  10
35022 -226 170  11 236 178  12 236 178  12 234 174  13
35023 -219 162  10 197 138  11 163 110   8 130  83   6
35024 - 91  60   6  10  10  10   2   2   6   2   2   6
35025 - 18  18  18  38  38  38  38  38  38  38  38  38
35026 - 38  38  38  38  38  38  38  38  38  38  38  38
35027 - 38  38  38  38  38  38  26  26  26   2   2   6
35028 -  2   2   6   6   6   6  70  47   6 137  92   6
35029 -175 118   6 200 144  11 219 162  10 230 174  11
35030 -234 174  13 230 174  11 219 162  10 210 150  10
35031 -192 133   9 163 110   8 124 112  88  82  82  82
35032 - 50  50  50  30  30  30  14  14  14   6   6   6
35033 -  0   0   0   0   0   0   0   0   0   0   0   0
35034 -  0   0   0   0   0   0   0   0   0   0   0   0
35035 -  0   0   0   0   0   0   0   0   0   0   0   0
35036 -  0   0   0   0   0   0   0   0   0   0   0   0
35037 -  0   0   0   0   0   0   0   0   0   0   0   0
35038 -  6   6   6  14  14  14  22  22  22  34  34  34
35039 - 42  42  42  58  58  58  74  74  74  86  86  86
35040 -101  98  89 122 102  70 130  98  46 121  87  25
35041 -137  92   6 152  99   6 163 110   8 180 123   7
35042 -185 133  11 197 138  11 206 145  10 200 144  11
35043 -180 123   7 156 107  11 130  83   6 104  69   6
35044 - 50  34   6  54  54  54 110 110 110 101  98  89
35045 - 86  86  86  82  82  82  78  78  78  78  78  78
35046 - 78  78  78  78  78  78  78  78  78  78  78  78
35047 - 78  78  78  82  82  82  86  86  86  94  94  94
35048 -106 106 106 101 101 101  86  66  34 124  80   6
35049 -156 107  11 180 123   7 192 133   9 200 144  11
35050 -206 145  10 200 144  11 192 133   9 175 118   6
35051 -139 102  15 109 106  95  70  70  70  42  42  42
35052 - 22  22  22  10  10  10   0   0   0   0   0   0
35053 -  0   0   0   0   0   0   0   0   0   0   0   0
35054 -  0   0   0   0   0   0   0   0   0   0   0   0
35055 -  0   0   0   0   0   0   0   0   0   0   0   0
35056 -  0   0   0   0   0   0   0   0   0   0   0   0
35057 -  0   0   0   0   0   0   0   0   0   0   0   0
35058 -  0   0   0   0   0   0   6   6   6  10  10  10
35059 - 14  14  14  22  22  22  30  30  30  38  38  38
35060 - 50  50  50  62  62  62  74  74  74  90  90  90
35061 -101  98  89 112 100  78 121  87  25 124  80   6
35062 -137  92   6 152  99   6 152  99   6 152  99   6
35063 -138  86   6 124  80   6  98  70   6  86  66  30
35064 -101  98  89  82  82  82  58  58  58  46  46  46
35065 - 38  38  38  34  34  34  34  34  34  34  34  34
35066 - 34  34  34  34  34  34  34  34  34  34  34  34
35067 - 34  34  34  34  34  34  38  38  38  42  42  42
35068 - 54  54  54  82  82  82  94  86  76  91  60   6
35069 -134  86   6 156 107  11 167 114   7 175 118   6
35070 -175 118   6 167 114   7 152  99   6 121  87  25
35071 -101  98  89  62  62  62  34  34  34  18  18  18
35072 -  6   6   6   0   0   0   0   0   0   0   0   0
35073 -  0   0   0   0   0   0   0   0   0   0   0   0
35074 -  0   0   0   0   0   0   0   0   0   0   0   0
35075 -  0   0   0   0   0   0   0   0   0   0   0   0
35076 -  0   0   0   0   0   0   0   0   0   0   0   0
35077 -  0   0   0   0   0   0   0   0   0   0   0   0
35078 -  0   0   0   0   0   0   0   0   0   0   0   0
35079 -  0   0   0   6   6   6   6   6   6  10  10  10
35080 - 18  18  18  22  22  22  30  30  30  42  42  42
35081 - 50  50  50  66  66  66  86  86  86 101  98  89
35082 -106  86  58  98  70   6 104  69   6 104  69   6
35083 -104  69   6  91  60   6  82  62  34  90  90  90
35084 - 62  62  62  38  38  38  22  22  22  14  14  14
35085 - 10  10  10  10  10  10  10  10  10  10  10  10
35086 - 10  10  10  10  10  10   6   6   6  10  10  10
35087 - 10  10  10  10  10  10  10  10  10  14  14  14
35088 - 22  22  22  42  42  42  70  70  70  89  81  66
35089 - 80  54   7 104  69   6 124  80   6 137  92   6
35090 -134  86   6 116  81   8 100  82  52  86  86  86
35091 - 58  58  58  30  30  30  14  14  14   6   6   6
35092 -  0   0   0   0   0   0   0   0   0   0   0   0
35093 -  0   0   0   0   0   0   0   0   0   0   0   0
35094 -  0   0   0   0   0   0   0   0   0   0   0   0
35095 -  0   0   0   0   0   0   0   0   0   0   0   0
35096 -  0   0   0   0   0   0   0   0   0   0   0   0
35097 -  0   0   0   0   0   0   0   0   0   0   0   0
35098 -  0   0   0   0   0   0   0   0   0   0   0   0
35099 -  0   0   0   0   0   0   0   0   0   0   0   0
35100 -  0   0   0   6   6   6  10  10  10  14  14  14
35101 - 18  18  18  26  26  26  38  38  38  54  54  54
35102 - 70  70  70  86  86  86  94  86  76  89  81  66
35103 - 89  81  66  86  86  86  74  74  74  50  50  50
35104 - 30  30  30  14  14  14   6   6   6   0   0   0
35105 -  0   0   0   0   0   0   0   0   0   0   0   0
35106 -  0   0   0   0   0   0   0   0   0   0   0   0
35107 -  0   0   0   0   0   0   0   0   0   0   0   0
35108 -  6   6   6  18  18  18  34  34  34  58  58  58
35109 - 82  82  82  89  81  66  89  81  66  89  81  66
35110 - 94  86  66  94  86  76  74  74  74  50  50  50
35111 - 26  26  26  14  14  14   6   6   6   0   0   0
35112 -  0   0   0   0   0   0   0   0   0   0   0   0
35113 -  0   0   0   0   0   0   0   0   0   0   0   0
35114 -  0   0   0   0   0   0   0   0   0   0   0   0
35115 -  0   0   0   0   0   0   0   0   0   0   0   0
35116 -  0   0   0   0   0   0   0   0   0   0   0   0
35117 -  0   0   0   0   0   0   0   0   0   0   0   0
35118 -  0   0   0   0   0   0   0   0   0   0   0   0
35119 -  0   0   0   0   0   0   0   0   0   0   0   0
35120 -  0   0   0   0   0   0   0   0   0   0   0   0
35121 -  6   6   6   6   6   6  14  14  14  18  18  18
35122 - 30  30  30  38  38  38  46  46  46  54  54  54
35123 - 50  50  50  42  42  42  30  30  30  18  18  18
35124 - 10  10  10   0   0   0   0   0   0   0   0   0
35125 -  0   0   0   0   0   0   0   0   0   0   0   0
35126 -  0   0   0   0   0   0   0   0   0   0   0   0
35127 -  0   0   0   0   0   0   0   0   0   0   0   0
35128 -  0   0   0   6   6   6  14  14  14  26  26  26
35129 - 38  38  38  50  50  50  58  58  58  58  58  58
35130 - 54  54  54  42  42  42  30  30  30  18  18  18
35131 - 10  10  10   0   0   0   0   0   0   0   0   0
35132 -  0   0   0   0   0   0   0   0   0   0   0   0
35133 -  0   0   0   0   0   0   0   0   0   0   0   0
35134 -  0   0   0   0   0   0   0   0   0   0   0   0
35135 -  0   0   0   0   0   0   0   0   0   0   0   0
35136 -  0   0   0   0   0   0   0   0   0   0   0   0
35137 -  0   0   0   0   0   0   0   0   0   0   0   0
35138 -  0   0   0   0   0   0   0   0   0   0   0   0
35139 -  0   0   0   0   0   0   0   0   0   0   0   0
35140 -  0   0   0   0   0   0   0   0   0   0   0   0
35141 -  0   0   0   0   0   0   0   0   0   6   6   6
35142 -  6   6   6  10  10  10  14  14  14  18  18  18
35143 - 18  18  18  14  14  14  10  10  10   6   6   6
35144 -  0   0   0   0   0   0   0   0   0   0   0   0
35145 -  0   0   0   0   0   0   0   0   0   0   0   0
35146 -  0   0   0   0   0   0   0   0   0   0   0   0
35147 -  0   0   0   0   0   0   0   0   0   0   0   0
35148 -  0   0   0   0   0   0   0   0   0   6   6   6
35149 - 14  14  14  18  18  18  22  22  22  22  22  22
35150 - 18  18  18  14  14  14  10  10  10   6   6   6
35151 -  0   0   0   0   0   0   0   0   0   0   0   0
35152 -  0   0   0   0   0   0   0   0   0   0   0   0
35153 -  0   0   0   0   0   0   0   0   0   0   0   0
35154 -  0   0   0   0   0   0   0   0   0   0   0   0
35155 -  0   0   0   0   0   0   0   0   0   0   0   0
35156 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35157 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35158 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35159 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35160 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35161 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35162 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35163 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35164 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35165 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35166 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35167 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35168 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35169 +4 4 4  4 4 4
35170 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35171 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35172 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35173 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35174 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35175 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35176 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35177 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35178 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35179 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35180 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35181 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35182 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35183 +4 4 4  4 4 4
35184 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35185 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35186 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35187 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35188 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35189 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35190 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35191 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35192 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35193 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35194 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35195 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35196 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35197 +4 4 4  4 4 4
35198 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35199 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35200 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35201 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35202 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35203 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35204 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35205 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35206 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35207 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35208 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35209 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35210 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35211 +4 4 4  4 4 4
35212 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35213 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35214 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35215 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35216 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35217 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35218 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35219 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35220 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35221 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35222 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35223 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35224 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35225 +4 4 4  4 4 4
35226 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35227 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35228 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35229 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35230 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35231 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35232 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35233 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35234 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35235 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35236 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35237 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35238 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35239 +4 4 4  4 4 4
35240 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35241 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35242 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35243 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35244 +4 4 4  4 4 4  4 4 4  3 3 3  0 0 0  0 0 0
35245 +0 0 0  0 0 0  0 0 0  0 0 0  3 3 3  4 4 4
35246 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35247 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35248 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35249 +4 4 4  4 4 4  4 4 4  4 4 4  1 1 1  0 0 0
35250 +0 0 0  3 3 3  4 4 4  4 4 4  4 4 4  4 4 4
35251 +4 4 4  4 4 4  4 4 4  2 1 0  2 1 0  3 2 2
35252 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35253 +4 4 4  4 4 4
35254 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35255 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35256 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35257 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35258 +4 4 4  4 4 4  2 2 2  0 0 0  3 4 3  26 28 28
35259 +37 38 37  37 38 37  14 17 19  2 2 2  0 0 0  2 2 2
35260 +5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35261 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35262 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35263 +4 4 4  4 4 4  3 3 3  0 0 0  1 1 1  6 6 6
35264 +2 2 2  0 0 0  3 3 3  4 4 4  4 4 4  4 4 4
35265 +4 4 5  3 3 3  1 0 0  0 0 0  1 0 0  0 0 0
35266 +1 1 1  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35267 +4 4 4  4 4 4
35268 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35269 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35270 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35271 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35272 +2 2 2  0 0 0  0 0 0  14 17 19  60 74 84  137 136 137
35273 +153 152 153  137 136 137  125 124 125  60 73 81  6 6 6  3 1 0
35274 +0 0 0  3 3 3  4 4 4  4 4 4  4 4 4  4 4 4
35275 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35276 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35277 +4 4 4  4 4 4  0 0 0  4 4 4  41 54 63  125 124 125
35278 +60 73 81  6 6 6  4 0 0  3 3 3  4 4 4  4 4 4
35279 +4 4 4  0 0 0  6 9 11  41 54 63  41 65 82  22 30 35
35280 +2 2 2  2 1 0  4 4 4  4 4 4  4 4 4  4 4 4
35281 +4 4 4  4 4 4
35282 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35283 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35284 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35285 +4 4 4  4 4 4  5 5 5  5 5 5  2 2 2  0 0 0
35286 +4 0 0  6 6 6  41 54 63  137 136 137  174 174 174  167 166 167
35287 +165 164 165  165 164 165  163 162 163  163 162 163  125 124 125  41 54 63
35288 +1 1 1  0 0 0  0 0 0  3 3 3  5 5 5  4 4 4
35289 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35290 +4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  5 5 5
35291 +3 3 3  2 0 0  4 0 0  60 73 81  156 155 156  167 166 167
35292 +163 162 163  85 115 134  5 7 8  0 0 0  4 4 4  5 5 5
35293 +0 0 0  2 5 5  55 98 126  90 154 193  90 154 193  72 125 159
35294 +37 51 59  2 0 0  1 1 1  4 5 5  4 4 4  4 4 4
35295 +4 4 4  4 4 4
35296 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35297 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35298 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35299 +4 4 4  5 5 5  4 4 4  1 1 1  0 0 0  3 3 3
35300 +37 38 37  125 124 125  163 162 163  174 174 174  158 157 158  158 157 158
35301 +156 155 156  156 155 156  158 157 158  165 164 165  174 174 174  166 165 166
35302 +125 124 125  16 19 21  1 0 0  0 0 0  0 0 0  4 4 4
35303 +5 5 5  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
35304 +4 4 4  4 4 4  4 4 4  5 5 5  5 5 5  1 1 1
35305 +0 0 0  0 0 0  37 38 37  153 152 153  174 174 174  158 157 158
35306 +174 174 174  163 162 163  37 38 37  4 3 3  4 0 0  1 1 1
35307 +0 0 0  22 40 52  101 161 196  101 161 196  90 154 193  101 161 196
35308 +64 123 161  14 17 19  0 0 0  4 4 4  4 4 4  4 4 4
35309 +4 4 4  4 4 4
35310 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35311 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35312 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
35313 +5 5 5  2 2 2  0 0 0  4 0 0  24 26 27  85 115 134
35314 +156 155 156  174 174 174  167 166 167  156 155 156  154 153 154  157 156 157
35315 +156 155 156  156 155 156  155 154 155  153 152 153  158 157 158  167 166 167
35316 +174 174 174  156 155 156  60 74 84  16 19 21  0 0 0  0 0 0
35317 +1 1 1  5 5 5  5 5 5  4 4 4  4 4 4  4 4 4
35318 +4 4 4  5 5 5  6 6 6  3 3 3  0 0 0  4 0 0
35319 +13 16 17  60 73 81  137 136 137  165 164 165  156 155 156  153 152 153
35320 +174 174 174  177 184 187  60 73 81  3 1 0  0 0 0  1 1 2
35321 +22 30 35  64 123 161  136 185 209  90 154 193  90 154 193  90 154 193
35322 +90 154 193  21 29 34  0 0 0  3 2 2  4 4 5  4 4 4
35323 +4 4 4  4 4 4
35324 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35325 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35326 +4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  3 3 3
35327 +0 0 0  0 0 0  10 13 16  60 74 84  157 156 157  174 174 174
35328 +174 174 174  158 157 158  153 152 153  154 153 154  156 155 156  155 154 155
35329 +156 155 156  155 154 155  154 153 154  157 156 157  154 153 154  153 152 153
35330 +163 162 163  174 174 174  177 184 187  137 136 137  60 73 81  13 16 17
35331 +4 0 0  0 0 0  3 3 3  5 5 5  4 4 4  4 4 4
35332 +5 5 5  4 4 4  1 1 1  0 0 0  3 3 3  41 54 63
35333 +131 129 131  174 174 174  174 174 174  174 174 174  167 166 167  174 174 174
35334 +190 197 201  137 136 137  24 26 27  4 0 0  16 21 25  50 82 103
35335 +90 154 193  136 185 209  90 154 193  101 161 196  101 161 196  101 161 196
35336 +31 91 132  3 6 7  0 0 0  4 4 4  4 4 4  4 4 4
35337 +4 4 4  4 4 4
35338 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35339 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35340 +4 4 4  4 4 4  4 4 4  2 2 2  0 0 0  4 0 0
35341 +4 0 0  43 57 68  137 136 137  177 184 187  174 174 174  163 162 163
35342 +155 154 155  155 154 155  156 155 156  155 154 155  158 157 158  165 164 165
35343 +167 166 167  166 165 166  163 162 163  157 156 157  155 154 155  155 154 155
35344 +153 152 153  156 155 156  167 166 167  174 174 174  174 174 174  131 129 131
35345 +41 54 63  5 5 5  0 0 0  0 0 0  3 3 3  4 4 4
35346 +1 1 1  0 0 0  1 0 0  26 28 28  125 124 125  174 174 174
35347 +177 184 187  174 174 174  174 174 174  156 155 156  131 129 131  137 136 137
35348 +125 124 125  24 26 27  4 0 0  41 65 82  90 154 193  136 185 209
35349 +136 185 209  101 161 196  53 118 160  37 112 160  90 154 193  34 86 122
35350 +7 12 15  0 0 0  4 4 4  4 4 4  4 4 4  4 4 4
35351 +4 4 4  4 4 4
35352 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35353 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35354 +4 4 4  3 3 3  0 0 0  0 0 0  5 5 5  37 38 37
35355 +125 124 125  167 166 167  174 174 174  167 166 167  158 157 158  155 154 155
35356 +156 155 156  156 155 156  156 155 156  163 162 163  167 166 167  155 154 155
35357 +137 136 137  153 152 153  156 155 156  165 164 165  163 162 163  156 155 156
35358 +156 155 156  156 155 156  155 154 155  158 157 158  166 165 166  174 174 174
35359 +167 166 167  125 124 125  37 38 37  1 0 0  0 0 0  0 0 0
35360 +0 0 0  24 26 27  60 74 84  158 157 158  174 174 174  174 174 174
35361 +166 165 166  158 157 158  125 124 125  41 54 63  13 16 17  6 6 6
35362 +6 6 6  37 38 37  80 127 157  136 185 209  101 161 196  101 161 196
35363 +90 154 193  28 67 93  6 10 14  13 20 25  13 20 25  6 10 14
35364 +1 1 2  4 3 3  4 4 4  4 4 4  4 4 4  4 4 4
35365 +4 4 4  4 4 4
35366 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35367 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35368 +1 1 1  1 0 0  4 3 3  37 38 37  60 74 84  153 152 153
35369 +167 166 167  167 166 167  158 157 158  154 153 154  155 154 155  156 155 156
35370 +157 156 157  158 157 158  167 166 167  167 166 167  131 129 131  43 57 68
35371 +26 28 28  37 38 37  60 73 81  131 129 131  165 164 165  166 165 166
35372 +158 157 158  155 154 155  156 155 156  156 155 156  156 155 156  158 157 158
35373 +165 164 165  174 174 174  163 162 163  60 74 84  16 19 21  13 16 17
35374 +60 73 81  131 129 131  174 174 174  174 174 174  167 166 167  165 164 165
35375 +137 136 137  60 73 81  24 26 27  4 0 0  4 0 0  16 19 21
35376 +52 104 138  101 161 196  136 185 209  136 185 209  90 154 193  27 99 146
35377 +13 20 25  4 5 7  2 5 5  4 5 7  1 1 2  0 0 0
35378 +4 4 4  4 4 4  3 3 3  2 2 2  2 2 2  4 4 4
35379 +4 4 4  4 4 4
35380 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35381 +4 4 4  4 4 4  4 4 4  4 4 4  3 3 3  0 0 0
35382 +0 0 0  13 16 17  60 73 81  137 136 137  174 174 174  166 165 166
35383 +158 157 158  156 155 156  157 156 157  156 155 156  155 154 155  158 157 158
35384 +167 166 167  174 174 174  153 152 153  60 73 81  16 19 21  4 0 0
35385 +4 0 0  4 0 0  6 6 6  26 28 28  60 74 84  158 157 158
35386 +174 174 174  166 165 166  157 156 157  155 154 155  156 155 156  156 155 156
35387 +155 154 155  158 157 158  167 166 167  167 166 167  131 129 131  125 124 125
35388 +137 136 137  167 166 167  167 166 167  174 174 174  158 157 158  125 124 125
35389 +16 19 21  4 0 0  4 0 0  10 13 16  49 76 92  107 159 188
35390 +136 185 209  136 185 209  90 154 193  26 108 161  22 40 52  6 10 14
35391 +2 3 3  1 1 2  1 1 2  4 4 5  4 4 5  4 4 5
35392 +4 4 5  2 2 1  0 0 0  0 0 0  0 0 0  2 2 2
35393 +4 4 4  4 4 4
35394 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35395 +4 4 4  5 5 5  3 3 3  0 0 0  1 0 0  4 0 0
35396 +37 51 59  131 129 131  167 166 167  167 166 167  163 162 163  157 156 157
35397 +157 156 157  155 154 155  153 152 153  157 156 157  167 166 167  174 174 174
35398 +153 152 153  125 124 125  37 38 37  4 0 0  4 0 0  4 0 0
35399 +4 3 3  4 3 3  4 0 0  6 6 6  4 0 0  37 38 37
35400 +125 124 125  174 174 174  174 174 174  165 164 165  156 155 156  154 153 154
35401 +156 155 156  156 155 156  155 154 155  163 162 163  158 157 158  163 162 163
35402 +174 174 174  174 174 174  174 174 174  125 124 125  37 38 37  0 0 0
35403 +4 0 0  6 9 11  41 54 63  90 154 193  136 185 209  146 190 211
35404 +136 185 209  37 112 160  22 40 52  6 10 14  3 6 7  1 1 2
35405 +1 1 2  3 3 3  1 1 2  3 3 3  4 4 4  4 4 4
35406 +2 2 2  2 0 0  16 19 21  37 38 37  24 26 27  0 0 0
35407 +0 0 0  4 4 4
35408 +4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  5 5 5
35409 +4 4 4  0 0 0  0 0 0  0 0 0  26 28 28  120 125 127
35410 +158 157 158  174 174 174  165 164 165  157 156 157  155 154 155  156 155 156
35411 +153 152 153  153 152 153  167 166 167  174 174 174  174 174 174  125 124 125
35412 +37 38 37  4 0 0  0 0 0  4 0 0  4 3 3  4 4 4
35413 +4 4 4  4 4 4  5 5 5  4 0 0  4 0 0  4 0 0
35414 +4 3 3  43 57 68  137 136 137  174 174 174  174 174 174  165 164 165
35415 +154 153 154  153 152 153  153 152 153  153 152 153  163 162 163  174 174 174
35416 +174 174 174  153 152 153  60 73 81  6 6 6  4 0 0  4 3 3
35417 +32 43 50  80 127 157  136 185 209  146 190 211  146 190 211  90 154 193
35418 +28 67 93  28 67 93  40 71 93  3 6 7  1 1 2  2 5 5
35419 +50 82 103  79 117 143  26 37 45  0 0 0  3 3 3  1 1 1
35420 +0 0 0  41 54 63  137 136 137  174 174 174  153 152 153  60 73 81
35421 +2 0 0  0 0 0
35422 +4 4 4  4 4 4  4 4 4  4 4 4  6 6 6  2 2 2
35423 +0 0 0  2 0 0  24 26 27  60 74 84  153 152 153  174 174 174
35424 +174 174 174  157 156 157  154 153 154  156 155 156  154 153 154  153 152 153
35425 +165 164 165  174 174 174  177 184 187  137 136 137  43 57 68  6 6 6
35426 +4 0 0  2 0 0  3 3 3  5 5 5  5 5 5  4 4 4
35427 +4 4 4  4 4 4  4 4 4  5 5 5  6 6 6  4 3 3
35428 +4 0 0  4 0 0  24 26 27  60 73 81  153 152 153  174 174 174
35429 +174 174 174  158 157 158  158 157 158  174 174 174  174 174 174  158 157 158
35430 +60 74 84  24 26 27  4 0 0  4 0 0  17 23 27  59 113 148
35431 +136 185 209  191 222 234  146 190 211  136 185 209  31 91 132  7 11 13
35432 +22 40 52  101 161 196  90 154 193  6 9 11  3 4 4  43 95 132
35433 +136 185 209  172 205 220  55 98 126  0 0 0  0 0 0  2 0 0
35434 +26 28 28  153 152 153  177 184 187  167 166 167  177 184 187  165 164 165
35435 +37 38 37  0 0 0
35436 +4 4 4  4 4 4  5 5 5  5 5 5  1 1 1  0 0 0
35437 +13 16 17  60 73 81  137 136 137  174 174 174  174 174 174  165 164 165
35438 +153 152 153  153 152 153  155 154 155  154 153 154  158 157 158  174 174 174
35439 +177 184 187  163 162 163  60 73 81  16 19 21  4 0 0  4 0 0
35440 +4 3 3  4 4 4  5 5 5  5 5 5  4 4 4  5 5 5
35441 +5 5 5  5 5 5  5 5 5  4 4 4  4 4 4  5 5 5
35442 +6 6 6  4 0 0  4 0 0  4 0 0  24 26 27  60 74 84
35443 +166 165 166  174 174 174  177 184 187  165 164 165  125 124 125  24 26 27
35444 +4 0 0  4 0 0  5 5 5  50 82 103  136 185 209  172 205 220
35445 +146 190 211  136 185 209  26 108 161  22 40 52  7 12 15  44 81 103
35446 +71 116 144  28 67 93  37 51 59  41 65 82  100 139 164  101 161 196
35447 +90 154 193  90 154 193  28 67 93  0 0 0  0 0 0  26 28 28
35448 +125 124 125  167 166 167  163 162 163  153 152 153  163 162 163  174 174 174
35449 +85 115 134  4 0 0
35450 +4 4 4  5 5 5  4 4 4  1 0 0  4 0 0  34 47 55
35451 +125 124 125  174 174 174  174 174 174  167 166 167  157 156 157  153 152 153
35452 +155 154 155  155 154 155  158 157 158  166 165 166  167 166 167  154 153 154
35453 +125 124 125  26 28 28  4 0 0  4 0 0  4 0 0  5 5 5
35454 +5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  1 1 1
35455 +0 0 0  0 0 0  1 1 1  4 4 4  4 4 4  4 4 4
35456 +5 5 5  5 5 5  4 3 3  4 0 0  4 0 0  6 6 6
35457 +37 38 37  131 129 131  137 136 137  37 38 37  0 0 0  4 0 0
35458 +4 5 5  43 61 72  90 154 193  172 205 220  146 190 211  136 185 209
35459 +90 154 193  28 67 93  13 20 25  43 61 72  71 116 144  44 81 103
35460 +2 5 5  7 11 13  59 113 148  101 161 196  90 154 193  28 67 93
35461 +13 20 25  6 10 14  0 0 0  13 16 17  60 73 81  137 136 137
35462 +166 165 166  158 157 158  156 155 156  154 153 154  167 166 167  174 174 174
35463 +60 73 81  4 0 0
35464 +4 4 4  4 4 4  0 0 0  3 3 3  60 74 84  174 174 174
35465 +174 174 174  167 166 167  163 162 163  155 154 155  157 156 157  155 154 155
35466 +156 155 156  163 162 163  167 166 167  158 157 158  125 124 125  37 38 37
35467 +4 3 3  4 0 0  4 0 0  6 6 6  6 6 6  5 5 5
35468 +4 4 4  4 4 4  4 4 4  1 1 1  0 0 0  2 3 3
35469 +10 13 16  7 11 13  1 0 0  0 0 0  2 2 1  4 4 4
35470 +4 4 4  4 4 4  4 4 4  5 5 5  4 3 3  4 0 0
35471 +4 0 0  7 11 13  13 16 17  4 0 0  3 3 3  34 47 55
35472 +80 127 157  146 190 211  172 205 220  136 185 209  136 185 209  136 185 209
35473 +28 67 93  22 40 52  55 98 126  55 98 126  21 29 34  7 11 13
35474 +50 82 103  101 161 196  101 161 196  35 83 115  13 20 25  2 2 1
35475 +1 1 2  1 1 2  37 51 59  131 129 131  174 174 174  174 174 174
35476 +167 166 167  163 162 163  163 162 163  167 166 167  174 174 174  125 124 125
35477 +16 19 21  4 0 0
35478 +4 4 4  4 0 0  4 0 0  60 74 84  174 174 174  174 174 174
35479 +158 157 158  155 154 155  155 154 155  156 155 156  155 154 155  158 157 158
35480 +167 166 167  165 164 165  131 129 131  60 73 81  13 16 17  4 0 0
35481 +4 0 0  4 3 3  6 6 6  4 3 3  5 5 5  4 4 4
35482 +4 4 4  3 2 2  0 0 0  0 0 0  7 11 13  45 69 86
35483 +80 127 157  71 116 144  43 61 72  7 11 13  0 0 0  1 1 1
35484 +4 3 3  4 4 4  4 4 4  4 4 4  6 6 6  5 5 5
35485 +3 2 2  4 0 0  1 0 0  21 29 34  59 113 148  136 185 209
35486 +146 190 211  136 185 209  136 185 209  136 185 209  136 185 209  136 185 209
35487 +68 124 159  44 81 103  22 40 52  13 16 17  43 61 72  90 154 193
35488 +136 185 209  59 113 148  21 29 34  3 4 3  1 1 1  0 0 0
35489 +24 26 27  125 124 125  163 162 163  174 174 174  166 165 166  165 164 165
35490 +163 162 163  125 124 125  125 124 125  125 124 125  125 124 125  26 28 28
35491 +4 0 0  4 3 3
35492 +3 3 3  0 0 0  24 26 27  153 152 153  177 184 187  158 157 158
35493 +156 155 156  156 155 156  155 154 155  155 154 155  165 164 165  174 174 174
35494 +155 154 155  60 74 84  26 28 28  4 0 0  4 0 0  3 1 0
35495 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 3 3
35496 +2 0 0  0 0 0  0 0 0  32 43 50  72 125 159  101 161 196
35497 +136 185 209  101 161 196  101 161 196  79 117 143  32 43 50  0 0 0
35498 +0 0 0  2 2 2  4 4 4  4 4 4  3 3 3  1 0 0
35499 +0 0 0  4 5 5  49 76 92  101 161 196  146 190 211  146 190 211
35500 +136 185 209  136 185 209  136 185 209  136 185 209  136 185 209  90 154 193
35501 +28 67 93  13 16 17  37 51 59  80 127 157  136 185 209  90 154 193
35502 +22 40 52  6 9 11  3 4 3  2 2 1  16 19 21  60 73 81
35503 +137 136 137  163 162 163  158 157 158  166 165 166  167 166 167  153 152 153
35504 +60 74 84  37 38 37  6 6 6  13 16 17  4 0 0  1 0 0
35505 +3 2 2  4 4 4
35506 +3 2 2  4 0 0  37 38 37  137 136 137  167 166 167  158 157 158
35507 +157 156 157  154 153 154  157 156 157  167 166 167  174 174 174  125 124 125
35508 +37 38 37  4 0 0  4 0 0  4 0 0  4 3 3  4 4 4
35509 +4 4 4  4 4 4  5 5 5  5 5 5  1 1 1  0 0 0
35510 +0 0 0  16 21 25  55 98 126  90 154 193  136 185 209  101 161 196
35511 +101 161 196  101 161 196  136 185 209  136 185 209  101 161 196  55 98 126
35512 +14 17 19  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
35513 +22 40 52  90 154 193  146 190 211  146 190 211  136 185 209  136 185 209
35514 +136 185 209  136 185 209  136 185 209  101 161 196  35 83 115  7 11 13
35515 +17 23 27  59 113 148  136 185 209  101 161 196  34 86 122  7 12 15
35516 +2 5 5  3 4 3  6 6 6  60 73 81  131 129 131  163 162 163
35517 +166 165 166  174 174 174  174 174 174  163 162 163  125 124 125  41 54 63
35518 +13 16 17  4 0 0  4 0 0  4 0 0  1 0 0  2 2 2
35519 +4 4 4  4 4 4
35520 +1 1 1  2 1 0  43 57 68  137 136 137  153 152 153  153 152 153
35521 +163 162 163  156 155 156  165 164 165  167 166 167  60 74 84  6 6 6
35522 +4 0 0  4 0 0  5 5 5  4 4 4  4 4 4  4 4 4
35523 +4 5 5  6 6 6  4 3 3  0 0 0  0 0 0  11 15 18
35524 +40 71 93  100 139 164  101 161 196  101 161 196  101 161 196  101 161 196
35525 +101 161 196  101 161 196  101 161 196  101 161 196  136 185 209  136 185 209
35526 +101 161 196  45 69 86  6 6 6  0 0 0  17 23 27  55 98 126
35527 +136 185 209  146 190 211  136 185 209  136 185 209  136 185 209  136 185 209
35528 +136 185 209  136 185 209  90 154 193  22 40 52  7 11 13  50 82 103
35529 +136 185 209  136 185 209  53 118 160  22 40 52  7 11 13  2 5 5
35530 +3 4 3  37 38 37  125 124 125  157 156 157  166 165 166  167 166 167
35531 +174 174 174  174 174 174  137 136 137  60 73 81  4 0 0  4 0 0
35532 +4 0 0  4 0 0  5 5 5  3 3 3  3 3 3  4 4 4
35533 +4 4 4  4 4 4
35534 +4 0 0  4 0 0  41 54 63  137 136 137  125 124 125  131 129 131
35535 +155 154 155  167 166 167  174 174 174  60 74 84  6 6 6  4 0 0
35536 +4 3 3  6 6 6  4 4 4  4 4 4  4 4 4  5 5 5
35537 +4 4 4  1 1 1  0 0 0  3 6 7  41 65 82  72 125 159
35538 +101 161 196  101 161 196  101 161 196  90 154 193  90 154 193  101 161 196
35539 +101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  136 185 209
35540 +136 185 209  136 185 209  80 127 157  55 98 126  101 161 196  146 190 211
35541 +136 185 209  136 185 209  136 185 209  101 161 196  136 185 209  101 161 196
35542 +136 185 209  101 161 196  35 83 115  22 30 35  101 161 196  172 205 220
35543 +90 154 193  28 67 93  7 11 13  2 5 5  3 4 3  13 16 17
35544 +85 115 134  167 166 167  174 174 174  174 174 174  174 174 174  174 174 174
35545 +167 166 167  60 74 84  13 16 17  4 0 0  4 0 0  4 3 3
35546 +6 6 6  5 5 5  4 4 4  5 5 5  4 4 4  5 5 5
35547 +5 5 5  5 5 5
35548 +1 1 1  4 0 0  41 54 63  137 136 137  137 136 137  125 124 125
35549 +131 129 131  167 166 167  157 156 157  37 38 37  6 6 6  4 0 0
35550 +6 6 6  5 5 5  4 4 4  4 4 4  4 5 5  2 2 1
35551 +0 0 0  0 0 0  26 37 45  58 111 146  101 161 196  101 161 196
35552 +101 161 196  90 154 193  90 154 193  90 154 193  101 161 196  101 161 196
35553 +101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
35554 +101 161 196  136 185 209  136 185 209  136 185 209  146 190 211  136 185 209
35555 +136 185 209  101 161 196  136 185 209  136 185 209  101 161 196  136 185 209
35556 +101 161 196  136 185 209  136 185 209  136 185 209  136 185 209  16 89 141
35557 +7 11 13  2 5 5  2 5 5  13 16 17  60 73 81  154 154 154
35558 +174 174 174  174 174 174  174 174 174  174 174 174  163 162 163  125 124 125
35559 +24 26 27  4 0 0  4 0 0  4 0 0  5 5 5  5 5 5
35560 +4 4 4  4 4 4  4 4 4  5 5 5  5 5 5  5 5 5
35561 +5 5 5  4 4 4
35562 +4 0 0  6 6 6  37 38 37  137 136 137  137 136 137  131 129 131
35563 +131 129 131  153 152 153  131 129 131  26 28 28  4 0 0  4 3 3
35564 +6 6 6  4 4 4  4 4 4  4 4 4  0 0 0  0 0 0
35565 +13 20 25  51 88 114  90 154 193  101 161 196  101 161 196  90 154 193
35566 +90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  101 161 196
35567 +101 161 196  101 161 196  101 161 196  101 161 196  136 185 209  101 161 196
35568 +101 161 196  136 185 209  101 161 196  136 185 209  136 185 209  101 161 196
35569 +136 185 209  101 161 196  136 185 209  101 161 196  101 161 196  101 161 196
35570 +136 185 209  136 185 209  136 185 209  37 112 160  21 29 34  5 7 8
35571 +2 5 5  13 16 17  43 57 68  131 129 131  174 174 174  174 174 174
35572 +174 174 174  167 166 167  157 156 157  125 124 125  37 38 37  4 0 0
35573 +4 0 0  4 0 0  5 5 5  5 5 5  4 4 4  4 4 4
35574 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35575 +4 4 4  4 4 4
35576 +1 1 1  4 0 0  41 54 63  153 152 153  137 136 137  137 136 137
35577 +137 136 137  153 152 153  125 124 125  24 26 27  4 0 0  3 2 2
35578 +4 4 4  4 4 4  4 3 3  4 0 0  3 6 7  43 61 72
35579 +64 123 161  101 161 196  90 154 193  90 154 193  90 154 193  90 154 193
35580 +90 154 193  90 154 193  90 154 193  90 154 193  101 161 196  90 154 193
35581 +101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
35582 +101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
35583 +136 185 209  101 161 196  101 161 196  136 185 209  136 185 209  101 161 196
35584 +101 161 196  90 154 193  28 67 93  13 16 17  7 11 13  3 6 7
35585 +37 51 59  125 124 125  163 162 163  174 174 174  167 166 167  166 165 166
35586 +167 166 167  131 129 131  60 73 81  4 0 0  4 0 0  4 0 0
35587 +3 3 3  5 5 5  6 6 6  4 4 4  4 4 4  4 4 4
35588 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35589 +4 4 4  4 4 4
35590 +4 0 0  4 0 0  41 54 63  137 136 137  153 152 153  137 136 137
35591 +153 152 153  157 156 157  125 124 125  24 26 27  0 0 0  2 2 2
35592 +4 4 4  4 4 4  2 0 0  0 0 0  28 67 93  90 154 193
35593 +90 154 193  90 154 193  90 154 193  90 154 193  64 123 161  90 154 193
35594 +90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  101 161 196
35595 +90 154 193  101 161 196  101 161 196  101 161 196  90 154 193  136 185 209
35596 +101 161 196  101 161 196  136 185 209  101 161 196  136 185 209  101 161 196
35597 +101 161 196  101 161 196  136 185 209  101 161 196  101 161 196  90 154 193
35598 +35 83 115  13 16 17  3 6 7  2 5 5  13 16 17  60 74 84
35599 +154 154 154  166 165 166  165 164 165  158 157 158  163 162 163  157 156 157
35600 +60 74 84  13 16 17  4 0 0  4 0 0  3 2 2  4 4 4
35601 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35602 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35603 +4 4 4  4 4 4
35604 +1 1 1  4 0 0  41 54 63  157 156 157  155 154 155  137 136 137
35605 +153 152 153  158 157 158  137 136 137  26 28 28  2 0 0  2 2 2
35606 +4 4 4  4 4 4  1 0 0  6 10 14  34 86 122  90 154 193
35607 +64 123 161  90 154 193  64 123 161  90 154 193  90 154 193  90 154 193
35608 +64 123 161  90 154 193  90 154 193  90 154 193  90 154 193  90 154 193
35609 +101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
35610 +101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
35611 +136 185 209  101 161 196  136 185 209  90 154 193  26 108 161  22 40 52
35612 +13 16 17  5 7 8  2 5 5  2 5 5  37 38 37  165 164 165
35613 +174 174 174  163 162 163  154 154 154  165 164 165  167 166 167  60 73 81
35614 +6 6 6  4 0 0  4 0 0  4 4 4  4 4 4  4 4 4
35615 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35616 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35617 +4 4 4  4 4 4
35618 +4 0 0  6 6 6  41 54 63  156 155 156  158 157 158  153 152 153
35619 +156 155 156  165 164 165  137 136 137  26 28 28  0 0 0  2 2 2
35620 +4 4 5  4 4 4  2 0 0  7 12 15  31 96 139  64 123 161
35621 +90 154 193  64 123 161  90 154 193  90 154 193  64 123 161  90 154 193
35622 +90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  90 154 193
35623 +90 154 193  90 154 193  90 154 193  101 161 196  101 161 196  101 161 196
35624 +101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  136 185 209
35625 +101 161 196  136 185 209  26 108 161  22 40 52  7 11 13  5 7 8
35626 +2 5 5  2 5 5  2 5 5  2 2 1  37 38 37  158 157 158
35627 +174 174 174  154 154 154  156 155 156  167 166 167  165 164 165  37 38 37
35628 +4 0 0  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35629 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35630 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35631 +4 4 4  4 4 4
35632 +3 1 0  4 0 0  60 73 81  157 156 157  163 162 163  153 152 153
35633 +158 157 158  167 166 167  137 136 137  26 28 28  2 0 0  2 2 2
35634 +4 5 5  4 4 4  4 0 0  7 12 15  24 86 132  26 108 161
35635 +37 112 160  64 123 161  90 154 193  64 123 161  90 154 193  90 154 193
35636 +90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  90 154 193
35637 +90 154 193  101 161 196  90 154 193  101 161 196  101 161 196  101 161 196
35638 +101 161 196  101 161 196  101 161 196  136 185 209  101 161 196  136 185 209
35639 +90 154 193  35 83 115  13 16 17  13 16 17  7 11 13  3 6 7
35640 +5 7 8  6 6 6  3 4 3  2 2 1  30 32 34  154 154 154
35641 +167 166 167  154 154 154  154 154 154  174 174 174  165 164 165  37 38 37
35642 +6 6 6  4 0 0  6 6 6  4 4 4  4 4 4  4 4 4
35643 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35644 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35645 +4 4 4  4 4 4
35646 +4 0 0  4 0 0  41 54 63  163 162 163  166 165 166  154 154 154
35647 +163 162 163  174 174 174  137 136 137  26 28 28  0 0 0  2 2 2
35648 +4 5 5  4 4 5  1 1 2  6 10 14  28 67 93  18 97 151
35649 +18 97 151  18 97 151  26 108 161  37 112 160  37 112 160  90 154 193
35650 +64 123 161  90 154 193  90 154 193  90 154 193  90 154 193  101 161 196
35651 +90 154 193  101 161 196  101 161 196  90 154 193  101 161 196  101 161 196
35652 +101 161 196  101 161 196  101 161 196  136 185 209  90 154 193  16 89 141
35653 +13 20 25  7 11 13  5 7 8  5 7 8  2 5 5  4 5 5
35654 +3 4 3  4 5 5  3 4 3  0 0 0  37 38 37  158 157 158
35655 +174 174 174  158 157 158  158 157 158  167 166 167  174 174 174  41 54 63
35656 +4 0 0  3 2 2  5 5 5  4 4 4  4 4 4  4 4 4
35657 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35658 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35659 +4 4 4  4 4 4
35660 +1 1 1  4 0 0  60 73 81  165 164 165  174 174 174  158 157 158
35661 +167 166 167  174 174 174  153 152 153  26 28 28  2 0 0  2 2 2
35662 +4 5 5  4 4 4  4 0 0  7 12 15  10 87 144  10 87 144
35663 +18 97 151  18 97 151  18 97 151  26 108 161  26 108 161  26 108 161
35664 +26 108 161  37 112 160  53 118 160  90 154 193  90 154 193  90 154 193
35665 +90 154 193  90 154 193  101 161 196  101 161 196  101 161 196  101 161 196
35666 +101 161 196  136 185 209  90 154 193  26 108 161  22 40 52  13 16 17
35667 +7 11 13  3 6 7  5 7 8  5 7 8  2 5 5  4 5 5
35668 +4 5 5  6 6 6  3 4 3  0 0 0  30 32 34  158 157 158
35669 +174 174 174  156 155 156  155 154 155  165 164 165  154 153 154  37 38 37
35670 +4 0 0  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35671 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35672 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35673 +4 4 4  4 4 4
35674 +4 0 0  4 0 0  60 73 81  167 166 167  174 174 174  163 162 163
35675 +174 174 174  174 174 174  153 152 153  26 28 28  0 0 0  3 3 3
35676 +5 5 5  4 4 4  1 1 2  7 12 15  28 67 93  18 97 151
35677 +18 97 151  18 97 151  18 97 151  18 97 151  18 97 151  26 108 161
35678 +26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
35679 +90 154 193  26 108 161  90 154 193  90 154 193  90 154 193  101 161 196
35680 +101 161 196  26 108 161  22 40 52  13 16 17  7 11 13  2 5 5
35681 +2 5 5  6 6 6  2 5 5  4 5 5  4 5 5  4 5 5
35682 +3 4 3  5 5 5  3 4 3  2 0 0  30 32 34  137 136 137
35683 +153 152 153  137 136 137  131 129 131  137 136 137  131 129 131  37 38 37
35684 +4 0 0  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35685 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35686 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35687 +4 4 4  4 4 4
35688 +1 1 1  4 0 0  60 73 81  167 166 167  174 174 174  166 165 166
35689 +174 174 174  177 184 187  153 152 153  30 32 34  1 0 0  3 3 3
35690 +5 5 5  4 3 3  4 0 0  7 12 15  10 87 144  10 87 144
35691 +18 97 151  18 97 151  18 97 151  26 108 161  26 108 161  26 108 161
35692 +26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
35693 +26 108 161  26 108 161  26 108 161  90 154 193  90 154 193  26 108 161
35694 +35 83 115  13 16 17  7 11 13  5 7 8  3 6 7  5 7 8
35695 +2 5 5  6 6 6  4 5 5  4 5 5  3 4 3  4 5 5
35696 +3 4 3  6 6 6  3 4 3  0 0 0  26 28 28  125 124 125
35697 +131 129 131  125 124 125  125 124 125  131 129 131  131 129 131  37 38 37
35698 +4 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35699 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35700 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35701 +4 4 4  4 4 4
35702 +3 1 0  4 0 0  60 73 81  174 174 174  177 184 187  167 166 167
35703 +174 174 174  177 184 187  153 152 153  30 32 34  0 0 0  3 3 3
35704 +5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  18 97 151
35705 +18 97 151  18 97 151  18 97 151  18 97 151  18 97 151  26 108 161
35706 +26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
35707 +26 108 161  90 154 193  26 108 161  26 108 161  24 86 132  13 20 25
35708 +7 11 13  13 20 25  22 40 52  5 7 8  3 4 3  3 4 3
35709 +4 5 5  3 4 3  4 5 5  3 4 3  4 5 5  3 4 3
35710 +4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  125 124 125
35711 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35712 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35713 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35714 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35715 +4 4 4  4 4 4
35716 +1 1 1  4 0 0  60 73 81  174 174 174  177 184 187  174 174 174
35717 +174 174 174  190 197 201  157 156 157  30 32 34  1 0 0  3 3 3
35718 +5 5 5  4 3 3  4 0 0  7 12 15  10 87 144  10 87 144
35719 +18 97 151  19 95 150  19 95 150  18 97 151  18 97 151  26 108 161
35720 +18 97 151  26 108 161  26 108 161  26 108 161  26 108 161  90 154 193
35721 +26 108 161  26 108 161  26 108 161  22 40 52  2 5 5  3 4 3
35722 +28 67 93  37 112 160  34 86 122  2 5 5  3 4 3  3 4 3
35723 +3 4 3  3 4 3  3 4 3  2 2 1  3 4 3  4 4 4
35724 +4 5 5  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
35725 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35726 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35727 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35728 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35729 +4 4 4  4 4 4
35730 +4 0 0  4 0 0  60 73 81  174 174 174  177 184 187  174 174 174
35731 +174 174 174  190 197 201  158 157 158  30 32 34  0 0 0  2 2 2
35732 +5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  18 97 151
35733 +10 87 144  19 95 150  19 95 150  18 97 151  18 97 151  18 97 151
35734 +26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
35735 +18 97 151  22 40 52  2 5 5  2 2 1  22 40 52  26 108 161
35736 +90 154 193  37 112 160  22 40 52  3 4 3  13 20 25  22 30 35
35737 +3 6 7  1 1 1  2 2 2  6 9 11  5 5 5  4 3 3
35738 +4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  131 129 131
35739 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35740 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35741 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35742 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35743 +4 4 4  4 4 4
35744 +1 1 1  4 0 0  60 73 81  177 184 187  193 200 203  174 174 174
35745 +177 184 187  193 200 203  163 162 163  30 32 34  4 0 0  2 2 2
35746 +5 5 5  4 3 3  4 0 0  6 10 14  24 86 132  10 87 144
35747 +10 87 144  10 87 144  19 95 150  19 95 150  19 95 150  18 97 151
35748 +26 108 161  26 108 161  26 108 161  90 154 193  26 108 161  28 67 93
35749 +6 10 14  2 5 5  13 20 25  24 86 132  37 112 160  90 154 193
35750 +10 87 144  7 12 15  2 5 5  28 67 93  37 112 160  28 67 93
35751 +2 2 1  7 12 15  35 83 115  28 67 93  3 6 7  1 0 0
35752 +4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
35753 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35754 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35755 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35756 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35757 +4 4 4  4 4 4
35758 +4 0 0  4 0 0  60 73 81  174 174 174  190 197 201  174 174 174
35759 +177 184 187  193 200 203  163 162 163  30 32 34  0 0 0  2 2 2
35760 +5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  10 87 144
35761 +10 87 144  16 89 141  19 95 150  10 87 144  26 108 161  26 108 161
35762 +26 108 161  26 108 161  26 108 161  28 67 93  6 10 14  1 1 2
35763 +7 12 15  28 67 93  26 108 161  16 89 141  24 86 132  21 29 34
35764 +3 4 3  21 29 34  37 112 160  37 112 160  27 99 146  21 29 34
35765 +21 29 34  26 108 161  90 154 193  35 83 115  1 1 2  2 0 0
35766 +4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  125 124 125
35767 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35768 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35769 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35770 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35771 +4 4 4  4 4 4
35772 +3 1 0  4 0 0  60 73 81  193 200 203  193 200 203  174 174 174
35773 +190 197 201  193 200 203  165 164 165  37 38 37  4 0 0  2 2 2
35774 +5 5 5  4 3 3  4 0 0  6 10 14  24 86 132  10 87 144
35775 +10 87 144  10 87 144  16 89 141  18 97 151  18 97 151  10 87 144
35776 +24 86 132  24 86 132  13 20 25  4 5 7  4 5 7  22 40 52
35777 +18 97 151  37 112 160  26 108 161  7 12 15  1 1 1  0 0 0
35778 +28 67 93  37 112 160  26 108 161  28 67 93  22 40 52  28 67 93
35779 +26 108 161  90 154 193  26 108 161  10 87 144  0 0 0  2 0 0
35780 +4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
35781 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35782 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35783 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35784 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35785 +4 4 4  4 4 4
35786 +4 0 0  6 6 6  60 73 81  174 174 174  193 200 203  174 174 174
35787 +190 197 201  193 200 203  165 164 165  30 32 34  0 0 0  2 2 2
35788 +5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  10 87 144
35789 +10 87 144  10 87 144  10 87 144  18 97 151  28 67 93  6 10 14
35790 +0 0 0  1 1 2  4 5 7  13 20 25  16 89 141  26 108 161
35791 +26 108 161  26 108 161  24 86 132  6 9 11  2 3 3  22 40 52
35792 +37 112 160  16 89 141  22 40 52  28 67 93  26 108 161  26 108 161
35793 +90 154 193  26 108 161  26 108 161  28 67 93  1 1 1  4 0 0
35794 +4 4 4  5 5 5  3 3 3  4 0 0  26 28 28  124 126 130
35795 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35796 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35797 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35798 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35799 +4 4 4  4 4 4
35800 +4 0 0  4 0 0  60 73 81  193 200 203  193 200 203  174 174 174
35801 +193 200 203  193 200 203  167 166 167  37 38 37  4 0 0  2 2 2
35802 +5 5 5  4 4 4  4 0 0  6 10 14  28 67 93  10 87 144
35803 +10 87 144  10 87 144  18 97 151  10 87 144  13 20 25  4 5 7
35804 +1 1 2  1 1 1  22 40 52  26 108 161  26 108 161  26 108 161
35805 +26 108 161  26 108 161  26 108 161  24 86 132  22 40 52  22 40 52
35806 +22 40 52  22 40 52  10 87 144  26 108 161  26 108 161  26 108 161
35807 +26 108 161  26 108 161  90 154 193  10 87 144  0 0 0  4 0 0
35808 +4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
35809 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35810 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35811 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35812 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35813 +4 4 4  4 4 4
35814 +4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
35815 +190 197 201  205 212 215  167 166 167  30 32 34  0 0 0  2 2 2
35816 +5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  10 87 144
35817 +10 87 144  10 87 144  10 87 144  10 87 144  22 40 52  1 1 2
35818 +2 0 0  1 1 2  24 86 132  26 108 161  26 108 161  26 108 161
35819 +26 108 161  19 95 150  16 89 141  10 87 144  22 40 52  22 40 52
35820 +10 87 144  26 108 161  37 112 160  26 108 161  26 108 161  26 108 161
35821 +26 108 161  26 108 161  26 108 161  28 67 93  2 0 0  3 1 0
35822 +4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  131 129 131
35823 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35824 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35825 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35826 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35827 +4 4 4  4 4 4
35828 +4 0 0  4 0 0  60 73 81  220 221 221  190 197 201  174 174 174
35829 +193 200 203  193 200 203  174 174 174  37 38 37  4 0 0  2 2 2
35830 +5 5 5  4 4 4  3 2 2  1 1 2  13 20 25  10 87 144
35831 +10 87 144  10 87 144  10 87 144  10 87 144  10 87 144  13 20 25
35832 +13 20 25  22 40 52  10 87 144  18 97 151  18 97 151  26 108 161
35833 +10 87 144  13 20 25  6 10 14  21 29 34  24 86 132  18 97 151
35834 +26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
35835 +26 108 161  90 154 193  18 97 151  13 20 25  0 0 0  4 3 3
35836 +4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
35837 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35838 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35839 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35840 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35841 +4 4 4  4 4 4
35842 +4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
35843 +190 197 201  220 221 221  167 166 167  30 32 34  1 0 0  2 2 2
35844 +5 5 5  4 4 4  4 4 5  2 5 5  4 5 7  13 20 25
35845 +28 67 93  10 87 144  10 87 144  10 87 144  10 87 144  10 87 144
35846 +10 87 144  10 87 144  18 97 151  10 87 144  18 97 151  18 97 151
35847 +28 67 93  2 3 3  0 0 0  28 67 93  26 108 161  26 108 161
35848 +26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
35849 +26 108 161  10 87 144  13 20 25  1 1 2  3 2 2  4 4 4
35850 +4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  131 129 131
35851 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35852 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35853 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35854 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35855 +4 4 4  4 4 4
35856 +4 0 0  4 0 0  60 73 81  220 221 221  190 197 201  174 174 174
35857 +193 200 203  193 200 203  174 174 174  26 28 28  4 0 0  4 3 3
35858 +5 5 5  4 4 4  4 4 4  4 4 5  1 1 2  2 5 5
35859 +4 5 7  22 40 52  10 87 144  10 87 144  18 97 151  10 87 144
35860 +10 87 144  10 87 144  10 87 144  10 87 144  10 87 144  18 97 151
35861 +10 87 144  28 67 93  22 40 52  10 87 144  26 108 161  18 97 151
35862 +18 97 151  18 97 151  26 108 161  26 108 161  26 108 161  26 108 161
35863 +22 40 52  1 1 2  0 0 0  2 3 3  4 4 4  4 4 4
35864 +4 4 4  5 5 5  4 4 4  0 0 0  26 28 28  131 129 131
35865 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35866 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35867 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35868 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35869 +4 4 4  4 4 4
35870 +4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
35871 +190 197 201  220 221 221  190 197 201  41 54 63  4 0 0  2 2 2
35872 +6 6 6  4 4 4  4 4 4  4 4 5  4 4 5  3 3 3
35873 +1 1 2  1 1 2  6 10 14  22 40 52  10 87 144  18 97 151
35874 +18 97 151  10 87 144  10 87 144  10 87 144  18 97 151  10 87 144
35875 +10 87 144  18 97 151  26 108 161  18 97 151  18 97 151  10 87 144
35876 +26 108 161  26 108 161  26 108 161  10 87 144  28 67 93  6 10 14
35877 +1 1 2  1 1 2  4 3 3  4 4 5  4 4 4  4 4 4
35878 +5 5 5  5 5 5  1 1 1  4 0 0  37 51 59  137 136 137
35879 +137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35880 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35881 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35882 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35883 +4 4 4  4 4 4
35884 +4 0 0  4 0 0  60 73 81  220 221 221  193 200 203  174 174 174
35885 +193 200 203  193 200 203  220 221 221  137 136 137  13 16 17  4 0 0
35886 +2 2 2  4 4 4  4 4 4  4 4 4  4 4 4  4 4 5
35887 +4 4 5  4 3 3  1 1 2  4 5 7  13 20 25  28 67 93
35888 +10 87 144  10 87 144  10 87 144  10 87 144  10 87 144  10 87 144
35889 +10 87 144  18 97 151  18 97 151  10 87 144  18 97 151  26 108 161
35890 +26 108 161  18 97 151  28 67 93  6 10 14  0 0 0  0 0 0
35891 +2 3 3  4 5 5  4 4 5  4 4 4  4 4 4  5 5 5
35892 +3 3 3  1 1 1  0 0 0  16 19 21  125 124 125  137 136 137
35893 +131 129 131  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35894 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35895 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35896 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35897 +4 4 4  4 4 4
35898 +4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
35899 +193 200 203  190 197 201  220 221 221  220 221 221  153 152 153  30 32 34
35900 +0 0 0  0 0 0  2 2 2  4 4 4  4 4 4  4 4 4
35901 +4 4 4  4 5 5  4 5 7  1 1 2  1 1 2  4 5 7
35902 +13 20 25  28 67 93  10 87 144  18 97 151  10 87 144  10 87 144
35903 +10 87 144  10 87 144  10 87 144  18 97 151  26 108 161  18 97 151
35904 +28 67 93  7 12 15  0 0 0  0 0 0  2 2 1  4 4 4
35905 +4 5 5  4 5 5  4 4 4  4 4 4  3 3 3  0 0 0
35906 +0 0 0  0 0 0  37 38 37  125 124 125  158 157 158  131 129 131
35907 +125 124 125  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
35908 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35909 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35910 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35911 +4 4 4  4 4 4
35912 +4 3 3  4 0 0  41 54 63  193 200 203  220 221 221  174 174 174
35913 +193 200 203  193 200 203  193 200 203  220 221 221  244 246 246  193 200 203
35914 +120 125 127  5 5 5  1 0 0  0 0 0  1 1 1  4 4 4
35915 +4 4 4  4 4 4  4 5 5  4 5 5  4 4 5  1 1 2
35916 +4 5 7  4 5 7  22 40 52  10 87 144  10 87 144  10 87 144
35917 +10 87 144  10 87 144  18 97 151  10 87 144  10 87 144  13 20 25
35918 +4 5 7  2 3 3  1 1 2  4 4 4  4 5 5  4 4 4
35919 +4 4 4  4 4 4  4 4 4  1 1 1  0 0 0  1 1 2
35920 +24 26 27  60 74 84  153 152 153  163 162 163  137 136 137  125 124 125
35921 +125 124 125  125 124 125  125 124 125  137 136 137  125 124 125  26 28 28
35922 +0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
35923 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35924 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35925 +4 4 4  4 4 4
35926 +4 0 0  6 6 6  26 28 28  156 155 156  220 221 221  220 221 221
35927 +174 174 174  193 200 203  193 200 203  193 200 203  205 212 215  220 221 221
35928 +220 221 221  167 166 167  60 73 81  7 11 13  0 0 0  0 0 0
35929 +3 3 3  4 4 4  4 4 4  4 4 4  4 4 5  4 4 5
35930 +4 4 5  1 1 2  1 1 2  4 5 7  22 40 52  10 87 144
35931 +10 87 144  10 87 144  10 87 144  22 40 52  4 5 7  1 1 2
35932 +1 1 2  4 4 5  4 4 4  4 4 4  4 4 4  4 4 4
35933 +5 5 5  2 2 2  0 0 0  4 0 0  16 19 21  60 73 81
35934 +137 136 137  167 166 167  158 157 158  137 136 137  131 129 131  131 129 131
35935 +125 124 125  125 124 125  131 129 131  155 154 155  60 74 84  5 7 8
35936 +0 0 0  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35937 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35938 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35939 +4 4 4  4 4 4
35940 +5 5 5  4 0 0  4 0 0  60 73 81  193 200 203  220 221 221
35941 +193 200 203  193 200 203  193 200 203  193 200 203  205 212 215  220 221 221
35942 +220 221 221  220 221 221  220 221 221  137 136 137  43 57 68  6 6 6
35943 +4 0 0  1 1 1  4 4 4  4 4 4  4 4 4  4 4 4
35944 +4 4 5  4 4 5  3 2 2  1 1 2  2 5 5  13 20 25
35945 +22 40 52  22 40 52  13 20 25  2 3 3  1 1 2  3 3 3
35946 +4 5 7  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35947 +1 1 1  0 0 0  2 3 3  41 54 63  131 129 131  166 165 166
35948 +166 165 166  155 154 155  153 152 153  137 136 137  137 136 137  125 124 125
35949 +125 124 125  137 136 137  137 136 137  125 124 125  37 38 37  4 3 3
35950 +4 3 3  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
35951 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35952 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35953 +4 4 4  4 4 4
35954 +4 3 3  6 6 6  6 6 6  13 16 17  60 73 81  167 166 167
35955 +220 221 221  220 221 221  220 221 221  193 200 203  193 200 203  193 200 203
35956 +205 212 215  220 221 221  220 221 221  244 246 246  205 212 215  125 124 125
35957 +24 26 27  0 0 0  0 0 0  2 2 2  5 5 5  5 5 5
35958 +4 4 4  4 4 4  4 4 4  4 4 5  1 1 2  4 5 7
35959 +4 5 7  4 5 7  1 1 2  3 2 2  4 4 5  4 4 4
35960 +4 4 4  4 4 4  5 5 5  4 4 4  0 0 0  0 0 0
35961 +2 0 0  26 28 28  125 124 125  174 174 174  174 174 174  166 165 166
35962 +156 155 156  153 152 153  137 136 137  137 136 137  131 129 131  137 136 137
35963 +137 136 137  137 136 137  60 74 84  30 32 34  4 0 0  4 0 0
35964 +5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35965 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35966 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35967 +4 4 4  4 4 4
35968 +5 5 5  6 6 6  4 0 0  4 0 0  6 6 6  26 28 28
35969 +125 124 125  174 174 174  220 221 221  220 221 221  220 221 221  193 200 203
35970 +205 212 215  220 221 221  205 212 215  220 221 221  220 221 221  244 246 246
35971 +193 200 203  60 74 84  13 16 17  4 0 0  0 0 0  3 3 3
35972 +5 5 5  5 5 5  4 4 4  4 4 4  4 4 5  3 3 3
35973 +1 1 2  3 3 3  4 4 5  4 4 5  4 4 4  4 4 4
35974 +5 5 5  5 5 5  2 2 2  0 0 0  0 0 0  13 16 17
35975 +60 74 84  174 174 174  193 200 203  174 174 174  167 166 167  163 162 163
35976 +153 152 153  153 152 153  137 136 137  137 136 137  153 152 153  137 136 137
35977 +125 124 125  41 54 63  24 26 27  4 0 0  4 0 0  5 5 5
35978 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35979 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35980 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35981 +4 4 4  4 4 4
35982 +4 3 3  6 6 6  6 6 6  6 6 6  6 6 6  6 6 6
35983 +6 6 6  37 38 37  131 129 131  220 221 221  220 221 221  220 221 221
35984 +193 200 203  193 200 203  220 221 221  205 212 215  220 221 221  244 246 246
35985 +244 246 246  244 246 246  174 174 174  41 54 63  0 0 0  0 0 0
35986 +0 0 0  4 4 4  5 5 5  5 5 5  4 4 4  4 4 5
35987 +4 4 5  4 4 5  4 4 4  4 4 4  6 6 6  6 6 6
35988 +3 3 3  0 0 0  2 0 0  13 16 17  60 73 81  156 155 156
35989 +220 221 221  193 200 203  174 174 174  165 164 165  163 162 163  154 153 154
35990 +153 152 153  153 152 153  158 157 158  163 162 163  137 136 137  60 73 81
35991 +13 16 17  4 0 0  4 0 0  4 3 3  4 4 4  4 4 4
35992 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35993 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35994 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
35995 +4 4 4  4 4 4
35996 +5 5 5  4 3 3  4 3 3  6 6 6  6 6 6  6 6 6
35997 +6 6 6  6 6 6  6 6 6  37 38 37  167 166 167  244 246 246
35998 +244 246 246  220 221 221  205 212 215  205 212 215  220 221 221  193 200 203
35999 +220 221 221  244 246 246  244 246 246  244 246 246  137 136 137  37 38 37
36000 +3 2 2  0 0 0  1 1 1  5 5 5  5 5 5  4 4 4
36001 +4 4 4  4 4 4  4 4 4  5 5 5  4 4 4  1 1 1
36002 +0 0 0  5 5 5  43 57 68  153 152 153  193 200 203  220 221 221
36003 +177 184 187  174 174 174  167 166 167  166 165 166  158 157 158  157 156 157
36004 +158 157 158  166 165 166  156 155 156  85 115 134  13 16 17  4 0 0
36005 +4 0 0  4 0 0  5 5 5  5 5 5  4 4 4  4 4 4
36006 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36007 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36008 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36009 +4 4 4  4 4 4
36010 +5 5 5  4 3 3  6 6 6  6 6 6  4 0 0  6 6 6
36011 +6 6 6  6 6 6  6 6 6  6 6 6  13 16 17  60 73 81
36012 +177 184 187  220 221 221  220 221 221  220 221 221  205 212 215  220 221 221
36013 +220 221 221  205 212 215  220 221 221  244 246 246  244 246 246  205 212 215
36014 +125 124 125  30 32 34  0 0 0  0 0 0  2 2 2  5 5 5
36015 +4 4 4  4 4 4  4 4 4  1 1 1  0 0 0  1 0 0
36016 +37 38 37  131 129 131  205 212 215  220 221 221  193 200 203  174 174 174
36017 +174 174 174  174 174 174  167 166 167  165 164 165  166 165 166  167 166 167
36018 +158 157 158  125 124 125  37 38 37  4 0 0  4 0 0  4 0 0
36019 +4 3 3  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
36020 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36021 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36022 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36023 +4 4 4  4 4 4
36024 +4 4 4  5 5 5  4 3 3  4 3 3  6 6 6  6 6 6
36025 +4 0 0  6 6 6  6 6 6  6 6 6  6 6 6  6 6 6
36026 +26 28 28  125 124 125  205 212 215  220 221 221  220 221 221  220 221 221
36027 +205 212 215  220 221 221  205 212 215  220 221 221  220 221 221  244 246 246
36028 +244 246 246  190 197 201  60 74 84  16 19 21  4 0 0  0 0 0
36029 +0 0 0  0 0 0  0 0 0  0 0 0  16 19 21  120 125 127
36030 +177 184 187  220 221 221  205 212 215  177 184 187  174 174 174  177 184 187
36031 +174 174 174  174 174 174  167 166 167  174 174 174  166 165 166  137 136 137
36032 +60 73 81  13 16 17  4 0 0  4 0 0  4 3 3  6 6 6
36033 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36034 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36035 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36036 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36037 +4 4 4  4 4 4
36038 +5 5 5  4 3 3  5 5 5  4 3 3  6 6 6  4 0 0
36039 +6 6 6  6 6 6  4 0 0  6 6 6  4 0 0  6 6 6
36040 +6 6 6  6 6 6  37 38 37  137 136 137  193 200 203  220 221 221
36041 +220 221 221  205 212 215  220 221 221  205 212 215  205 212 215  220 221 221
36042 +220 221 221  220 221 221  244 246 246  166 165 166  43 57 68  2 2 2
36043 +0 0 0  4 0 0  16 19 21  60 73 81  157 156 157  202 210 214
36044 +220 221 221  193 200 203  177 184 187  177 184 187  177 184 187  174 174 174
36045 +174 174 174  174 174 174  174 174 174  157 156 157  60 74 84  24 26 27
36046 +4 0 0  4 0 0  4 0 0  6 6 6  4 4 4  4 4 4
36047 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36048 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36049 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36050 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36051 +4 4 4  4 4 4
36052 +4 4 4  4 4 4  5 5 5  4 3 3  5 5 5  6 6 6
36053 +6 6 6  4 0 0  6 6 6  6 6 6  6 6 6  4 0 0
36054 +4 0 0  4 0 0  6 6 6  24 26 27  60 73 81  167 166 167
36055 +220 221 221  220 221 221  220 221 221  205 212 215  205 212 215  205 212 215
36056 +205 212 215  220 221 221  220 221 221  220 221 221  205 212 215  137 136 137
36057 +60 74 84  125 124 125  137 136 137  190 197 201  220 221 221  193 200 203
36058 +177 184 187  177 184 187  177 184 187  174 174 174  174 174 174  177 184 187
36059 +190 197 201  174 174 174  125 124 125  37 38 37  6 6 6  4 0 0
36060 +4 0 0  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36061 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36062 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36063 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36064 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36065 +4 4 4  4 4 4
36066 +4 4 4  4 4 4  5 5 5  5 5 5  4 3 3  6 6 6
36067 +4 0 0  6 6 6  6 6 6  6 6 6  4 0 0  6 6 6
36068 +6 6 6  6 6 6  4 0 0  4 0 0  6 6 6  6 6 6
36069 +125 124 125  193 200 203  244 246 246  220 221 221  205 212 215  205 212 215
36070 +205 212 215  193 200 203  205 212 215  205 212 215  220 221 221  220 221 221
36071 +193 200 203  193 200 203  205 212 215  193 200 203  193 200 203  177 184 187
36072 +190 197 201  190 197 201  174 174 174  190 197 201  193 200 203  190 197 201
36073 +153 152 153  60 73 81  4 0 0  4 0 0  4 0 0  3 2 2
36074 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36075 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36076 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36077 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36078 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36079 +4 4 4  4 4 4
36080 +4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  4 3 3
36081 +6 6 6  4 3 3  4 3 3  4 3 3  6 6 6  6 6 6
36082 +4 0 0  6 6 6  6 6 6  6 6 6  4 0 0  4 0 0
36083 +4 0 0  26 28 28  131 129 131  220 221 221  244 246 246  220 221 221
36084 +205 212 215  193 200 203  205 212 215  193 200 203  193 200 203  205 212 215
36085 +220 221 221  193 200 203  193 200 203  193 200 203  190 197 201  174 174 174
36086 +174 174 174  190 197 201  193 200 203  193 200 203  167 166 167  125 124 125
36087 +6 6 6  4 0 0  4 0 0  4 3 3  4 4 4  4 4 4
36088 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36089 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36090 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36091 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36092 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36093 +4 4 4  4 4 4
36094 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
36095 +5 5 5  4 3 3  5 5 5  6 6 6  4 3 3  5 5 5
36096 +6 6 6  6 6 6  4 0 0  6 6 6  6 6 6  6 6 6
36097 +4 0 0  4 0 0  6 6 6  41 54 63  158 157 158  220 221 221
36098 +220 221 221  220 221 221  193 200 203  193 200 203  193 200 203  190 197 201
36099 +190 197 201  190 197 201  190 197 201  190 197 201  174 174 174  193 200 203
36100 +193 200 203  220 221 221  174 174 174  125 124 125  37 38 37  4 0 0
36101 +4 0 0  4 3 3  6 6 6  4 4 4  4 4 4  4 4 4
36102 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36103 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36104 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36105 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36106 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36107 +4 4 4  4 4 4
36108 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36109 +4 4 4  5 5 5  4 3 3  4 3 3  4 3 3  5 5 5
36110 +4 3 3  6 6 6  5 5 5  4 3 3  6 6 6  6 6 6
36111 +6 6 6  6 6 6  4 0 0  4 0 0  13 16 17  60 73 81
36112 +174 174 174  220 221 221  220 221 221  205 212 215  190 197 201  174 174 174
36113 +193 200 203  174 174 174  190 197 201  174 174 174  193 200 203  220 221 221
36114 +193 200 203  131 129 131  37 38 37  6 6 6  4 0 0  4 0 0
36115 +6 6 6  6 6 6  4 3 3  5 5 5  4 4 4  4 4 4
36116 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36117 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36118 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36119 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36120 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36121 +4 4 4  4 4 4
36122 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36123 +4 4 4  4 4 4  4 4 4  5 5 5  5 5 5  5 5 5
36124 +5 5 5  4 3 3  4 3 3  5 5 5  4 3 3  4 3 3
36125 +5 5 5  6 6 6  6 6 6  4 0 0  6 6 6  6 6 6
36126 +6 6 6  125 124 125  174 174 174  220 221 221  220 221 221  193 200 203
36127 +193 200 203  193 200 203  193 200 203  193 200 203  220 221 221  158 157 158
36128 +60 73 81  6 6 6  4 0 0  4 0 0  5 5 5  6 6 6
36129 +5 5 5  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
36130 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36131 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36132 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36133 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36134 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36135 +4 4 4  4 4 4
36136 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36137 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36138 +4 4 4  5 5 5  5 5 5  4 3 3  5 5 5  4 3 3
36139 +5 5 5  5 5 5  6 6 6  6 6 6  4 0 0  4 0 0
36140 +4 0 0  4 0 0  26 28 28  125 124 125  174 174 174  193 200 203
36141 +193 200 203  174 174 174  193 200 203  167 166 167  125 124 125  6 6 6
36142 +6 6 6  6 6 6  4 0 0  6 6 6  6 6 6  5 5 5
36143 +4 3 3  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
36144 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36145 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36146 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36147 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36148 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36149 +4 4 4  4 4 4
36150 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36151 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36152 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
36153 +4 3 3  6 6 6  4 0 0  6 6 6  6 6 6  6 6 6
36154 +6 6 6  4 0 0  4 0 0  6 6 6  37 38 37  125 124 125
36155 +153 152 153  131 129 131  125 124 125  37 38 37  6 6 6  6 6 6
36156 +6 6 6  4 0 0  6 6 6  6 6 6  4 3 3  5 5 5
36157 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36158 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36159 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36160 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36161 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36162 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36163 +4 4 4  4 4 4
36164 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36165 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36166 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36167 +4 4 4  5 5 5  5 5 5  4 3 3  5 5 5  4 3 3
36168 +6 6 6  6 6 6  4 0 0  4 0 0  6 6 6  6 6 6
36169 +24 26 27  24 26 27  6 6 6  6 6 6  6 6 6  4 0 0
36170 +6 6 6  6 6 6  4 0 0  6 6 6  5 5 5  4 3 3
36171 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36172 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36173 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36174 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36175 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36176 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36177 +4 4 4  4 4 4
36178 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36179 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36180 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36181 +4 4 4  4 4 4  5 5 5  4 3 3  5 5 5  6 6 6
36182 +4 0 0  6 6 6  6 6 6  6 6 6  6 6 6  6 6 6
36183 +6 6 6  6 6 6  6 6 6  4 0 0  6 6 6  6 6 6
36184 +4 0 0  6 6 6  6 6 6  4 3 3  5 5 5  4 4 4
36185 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36186 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36187 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36188 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36189 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36190 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36191 +4 4 4  4 4 4
36192 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36193 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36194 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36195 +4 4 4  4 4 4  4 4 4  5 5 5  4 3 3  5 5 5
36196 +5 5 5  5 5 5  4 0 0  6 6 6  4 0 0  6 6 6
36197 +6 6 6  6 6 6  6 6 6  4 0 0  6 6 6  4 0 0
36198 +6 6 6  4 3 3  5 5 5  4 3 3  5 5 5  4 4 4
36199 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36200 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36201 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36202 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36203 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36204 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36205 +4 4 4  4 4 4
36206 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36207 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36208 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36209 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
36210 +4 3 3  6 6 6  4 3 3  6 6 6  6 6 6  6 6 6
36211 +4 0 0  6 6 6  4 0 0  6 6 6  6 6 6  6 6 6
36212 +6 6 6  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
36213 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36214 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36215 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36216 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36217 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36218 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36219 +4 4 4  4 4 4
36220 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36221 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36222 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36223 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36224 +4 4 4  5 5 5  4 3 3  5 5 5  4 0 0  6 6 6
36225 +6 6 6  4 0 0  6 6 6  6 6 6  4 0 0  6 6 6
36226 +4 3 3  5 5 5  5 5 5  4 4 4  4 4 4  4 4 4
36227 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36228 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36229 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36230 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36231 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36232 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36233 +4 4 4  4 4 4
36234 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36235 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36236 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36237 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36238 +4 4 4  5 5 5  4 3 3  5 5 5  6 6 6  4 3 3
36239 +4 3 3  6 6 6  6 6 6  4 3 3  6 6 6  4 3 3
36240 +5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36241 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36242 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36243 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36244 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36245 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36246 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36247 +4 4 4  4 4 4
36248 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36249 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36250 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36251 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36252 +4 4 4  4 4 4  4 4 4  5 5 5  4 3 3  6 6 6
36253 +5 5 5  4 3 3  4 3 3  4 3 3  5 5 5  5 5 5
36254 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36255 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36256 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36257 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36258 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36259 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36260 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36261 +4 4 4  4 4 4
36262 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36263 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36264 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36265 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36266 +4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  4 3 3
36267 +5 5 5  4 3 3  5 5 5  5 5 5  4 4 4  4 4 4
36268 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36269 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36270 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36271 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36272 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36273 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36274 +4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
36275 +4 4 4  4 4 4
36276 diff -urNp linux-3.0.4/drivers/video/udlfb.c linux-3.0.4/drivers/video/udlfb.c
36277 --- linux-3.0.4/drivers/video/udlfb.c   2011-07-21 22:17:23.000000000 -0400
36278 +++ linux-3.0.4/drivers/video/udlfb.c   2011-08-23 21:47:56.000000000 -0400
36279 @@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data 
36280                 dlfb_urb_completion(urb);
36281  
36282  error:
36283 -       atomic_add(bytes_sent, &dev->bytes_sent);
36284 -       atomic_add(bytes_identical, &dev->bytes_identical);
36285 -       atomic_add(width*height*2, &dev->bytes_rendered);
36286 +       atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
36287 +       atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
36288 +       atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
36289         end_cycles = get_cycles();
36290 -       atomic_add(((unsigned int) ((end_cycles - start_cycles)
36291 +       atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
36292                     >> 10)), /* Kcycles */
36293                    &dev->cpu_kcycles_used);
36294  
36295 @@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct 
36296                 dlfb_urb_completion(urb);
36297  
36298  error:
36299 -       atomic_add(bytes_sent, &dev->bytes_sent);
36300 -       atomic_add(bytes_identical, &dev->bytes_identical);
36301 -       atomic_add(bytes_rendered, &dev->bytes_rendered);
36302 +       atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
36303 +       atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
36304 +       atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
36305         end_cycles = get_cycles();
36306 -       atomic_add(((unsigned int) ((end_cycles - start_cycles)
36307 +       atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
36308                     >> 10)), /* Kcycles */
36309                    &dev->cpu_kcycles_used);
36310  }
36311 @@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
36312         struct fb_info *fb_info = dev_get_drvdata(fbdev);
36313         struct dlfb_data *dev = fb_info->par;
36314         return snprintf(buf, PAGE_SIZE, "%u\n",
36315 -                       atomic_read(&dev->bytes_rendered));
36316 +                       atomic_read_unchecked(&dev->bytes_rendered));
36317  }
36318  
36319  static ssize_t metrics_bytes_identical_show(struct device *fbdev,
36320 @@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
36321         struct fb_info *fb_info = dev_get_drvdata(fbdev);
36322         struct dlfb_data *dev = fb_info->par;
36323         return snprintf(buf, PAGE_SIZE, "%u\n",
36324 -                       atomic_read(&dev->bytes_identical));
36325 +                       atomic_read_unchecked(&dev->bytes_identical));
36326  }
36327  
36328  static ssize_t metrics_bytes_sent_show(struct device *fbdev,
36329 @@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
36330         struct fb_info *fb_info = dev_get_drvdata(fbdev);
36331         struct dlfb_data *dev = fb_info->par;
36332         return snprintf(buf, PAGE_SIZE, "%u\n",
36333 -                       atomic_read(&dev->bytes_sent));
36334 +                       atomic_read_unchecked(&dev->bytes_sent));
36335  }
36336  
36337  static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
36338 @@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
36339         struct fb_info *fb_info = dev_get_drvdata(fbdev);
36340         struct dlfb_data *dev = fb_info->par;
36341         return snprintf(buf, PAGE_SIZE, "%u\n",
36342 -                       atomic_read(&dev->cpu_kcycles_used));
36343 +                       atomic_read_unchecked(&dev->cpu_kcycles_used));
36344  }
36345  
36346  static ssize_t edid_show(
36347 @@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
36348         struct fb_info *fb_info = dev_get_drvdata(fbdev);
36349         struct dlfb_data *dev = fb_info->par;
36350  
36351 -       atomic_set(&dev->bytes_rendered, 0);
36352 -       atomic_set(&dev->bytes_identical, 0);
36353 -       atomic_set(&dev->bytes_sent, 0);
36354 -       atomic_set(&dev->cpu_kcycles_used, 0);
36355 +       atomic_set_unchecked(&dev->bytes_rendered, 0);
36356 +       atomic_set_unchecked(&dev->bytes_identical, 0);
36357 +       atomic_set_unchecked(&dev->bytes_sent, 0);
36358 +       atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
36359  
36360         return count;
36361  }
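The udlfb hunks above convert the driver's statistics counters from the plain atomic_t accessors to the *_unchecked variants, presumably so that pure statistics (which may legitimately wrap) are opted out of the reference-count overflow checking that PaX adds to atomic_t elsewhere in this patch. A minimal sketch of the resulting pattern, with the field types assumed to be switched to atomic_unchecked_t in the companion dlfb_data header hunk:

	#include <linux/atomic.h>

	/* Assumed field layout after the companion header hunk; only the
	 * statistics counters change type, not any real reference counts. */
	struct dlfb_stats_sketch {
		atomic_unchecked_t bytes_sent;
		atomic_unchecked_t bytes_rendered;
	};

	static void dlfb_account_sketch(struct dlfb_stats_sketch *s, int sent, int rendered)
	{
		/* _unchecked add: no overflow trap, a wrapping byte counter is harmless */
		atomic_add_unchecked(sent, &s->bytes_sent);
		atomic_add_unchecked(rendered, &s->bytes_rendered);
	}

	static unsigned int dlfb_read_sent_sketch(struct dlfb_stats_sketch *s)
	{
		return atomic_read_unchecked(&s->bytes_sent);	/* plain read, as in the sysfs hooks */
	}
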
36362 diff -urNp linux-3.0.4/drivers/video/uvesafb.c linux-3.0.4/drivers/video/uvesafb.c
36363 --- linux-3.0.4/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
36364 +++ linux-3.0.4/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
36365 @@ -19,6 +19,7 @@
36366  #include <linux/io.h>
36367  #include <linux/mutex.h>
36368  #include <linux/slab.h>
36369 +#include <linux/moduleloader.h>
36370  #include <video/edid.h>
36371  #include <video/uvesafb.h>
36372  #ifdef CONFIG_X86
36373 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
36374                 NULL,
36375         };
36376  
36377 -       return call_usermodehelper(v86d_path, argv, envp, 1);
36378 +       return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
36379  }
36380  
36381  /*
36382 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
36383         if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
36384                 par->pmi_setpal = par->ypan = 0;
36385         } else {
36386 +
36387 +#ifdef CONFIG_PAX_KERNEXEC
36388 +#ifdef CONFIG_MODULES
36389 +               par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
36390 +#endif
36391 +               if (!par->pmi_code) {
36392 +                       par->pmi_setpal = par->ypan = 0;
36393 +                       return 0;
36394 +               }
36395 +#endif
36396 +
36397                 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
36398                                                 + task->t.regs.edi);
36399 +
36400 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36401 +               pax_open_kernel();
36402 +               memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
36403 +               pax_close_kernel();
36404 +
36405 +               par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
36406 +               par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
36407 +#else
36408                 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
36409                 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
36410 +#endif
36411 +
36412                 printk(KERN_INFO "uvesafb: protected mode interface info at "
36413                                  "%04x:%04x\n",
36414                                  (u16)task->t.regs.es, (u16)task->t.regs.edi);
36415 @@ -1821,6 +1844,11 @@ out:
36416         if (par->vbe_modes)
36417                 kfree(par->vbe_modes);
36418  
36419 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36420 +       if (par->pmi_code)
36421 +               module_free_exec(NULL, par->pmi_code);
36422 +#endif
36423 +
36424         framebuffer_release(info);
36425         return err;
36426  }
36427 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
36428                                 kfree(par->vbe_state_orig);
36429                         if (par->vbe_state_saved)
36430                                 kfree(par->vbe_state_saved);
36431 +
36432 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36433 +                       if (par->pmi_code)
36434 +                               module_free_exec(NULL, par->pmi_code);
36435 +#endif
36436 +
36437                 }
36438  
36439                 framebuffer_release(info);
36440 diff -urNp linux-3.0.4/drivers/video/vesafb.c linux-3.0.4/drivers/video/vesafb.c
36441 --- linux-3.0.4/drivers/video/vesafb.c  2011-07-21 22:17:23.000000000 -0400
36442 +++ linux-3.0.4/drivers/video/vesafb.c  2011-08-23 21:47:56.000000000 -0400
36443 @@ -9,6 +9,7 @@
36444   */
36445  
36446  #include <linux/module.h>
36447 +#include <linux/moduleloader.h>
36448  #include <linux/kernel.h>
36449  #include <linux/errno.h>
36450  #include <linux/string.h>
36451 @@ -52,8 +53,8 @@ static int   vram_remap __initdata;           /* 
36452  static int   vram_total __initdata;            /* Set total amount of memory */
36453  static int   pmi_setpal __read_mostly = 1;     /* pmi for palette changes ??? */
36454  static int   ypan       __read_mostly;         /* 0..nothing, 1..ypan, 2..ywrap */
36455 -static void  (*pmi_start)(void) __read_mostly;
36456 -static void  (*pmi_pal)  (void) __read_mostly;
36457 +static void  (*pmi_start)(void) __read_only;
36458 +static void  (*pmi_pal)  (void) __read_only;
36459  static int   depth      __read_mostly;
36460  static int   vga_compat __read_mostly;
36461  /* --------------------------------------------------------------------- */
36462 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
36463         unsigned int size_vmode;
36464         unsigned int size_remap;
36465         unsigned int size_total;
36466 +       void *pmi_code = NULL;
36467  
36468         if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
36469                 return -ENODEV;
36470 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
36471                 size_remap = size_total;
36472         vesafb_fix.smem_len = size_remap;
36473  
36474 -#ifndef __i386__
36475 -       screen_info.vesapm_seg = 0;
36476 -#endif
36477 -
36478         if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
36479                 printk(KERN_WARNING
36480                        "vesafb: cannot reserve video memory at 0x%lx\n",
36481 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
36482         printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
36483                vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
36484  
36485 +#ifdef __i386__
36486 +
36487 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36488 +       pmi_code = module_alloc_exec(screen_info.vesapm_size);
36489 +       if (!pmi_code)
36490 +#elif !defined(CONFIG_PAX_KERNEXEC)
36491 +       if (0)
36492 +#endif
36493 +
36494 +#endif
36495 +       screen_info.vesapm_seg = 0;
36496 +
36497         if (screen_info.vesapm_seg) {
36498 -               printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
36499 -                      screen_info.vesapm_seg,screen_info.vesapm_off);
36500 +               printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
36501 +                      screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
36502         }
36503  
36504         if (screen_info.vesapm_seg < 0xc000)
36505 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
36506  
36507         if (ypan || pmi_setpal) {
36508                 unsigned short *pmi_base;
36509 +
36510                 pmi_base  = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
36511 -               pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
36512 -               pmi_pal   = (void*)((char*)pmi_base + pmi_base[2]);
36513 +
36514 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36515 +               pax_open_kernel();
36516 +               memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
36517 +#else
36518 +               pmi_code  = pmi_base;
36519 +#endif
36520 +
36521 +               pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
36522 +               pmi_pal   = (void*)((char*)pmi_code + pmi_base[2]);
36523 +
36524 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36525 +               pmi_start = ktva_ktla(pmi_start);
36526 +               pmi_pal = ktva_ktla(pmi_pal);
36527 +               pax_close_kernel();
36528 +#endif
36529 +
36530                 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
36531                 if (pmi_base[3]) {
36532                         printk(KERN_INFO "vesafb: pmi: ports = ");
36533 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
36534                info->node, info->fix.id);
36535         return 0;
36536  err:
36537 +
36538 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36539 +       module_free_exec(NULL, pmi_code);
36540 +#endif
36541 +
36542         if (info->screen_base)
36543                 iounmap(info->screen_base);
36544         framebuffer_release(info);
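Both VESA framebuffer hunks above follow the same KERNEXEC pattern: the BIOS protected-mode interface stubs are copied into an executable allocation instead of being called in place, the copy is performed between pax_open_kernel() and pax_close_kernel(), and the entry points are translated through ktva_ktla(). A minimal sketch of that sequence, assuming module_alloc_exec() and the other helpers as introduced elsewhere in this patch:

	#include <linux/moduleloader.h>
	#include <linux/string.h>

	static void *pmi_map_exec_sketch(const void *pmi_base, size_t size, unsigned int entry_off)
	{
		void *pmi_code = module_alloc_exec(size);	/* executable, non-writable region */

		if (!pmi_code)
			return NULL;		/* caller falls back to pmi_setpal = ypan = 0 */

		pax_open_kernel();		/* temporarily permit the write */
		memcpy(pmi_code, pmi_base, size);
		pax_close_kernel();

		return ktva_ktla(pmi_code + entry_off);	/* callable alias of the copied stub */
	}
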
36545 diff -urNp linux-3.0.4/drivers/video/via/via_clock.h linux-3.0.4/drivers/video/via/via_clock.h
36546 --- linux-3.0.4/drivers/video/via/via_clock.h   2011-07-21 22:17:23.000000000 -0400
36547 +++ linux-3.0.4/drivers/video/via/via_clock.h   2011-08-23 21:47:56.000000000 -0400
36548 @@ -56,7 +56,7 @@ struct via_clock {
36549  
36550         void (*set_engine_pll_state)(u8 state);
36551         void (*set_engine_pll)(struct via_pll_config config);
36552 -};
36553 +} __no_const;
36554  
36555  
36556  static inline u32 get_pll_internal_frequency(u32 ref_freq,
36557 diff -urNp linux-3.0.4/drivers/virtio/virtio_balloon.c linux-3.0.4/drivers/virtio/virtio_balloon.c
36558 --- linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
36559 +++ linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
36560 @@ -174,6 +174,8 @@ static void update_balloon_stats(struct 
36561         struct sysinfo i;
36562         int idx = 0;
36563  
36564 +       pax_track_stack();
36565 +
36566         all_vm_events(events);
36567         si_meminfo(&i);
36568  
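The single-line pax_track_stack() addition above recurs in the fs/aio.c and fs/binfmt_elf.c hunks below: functions with unusually large local frames get one call on entry, presumably so the PaX stack-usage tracking added elsewhere in this patch can account for the deep frame. A minimal sketch of the shape of such a function:

	#include <linux/kernel.h>
	#include <linux/mm.h>

	static void update_balloon_stats_sketch(void)
	{
		struct sysinfo i;	/* the large on-stack object is what triggers the annotation */

		pax_track_stack();	/* assumed PaX helper: record this frame's stack depth */

		si_meminfo(&i);
		/* ... fill in the stats as the real function does ... */
	}
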
36569 diff -urNp linux-3.0.4/fs/9p/vfs_inode.c linux-3.0.4/fs/9p/vfs_inode.c
36570 --- linux-3.0.4/fs/9p/vfs_inode.c       2011-07-21 22:17:23.000000000 -0400
36571 +++ linux-3.0.4/fs/9p/vfs_inode.c       2011-08-23 21:47:56.000000000 -0400
36572 @@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
36573  void
36574  v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
36575  {
36576 -       char *s = nd_get_link(nd);
36577 +       const char *s = nd_get_link(nd);
36578  
36579         P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
36580                 IS_ERR(s) ? "<error>" : s);
36581 diff -urNp linux-3.0.4/fs/aio.c linux-3.0.4/fs/aio.c
36582 --- linux-3.0.4/fs/aio.c        2011-07-21 22:17:23.000000000 -0400
36583 +++ linux-3.0.4/fs/aio.c        2011-08-23 21:48:14.000000000 -0400
36584 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx 
36585         size += sizeof(struct io_event) * nr_events;
36586         nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
36587  
36588 -       if (nr_pages < 0)
36589 +       if (nr_pages <= 0)
36590                 return -EINVAL;
36591  
36592         nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
36593 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
36594         struct aio_timeout      to;
36595         int                     retry = 0;
36596  
36597 +       pax_track_stack();
36598 +
36599         /* needed to zero any padding within an entry (there shouldn't be 
36600          * any, but C is fun!
36601          */
36602 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
36603  static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
36604  {
36605         ssize_t ret;
36606 +       struct iovec iovstack;
36607  
36608  #ifdef CONFIG_COMPAT
36609         if (compat)
36610                 ret = compat_rw_copy_check_uvector(type,
36611                                 (struct compat_iovec __user *)kiocb->ki_buf,
36612 -                               kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
36613 +                               kiocb->ki_nbytes, 1, &iovstack,
36614                                 &kiocb->ki_iovec);
36615         else
36616  #endif
36617                 ret = rw_copy_check_uvector(type,
36618                                 (struct iovec __user *)kiocb->ki_buf,
36619 -                               kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
36620 +                               kiocb->ki_nbytes, 1, &iovstack,
36621                                 &kiocb->ki_iovec);
36622         if (ret < 0)
36623                 goto out;
36624  
36625 +       if (kiocb->ki_iovec == &iovstack) {
36626 +               kiocb->ki_inline_vec = iovstack;
36627 +               kiocb->ki_iovec = &kiocb->ki_inline_vec;
36628 +       }
36629         kiocb->ki_nr_segs = kiocb->ki_nbytes;
36630         kiocb->ki_cur_seg = 0;
36631         /* ki_nbytes/left now reflect bytes instead of segs */
36632 diff -urNp linux-3.0.4/fs/attr.c linux-3.0.4/fs/attr.c
36633 --- linux-3.0.4/fs/attr.c       2011-07-21 22:17:23.000000000 -0400
36634 +++ linux-3.0.4/fs/attr.c       2011-08-23 21:48:14.000000000 -0400
36635 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode 
36636                 unsigned long limit;
36637  
36638                 limit = rlimit(RLIMIT_FSIZE);
36639 +               gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
36640                 if (limit != RLIM_INFINITY && offset > limit)
36641                         goto out_sig;
36642                 if (offset > inode->i_sb->s_maxbytes)
36643 diff -urNp linux-3.0.4/fs/befs/linuxvfs.c linux-3.0.4/fs/befs/linuxvfs.c
36644 --- linux-3.0.4/fs/befs/linuxvfs.c      2011-08-29 23:26:13.000000000 -0400
36645 +++ linux-3.0.4/fs/befs/linuxvfs.c      2011-08-29 23:26:27.000000000 -0400
36646 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry 
36647  {
36648         befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
36649         if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
36650 -               char *link = nd_get_link(nd);
36651 +               const char *link = nd_get_link(nd);
36652                 if (!IS_ERR(link))
36653                         kfree(link);
36654         }
36655 diff -urNp linux-3.0.4/fs/binfmt_aout.c linux-3.0.4/fs/binfmt_aout.c
36656 --- linux-3.0.4/fs/binfmt_aout.c        2011-07-21 22:17:23.000000000 -0400
36657 +++ linux-3.0.4/fs/binfmt_aout.c        2011-08-23 21:48:14.000000000 -0400
36658 @@ -16,6 +16,7 @@
36659  #include <linux/string.h>
36660  #include <linux/fs.h>
36661  #include <linux/file.h>
36662 +#include <linux/security.h>
36663  #include <linux/stat.h>
36664  #include <linux/fcntl.h>
36665  #include <linux/ptrace.h>
36666 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
36667  #endif
36668  #       define START_STACK(u)   ((void __user *)u.start_stack)
36669  
36670 +       memset(&dump, 0, sizeof(dump));
36671 +
36672         fs = get_fs();
36673         set_fs(KERNEL_DS);
36674         has_dumped = 1;
36675 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
36676  
36677  /* If the size of the dump file exceeds the rlimit, then see what would happen
36678     if we wrote the stack, but not the data area.  */
36679 +       gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
36680         if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
36681                 dump.u_dsize = 0;
36682  
36683  /* Make sure we have enough room to write the stack and data areas. */
36684 +       gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
36685         if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
36686                 dump.u_ssize = 0;
36687  
36688 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
36689         rlim = rlimit(RLIMIT_DATA);
36690         if (rlim >= RLIM_INFINITY)
36691                 rlim = ~0;
36692 +
36693 +       gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
36694         if (ex.a_data + ex.a_bss > rlim)
36695                 return -ENOMEM;
36696  
36697 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
36698         install_exec_creds(bprm);
36699         current->flags &= ~PF_FORKNOEXEC;
36700  
36701 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36702 +       current->mm->pax_flags = 0UL;
36703 +#endif
36704 +
36705 +#ifdef CONFIG_PAX_PAGEEXEC
36706 +       if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
36707 +               current->mm->pax_flags |= MF_PAX_PAGEEXEC;
36708 +
36709 +#ifdef CONFIG_PAX_EMUTRAMP
36710 +               if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
36711 +                       current->mm->pax_flags |= MF_PAX_EMUTRAMP;
36712 +#endif
36713 +
36714 +#ifdef CONFIG_PAX_MPROTECT
36715 +               if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
36716 +                       current->mm->pax_flags |= MF_PAX_MPROTECT;
36717 +#endif
36718 +
36719 +       }
36720 +#endif
36721 +
36722         if (N_MAGIC(ex) == OMAGIC) {
36723                 unsigned long text_addr, map_size;
36724                 loff_t pos;
36725 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
36726  
36727                 down_write(&current->mm->mmap_sem);
36728                 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
36729 -                               PROT_READ | PROT_WRITE | PROT_EXEC,
36730 +                               PROT_READ | PROT_WRITE,
36731                                 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
36732                                 fd_offset + ex.a_text);
36733                 up_write(&current->mm->mmap_sem);
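The gr_learn_resource() calls added in fs/attr.c and in the a.out loader above all follow one pattern: report the requested size against the relevant rlimit immediately before the existing limit check, so the grsecurity learning mode (the helper is assumed to come from the grsec core added elsewhere in this patch) can record what a process actually asked for. A minimal sketch of that pattern for RLIMIT_FSIZE:

	#include <linux/errno.h>
	#include <linux/resource.h>
	#include <linux/sched.h>

	static int grow_file_sketch(loff_t offset)
	{
		unsigned long limit = rlimit(RLIMIT_FSIZE);

		/* let the learning mode see the request before it is enforced */
		gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);

		if (limit != RLIM_INFINITY && offset > limit)
			return -EFBIG;

		return 0;
	}
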
36734 diff -urNp linux-3.0.4/fs/binfmt_elf.c linux-3.0.4/fs/binfmt_elf.c
36735 --- linux-3.0.4/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
36736 +++ linux-3.0.4/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
36737 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
36738  #define elf_core_dump  NULL
36739  #endif
36740  
36741 +#ifdef CONFIG_PAX_MPROTECT
36742 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
36743 +#endif
36744 +
36745  #if ELF_EXEC_PAGESIZE > PAGE_SIZE
36746  #define ELF_MIN_ALIGN  ELF_EXEC_PAGESIZE
36747  #else
36748 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format = 
36749         .load_binary    = load_elf_binary,
36750         .load_shlib     = load_elf_library,
36751         .core_dump      = elf_core_dump,
36752 +
36753 +#ifdef CONFIG_PAX_MPROTECT
36754 +               .handle_mprotect= elf_handle_mprotect,
36755 +#endif
36756 +
36757         .min_coredump   = ELF_EXEC_PAGESIZE,
36758  };
36759  
36760 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format = 
36761  
36762  static int set_brk(unsigned long start, unsigned long end)
36763  {
36764 +       unsigned long e = end;
36765 +
36766         start = ELF_PAGEALIGN(start);
36767         end = ELF_PAGEALIGN(end);
36768         if (end > start) {
36769 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start, 
36770                 if (BAD_ADDR(addr))
36771                         return addr;
36772         }
36773 -       current->mm->start_brk = current->mm->brk = end;
36774 +       current->mm->start_brk = current->mm->brk = e;
36775         return 0;
36776  }
36777  
36778 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
36779         elf_addr_t __user *u_rand_bytes;
36780         const char *k_platform = ELF_PLATFORM;
36781         const char *k_base_platform = ELF_BASE_PLATFORM;
36782 -       unsigned char k_rand_bytes[16];
36783 +       u32 k_rand_bytes[4];
36784         int items;
36785         elf_addr_t *elf_info;
36786         int ei_index = 0;
36787         const struct cred *cred = current_cred();
36788         struct vm_area_struct *vma;
36789 +       unsigned long saved_auxv[AT_VECTOR_SIZE];
36790 +
36791 +       pax_track_stack();
36792  
36793         /*
36794          * In some cases (e.g. Hyper-Threading), we want to avoid L1
36795 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
36796          * Generate 16 random bytes for userspace PRNG seeding.
36797          */
36798         get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
36799 -       u_rand_bytes = (elf_addr_t __user *)
36800 -                      STACK_ALLOC(p, sizeof(k_rand_bytes));
36801 +       srandom32(k_rand_bytes[0] ^ random32());
36802 +       srandom32(k_rand_bytes[1] ^ random32());
36803 +       srandom32(k_rand_bytes[2] ^ random32());
36804 +       srandom32(k_rand_bytes[3] ^ random32());
36805 +       p = STACK_ROUND(p, sizeof(k_rand_bytes));
36806 +       u_rand_bytes = (elf_addr_t __user *) p;
36807         if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
36808                 return -EFAULT;
36809  
36810 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
36811                 return -EFAULT;
36812         current->mm->env_end = p;
36813  
36814 +       memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
36815 +
36816         /* Put the elf_info on the stack in the right place.  */
36817         sp = (elf_addr_t __user *)envp + 1;
36818 -       if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
36819 +       if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
36820                 return -EFAULT;
36821         return 0;
36822  }
36823 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
36824  {
36825         struct elf_phdr *elf_phdata;
36826         struct elf_phdr *eppnt;
36827 -       unsigned long load_addr = 0;
36828 +       unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36829         int load_addr_set = 0;
36830         unsigned long last_bss = 0, elf_bss = 0;
36831 -       unsigned long error = ~0UL;
36832 +       unsigned long error = -EINVAL;
36833         unsigned long total_size;
36834         int retval, i, size;
36835  
36836 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
36837                 goto out_close;
36838         }
36839  
36840 +#ifdef CONFIG_PAX_SEGMEXEC
36841 +       if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36842 +               pax_task_size = SEGMEXEC_TASK_SIZE;
36843 +#endif
36844 +
36845         eppnt = elf_phdata;
36846         for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36847                 if (eppnt->p_type == PT_LOAD) {
36848 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
36849                         k = load_addr + eppnt->p_vaddr;
36850                         if (BAD_ADDR(k) ||
36851                             eppnt->p_filesz > eppnt->p_memsz ||
36852 -                           eppnt->p_memsz > TASK_SIZE ||
36853 -                           TASK_SIZE - eppnt->p_memsz < k) {
36854 +                           eppnt->p_memsz > pax_task_size ||
36855 +                           pax_task_size - eppnt->p_memsz < k) {
36856                                 error = -ENOMEM;
36857                                 goto out_close;
36858                         }
36859 @@ -528,6 +553,193 @@ out:
36860         return error;
36861  }
36862  
36863 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36864 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36865 +{
36866 +       unsigned long pax_flags = 0UL;
36867 +
36868 +#ifdef CONFIG_PAX_PAGEEXEC
36869 +       if (elf_phdata->p_flags & PF_PAGEEXEC)
36870 +               pax_flags |= MF_PAX_PAGEEXEC;
36871 +#endif
36872 +
36873 +#ifdef CONFIG_PAX_SEGMEXEC
36874 +       if (elf_phdata->p_flags & PF_SEGMEXEC)
36875 +               pax_flags |= MF_PAX_SEGMEXEC;
36876 +#endif
36877 +
36878 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36879 +       if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36880 +               if ((__supported_pte_mask & _PAGE_NX))
36881 +                       pax_flags &= ~MF_PAX_SEGMEXEC;
36882 +               else
36883 +                       pax_flags &= ~MF_PAX_PAGEEXEC;
36884 +       }
36885 +#endif
36886 +
36887 +#ifdef CONFIG_PAX_EMUTRAMP
36888 +       if (elf_phdata->p_flags & PF_EMUTRAMP)
36889 +               pax_flags |= MF_PAX_EMUTRAMP;
36890 +#endif
36891 +
36892 +#ifdef CONFIG_PAX_MPROTECT
36893 +       if (elf_phdata->p_flags & PF_MPROTECT)
36894 +               pax_flags |= MF_PAX_MPROTECT;
36895 +#endif
36896 +
36897 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36898 +       if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36899 +               pax_flags |= MF_PAX_RANDMMAP;
36900 +#endif
36901 +
36902 +       return pax_flags;
36903 +}
36904 +#endif
36905 +
36906 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36907 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36908 +{
36909 +       unsigned long pax_flags = 0UL;
36910 +
36911 +#ifdef CONFIG_PAX_PAGEEXEC
36912 +       if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36913 +               pax_flags |= MF_PAX_PAGEEXEC;
36914 +#endif
36915 +
36916 +#ifdef CONFIG_PAX_SEGMEXEC
36917 +       if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36918 +               pax_flags |= MF_PAX_SEGMEXEC;
36919 +#endif
36920 +
36921 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36922 +       if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36923 +               if ((__supported_pte_mask & _PAGE_NX))
36924 +                       pax_flags &= ~MF_PAX_SEGMEXEC;
36925 +               else
36926 +                       pax_flags &= ~MF_PAX_PAGEEXEC;
36927 +       }
36928 +#endif
36929 +
36930 +#ifdef CONFIG_PAX_EMUTRAMP
36931 +       if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36932 +               pax_flags |= MF_PAX_EMUTRAMP;
36933 +#endif
36934 +
36935 +#ifdef CONFIG_PAX_MPROTECT
36936 +       if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36937 +               pax_flags |= MF_PAX_MPROTECT;
36938 +#endif
36939 +
36940 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36941 +       if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36942 +               pax_flags |= MF_PAX_RANDMMAP;
36943 +#endif
36944 +
36945 +       return pax_flags;
36946 +}
36947 +#endif
36948 +
36949 +#ifdef CONFIG_PAX_EI_PAX
36950 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36951 +{
36952 +       unsigned long pax_flags = 0UL;
36953 +
36954 +#ifdef CONFIG_PAX_PAGEEXEC
36955 +       if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36956 +               pax_flags |= MF_PAX_PAGEEXEC;
36957 +#endif
36958 +
36959 +#ifdef CONFIG_PAX_SEGMEXEC
36960 +       if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36961 +               pax_flags |= MF_PAX_SEGMEXEC;
36962 +#endif
36963 +
36964 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36965 +       if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36966 +               if ((__supported_pte_mask & _PAGE_NX))
36967 +                       pax_flags &= ~MF_PAX_SEGMEXEC;
36968 +               else
36969 +                       pax_flags &= ~MF_PAX_PAGEEXEC;
36970 +       }
36971 +#endif
36972 +
36973 +#ifdef CONFIG_PAX_EMUTRAMP
36974 +       if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36975 +               pax_flags |= MF_PAX_EMUTRAMP;
36976 +#endif
36977 +
36978 +#ifdef CONFIG_PAX_MPROTECT
36979 +       if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36980 +               pax_flags |= MF_PAX_MPROTECT;
36981 +#endif
36982 +
36983 +#ifdef CONFIG_PAX_ASLR
36984 +       if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36985 +               pax_flags |= MF_PAX_RANDMMAP;
36986 +#endif
36987 +
36988 +       return pax_flags;
36989 +}
36990 +#endif
36991 +
36992 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36993 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36994 +{
36995 +       unsigned long pax_flags = 0UL;
36996 +
36997 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36998 +       unsigned long i;
36999 +       int found_flags = 0;
37000 +#endif
37001 +
37002 +#ifdef CONFIG_PAX_EI_PAX
37003 +       pax_flags = pax_parse_ei_pax(elf_ex);
37004 +#endif
37005 +
37006 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
37007 +       for (i = 0UL; i < elf_ex->e_phnum; i++)
37008 +               if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
37009 +                       if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
37010 +                           ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
37011 +                           ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
37012 +                           ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
37013 +                           ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
37014 +                               return -EINVAL;
37015 +
37016 +#ifdef CONFIG_PAX_SOFTMODE
37017 +                       if (pax_softmode)
37018 +                               pax_flags = pax_parse_softmode(&elf_phdata[i]);
37019 +                       else
37020 +#endif
37021 +
37022 +                               pax_flags = pax_parse_hardmode(&elf_phdata[i]);
37023 +                       found_flags = 1;
37024 +                       break;
37025 +               }
37026 +#endif
37027 +
37028 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
37029 +       if (found_flags == 0) {
37030 +               struct elf_phdr phdr;
37031 +               memset(&phdr, 0, sizeof(phdr));
37032 +               phdr.p_flags = PF_NOEMUTRAMP;
37033 +#ifdef CONFIG_PAX_SOFTMODE
37034 +               if (pax_softmode)
37035 +                       pax_flags = pax_parse_softmode(&phdr);
37036 +               else
37037 +#endif
37038 +                       pax_flags = pax_parse_hardmode(&phdr);
37039 +       }
37040 +#endif
37041 +
37042 +       if (0 > pax_check_flags(&pax_flags))
37043 +               return -EINVAL;
37044 +
37045 +       current->mm->pax_flags = pax_flags;
37046 +       return 0;
37047 +}
37048 +#endif
37049 +
37050  /*
37051   * These are the functions used to load ELF style executables and shared
37052   * libraries.  There is no binary dependent code anywhere else.
37053 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
37054  {
37055         unsigned int random_variable = 0;
37056  
37057 +#ifdef CONFIG_PAX_RANDUSTACK
37058 +       if (randomize_va_space)
37059 +               return stack_top - current->mm->delta_stack;
37060 +#endif
37061 +
37062         if ((current->flags & PF_RANDOMIZE) &&
37063                 !(current->personality & ADDR_NO_RANDOMIZE)) {
37064                 random_variable = get_random_int() & STACK_RND_MASK;
37065 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
37066         unsigned long load_addr = 0, load_bias = 0;
37067         int load_addr_set = 0;
37068         char * elf_interpreter = NULL;
37069 -       unsigned long error;
37070 +       unsigned long error = 0;
37071         struct elf_phdr *elf_ppnt, *elf_phdata;
37072         unsigned long elf_bss, elf_brk;
37073         int retval, i;
37074 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
37075         unsigned long start_code, end_code, start_data, end_data;
37076         unsigned long reloc_func_desc __maybe_unused = 0;
37077         int executable_stack = EXSTACK_DEFAULT;
37078 -       unsigned long def_flags = 0;
37079         struct {
37080                 struct elfhdr elf_ex;
37081                 struct elfhdr interp_elf_ex;
37082         } *loc;
37083 +       unsigned long pax_task_size = TASK_SIZE;
37084  
37085         loc = kmalloc(sizeof(*loc), GFP_KERNEL);
37086         if (!loc) {
37087 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
37088  
37089         /* OK, This is the point of no return */
37090         current->flags &= ~PF_FORKNOEXEC;
37091 -       current->mm->def_flags = def_flags;
37092 +
37093 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
37094 +       current->mm->pax_flags = 0UL;
37095 +#endif
37096 +
37097 +#ifdef CONFIG_PAX_DLRESOLVE
37098 +       current->mm->call_dl_resolve = 0UL;
37099 +#endif
37100 +
37101 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
37102 +       current->mm->call_syscall = 0UL;
37103 +#endif
37104 +
37105 +#ifdef CONFIG_PAX_ASLR
37106 +       current->mm->delta_mmap = 0UL;
37107 +       current->mm->delta_stack = 0UL;
37108 +#endif
37109 +
37110 +       current->mm->def_flags = 0;
37111 +
37112 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37113 +       if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
37114 +               send_sig(SIGKILL, current, 0);
37115 +               goto out_free_dentry;
37116 +       }
37117 +#endif
37118 +
37119 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
37120 +       pax_set_initial_flags(bprm);
37121 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
37122 +       if (pax_set_initial_flags_func)
37123 +               (pax_set_initial_flags_func)(bprm);
37124 +#endif
37125 +
37126 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
37127 +       if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
37128 +               current->mm->context.user_cs_limit = PAGE_SIZE;
37129 +               current->mm->def_flags |= VM_PAGEEXEC;
37130 +       }
37131 +#endif
37132 +
37133 +#ifdef CONFIG_PAX_SEGMEXEC
37134 +       if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
37135 +               current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
37136 +               current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
37137 +               pax_task_size = SEGMEXEC_TASK_SIZE;
37138 +               current->mm->def_flags |= VM_NOHUGEPAGE;
37139 +       }
37140 +#endif
37141 +
37142 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
37143 +       if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37144 +               set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
37145 +               put_cpu();
37146 +       }
37147 +#endif
37148  
37149         /* Do this immediately, since STACK_TOP as used in setup_arg_pages
37150            may depend on the personality.  */
37151         SET_PERSONALITY(loc->elf_ex);
37152 +
37153 +#ifdef CONFIG_PAX_ASLR
37154 +       if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
37155 +               current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
37156 +               current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
37157 +       }
37158 +#endif
37159 +
37160 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37161 +       if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37162 +               executable_stack = EXSTACK_DISABLE_X;
37163 +               current->personality &= ~READ_IMPLIES_EXEC;
37164 +       } else
37165 +#endif
37166 +
37167         if (elf_read_implies_exec(loc->elf_ex, executable_stack))
37168                 current->personality |= READ_IMPLIES_EXEC;
37169  
37170 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
37171  #else
37172                         load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
37173  #endif
37174 +
37175 +#ifdef CONFIG_PAX_RANDMMAP
37176 +                       /* PaX: randomize base address at the default exe base if requested */
37177 +                       if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
37178 +#ifdef CONFIG_SPARC64
37179 +                               load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
37180 +#else
37181 +                               load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
37182 +#endif
37183 +                               load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
37184 +                               elf_flags |= MAP_FIXED;
37185 +                       }
37186 +#endif
37187 +
37188                 }
37189  
37190                 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
37191 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
37192                  * allowed task size. Note that p_filesz must always be
37193                  * <= p_memsz so it is only necessary to check p_memsz.
37194                  */
37195 -               if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37196 -                   elf_ppnt->p_memsz > TASK_SIZE ||
37197 -                   TASK_SIZE - elf_ppnt->p_memsz < k) {
37198 +               if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37199 +                   elf_ppnt->p_memsz > pax_task_size ||
37200 +                   pax_task_size - elf_ppnt->p_memsz < k) {
37201                         /* set_brk can never work. Avoid overflows. */
37202                         send_sig(SIGKILL, current, 0);
37203                         retval = -EINVAL;
37204 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
37205         start_data += load_bias;
37206         end_data += load_bias;
37207  
37208 +#ifdef CONFIG_PAX_RANDMMAP
37209 +       if (current->mm->pax_flags & MF_PAX_RANDMMAP)
37210 +               elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
37211 +#endif
37212 +
37213         /* Calling set_brk effectively mmaps the pages that we need
37214          * for the bss and break sections.  We must do this before
37215          * mapping in the interpreter, to make sure it doesn't wind
37216 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
37217                 goto out_free_dentry;
37218         }
37219         if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
37220 -               send_sig(SIGSEGV, current, 0);
37221 -               retval = -EFAULT; /* Nobody gets to see this, but.. */
37222 -               goto out_free_dentry;
37223 +               /*
37224 +                * This bss-zeroing can fail if the ELF
37225 +                * file specifies odd protections. So
37226 +                * we don't check the return value
37227 +                */
37228         }
37229  
37230         if (elf_interpreter) {
37231 @@ -1090,7 +1398,7 @@ out:
37232   * Decide what to dump of a segment, part, all or none.
37233   */
37234  static unsigned long vma_dump_size(struct vm_area_struct *vma,
37235 -                                  unsigned long mm_flags)
37236 +                                  unsigned long mm_flags, long signr)
37237  {
37238  #define FILTER(type)   (mm_flags & (1UL << MMF_DUMP_##type))
37239  
37240 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
37241         if (vma->vm_file == NULL)
37242                 return 0;
37243  
37244 -       if (FILTER(MAPPED_PRIVATE))
37245 +       if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
37246                 goto whole;
37247  
37248         /*
37249 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
37250  {
37251         elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
37252         int i = 0;
37253 -       do
37254 +       do {
37255                 i += 2;
37256 -       while (auxv[i - 2] != AT_NULL);
37257 +       } while (auxv[i - 2] != AT_NULL);
37258         fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
37259  }
37260  
37261 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
37262  }
37263  
37264  static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
37265 -                                    unsigned long mm_flags)
37266 +                                    struct coredump_params *cprm)
37267  {
37268         struct vm_area_struct *vma;
37269         size_t size = 0;
37270  
37271         for (vma = first_vma(current, gate_vma); vma != NULL;
37272              vma = next_vma(vma, gate_vma))
37273 -               size += vma_dump_size(vma, mm_flags);
37274 +               size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37275         return size;
37276  }
37277  
37278 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
37279  
37280         dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
37281  
37282 -       offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
37283 +       offset += elf_core_vma_data_size(gate_vma, cprm);
37284         offset += elf_core_extra_data_size();
37285         e_shoff = offset;
37286  
37287 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
37288         offset = dataoff;
37289  
37290         size += sizeof(*elf);
37291 +       gr_learn_resource(current, RLIMIT_CORE, size, 1);
37292         if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
37293                 goto end_coredump;
37294  
37295         size += sizeof(*phdr4note);
37296 +       gr_learn_resource(current, RLIMIT_CORE, size, 1);
37297         if (size > cprm->limit
37298             || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
37299                 goto end_coredump;
37300 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
37301                 phdr.p_offset = offset;
37302                 phdr.p_vaddr = vma->vm_start;
37303                 phdr.p_paddr = 0;
37304 -               phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
37305 +               phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37306                 phdr.p_memsz = vma->vm_end - vma->vm_start;
37307                 offset += phdr.p_filesz;
37308                 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
37309 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
37310                 phdr.p_align = ELF_EXEC_PAGESIZE;
37311  
37312                 size += sizeof(phdr);
37313 +               gr_learn_resource(current, RLIMIT_CORE, size, 1);
37314                 if (size > cprm->limit
37315                     || !dump_write(cprm->file, &phdr, sizeof(phdr)))
37316                         goto end_coredump;
37317 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
37318                 unsigned long addr;
37319                 unsigned long end;
37320  
37321 -               end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
37322 +               end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37323  
37324                 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
37325                         struct page *page;
37326 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
37327                         page = get_dump_page(addr);
37328                         if (page) {
37329                                 void *kaddr = kmap(page);
37330 +                               gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
37331                                 stop = ((size += PAGE_SIZE) > cprm->limit) ||
37332                                         !dump_write(cprm->file, kaddr,
37333                                                     PAGE_SIZE);
37334 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
37335  
37336         if (e_phnum == PN_XNUM) {
37337                 size += sizeof(*shdr4extnum);
37338 +               gr_learn_resource(current, RLIMIT_CORE, size, 1);
37339                 if (size > cprm->limit
37340                     || !dump_write(cprm->file, shdr4extnum,
37341                                    sizeof(*shdr4extnum)))
37342 @@ -2067,6 +2380,97 @@ out:
37343  
37344  #endif         /* CONFIG_ELF_CORE */
37345  
37346 +#ifdef CONFIG_PAX_MPROTECT
37347 +/* PaX: non-PIC ELF libraries need relocations on their executable segments;
37348 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
37349 + * we'll remove VM_MAYWRITE for good on RELRO segments.
37350 + *
37351 + * The checks favour ld-linux.so behaviour, which operates on a per ELF segment
37352 + * basis because we want to allow the common case and not the special ones.
37353 + */
37354 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
37355 +{
37356 +       struct elfhdr elf_h;
37357 +       struct elf_phdr elf_p;
37358 +       unsigned long i;
37359 +       unsigned long oldflags;
37360 +       bool is_textrel_rw, is_textrel_rx, is_relro;
37361 +
37362 +       if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
37363 +               return;
37364 +
37365 +       oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
37366 +       newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
37367 +
37368 +#ifdef CONFIG_PAX_ELFRELOCS
37369 +       /* possible TEXTREL */
37370 +       is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
37371 +       is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
37372 +#else
37373 +       is_textrel_rw = false;
37374 +       is_textrel_rx = false;
37375 +#endif
37376 +
37377 +       /* possible RELRO */
37378 +       is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
37379 +
37380 +       if (!is_textrel_rw && !is_textrel_rx && !is_relro)
37381 +               return;
37382 +
37383 +       if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
37384 +           memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
37385 +
37386 +#ifdef CONFIG_PAX_ETEXECRELOCS
37387 +           ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37388 +#else
37389 +           ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
37390 +#endif
37391 +
37392 +           (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37393 +           !elf_check_arch(&elf_h) ||
37394 +           elf_h.e_phentsize != sizeof(struct elf_phdr) ||
37395 +           elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
37396 +               return;
37397 +
37398 +       for (i = 0UL; i < elf_h.e_phnum; i++) {
37399 +               if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
37400 +                       return;
37401 +               switch (elf_p.p_type) {
37402 +               case PT_DYNAMIC:
37403 +                       if (!is_textrel_rw && !is_textrel_rx)
37404 +                               continue;
37405 +                       i = 0UL;
37406 +                       while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
37407 +                               elf_dyn dyn;
37408 +
37409 +                               if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
37410 +                                       return;
37411 +                               if (dyn.d_tag == DT_NULL)
37412 +                                       return;
37413 +                               if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
37414 +                                       gr_log_textrel(vma);
37415 +                                       if (is_textrel_rw)
37416 +                                               vma->vm_flags |= VM_MAYWRITE;
37417 +                                       else
37418 +                                               /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
37419 +                                               vma->vm_flags &= ~VM_MAYWRITE;
37420 +                                       return;
37421 +                               }
37422 +                               i++;
37423 +                       }
37424 +                       return;
37425 +
37426 +               case PT_GNU_RELRO:
37427 +                       if (!is_relro)
37428 +                               continue;
37429 +                       if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
37430 +                               vma->vm_flags &= ~VM_MAYWRITE;
37431 +                       return;
37432 +               }
37433 +       }
37434 +}
37435 +#endif
37436 +
37437  static int __init init_elf_binfmt(void)
37438  {
37439         return register_binfmt(&elf_format);
37440 diff -urNp linux-3.0.4/fs/binfmt_flat.c linux-3.0.4/fs/binfmt_flat.c
37441 --- linux-3.0.4/fs/binfmt_flat.c        2011-07-21 22:17:23.000000000 -0400
37442 +++ linux-3.0.4/fs/binfmt_flat.c        2011-08-23 21:47:56.000000000 -0400
37443 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
37444                                 realdatastart = (unsigned long) -ENOMEM;
37445                         printk("Unable to allocate RAM for process data, errno %d\n",
37446                                         (int)-realdatastart);
37447 +                       down_write(&current->mm->mmap_sem);
37448                         do_munmap(current->mm, textpos, text_len);
37449 +                       up_write(&current->mm->mmap_sem);
37450                         ret = realdatastart;
37451                         goto err;
37452                 }
37453 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
37454                 }
37455                 if (IS_ERR_VALUE(result)) {
37456                         printk("Unable to read data+bss, errno %d\n", (int)-result);
37457 +                       down_write(&current->mm->mmap_sem);
37458                         do_munmap(current->mm, textpos, text_len);
37459                         do_munmap(current->mm, realdatastart, len);
37460 +                       up_write(&current->mm->mmap_sem);
37461                         ret = result;
37462                         goto err;
37463                 }
37464 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
37465                 }
37466                 if (IS_ERR_VALUE(result)) {
37467                         printk("Unable to read code+data+bss, errno %d\n",(int)-result);
37468 +                       down_write(&current->mm->mmap_sem);
37469                         do_munmap(current->mm, textpos, text_len + data_len + extra +
37470                                 MAX_SHARED_LIBS * sizeof(unsigned long));
37471 +                       up_write(&current->mm->mmap_sem);
37472                         ret = result;
37473                         goto err;
37474                 }
37475 diff -urNp linux-3.0.4/fs/bio.c linux-3.0.4/fs/bio.c
37476 --- linux-3.0.4/fs/bio.c        2011-07-21 22:17:23.000000000 -0400
37477 +++ linux-3.0.4/fs/bio.c        2011-08-23 21:47:56.000000000 -0400
37478 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
37479         const int read = bio_data_dir(bio) == READ;
37480         struct bio_map_data *bmd = bio->bi_private;
37481         int i;
37482 -       char *p = bmd->sgvecs[0].iov_base;
37483 +       char *p = (__force char *)bmd->sgvecs[0].iov_base;
37484  
37485         __bio_for_each_segment(bvec, bio, i, 0) {
37486                 char *addr = page_address(bvec->bv_page);
37487 diff -urNp linux-3.0.4/fs/block_dev.c linux-3.0.4/fs/block_dev.c
37488 --- linux-3.0.4/fs/block_dev.c  2011-07-21 22:17:23.000000000 -0400
37489 +++ linux-3.0.4/fs/block_dev.c  2011-08-23 21:47:56.000000000 -0400
37490 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
37491         else if (bdev->bd_contains == bdev)
37492                 return true;     /* is a whole device which isn't held */
37493  
37494 -       else if (whole->bd_holder == bd_may_claim)
37495 +       else if (whole->bd_holder == (void *)bd_may_claim)
37496                 return true;     /* is a partition of a device that is being partitioned */
37497         else if (whole->bd_holder != NULL)
37498                 return false;    /* is a partition of a held device */
37499 diff -urNp linux-3.0.4/fs/btrfs/ctree.c linux-3.0.4/fs/btrfs/ctree.c
37500 --- linux-3.0.4/fs/btrfs/ctree.c        2011-07-21 22:17:23.000000000 -0400
37501 +++ linux-3.0.4/fs/btrfs/ctree.c        2011-08-23 21:47:56.000000000 -0400
37502 @@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
37503                 free_extent_buffer(buf);
37504                 add_root_to_dirty_list(root);
37505         } else {
37506 -               if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
37507 -                       parent_start = parent->start;
37508 -               else
37509 +               if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
37510 +                       if (parent)
37511 +                               parent_start = parent->start;
37512 +                       else
37513 +                               parent_start = 0;
37514 +               } else
37515                         parent_start = 0;
37516  
37517                 WARN_ON(trans->transid != btrfs_header_generation(parent));
37518 diff -urNp linux-3.0.4/fs/btrfs/inode.c linux-3.0.4/fs/btrfs/inode.c
37519 --- linux-3.0.4/fs/btrfs/inode.c        2011-07-21 22:17:23.000000000 -0400
37520 +++ linux-3.0.4/fs/btrfs/inode.c        2011-08-23 21:48:14.000000000 -0400
37521 @@ -6895,7 +6895,7 @@ fail:
37522         return -ENOMEM;
37523  }
37524  
37525 -static int btrfs_getattr(struct vfsmount *mnt,
37526 +int btrfs_getattr(struct vfsmount *mnt,
37527                          struct dentry *dentry, struct kstat *stat)
37528  {
37529         struct inode *inode = dentry->d_inode;
37530 @@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
37531         return 0;
37532  }
37533  
37534 +EXPORT_SYMBOL(btrfs_getattr);
37535 +
37536 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
37537 +{
37538 +       return BTRFS_I(inode)->root->anon_super.s_dev;
37539 +}
37540 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
37541 +
37542  /*
37543   * If a file is moved, it will inherit the cow and compression flags of the new
37544   * directory.
37545 diff -urNp linux-3.0.4/fs/btrfs/ioctl.c linux-3.0.4/fs/btrfs/ioctl.c
37546 --- linux-3.0.4/fs/btrfs/ioctl.c        2011-07-21 22:17:23.000000000 -0400
37547 +++ linux-3.0.4/fs/btrfs/ioctl.c        2011-08-23 21:48:14.000000000 -0400
37548 @@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
37549         for (i = 0; i < num_types; i++) {
37550                 struct btrfs_space_info *tmp;
37551  
37552 +               /* Don't copy in more than we allocated */
37553                 if (!slot_count)
37554                         break;
37555  
37556 +               slot_count--;
37557 +
37558                 info = NULL;
37559                 rcu_read_lock();
37560                 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
37561 @@ -2700,10 +2703,7 @@ long btrfs_ioctl_space_info(struct btrfs
37562                                 memcpy(dest, &space, sizeof(space));
37563                                 dest++;
37564                                 space_args.total_spaces++;
37565 -                               slot_count--;
37566                         }
37567 -                       if (!slot_count)
37568 -                               break;
37569                 }
37570                 up_read(&info->groups_sem);
37571         }
37572 diff -urNp linux-3.0.4/fs/btrfs/relocation.c linux-3.0.4/fs/btrfs/relocation.c
37573 --- linux-3.0.4/fs/btrfs/relocation.c   2011-07-21 22:17:23.000000000 -0400
37574 +++ linux-3.0.4/fs/btrfs/relocation.c   2011-08-23 21:47:56.000000000 -0400
37575 @@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
37576         }
37577         spin_unlock(&rc->reloc_root_tree.lock);
37578  
37579 -       BUG_ON((struct btrfs_root *)node->data != root);
37580 +       BUG_ON(!node || (struct btrfs_root *)node->data != root);
37581  
37582         if (!del) {
37583                 spin_lock(&rc->reloc_root_tree.lock);
37584 diff -urNp linux-3.0.4/fs/cachefiles/bind.c linux-3.0.4/fs/cachefiles/bind.c
37585 --- linux-3.0.4/fs/cachefiles/bind.c    2011-07-21 22:17:23.000000000 -0400
37586 +++ linux-3.0.4/fs/cachefiles/bind.c    2011-08-23 21:47:56.000000000 -0400
37587 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
37588                args);
37589  
37590         /* start by checking things over */
37591 -       ASSERT(cache->fstop_percent >= 0 &&
37592 -              cache->fstop_percent < cache->fcull_percent &&
37593 +       ASSERT(cache->fstop_percent < cache->fcull_percent &&
37594                cache->fcull_percent < cache->frun_percent &&
37595                cache->frun_percent  < 100);
37596  
37597 -       ASSERT(cache->bstop_percent >= 0 &&
37598 -              cache->bstop_percent < cache->bcull_percent &&
37599 +       ASSERT(cache->bstop_percent < cache->bcull_percent &&
37600                cache->bcull_percent < cache->brun_percent &&
37601                cache->brun_percent  < 100);
37602  
37603 diff -urNp linux-3.0.4/fs/cachefiles/daemon.c linux-3.0.4/fs/cachefiles/daemon.c
37604 --- linux-3.0.4/fs/cachefiles/daemon.c  2011-07-21 22:17:23.000000000 -0400
37605 +++ linux-3.0.4/fs/cachefiles/daemon.c  2011-08-23 21:47:56.000000000 -0400
37606 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
37607         if (n > buflen)
37608                 return -EMSGSIZE;
37609  
37610 -       if (copy_to_user(_buffer, buffer, n) != 0)
37611 +       if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
37612                 return -EFAULT;
37613  
37614         return n;
37615 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
37616         if (test_bit(CACHEFILES_DEAD, &cache->flags))
37617                 return -EIO;
37618  
37619 -       if (datalen < 0 || datalen > PAGE_SIZE - 1)
37620 +       if (datalen > PAGE_SIZE - 1)
37621                 return -EOPNOTSUPP;
37622  
37623         /* drag the command string into the kernel so we can parse it */
37624 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
37625         if (args[0] != '%' || args[1] != '\0')
37626                 return -EINVAL;
37627  
37628 -       if (fstop < 0 || fstop >= cache->fcull_percent)
37629 +       if (fstop >= cache->fcull_percent)
37630                 return cachefiles_daemon_range_error(cache, args);
37631  
37632         cache->fstop_percent = fstop;
37633 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
37634         if (args[0] != '%' || args[1] != '\0')
37635                 return -EINVAL;
37636  
37637 -       if (bstop < 0 || bstop >= cache->bcull_percent)
37638 +       if (bstop >= cache->bcull_percent)
37639                 return cachefiles_daemon_range_error(cache, args);
37640  
37641         cache->bstop_percent = bstop;
37642 diff -urNp linux-3.0.4/fs/cachefiles/internal.h linux-3.0.4/fs/cachefiles/internal.h
37643 --- linux-3.0.4/fs/cachefiles/internal.h        2011-07-21 22:17:23.000000000 -0400
37644 +++ linux-3.0.4/fs/cachefiles/internal.h        2011-08-23 21:47:56.000000000 -0400
37645 @@ -57,7 +57,7 @@ struct cachefiles_cache {
37646         wait_queue_head_t               daemon_pollwq;  /* poll waitqueue for daemon */
37647         struct rb_root                  active_nodes;   /* active nodes (can't be culled) */
37648         rwlock_t                        active_lock;    /* lock for active_nodes */
37649 -       atomic_t                        gravecounter;   /* graveyard uniquifier */
37650 +       atomic_unchecked_t              gravecounter;   /* graveyard uniquifier */
37651         unsigned                        frun_percent;   /* when to stop culling (% files) */
37652         unsigned                        fcull_percent;  /* when to start culling (% files) */
37653         unsigned                        fstop_percent;  /* when to stop allocating (% files) */
37654 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
37655   * proc.c
37656   */
37657  #ifdef CONFIG_CACHEFILES_HISTOGRAM
37658 -extern atomic_t cachefiles_lookup_histogram[HZ];
37659 -extern atomic_t cachefiles_mkdir_histogram[HZ];
37660 -extern atomic_t cachefiles_create_histogram[HZ];
37661 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37662 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37663 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
37664  
37665  extern int __init cachefiles_proc_init(void);
37666  extern void cachefiles_proc_cleanup(void);
37667  static inline
37668 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
37669 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
37670  {
37671         unsigned long jif = jiffies - start_jif;
37672         if (jif >= HZ)
37673                 jif = HZ - 1;
37674 -       atomic_inc(&histogram[jif]);
37675 +       atomic_inc_unchecked(&histogram[jif]);
37676  }
37677  
37678  #else
37679 diff -urNp linux-3.0.4/fs/cachefiles/namei.c linux-3.0.4/fs/cachefiles/namei.c
37680 --- linux-3.0.4/fs/cachefiles/namei.c   2011-07-21 22:17:23.000000000 -0400
37681 +++ linux-3.0.4/fs/cachefiles/namei.c   2011-08-23 21:47:56.000000000 -0400
37682 @@ -318,7 +318,7 @@ try_again:
37683         /* first step is to make up a grave dentry in the graveyard */
37684         sprintf(nbuffer, "%08x%08x",
37685                 (uint32_t) get_seconds(),
37686 -               (uint32_t) atomic_inc_return(&cache->gravecounter));
37687 +               (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
37688  
37689         /* do the multiway lock magic */
37690         trap = lock_rename(cache->graveyard, dir);
37691 diff -urNp linux-3.0.4/fs/cachefiles/proc.c linux-3.0.4/fs/cachefiles/proc.c
37692 --- linux-3.0.4/fs/cachefiles/proc.c    2011-07-21 22:17:23.000000000 -0400
37693 +++ linux-3.0.4/fs/cachefiles/proc.c    2011-08-23 21:47:56.000000000 -0400
37694 @@ -14,9 +14,9 @@
37695  #include <linux/seq_file.h>
37696  #include "internal.h"
37697  
37698 -atomic_t cachefiles_lookup_histogram[HZ];
37699 -atomic_t cachefiles_mkdir_histogram[HZ];
37700 -atomic_t cachefiles_create_histogram[HZ];
37701 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37702 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37703 +atomic_unchecked_t cachefiles_create_histogram[HZ];
37704  
37705  /*
37706   * display the latency histogram
37707 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37708                 return 0;
37709         default:
37710                 index = (unsigned long) v - 3;
37711 -               x = atomic_read(&cachefiles_lookup_histogram[index]);
37712 -               y = atomic_read(&cachefiles_mkdir_histogram[index]);
37713 -               z = atomic_read(&cachefiles_create_histogram[index]);
37714 +               x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37715 +               y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37716 +               z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37717                 if (x == 0 && y == 0 && z == 0)
37718                         return 0;
37719  
37720 diff -urNp linux-3.0.4/fs/cachefiles/rdwr.c linux-3.0.4/fs/cachefiles/rdwr.c
37721 --- linux-3.0.4/fs/cachefiles/rdwr.c    2011-07-21 22:17:23.000000000 -0400
37722 +++ linux-3.0.4/fs/cachefiles/rdwr.c    2011-08-23 21:47:56.000000000 -0400
37723 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
37724                         old_fs = get_fs();
37725                         set_fs(KERNEL_DS);
37726                         ret = file->f_op->write(
37727 -                               file, (const void __user *) data, len, &pos);
37728 +                               file, (__force const void __user *) data, len, &pos);
37729                         set_fs(old_fs);
37730                         kunmap(page);
37731                         if (ret != len)
37732 diff -urNp linux-3.0.4/fs/ceph/dir.c linux-3.0.4/fs/ceph/dir.c
37733 --- linux-3.0.4/fs/ceph/dir.c   2011-07-21 22:17:23.000000000 -0400
37734 +++ linux-3.0.4/fs/ceph/dir.c   2011-08-23 21:47:56.000000000 -0400
37735 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
37736         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
37737         struct ceph_mds_client *mdsc = fsc->mdsc;
37738         unsigned frag = fpos_frag(filp->f_pos);
37739 -       int off = fpos_off(filp->f_pos);
37740 +       unsigned int off = fpos_off(filp->f_pos);
37741         int err;
37742         u32 ftype;
37743         struct ceph_mds_reply_info_parsed *rinfo;
37744 diff -urNp linux-3.0.4/fs/cifs/cifs_debug.c linux-3.0.4/fs/cifs/cifs_debug.c
37745 --- linux-3.0.4/fs/cifs/cifs_debug.c    2011-07-21 22:17:23.000000000 -0400
37746 +++ linux-3.0.4/fs/cifs/cifs_debug.c    2011-08-25 17:18:05.000000000 -0400
37747 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
37748  
37749         if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
37750  #ifdef CONFIG_CIFS_STATS2
37751 -               atomic_set(&totBufAllocCount, 0);
37752 -               atomic_set(&totSmBufAllocCount, 0);
37753 +               atomic_set_unchecked(&totBufAllocCount, 0);
37754 +               atomic_set_unchecked(&totSmBufAllocCount, 0);
37755  #endif /* CONFIG_CIFS_STATS2 */
37756                 spin_lock(&cifs_tcp_ses_lock);
37757                 list_for_each(tmp1, &cifs_tcp_ses_list) {
37758 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
37759                                         tcon = list_entry(tmp3,
37760                                                           struct cifs_tcon,
37761                                                           tcon_list);
37762 -                                       atomic_set(&tcon->num_smbs_sent, 0);
37763 -                                       atomic_set(&tcon->num_writes, 0);
37764 -                                       atomic_set(&tcon->num_reads, 0);
37765 -                                       atomic_set(&tcon->num_oplock_brks, 0);
37766 -                                       atomic_set(&tcon->num_opens, 0);
37767 -                                       atomic_set(&tcon->num_posixopens, 0);
37768 -                                       atomic_set(&tcon->num_posixmkdirs, 0);
37769 -                                       atomic_set(&tcon->num_closes, 0);
37770 -                                       atomic_set(&tcon->num_deletes, 0);
37771 -                                       atomic_set(&tcon->num_mkdirs, 0);
37772 -                                       atomic_set(&tcon->num_rmdirs, 0);
37773 -                                       atomic_set(&tcon->num_renames, 0);
37774 -                                       atomic_set(&tcon->num_t2renames, 0);
37775 -                                       atomic_set(&tcon->num_ffirst, 0);
37776 -                                       atomic_set(&tcon->num_fnext, 0);
37777 -                                       atomic_set(&tcon->num_fclose, 0);
37778 -                                       atomic_set(&tcon->num_hardlinks, 0);
37779 -                                       atomic_set(&tcon->num_symlinks, 0);
37780 -                                       atomic_set(&tcon->num_locks, 0);
37781 +                                       atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37782 +                                       atomic_set_unchecked(&tcon->num_writes, 0);
37783 +                                       atomic_set_unchecked(&tcon->num_reads, 0);
37784 +                                       atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37785 +                                       atomic_set_unchecked(&tcon->num_opens, 0);
37786 +                                       atomic_set_unchecked(&tcon->num_posixopens, 0);
37787 +                                       atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37788 +                                       atomic_set_unchecked(&tcon->num_closes, 0);
37789 +                                       atomic_set_unchecked(&tcon->num_deletes, 0);
37790 +                                       atomic_set_unchecked(&tcon->num_mkdirs, 0);
37791 +                                       atomic_set_unchecked(&tcon->num_rmdirs, 0);
37792 +                                       atomic_set_unchecked(&tcon->num_renames, 0);
37793 +                                       atomic_set_unchecked(&tcon->num_t2renames, 0);
37794 +                                       atomic_set_unchecked(&tcon->num_ffirst, 0);
37795 +                                       atomic_set_unchecked(&tcon->num_fnext, 0);
37796 +                                       atomic_set_unchecked(&tcon->num_fclose, 0);
37797 +                                       atomic_set_unchecked(&tcon->num_hardlinks, 0);
37798 +                                       atomic_set_unchecked(&tcon->num_symlinks, 0);
37799 +                                       atomic_set_unchecked(&tcon->num_locks, 0);
37800                                 }
37801                         }
37802                 }
37803 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
37804                         smBufAllocCount.counter, cifs_min_small);
37805  #ifdef CONFIG_CIFS_STATS2
37806         seq_printf(m, "Total Large %d Small %d Allocations\n",
37807 -                               atomic_read(&totBufAllocCount),
37808 -                               atomic_read(&totSmBufAllocCount));
37809 +                               atomic_read_unchecked(&totBufAllocCount),
37810 +                               atomic_read_unchecked(&totSmBufAllocCount));
37811  #endif /* CONFIG_CIFS_STATS2 */
37812  
37813         seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
37814 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
37815                                 if (tcon->need_reconnect)
37816                                         seq_puts(m, "\tDISCONNECTED ");
37817                                 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37818 -                                       atomic_read(&tcon->num_smbs_sent),
37819 -                                       atomic_read(&tcon->num_oplock_brks));
37820 +                                       atomic_read_unchecked(&tcon->num_smbs_sent),
37821 +                                       atomic_read_unchecked(&tcon->num_oplock_brks));
37822                                 seq_printf(m, "\nReads:  %d Bytes: %lld",
37823 -                                       atomic_read(&tcon->num_reads),
37824 +                                       atomic_read_unchecked(&tcon->num_reads),
37825                                         (long long)(tcon->bytes_read));
37826                                 seq_printf(m, "\nWrites: %d Bytes: %lld",
37827 -                                       atomic_read(&tcon->num_writes),
37828 +                                       atomic_read_unchecked(&tcon->num_writes),
37829                                         (long long)(tcon->bytes_written));
37830                                 seq_printf(m, "\nFlushes: %d",
37831 -                                       atomic_read(&tcon->num_flushes));
37832 +                                       atomic_read_unchecked(&tcon->num_flushes));
37833                                 seq_printf(m, "\nLocks: %d HardLinks: %d "
37834                                               "Symlinks: %d",
37835 -                                       atomic_read(&tcon->num_locks),
37836 -                                       atomic_read(&tcon->num_hardlinks),
37837 -                                       atomic_read(&tcon->num_symlinks));
37838 +                                       atomic_read_unchecked(&tcon->num_locks),
37839 +                                       atomic_read_unchecked(&tcon->num_hardlinks),
37840 +                                       atomic_read_unchecked(&tcon->num_symlinks));
37841                                 seq_printf(m, "\nOpens: %d Closes: %d "
37842                                               "Deletes: %d",
37843 -                                       atomic_read(&tcon->num_opens),
37844 -                                       atomic_read(&tcon->num_closes),
37845 -                                       atomic_read(&tcon->num_deletes));
37846 +                                       atomic_read_unchecked(&tcon->num_opens),
37847 +                                       atomic_read_unchecked(&tcon->num_closes),
37848 +                                       atomic_read_unchecked(&tcon->num_deletes));
37849                                 seq_printf(m, "\nPosix Opens: %d "
37850                                               "Posix Mkdirs: %d",
37851 -                                       atomic_read(&tcon->num_posixopens),
37852 -                                       atomic_read(&tcon->num_posixmkdirs));
37853 +                                       atomic_read_unchecked(&tcon->num_posixopens),
37854 +                                       atomic_read_unchecked(&tcon->num_posixmkdirs));
37855                                 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37856 -                                       atomic_read(&tcon->num_mkdirs),
37857 -                                       atomic_read(&tcon->num_rmdirs));
37858 +                                       atomic_read_unchecked(&tcon->num_mkdirs),
37859 +                                       atomic_read_unchecked(&tcon->num_rmdirs));
37860                                 seq_printf(m, "\nRenames: %d T2 Renames %d",
37861 -                                       atomic_read(&tcon->num_renames),
37862 -                                       atomic_read(&tcon->num_t2renames));
37863 +                                       atomic_read_unchecked(&tcon->num_renames),
37864 +                                       atomic_read_unchecked(&tcon->num_t2renames));
37865                                 seq_printf(m, "\nFindFirst: %d FNext %d "
37866                                               "FClose %d",
37867 -                                       atomic_read(&tcon->num_ffirst),
37868 -                                       atomic_read(&tcon->num_fnext),
37869 -                                       atomic_read(&tcon->num_fclose));
37870 +                                       atomic_read_unchecked(&tcon->num_ffirst),
37871 +                                       atomic_read_unchecked(&tcon->num_fnext),
37872 +                                       atomic_read_unchecked(&tcon->num_fclose));
37873                         }
37874                 }
37875         }
37876 diff -urNp linux-3.0.4/fs/cifs/cifsfs.c linux-3.0.4/fs/cifs/cifsfs.c
37877 --- linux-3.0.4/fs/cifs/cifsfs.c        2011-08-23 21:44:40.000000000 -0400
37878 +++ linux-3.0.4/fs/cifs/cifsfs.c        2011-08-25 17:18:05.000000000 -0400
37879 @@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
37880         cifs_req_cachep = kmem_cache_create("cifs_request",
37881                                             CIFSMaxBufSize +
37882                                             MAX_CIFS_HDR_SIZE, 0,
37883 -                                           SLAB_HWCACHE_ALIGN, NULL);
37884 +                                           SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
37885         if (cifs_req_cachep == NULL)
37886                 return -ENOMEM;
37887  
37888 @@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
37889         efficient to alloc 1 per page off the slab compared to 17K (5page)
37890         alloc of large cifs buffers even when page debugging is on */
37891         cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
37892 -                       MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
37893 +                       MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
37894                         NULL);
37895         if (cifs_sm_req_cachep == NULL) {
37896                 mempool_destroy(cifs_req_poolp);
37897 @@ -1106,8 +1106,8 @@ init_cifs(void)
37898         atomic_set(&bufAllocCount, 0);
37899         atomic_set(&smBufAllocCount, 0);
37900  #ifdef CONFIG_CIFS_STATS2
37901 -       atomic_set(&totBufAllocCount, 0);
37902 -       atomic_set(&totSmBufAllocCount, 0);
37903 +       atomic_set_unchecked(&totBufAllocCount, 0);
37904 +       atomic_set_unchecked(&totSmBufAllocCount, 0);
37905  #endif /* CONFIG_CIFS_STATS2 */
37906  
37907         atomic_set(&midCount, 0);
37908 diff -urNp linux-3.0.4/fs/cifs/cifsglob.h linux-3.0.4/fs/cifs/cifsglob.h
37909 --- linux-3.0.4/fs/cifs/cifsglob.h      2011-07-21 22:17:23.000000000 -0400
37910 +++ linux-3.0.4/fs/cifs/cifsglob.h      2011-08-25 17:18:05.000000000 -0400
37911 @@ -381,28 +381,28 @@ struct cifs_tcon {
37912         __u16 Flags;            /* optional support bits */
37913         enum statusEnum tidStatus;
37914  #ifdef CONFIG_CIFS_STATS
37915 -       atomic_t num_smbs_sent;
37916 -       atomic_t num_writes;
37917 -       atomic_t num_reads;
37918 -       atomic_t num_flushes;
37919 -       atomic_t num_oplock_brks;
37920 -       atomic_t num_opens;
37921 -       atomic_t num_closes;
37922 -       atomic_t num_deletes;
37923 -       atomic_t num_mkdirs;
37924 -       atomic_t num_posixopens;
37925 -       atomic_t num_posixmkdirs;
37926 -       atomic_t num_rmdirs;
37927 -       atomic_t num_renames;
37928 -       atomic_t num_t2renames;
37929 -       atomic_t num_ffirst;
37930 -       atomic_t num_fnext;
37931 -       atomic_t num_fclose;
37932 -       atomic_t num_hardlinks;
37933 -       atomic_t num_symlinks;
37934 -       atomic_t num_locks;
37935 -       atomic_t num_acl_get;
37936 -       atomic_t num_acl_set;
37937 +       atomic_unchecked_t num_smbs_sent;
37938 +       atomic_unchecked_t num_writes;
37939 +       atomic_unchecked_t num_reads;
37940 +       atomic_unchecked_t num_flushes;
37941 +       atomic_unchecked_t num_oplock_brks;
37942 +       atomic_unchecked_t num_opens;
37943 +       atomic_unchecked_t num_closes;
37944 +       atomic_unchecked_t num_deletes;
37945 +       atomic_unchecked_t num_mkdirs;
37946 +       atomic_unchecked_t num_posixopens;
37947 +       atomic_unchecked_t num_posixmkdirs;
37948 +       atomic_unchecked_t num_rmdirs;
37949 +       atomic_unchecked_t num_renames;
37950 +       atomic_unchecked_t num_t2renames;
37951 +       atomic_unchecked_t num_ffirst;
37952 +       atomic_unchecked_t num_fnext;
37953 +       atomic_unchecked_t num_fclose;
37954 +       atomic_unchecked_t num_hardlinks;
37955 +       atomic_unchecked_t num_symlinks;
37956 +       atomic_unchecked_t num_locks;
37957 +       atomic_unchecked_t num_acl_get;
37958 +       atomic_unchecked_t num_acl_set;
37959  #ifdef CONFIG_CIFS_STATS2
37960         unsigned long long time_writes;
37961         unsigned long long time_reads;
37962 @@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
37963  }
37964  
37965  #ifdef CONFIG_CIFS_STATS
37966 -#define cifs_stats_inc atomic_inc
37967 +#define cifs_stats_inc atomic_inc_unchecked
37968  
37969  static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
37970                                             unsigned int bytes)
37971 @@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
37972  /* Various Debug counters */
37973  GLOBAL_EXTERN atomic_t bufAllocCount;    /* current number allocated  */
37974  #ifdef CONFIG_CIFS_STATS2
37975 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
37976 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
37977 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
37978 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
37979  #endif
37980  GLOBAL_EXTERN atomic_t smBufAllocCount;
37981  GLOBAL_EXTERN atomic_t midCount;
37982 diff -urNp linux-3.0.4/fs/cifs/link.c linux-3.0.4/fs/cifs/link.c
37983 --- linux-3.0.4/fs/cifs/link.c  2011-07-21 22:17:23.000000000 -0400
37984 +++ linux-3.0.4/fs/cifs/link.c  2011-08-23 21:47:56.000000000 -0400
37985 @@ -587,7 +587,7 @@ symlink_exit:
37986  
37987  void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37988  {
37989 -       char *p = nd_get_link(nd);
37990 +       const char *p = nd_get_link(nd);
37991         if (!IS_ERR(p))
37992                 kfree(p);
37993  }
37994 diff -urNp linux-3.0.4/fs/cifs/misc.c linux-3.0.4/fs/cifs/misc.c
37995 --- linux-3.0.4/fs/cifs/misc.c  2011-07-21 22:17:23.000000000 -0400
37996 +++ linux-3.0.4/fs/cifs/misc.c  2011-08-25 17:18:05.000000000 -0400
37997 @@ -156,7 +156,7 @@ cifs_buf_get(void)
37998                 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
37999                 atomic_inc(&bufAllocCount);
38000  #ifdef CONFIG_CIFS_STATS2
38001 -               atomic_inc(&totBufAllocCount);
38002 +               atomic_inc_unchecked(&totBufAllocCount);
38003  #endif /* CONFIG_CIFS_STATS2 */
38004         }
38005  
38006 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
38007         /*      memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
38008                 atomic_inc(&smBufAllocCount);
38009  #ifdef CONFIG_CIFS_STATS2
38010 -               atomic_inc(&totSmBufAllocCount);
38011 +               atomic_inc_unchecked(&totSmBufAllocCount);
38012  #endif /* CONFIG_CIFS_STATS2 */
38013  
38014         }
38015 diff -urNp linux-3.0.4/fs/coda/cache.c linux-3.0.4/fs/coda/cache.c
38016 --- linux-3.0.4/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
38017 +++ linux-3.0.4/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
38018 @@ -24,7 +24,7 @@
38019  #include "coda_linux.h"
38020  #include "coda_cache.h"
38021  
38022 -static atomic_t permission_epoch = ATOMIC_INIT(0);
38023 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
38024  
38025  /* replace or extend an acl cache hit */
38026  void coda_cache_enter(struct inode *inode, int mask)
38027 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
38028         struct coda_inode_info *cii = ITOC(inode);
38029  
38030         spin_lock(&cii->c_lock);
38031 -       cii->c_cached_epoch = atomic_read(&permission_epoch);
38032 +       cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
38033         if (cii->c_uid != current_fsuid()) {
38034                 cii->c_uid = current_fsuid();
38035                  cii->c_cached_perm = mask;
38036 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
38037  {
38038         struct coda_inode_info *cii = ITOC(inode);
38039         spin_lock(&cii->c_lock);
38040 -       cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
38041 +       cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
38042         spin_unlock(&cii->c_lock);
38043  }
38044  
38045  /* remove all acl caches */
38046  void coda_cache_clear_all(struct super_block *sb)
38047  {
38048 -       atomic_inc(&permission_epoch);
38049 +       atomic_inc_unchecked(&permission_epoch);
38050  }
38051  
38052  
38053 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
38054         spin_lock(&cii->c_lock);
38055         hit = (mask & cii->c_cached_perm) == mask &&
38056             cii->c_uid == current_fsuid() &&
38057 -           cii->c_cached_epoch == atomic_read(&permission_epoch);
38058 +           cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
38059         spin_unlock(&cii->c_lock);
38060  
38061         return hit;
38062 diff -urNp linux-3.0.4/fs/compat_binfmt_elf.c linux-3.0.4/fs/compat_binfmt_elf.c
38063 --- linux-3.0.4/fs/compat_binfmt_elf.c  2011-07-21 22:17:23.000000000 -0400
38064 +++ linux-3.0.4/fs/compat_binfmt_elf.c  2011-08-23 21:47:56.000000000 -0400
38065 @@ -30,11 +30,13 @@
38066  #undef elf_phdr
38067  #undef elf_shdr
38068  #undef elf_note
38069 +#undef elf_dyn
38070  #undef elf_addr_t
38071  #define elfhdr         elf32_hdr
38072  #define elf_phdr       elf32_phdr
38073  #define elf_shdr       elf32_shdr
38074  #define elf_note       elf32_note
38075 +#define elf_dyn                Elf32_Dyn
38076  #define elf_addr_t     Elf32_Addr
38077  
38078  /*
38079 diff -urNp linux-3.0.4/fs/compat.c linux-3.0.4/fs/compat.c
38080 --- linux-3.0.4/fs/compat.c     2011-07-21 22:17:23.000000000 -0400
38081 +++ linux-3.0.4/fs/compat.c     2011-08-23 22:49:33.000000000 -0400
38082 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
38083                 goto out;
38084  
38085         ret = -EINVAL;
38086 -       if (nr_segs > UIO_MAXIOV || nr_segs < 0)
38087 +       if (nr_segs > UIO_MAXIOV)
38088                 goto out;
38089         if (nr_segs > fast_segs) {
38090                 ret = -ENOMEM;
38091 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
38092  
38093  struct compat_readdir_callback {
38094         struct compat_old_linux_dirent __user *dirent;
38095 +       struct file * file;
38096         int result;
38097  };
38098  
38099 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
38100                 buf->result = -EOVERFLOW;
38101                 return -EOVERFLOW;
38102         }
38103 +
38104 +       if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38105 +               return 0;
38106 +
38107         buf->result++;
38108         dirent = buf->dirent;
38109         if (!access_ok(VERIFY_WRITE, dirent,
38110 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
38111  
38112         buf.result = 0;
38113         buf.dirent = dirent;
38114 +       buf.file = file;
38115  
38116         error = vfs_readdir(file, compat_fillonedir, &buf);
38117         if (buf.result)
38118 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
38119  struct compat_getdents_callback {
38120         struct compat_linux_dirent __user *current_dir;
38121         struct compat_linux_dirent __user *previous;
38122 +       struct file * file;
38123         int count;
38124         int error;
38125  };
38126 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
38127                 buf->error = -EOVERFLOW;
38128                 return -EOVERFLOW;
38129         }
38130 +
38131 +       if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38132 +               return 0;
38133 +
38134         dirent = buf->previous;
38135         if (dirent) {
38136                 if (__put_user(offset, &dirent->d_off))
38137 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
38138         buf.previous = NULL;
38139         buf.count = count;
38140         buf.error = 0;
38141 +       buf.file = file;
38142  
38143         error = vfs_readdir(file, compat_filldir, &buf);
38144         if (error >= 0)
38145 @@ -1006,6 +1018,7 @@ out:
38146  struct compat_getdents_callback64 {
38147         struct linux_dirent64 __user *current_dir;
38148         struct linux_dirent64 __user *previous;
38149 +       struct file * file;
38150         int count;
38151         int error;
38152  };
38153 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
38154         buf->error = -EINVAL;   /* only used if we fail.. */
38155         if (reclen > buf->count)
38156                 return -EINVAL;
38157 +
38158 +       if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38159 +               return 0;
38160 +
38161         dirent = buf->previous;
38162  
38163         if (dirent) {
38164 @@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
38165         buf.previous = NULL;
38166         buf.count = count;
38167         buf.error = 0;
38168 +       buf.file = file;
38169  
38170         error = vfs_readdir(file, compat_filldir64, &buf);
38171         if (error >= 0)
38172 @@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
38173         struct fdtable *fdt;
38174         long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
38175  
38176 +       pax_track_stack();
38177 +
38178         if (n < 0)
38179                 goto out_nofds;
38180  
38181 diff -urNp linux-3.0.4/fs/compat_ioctl.c linux-3.0.4/fs/compat_ioctl.c
38182 --- linux-3.0.4/fs/compat_ioctl.c       2011-07-21 22:17:23.000000000 -0400
38183 +++ linux-3.0.4/fs/compat_ioctl.c       2011-08-23 21:47:56.000000000 -0400
38184 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
38185  
38186         err  = get_user(palp, &up->palette);
38187         err |= get_user(length, &up->length);
38188 +       if (err)
38189 +               return -EFAULT;
38190  
38191         up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
38192         err  = put_user(compat_ptr(palp), &up_native->palette);
38193 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
38194  static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
38195  {
38196         unsigned int a, b;
38197 -       a = *(unsigned int *)p;
38198 -       b = *(unsigned int *)q;
38199 +       a = *(const unsigned int *)p;
38200 +       b = *(const unsigned int *)q;
38201         if (a > b)
38202                 return 1;
38203         if (a < b)
38204 diff -urNp linux-3.0.4/fs/configfs/dir.c linux-3.0.4/fs/configfs/dir.c
38205 --- linux-3.0.4/fs/configfs/dir.c       2011-07-21 22:17:23.000000000 -0400
38206 +++ linux-3.0.4/fs/configfs/dir.c       2011-08-23 21:47:56.000000000 -0400
38207 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file 
38208                         }
38209                         for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
38210                                 struct configfs_dirent *next;
38211 -                               const char * name;
38212 +                               const unsigned char * name;
38213 +                               char d_name[sizeof(next->s_dentry->d_iname)];
38214                                 int len;
38215                                 struct inode *inode = NULL;
38216  
38217 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file 
38218                                         continue;
38219  
38220                                 name = configfs_get_name(next);
38221 -                               len = strlen(name);
38222 +                               if (next->s_dentry && name == next->s_dentry->d_iname) {
38223 +                                       len =  next->s_dentry->d_name.len;
38224 +                                       memcpy(d_name, name, len);
38225 +                                       name = d_name;
38226 +                               } else
38227 +                                       len = strlen(name);
38228  
38229                                 /*
38230                                  * We'll have a dentry and an inode for
38231 diff -urNp linux-3.0.4/fs/dcache.c linux-3.0.4/fs/dcache.c
38232 --- linux-3.0.4/fs/dcache.c     2011-07-21 22:17:23.000000000 -0400
38233 +++ linux-3.0.4/fs/dcache.c     2011-08-23 21:47:56.000000000 -0400
38234 @@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
38235         mempages -= reserve;
38236  
38237         names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
38238 -                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
38239 +                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
38240  
38241         dcache_init();
38242         inode_init();
38243 diff -urNp linux-3.0.4/fs/ecryptfs/inode.c linux-3.0.4/fs/ecryptfs/inode.c
38244 --- linux-3.0.4/fs/ecryptfs/inode.c     2011-08-23 21:44:40.000000000 -0400
38245 +++ linux-3.0.4/fs/ecryptfs/inode.c     2011-08-23 21:47:56.000000000 -0400
38246 @@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
38247         old_fs = get_fs();
38248         set_fs(get_ds());
38249         rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
38250 -                                                  (char __user *)lower_buf,
38251 +                                                  (__force char __user *)lower_buf,
38252                                                    lower_bufsiz);
38253         set_fs(old_fs);
38254         if (rc < 0)
38255 @@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
38256         }
38257         old_fs = get_fs();
38258         set_fs(get_ds());
38259 -       rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
38260 +       rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
38261         set_fs(old_fs);
38262         if (rc < 0) {
38263                 kfree(buf);
38264 @@ -765,7 +765,7 @@ out:
38265  static void
38266  ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
38267  {
38268 -       char *buf = nd_get_link(nd);
38269 +       const char *buf = nd_get_link(nd);
38270         if (!IS_ERR(buf)) {
38271                 /* Free the char* */
38272                 kfree(buf);
38273 diff -urNp linux-3.0.4/fs/ecryptfs/miscdev.c linux-3.0.4/fs/ecryptfs/miscdev.c
38274 --- linux-3.0.4/fs/ecryptfs/miscdev.c   2011-07-21 22:17:23.000000000 -0400
38275 +++ linux-3.0.4/fs/ecryptfs/miscdev.c   2011-08-23 21:47:56.000000000 -0400
38276 @@ -328,7 +328,7 @@ check_list:
38277                 goto out_unlock_msg_ctx;
38278         i = 5;
38279         if (msg_ctx->msg) {
38280 -               if (copy_to_user(&buf[i], packet_length, packet_length_size))
38281 +               if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
38282                         goto out_unlock_msg_ctx;
38283                 i += packet_length_size;
38284                 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
38285 diff -urNp linux-3.0.4/fs/exec.c linux-3.0.4/fs/exec.c
38286 --- linux-3.0.4/fs/exec.c       2011-07-21 22:17:23.000000000 -0400
38287 +++ linux-3.0.4/fs/exec.c       2011-08-25 17:26:58.000000000 -0400
38288 @@ -55,12 +55,24 @@
38289  #include <linux/pipe_fs_i.h>
38290  #include <linux/oom.h>
38291  #include <linux/compat.h>
38292 +#include <linux/random.h>
38293 +#include <linux/seq_file.h>
38294 +
38295 +#ifdef CONFIG_PAX_REFCOUNT
38296 +#include <linux/kallsyms.h>
38297 +#include <linux/kdebug.h>
38298 +#endif
38299  
38300  #include <asm/uaccess.h>
38301  #include <asm/mmu_context.h>
38302  #include <asm/tlb.h>
38303  #include "internal.h"
38304  
38305 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
38306 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
38307 +EXPORT_SYMBOL(pax_set_initial_flags_func);
38308 +#endif
38309 +
38310  int core_uses_pid;
38311  char core_pattern[CORENAME_MAX_SIZE] = "core";
38312  unsigned int core_pipe_limit;
38313 @@ -70,7 +82,7 @@ struct core_name {
38314         char *corename;
38315         int used, size;
38316  };
38317 -static atomic_t call_count = ATOMIC_INIT(1);
38318 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
38319  
38320  /* The maximal length of core_pattern is also specified in sysctl.c */
38321  
38322 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
38323         char *tmp = getname(library);
38324         int error = PTR_ERR(tmp);
38325         static const struct open_flags uselib_flags = {
38326 -               .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
38327 +               .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
38328                 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
38329                 .intent = LOOKUP_OPEN
38330         };
38331 @@ -195,18 +207,10 @@ static struct page *get_arg_page(struct 
38332                 int write)
38333  {
38334         struct page *page;
38335 -       int ret;
38336  
38337 -#ifdef CONFIG_STACK_GROWSUP
38338 -       if (write) {
38339 -               ret = expand_downwards(bprm->vma, pos);
38340 -               if (ret < 0)
38341 -                       return NULL;
38342 -       }
38343 -#endif
38344 -       ret = get_user_pages(current, bprm->mm, pos,
38345 -                       1, write, 1, &page, NULL);
38346 -       if (ret <= 0)
38347 +       if (0 > expand_downwards(bprm->vma, pos))
38348 +               return NULL;
38349 +       if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
38350                 return NULL;
38351  
38352         if (write) {
38353 @@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
38354         vma->vm_end = STACK_TOP_MAX;
38355         vma->vm_start = vma->vm_end - PAGE_SIZE;
38356         vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
38357 +
38358 +#ifdef CONFIG_PAX_SEGMEXEC
38359 +       vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
38360 +#endif
38361 +
38362         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
38363         INIT_LIST_HEAD(&vma->anon_vma_chain);
38364  
38365 @@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
38366         mm->stack_vm = mm->total_vm = 1;
38367         up_write(&mm->mmap_sem);
38368         bprm->p = vma->vm_end - sizeof(void *);
38369 +
38370 +#ifdef CONFIG_PAX_RANDUSTACK
38371 +       if (randomize_va_space)
38372 +               bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
38373 +#endif
38374 +
38375         return 0;
38376  err:
38377         up_write(&mm->mmap_sem);
38378 @@ -403,19 +418,7 @@ err:
38379         return err;
38380  }
38381  
38382 -struct user_arg_ptr {
38383 -#ifdef CONFIG_COMPAT
38384 -       bool is_compat;
38385 -#endif
38386 -       union {
38387 -               const char __user *const __user *native;
38388 -#ifdef CONFIG_COMPAT
38389 -               compat_uptr_t __user *compat;
38390 -#endif
38391 -       } ptr;
38392 -};
38393 -
38394 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
38395 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
38396  {
38397         const char __user *native;
38398  
38399 @@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const 
38400         int r;
38401         mm_segment_t oldfs = get_fs();
38402         struct user_arg_ptr argv = {
38403 -               .ptr.native = (const char __user *const  __user *)__argv,
38404 +               .ptr.native = (__force const char __user *const  __user *)__argv,
38405         };
38406  
38407         set_fs(KERNEL_DS);
38408 @@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
38409         unsigned long new_end = old_end - shift;
38410         struct mmu_gather tlb;
38411  
38412 -       BUG_ON(new_start > new_end);
38413 +       if (new_start >= new_end || new_start < mmap_min_addr)
38414 +               return -ENOMEM;
38415  
38416         /*
38417          * ensure there are no vmas between where we want to go
38418 @@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
38419         if (vma != find_vma(mm, new_start))
38420                 return -EFAULT;
38421  
38422 +#ifdef CONFIG_PAX_SEGMEXEC
38423 +       BUG_ON(pax_find_mirror_vma(vma));
38424 +#endif
38425 +
38426         /*
38427          * cover the whole range: [new_start, old_end)
38428          */
38429 @@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm 
38430         stack_top = arch_align_stack(stack_top);
38431         stack_top = PAGE_ALIGN(stack_top);
38432  
38433 -       if (unlikely(stack_top < mmap_min_addr) ||
38434 -           unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
38435 -               return -ENOMEM;
38436 -
38437         stack_shift = vma->vm_end - stack_top;
38438  
38439         bprm->p -= stack_shift;
38440 @@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm 
38441         bprm->exec -= stack_shift;
38442  
38443         down_write(&mm->mmap_sem);
38444 +
38445 +       /* Move stack pages down in memory. */
38446 +       if (stack_shift) {
38447 +               ret = shift_arg_pages(vma, stack_shift);
38448 +               if (ret)
38449 +                       goto out_unlock;
38450 +       }
38451 +
38452         vm_flags = VM_STACK_FLAGS;
38453  
38454 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38455 +       if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38456 +               vm_flags &= ~VM_EXEC;
38457 +
38458 +#ifdef CONFIG_PAX_MPROTECT
38459 +               if (mm->pax_flags & MF_PAX_MPROTECT)
38460 +                       vm_flags &= ~VM_MAYEXEC;
38461 +#endif
38462 +
38463 +       }
38464 +#endif
38465 +
38466         /*
38467          * Adjust stack execute permissions; explicitly enable for
38468          * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
38469 @@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm 
38470                 goto out_unlock;
38471         BUG_ON(prev != vma);
38472  
38473 -       /* Move stack pages down in memory. */
38474 -       if (stack_shift) {
38475 -               ret = shift_arg_pages(vma, stack_shift);
38476 -               if (ret)
38477 -                       goto out_unlock;
38478 -       }
38479 -
38480         /* mprotect_fixup is overkill to remove the temporary stack flags */
38481         vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
38482  
38483 @@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
38484         struct file *file;
38485         int err;
38486         static const struct open_flags open_exec_flags = {
38487 -               .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
38488 +               .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
38489                 .acc_mode = MAY_EXEC | MAY_OPEN,
38490                 .intent = LOOKUP_OPEN
38491         };
38492 @@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
38493         old_fs = get_fs();
38494         set_fs(get_ds());
38495         /* The cast to a user pointer is valid due to the set_fs() */
38496 -       result = vfs_read(file, (void __user *)addr, count, &pos);
38497 +       result = vfs_read(file, (__force void __user *)addr, count, &pos);
38498         set_fs(old_fs);
38499         return result;
38500  }
38501 @@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
38502         }
38503         rcu_read_unlock();
38504  
38505 -       if (p->fs->users > n_fs) {
38506 +       if (atomic_read(&p->fs->users) > n_fs) {
38507                 bprm->unsafe |= LSM_UNSAFE_SHARE;
38508         } else {
38509                 res = -EAGAIN;
38510 @@ -1428,11 +1445,35 @@ static int do_execve_common(const char *
38511                                 struct user_arg_ptr envp,
38512                                 struct pt_regs *regs)
38513  {
38514 +#ifdef CONFIG_GRKERNSEC
38515 +       struct file *old_exec_file;
38516 +       struct acl_subject_label *old_acl;
38517 +       struct rlimit old_rlim[RLIM_NLIMITS];
38518 +#endif
38519         struct linux_binprm *bprm;
38520         struct file *file;
38521         struct files_struct *displaced;
38522         bool clear_in_exec;
38523         int retval;
38524 +       const struct cred *cred = current_cred();
38525 +
38526 +       gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38527 +
38528 +       /*
38529 +        * We move the actual failure in case of RLIMIT_NPROC excess from
38530 +        * set*uid() to execve() because too many poorly written programs
38531 +        * don't check setuid() return code.  Here we additionally recheck
38532 +        * whether NPROC limit is still exceeded.
38533 +        */
38534 +       if ((current->flags & PF_NPROC_EXCEEDED) &&
38535 +           atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
38536 +               retval = -EAGAIN;
38537 +               goto out_ret;
38538 +       }
38539 +
38540 +       /* We're below the limit (still or again), so we don't want to make
38541 +        * further execve() calls fail. */
38542 +       current->flags &= ~PF_NPROC_EXCEEDED;
38543  
38544         retval = unshare_files(&displaced);
38545         if (retval)
38546 @@ -1464,6 +1505,16 @@ static int do_execve_common(const char *
38547         bprm->filename = filename;
38548         bprm->interp = filename;
38549  
38550 +       if (gr_process_user_ban()) {
38551 +               retval = -EPERM;
38552 +               goto out_file;
38553 +       }
38554 +
38555 +       if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
38556 +               retval = -EACCES;
38557 +               goto out_file;
38558 +       }
38559 +
38560         retval = bprm_mm_init(bprm);
38561         if (retval)
38562                 goto out_file;
38563 @@ -1493,9 +1544,40 @@ static int do_execve_common(const char *
38564         if (retval < 0)
38565                 goto out;
38566  
38567 +       if (!gr_tpe_allow(file)) {
38568 +               retval = -EACCES;
38569 +               goto out;
38570 +       }
38571 +
38572 +       if (gr_check_crash_exec(file)) {
38573 +               retval = -EACCES;
38574 +               goto out;
38575 +       }
38576 +
38577 +       gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38578 +
38579 +       gr_handle_exec_args(bprm, argv);
38580 +
38581 +#ifdef CONFIG_GRKERNSEC
38582 +       old_acl = current->acl;
38583 +       memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38584 +       old_exec_file = current->exec_file;
38585 +       get_file(file);
38586 +       current->exec_file = file;
38587 +#endif
38588 +
38589 +       retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38590 +                                       bprm->unsafe & LSM_UNSAFE_SHARE);
38591 +       if (retval < 0)
38592 +               goto out_fail;
38593 +
38594         retval = search_binary_handler(bprm,regs);
38595         if (retval < 0)
38596 -               goto out;
38597 +               goto out_fail;
38598 +#ifdef CONFIG_GRKERNSEC
38599 +       if (old_exec_file)
38600 +               fput(old_exec_file);
38601 +#endif
38602  
38603         /* execve succeeded */
38604         current->fs->in_exec = 0;
38605 @@ -1506,6 +1588,14 @@ static int do_execve_common(const char *
38606                 put_files_struct(displaced);
38607         return retval;
38608  
38609 +out_fail:
38610 +#ifdef CONFIG_GRKERNSEC
38611 +       current->acl = old_acl;
38612 +       memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38613 +       fput(current->exec_file);
38614 +       current->exec_file = old_exec_file;
38615 +#endif
38616 +
38617  out:
38618         if (bprm->mm) {
38619                 acct_arg_size(bprm, 0);
38620 @@ -1579,7 +1669,7 @@ static int expand_corename(struct core_n
38621  {
38622         char *old_corename = cn->corename;
38623  
38624 -       cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
38625 +       cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
38626         cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
38627  
38628         if (!cn->corename) {
38629 @@ -1667,7 +1757,7 @@ static int format_corename(struct core_n
38630         int pid_in_pattern = 0;
38631         int err = 0;
38632  
38633 -       cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
38634 +       cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
38635         cn->corename = kmalloc(cn->size, GFP_KERNEL);
38636         cn->used = 0;
38637  
38638 @@ -1758,6 +1848,219 @@ out:
38639         return ispipe;
38640  }
38641  
38642 +int pax_check_flags(unsigned long *flags)
38643 +{
38644 +       int retval = 0;
38645 +
38646 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
38647 +       if (*flags & MF_PAX_SEGMEXEC)
38648 +       {
38649 +               *flags &= ~MF_PAX_SEGMEXEC;
38650 +               retval = -EINVAL;
38651 +       }
38652 +#endif
38653 +
38654 +       if ((*flags & MF_PAX_PAGEEXEC)
38655 +
38656 +#ifdef CONFIG_PAX_PAGEEXEC
38657 +           &&  (*flags & MF_PAX_SEGMEXEC)
38658 +#endif
38659 +
38660 +          )
38661 +       {
38662 +               *flags &= ~MF_PAX_PAGEEXEC;
38663 +               retval = -EINVAL;
38664 +       }
38665 +
38666 +       if ((*flags & MF_PAX_MPROTECT)
38667 +
38668 +#ifdef CONFIG_PAX_MPROTECT
38669 +           && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38670 +#endif
38671 +
38672 +          )
38673 +       {
38674 +               *flags &= ~MF_PAX_MPROTECT;
38675 +               retval = -EINVAL;
38676 +       }
38677 +
38678 +       if ((*flags & MF_PAX_EMUTRAMP)
38679 +
38680 +#ifdef CONFIG_PAX_EMUTRAMP
38681 +           && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38682 +#endif
38683 +
38684 +          )
38685 +       {
38686 +               *flags &= ~MF_PAX_EMUTRAMP;
38687 +               retval = -EINVAL;
38688 +       }
38689 +
38690 +       return retval;
38691 +}
38692 +
38693 +EXPORT_SYMBOL(pax_check_flags);
38694 +
38695 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38696 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
38697 +{
38698 +       struct task_struct *tsk = current;
38699 +       struct mm_struct *mm = current->mm;
38700 +       char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
38701 +       char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
38702 +       char *path_exec = NULL;
38703 +       char *path_fault = NULL;
38704 +       unsigned long start = 0UL, end = 0UL, offset = 0UL;
38705 +
38706 +       if (buffer_exec && buffer_fault) {
38707 +               struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
38708 +
38709 +               down_read(&mm->mmap_sem);
38710 +               vma = mm->mmap;
38711 +               while (vma && (!vma_exec || !vma_fault)) {
38712 +                       if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
38713 +                               vma_exec = vma;
38714 +                       if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
38715 +                               vma_fault = vma;
38716 +                       vma = vma->vm_next;
38717 +               }
38718 +               if (vma_exec) {
38719 +                       path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
38720 +                       if (IS_ERR(path_exec))
38721 +                               path_exec = "<path too long>";
38722 +                       else {
38723 +                               path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
38724 +                               if (path_exec) {
38725 +                                       *path_exec = 0;
38726 +                                       path_exec = buffer_exec;
38727 +                               } else
38728 +                                       path_exec = "<path too long>";
38729 +                       }
38730 +               }
38731 +               if (vma_fault) {
38732 +                       start = vma_fault->vm_start;
38733 +                       end = vma_fault->vm_end;
38734 +                       offset = vma_fault->vm_pgoff << PAGE_SHIFT;
38735 +                       if (vma_fault->vm_file) {
38736 +                               path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
38737 +                               if (IS_ERR(path_fault))
38738 +                                       path_fault = "<path too long>";
38739 +                               else {
38740 +                                       path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
38741 +                                       if (path_fault) {
38742 +                                               *path_fault = 0;
38743 +                                               path_fault = buffer_fault;
38744 +                                       } else
38745 +                                               path_fault = "<path too long>";
38746 +                               }
38747 +                       } else
38748 +                               path_fault = "<anonymous mapping>";
38749 +               }
38750 +               up_read(&mm->mmap_sem);
38751 +       }
38752 +       if (tsk->signal->curr_ip)
38753 +               printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
38754 +       else
38755 +               printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
38756 +       printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
38757 +                       "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
38758 +                       task_uid(tsk), task_euid(tsk), pc, sp);
38759 +       free_page((unsigned long)buffer_exec);
38760 +       free_page((unsigned long)buffer_fault);
38761 +       pax_report_insns(pc, sp);
38762 +       do_coredump(SIGKILL, SIGKILL, regs);
38763 +}
38764 +#endif
38765 +
38766 +#ifdef CONFIG_PAX_REFCOUNT
38767 +void pax_report_refcount_overflow(struct pt_regs *regs)
38768 +{
38769 +       if (current->signal->curr_ip)
38770 +               printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38771 +                                &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
38772 +       else
38773 +               printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38774 +                                current->comm, task_pid_nr(current), current_uid(), current_euid());
38775 +       print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
38776 +       show_regs(regs);
38777 +       force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
38778 +}
38779 +#endif
38780 +
38781 +#ifdef CONFIG_PAX_USERCOPY
38782 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38783 +int object_is_on_stack(const void *obj, unsigned long len)
38784 +{
38785 +       const void * const stack = task_stack_page(current);
38786 +       const void * const stackend = stack + THREAD_SIZE;
38787 +
38788 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38789 +       const void *frame = NULL;
38790 +       const void *oldframe;
38791 +#endif
38792 +
38793 +       if (obj + len < obj)
38794 +               return -1;
38795 +
38796 +       if (obj + len <= stack || stackend <= obj)
38797 +               return 0;
38798 +
38799 +       if (obj < stack || stackend < obj + len)
38800 +               return -1;
38801 +
38802 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38803 +       oldframe = __builtin_frame_address(1);
38804 +       if (oldframe)
38805 +               frame = __builtin_frame_address(2);
38806 +       /*
38807 +         low ----------------------------------------------> high
38808 +         [saved bp][saved ip][args][local vars][saved bp][saved ip]
38809 +                             ^----------------^
38810 +                         allow copies only within here
38811 +       */
38812 +       while (stack <= frame && frame < stackend) {
38813 +               /* if obj + len extends past the last frame, this
38814 +                  check won't pass and the next frame will be 0,
38815 +                  causing us to bail out and correctly report
38816 +                  the copy as invalid
38817 +               */
38818 +               if (obj + len <= frame)
38819 +                       return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38820 +               oldframe = frame;
38821 +               frame = *(const void * const *)frame;
38822 +       }
38823 +       return -1;
38824 +#else
38825 +       return 1;
38826 +#endif
38827 +}
38828 +
38829 +
38830 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38831 +{
38832 +       if (current->signal->curr_ip)
38833 +               printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38834 +                       &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38835 +       else
38836 +               printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38837 +                       to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38838 +       dump_stack();
38839 +       gr_handle_kernel_exploit();
38840 +       do_group_exit(SIGKILL);
38841 +}
38842 +#endif
38843 +
38844 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38845 +void pax_track_stack(void)
38846 +{
38847 +       unsigned long sp = (unsigned long)&sp;
38848 +       if (sp < current_thread_info()->lowest_stack &&
38849 +           sp > (unsigned long)task_stack_page(current))
38850 +               current_thread_info()->lowest_stack = sp;
38851 +}
38852 +EXPORT_SYMBOL(pax_track_stack);
38853 +#endif
38854 +
38855  static int zap_process(struct task_struct *start, int exit_code)
38856  {
38857         struct task_struct *t;
38858 @@ -1969,17 +2272,17 @@ static void wait_for_dump_helpers(struct
38859         pipe = file->f_path.dentry->d_inode->i_pipe;
38860  
38861         pipe_lock(pipe);
38862 -       pipe->readers++;
38863 -       pipe->writers--;
38864 +       atomic_inc(&pipe->readers);
38865 +       atomic_dec(&pipe->writers);
38866  
38867 -       while ((pipe->readers > 1) && (!signal_pending(current))) {
38868 +       while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38869                 wake_up_interruptible_sync(&pipe->wait);
38870                 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38871                 pipe_wait(pipe);
38872         }
38873  
38874 -       pipe->readers--;
38875 -       pipe->writers++;
38876 +       atomic_dec(&pipe->readers);
38877 +       atomic_inc(&pipe->writers);
38878         pipe_unlock(pipe);
38879  
38880  }
38881 @@ -2040,7 +2343,7 @@ void do_coredump(long signr, int exit_co
38882         int retval = 0;
38883         int flag = 0;
38884         int ispipe;
38885 -       static atomic_t core_dump_count = ATOMIC_INIT(0);
38886 +       static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38887         struct coredump_params cprm = {
38888                 .signr = signr,
38889                 .regs = regs,
38890 @@ -2055,6 +2358,9 @@ void do_coredump(long signr, int exit_co
38891  
38892         audit_core_dumps(signr);
38893  
38894 +       if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38895 +               gr_handle_brute_attach(current, cprm.mm_flags);
38896 +
38897         binfmt = mm->binfmt;
38898         if (!binfmt || !binfmt->core_dump)
38899                 goto fail;
38900 @@ -2095,6 +2401,8 @@ void do_coredump(long signr, int exit_co
38901                 goto fail_corename;
38902         }
38903  
38904 +       gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38905 +
38906         if (ispipe) {
38907                 int dump_count;
38908                 char **helper_argv;
38909 @@ -2122,7 +2430,7 @@ void do_coredump(long signr, int exit_co
38910                 }
38911                 cprm.limit = RLIM_INFINITY;
38912  
38913 -               dump_count = atomic_inc_return(&core_dump_count);
38914 +               dump_count = atomic_inc_return_unchecked(&core_dump_count);
38915                 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38916                         printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38917                                task_tgid_vnr(current), current->comm);
38918 @@ -2192,7 +2500,7 @@ close_fail:
38919                 filp_close(cprm.file, NULL);
38920  fail_dropcount:
38921         if (ispipe)
38922 -               atomic_dec(&core_dump_count);
38923 +               atomic_dec_unchecked(&core_dump_count);
38924  fail_unlock:
38925         kfree(cn.corename);
38926  fail_corename:
38927 diff -urNp linux-3.0.4/fs/ext2/balloc.c linux-3.0.4/fs/ext2/balloc.c
38928 --- linux-3.0.4/fs/ext2/balloc.c        2011-07-21 22:17:23.000000000 -0400
38929 +++ linux-3.0.4/fs/ext2/balloc.c        2011-08-23 21:48:14.000000000 -0400
38930 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38931  
38932         free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38933         root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38934 -       if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38935 +       if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38936                 sbi->s_resuid != current_fsuid() &&
38937                 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38938                 return 0;
38939 diff -urNp linux-3.0.4/fs/ext3/balloc.c linux-3.0.4/fs/ext3/balloc.c
38940 --- linux-3.0.4/fs/ext3/balloc.c        2011-07-21 22:17:23.000000000 -0400
38941 +++ linux-3.0.4/fs/ext3/balloc.c        2011-08-23 21:48:14.000000000 -0400
38942 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
38943  
38944         free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38945         root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38946 -       if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38947 +       if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38948                 sbi->s_resuid != current_fsuid() &&
38949                 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38950                 return 0;
38951 diff -urNp linux-3.0.4/fs/ext4/balloc.c linux-3.0.4/fs/ext4/balloc.c
38952 --- linux-3.0.4/fs/ext4/balloc.c        2011-07-21 22:17:23.000000000 -0400
38953 +++ linux-3.0.4/fs/ext4/balloc.c        2011-08-23 21:48:14.000000000 -0400
38954 @@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
38955         /* Hm, nope.  Are (enough) root reserved blocks available? */
38956         if (sbi->s_resuid == current_fsuid() ||
38957             ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38958 -           capable(CAP_SYS_RESOURCE) ||
38959 -               (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
38960 +               (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
38961 +               capable_nolog(CAP_SYS_RESOURCE)) {
38962  
38963                 if (free_blocks >= (nblocks + dirty_blocks))
38964                         return 1;
38965 diff -urNp linux-3.0.4/fs/ext4/ext4.h linux-3.0.4/fs/ext4/ext4.h
38966 --- linux-3.0.4/fs/ext4/ext4.h  2011-08-23 21:44:40.000000000 -0400
38967 +++ linux-3.0.4/fs/ext4/ext4.h  2011-08-23 21:47:56.000000000 -0400
38968 @@ -1177,19 +1177,19 @@ struct ext4_sb_info {
38969         unsigned long s_mb_last_start;
38970  
38971         /* stats for buddy allocator */
38972 -       atomic_t s_bal_reqs;    /* number of reqs with len > 1 */
38973 -       atomic_t s_bal_success; /* we found long enough chunks */
38974 -       atomic_t s_bal_allocated;       /* in blocks */
38975 -       atomic_t s_bal_ex_scanned;      /* total extents scanned */
38976 -       atomic_t s_bal_goals;   /* goal hits */
38977 -       atomic_t s_bal_breaks;  /* too long searches */
38978 -       atomic_t s_bal_2orders; /* 2^order hits */
38979 +       atomic_unchecked_t s_bal_reqs;  /* number of reqs with len > 1 */
38980 +       atomic_unchecked_t s_bal_success;       /* we found long enough chunks */
38981 +       atomic_unchecked_t s_bal_allocated;     /* in blocks */
38982 +       atomic_unchecked_t s_bal_ex_scanned;    /* total extents scanned */
38983 +       atomic_unchecked_t s_bal_goals; /* goal hits */
38984 +       atomic_unchecked_t s_bal_breaks;        /* too long searches */
38985 +       atomic_unchecked_t s_bal_2orders;       /* 2^order hits */
38986         spinlock_t s_bal_lock;
38987         unsigned long s_mb_buddies_generated;
38988         unsigned long long s_mb_generation_time;
38989 -       atomic_t s_mb_lost_chunks;
38990 -       atomic_t s_mb_preallocated;
38991 -       atomic_t s_mb_discarded;
38992 +       atomic_unchecked_t s_mb_lost_chunks;
38993 +       atomic_unchecked_t s_mb_preallocated;
38994 +       atomic_unchecked_t s_mb_discarded;
38995         atomic_t s_lock_busy;
38996  
38997         /* locality groups */
38998 diff -urNp linux-3.0.4/fs/ext4/mballoc.c linux-3.0.4/fs/ext4/mballoc.c
38999 --- linux-3.0.4/fs/ext4/mballoc.c       2011-08-23 21:44:40.000000000 -0400
39000 +++ linux-3.0.4/fs/ext4/mballoc.c       2011-08-23 21:48:14.000000000 -0400
39001 @@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
39002                 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
39003  
39004                 if (EXT4_SB(sb)->s_mb_stats)
39005 -                       atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
39006 +                       atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
39007  
39008                 break;
39009         }
39010 @@ -2087,7 +2087,7 @@ repeat:
39011                         ac->ac_status = AC_STATUS_CONTINUE;
39012                         ac->ac_flags |= EXT4_MB_HINT_FIRST;
39013                         cr = 3;
39014 -                       atomic_inc(&sbi->s_mb_lost_chunks);
39015 +                       atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
39016                         goto repeat;
39017                 }
39018         }
39019 @@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
39020                 ext4_grpblk_t counters[16];
39021         } sg;
39022  
39023 +       pax_track_stack();
39024 +
39025         group--;
39026         if (group == 0)
39027                 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
39028 @@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
39029         if (sbi->s_mb_stats) {
39030                 printk(KERN_INFO
39031                        "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
39032 -                               atomic_read(&sbi->s_bal_allocated),
39033 -                               atomic_read(&sbi->s_bal_reqs),
39034 -                               atomic_read(&sbi->s_bal_success));
39035 +                               atomic_read_unchecked(&sbi->s_bal_allocated),
39036 +                               atomic_read_unchecked(&sbi->s_bal_reqs),
39037 +                               atomic_read_unchecked(&sbi->s_bal_success));
39038                 printk(KERN_INFO
39039                       "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
39040                                 "%u 2^N hits, %u breaks, %u lost\n",
39041 -                               atomic_read(&sbi->s_bal_ex_scanned),
39042 -                               atomic_read(&sbi->s_bal_goals),
39043 -                               atomic_read(&sbi->s_bal_2orders),
39044 -                               atomic_read(&sbi->s_bal_breaks),
39045 -                               atomic_read(&sbi->s_mb_lost_chunks));
39046 +                               atomic_read_unchecked(&sbi->s_bal_ex_scanned),
39047 +                               atomic_read_unchecked(&sbi->s_bal_goals),
39048 +                               atomic_read_unchecked(&sbi->s_bal_2orders),
39049 +                               atomic_read_unchecked(&sbi->s_bal_breaks),
39050 +                               atomic_read_unchecked(&sbi->s_mb_lost_chunks));
39051                 printk(KERN_INFO
39052                        "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
39053                                 sbi->s_mb_buddies_generated++,
39054                                 sbi->s_mb_generation_time);
39055                 printk(KERN_INFO
39056                        "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
39057 -                               atomic_read(&sbi->s_mb_preallocated),
39058 -                               atomic_read(&sbi->s_mb_discarded));
39059 +                               atomic_read_unchecked(&sbi->s_mb_preallocated),
39060 +                               atomic_read_unchecked(&sbi->s_mb_discarded));
39061         }
39062  
39063         free_percpu(sbi->s_locality_groups);
39064 @@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
39065         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
39066  
39067         if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
39068 -               atomic_inc(&sbi->s_bal_reqs);
39069 -               atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39070 +               atomic_inc_unchecked(&sbi->s_bal_reqs);
39071 +               atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39072                 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
39073 -                       atomic_inc(&sbi->s_bal_success);
39074 -               atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
39075 +                       atomic_inc_unchecked(&sbi->s_bal_success);
39076 +               atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
39077                 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
39078                                 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
39079 -                       atomic_inc(&sbi->s_bal_goals);
39080 +                       atomic_inc_unchecked(&sbi->s_bal_goals);
39081                 if (ac->ac_found > sbi->s_mb_max_to_scan)
39082 -                       atomic_inc(&sbi->s_bal_breaks);
39083 +                       atomic_inc_unchecked(&sbi->s_bal_breaks);
39084         }
39085  
39086         if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
39087 @@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
39088         trace_ext4_mb_new_inode_pa(ac, pa);
39089  
39090         ext4_mb_use_inode_pa(ac, pa);
39091 -       atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39092 +       atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39093  
39094         ei = EXT4_I(ac->ac_inode);
39095         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39096 @@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
39097         trace_ext4_mb_new_group_pa(ac, pa);
39098  
39099         ext4_mb_use_group_pa(ac, pa);
39100 -       atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39101 +       atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39102  
39103         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39104         lg = ac->ac_lg;
39105 @@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
39106                  * from the bitmap and continue.
39107                  */
39108         }
39109 -       atomic_add(free, &sbi->s_mb_discarded);
39110 +       atomic_add_unchecked(free, &sbi->s_mb_discarded);
39111  
39112         return err;
39113  }
39114 @@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
39115         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
39116         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
39117         mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
39118 -       atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39119 +       atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39120         trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
39121  
39122         return 0;
39123 diff -urNp linux-3.0.4/fs/fcntl.c linux-3.0.4/fs/fcntl.c
39124 --- linux-3.0.4/fs/fcntl.c      2011-07-21 22:17:23.000000000 -0400
39125 +++ linux-3.0.4/fs/fcntl.c      2011-08-23 21:48:14.000000000 -0400
39126 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
39127         if (err)
39128                 return err;
39129  
39130 +       if (gr_handle_chroot_fowner(pid, type))
39131 +               return -ENOENT;
39132 +       if (gr_check_protected_task_fowner(pid, type))
39133 +               return -EACCES;
39134 +
39135         f_modown(filp, pid, type, force);
39136         return 0;
39137  }
39138 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
39139         switch (cmd) {
39140         case F_DUPFD:
39141         case F_DUPFD_CLOEXEC:
39142 +               gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
39143                 if (arg >= rlimit(RLIMIT_NOFILE))
39144                         break;
39145                 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
39146 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
39147          * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
39148          * is defined as O_NONBLOCK on some platforms and not on others.
39149          */
39150 -       BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
39151 +       BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
39152                 O_RDONLY        | O_WRONLY      | O_RDWR        |
39153                 O_CREAT         | O_EXCL        | O_NOCTTY      |
39154                 O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
39155                 __O_SYNC        | O_DSYNC       | FASYNC        |
39156                 O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
39157                 O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
39158 -               __FMODE_EXEC    | O_PATH
39159 +               __FMODE_EXEC    | O_PATH        | FMODE_GREXEC
39160                 ));
39161  
39162         fasync_cache = kmem_cache_create("fasync_cache",
39163 diff -urNp linux-3.0.4/fs/fifo.c linux-3.0.4/fs/fifo.c
39164 --- linux-3.0.4/fs/fifo.c       2011-07-21 22:17:23.000000000 -0400
39165 +++ linux-3.0.4/fs/fifo.c       2011-08-23 21:47:56.000000000 -0400
39166 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
39167          */
39168                 filp->f_op = &read_pipefifo_fops;
39169                 pipe->r_counter++;
39170 -               if (pipe->readers++ == 0)
39171 +               if (atomic_inc_return(&pipe->readers) == 1)
39172                         wake_up_partner(inode);
39173  
39174 -               if (!pipe->writers) {
39175 +               if (!atomic_read(&pipe->writers)) {
39176                         if ((filp->f_flags & O_NONBLOCK)) {
39177                                 /* suppress POLLHUP until we have
39178                                  * seen a writer */
39179 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
39180          *  errno=ENXIO when there is no process reading the FIFO.
39181          */
39182                 ret = -ENXIO;
39183 -               if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
39184 +               if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
39185                         goto err;
39186  
39187                 filp->f_op = &write_pipefifo_fops;
39188                 pipe->w_counter++;
39189 -               if (!pipe->writers++)
39190 +               if (atomic_inc_return(&pipe->writers) == 1)
39191                         wake_up_partner(inode);
39192  
39193 -               if (!pipe->readers) {
39194 +               if (!atomic_read(&pipe->readers)) {
39195                         wait_for_partner(inode, &pipe->r_counter);
39196                         if (signal_pending(current))
39197                                 goto err_wr;
39198 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
39199          */
39200                 filp->f_op = &rdwr_pipefifo_fops;
39201  
39202 -               pipe->readers++;
39203 -               pipe->writers++;
39204 +               atomic_inc(&pipe->readers);
39205 +               atomic_inc(&pipe->writers);
39206                 pipe->r_counter++;
39207                 pipe->w_counter++;
39208 -               if (pipe->readers == 1 || pipe->writers == 1)
39209 +               if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
39210                         wake_up_partner(inode);
39211                 break;
39212  
39213 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
39214         return 0;
39215  
39216  err_rd:
39217 -       if (!--pipe->readers)
39218 +       if (atomic_dec_and_test(&pipe->readers))
39219                 wake_up_interruptible(&pipe->wait);
39220         ret = -ERESTARTSYS;
39221         goto err;
39222  
39223  err_wr:
39224 -       if (!--pipe->writers)
39225 +       if (atomic_dec_and_test(&pipe->writers))
39226                 wake_up_interruptible(&pipe->wait);
39227         ret = -ERESTARTSYS;
39228         goto err;
39229  
39230  err:
39231 -       if (!pipe->readers && !pipe->writers)
39232 +       if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
39233                 free_pipe_info(inode);
39234  
39235  err_nocleanup:
39236 diff -urNp linux-3.0.4/fs/file.c linux-3.0.4/fs/file.c
39237 --- linux-3.0.4/fs/file.c       2011-07-21 22:17:23.000000000 -0400
39238 +++ linux-3.0.4/fs/file.c       2011-08-23 21:48:14.000000000 -0400
39239 @@ -15,6 +15,7 @@
39240  #include <linux/slab.h>
39241  #include <linux/vmalloc.h>
39242  #include <linux/file.h>
39243 +#include <linux/security.h>
39244  #include <linux/fdtable.h>
39245  #include <linux/bitops.h>
39246  #include <linux/interrupt.h>
39247 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
39248          * N.B. For clone tasks sharing a files structure, this test
39249          * will limit the total number of files that can be opened.
39250          */
39251 +       gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
39252         if (nr >= rlimit(RLIMIT_NOFILE))
39253                 return -EMFILE;
39254  
39255 diff -urNp linux-3.0.4/fs/filesystems.c linux-3.0.4/fs/filesystems.c
39256 --- linux-3.0.4/fs/filesystems.c        2011-07-21 22:17:23.000000000 -0400
39257 +++ linux-3.0.4/fs/filesystems.c        2011-08-23 21:48:14.000000000 -0400
39258 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
39259         int len = dot ? dot - name : strlen(name);
39260  
39261         fs = __get_fs_type(name, len);
39262 +
39263 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
39264 +       if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
39265 +#else
39266         if (!fs && (request_module("%.*s", len, name) == 0))
39267 +#endif
39268                 fs = __get_fs_type(name, len);
39269  
39270         if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
39271 diff -urNp linux-3.0.4/fs/fscache/cookie.c linux-3.0.4/fs/fscache/cookie.c
39272 --- linux-3.0.4/fs/fscache/cookie.c     2011-07-21 22:17:23.000000000 -0400
39273 +++ linux-3.0.4/fs/fscache/cookie.c     2011-08-23 21:47:56.000000000 -0400
39274 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
39275                parent ? (char *) parent->def->name : "<no-parent>",
39276                def->name, netfs_data);
39277  
39278 -       fscache_stat(&fscache_n_acquires);
39279 +       fscache_stat_unchecked(&fscache_n_acquires);
39280  
39281         /* if there's no parent cookie, then we don't create one here either */
39282         if (!parent) {
39283 -               fscache_stat(&fscache_n_acquires_null);
39284 +               fscache_stat_unchecked(&fscache_n_acquires_null);
39285                 _leave(" [no parent]");
39286                 return NULL;
39287         }
39288 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
39289         /* allocate and initialise a cookie */
39290         cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
39291         if (!cookie) {
39292 -               fscache_stat(&fscache_n_acquires_oom);
39293 +               fscache_stat_unchecked(&fscache_n_acquires_oom);
39294                 _leave(" [ENOMEM]");
39295                 return NULL;
39296         }
39297 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
39298  
39299         switch (cookie->def->type) {
39300         case FSCACHE_COOKIE_TYPE_INDEX:
39301 -               fscache_stat(&fscache_n_cookie_index);
39302 +               fscache_stat_unchecked(&fscache_n_cookie_index);
39303                 break;
39304         case FSCACHE_COOKIE_TYPE_DATAFILE:
39305 -               fscache_stat(&fscache_n_cookie_data);
39306 +               fscache_stat_unchecked(&fscache_n_cookie_data);
39307                 break;
39308         default:
39309 -               fscache_stat(&fscache_n_cookie_special);
39310 +               fscache_stat_unchecked(&fscache_n_cookie_special);
39311                 break;
39312         }
39313  
39314 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
39315                 if (fscache_acquire_non_index_cookie(cookie) < 0) {
39316                         atomic_dec(&parent->n_children);
39317                         __fscache_cookie_put(cookie);
39318 -                       fscache_stat(&fscache_n_acquires_nobufs);
39319 +                       fscache_stat_unchecked(&fscache_n_acquires_nobufs);
39320                         _leave(" = NULL");
39321                         return NULL;
39322                 }
39323         }
39324  
39325 -       fscache_stat(&fscache_n_acquires_ok);
39326 +       fscache_stat_unchecked(&fscache_n_acquires_ok);
39327         _leave(" = %p", cookie);
39328         return cookie;
39329  }
39330 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
39331         cache = fscache_select_cache_for_object(cookie->parent);
39332         if (!cache) {
39333                 up_read(&fscache_addremove_sem);
39334 -               fscache_stat(&fscache_n_acquires_no_cache);
39335 +               fscache_stat_unchecked(&fscache_n_acquires_no_cache);
39336                 _leave(" = -ENOMEDIUM [no cache]");
39337                 return -ENOMEDIUM;
39338         }
39339 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
39340         object = cache->ops->alloc_object(cache, cookie);
39341         fscache_stat_d(&fscache_n_cop_alloc_object);
39342         if (IS_ERR(object)) {
39343 -               fscache_stat(&fscache_n_object_no_alloc);
39344 +               fscache_stat_unchecked(&fscache_n_object_no_alloc);
39345                 ret = PTR_ERR(object);
39346                 goto error;
39347         }
39348  
39349 -       fscache_stat(&fscache_n_object_alloc);
39350 +       fscache_stat_unchecked(&fscache_n_object_alloc);
39351  
39352         object->debug_id = atomic_inc_return(&fscache_object_debug_id);
39353  
39354 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
39355         struct fscache_object *object;
39356         struct hlist_node *_p;
39357  
39358 -       fscache_stat(&fscache_n_updates);
39359 +       fscache_stat_unchecked(&fscache_n_updates);
39360  
39361         if (!cookie) {
39362 -               fscache_stat(&fscache_n_updates_null);
39363 +               fscache_stat_unchecked(&fscache_n_updates_null);
39364                 _leave(" [no cookie]");
39365                 return;
39366         }
39367 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct 
39368         struct fscache_object *object;
39369         unsigned long event;
39370  
39371 -       fscache_stat(&fscache_n_relinquishes);
39372 +       fscache_stat_unchecked(&fscache_n_relinquishes);
39373         if (retire)
39374 -               fscache_stat(&fscache_n_relinquishes_retire);
39375 +               fscache_stat_unchecked(&fscache_n_relinquishes_retire);
39376  
39377         if (!cookie) {
39378 -               fscache_stat(&fscache_n_relinquishes_null);
39379 +               fscache_stat_unchecked(&fscache_n_relinquishes_null);
39380                 _leave(" [no cookie]");
39381                 return;
39382         }
39383 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct 
39384  
39385         /* wait for the cookie to finish being instantiated (or to fail) */
39386         if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
39387 -               fscache_stat(&fscache_n_relinquishes_waitcrt);
39388 +               fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
39389                 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
39390                             fscache_wait_bit, TASK_UNINTERRUPTIBLE);
39391         }
39392 diff -urNp linux-3.0.4/fs/fscache/internal.h linux-3.0.4/fs/fscache/internal.h
39393 --- linux-3.0.4/fs/fscache/internal.h   2011-07-21 22:17:23.000000000 -0400
39394 +++ linux-3.0.4/fs/fscache/internal.h   2011-08-23 21:47:56.000000000 -0400
39395 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
39396  extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
39397  extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
39398  
39399 -extern atomic_t fscache_n_op_pend;
39400 -extern atomic_t fscache_n_op_run;
39401 -extern atomic_t fscache_n_op_enqueue;
39402 -extern atomic_t fscache_n_op_deferred_release;
39403 -extern atomic_t fscache_n_op_release;
39404 -extern atomic_t fscache_n_op_gc;
39405 -extern atomic_t fscache_n_op_cancelled;
39406 -extern atomic_t fscache_n_op_rejected;
39407 -
39408 -extern atomic_t fscache_n_attr_changed;
39409 -extern atomic_t fscache_n_attr_changed_ok;
39410 -extern atomic_t fscache_n_attr_changed_nobufs;
39411 -extern atomic_t fscache_n_attr_changed_nomem;
39412 -extern atomic_t fscache_n_attr_changed_calls;
39413 -
39414 -extern atomic_t fscache_n_allocs;
39415 -extern atomic_t fscache_n_allocs_ok;
39416 -extern atomic_t fscache_n_allocs_wait;
39417 -extern atomic_t fscache_n_allocs_nobufs;
39418 -extern atomic_t fscache_n_allocs_intr;
39419 -extern atomic_t fscache_n_allocs_object_dead;
39420 -extern atomic_t fscache_n_alloc_ops;
39421 -extern atomic_t fscache_n_alloc_op_waits;
39422 -
39423 -extern atomic_t fscache_n_retrievals;
39424 -extern atomic_t fscache_n_retrievals_ok;
39425 -extern atomic_t fscache_n_retrievals_wait;
39426 -extern atomic_t fscache_n_retrievals_nodata;
39427 -extern atomic_t fscache_n_retrievals_nobufs;
39428 -extern atomic_t fscache_n_retrievals_intr;
39429 -extern atomic_t fscache_n_retrievals_nomem;
39430 -extern atomic_t fscache_n_retrievals_object_dead;
39431 -extern atomic_t fscache_n_retrieval_ops;
39432 -extern atomic_t fscache_n_retrieval_op_waits;
39433 -
39434 -extern atomic_t fscache_n_stores;
39435 -extern atomic_t fscache_n_stores_ok;
39436 -extern atomic_t fscache_n_stores_again;
39437 -extern atomic_t fscache_n_stores_nobufs;
39438 -extern atomic_t fscache_n_stores_oom;
39439 -extern atomic_t fscache_n_store_ops;
39440 -extern atomic_t fscache_n_store_calls;
39441 -extern atomic_t fscache_n_store_pages;
39442 -extern atomic_t fscache_n_store_radix_deletes;
39443 -extern atomic_t fscache_n_store_pages_over_limit;
39444 -
39445 -extern atomic_t fscache_n_store_vmscan_not_storing;
39446 -extern atomic_t fscache_n_store_vmscan_gone;
39447 -extern atomic_t fscache_n_store_vmscan_busy;
39448 -extern atomic_t fscache_n_store_vmscan_cancelled;
39449 -
39450 -extern atomic_t fscache_n_marks;
39451 -extern atomic_t fscache_n_uncaches;
39452 -
39453 -extern atomic_t fscache_n_acquires;
39454 -extern atomic_t fscache_n_acquires_null;
39455 -extern atomic_t fscache_n_acquires_no_cache;
39456 -extern atomic_t fscache_n_acquires_ok;
39457 -extern atomic_t fscache_n_acquires_nobufs;
39458 -extern atomic_t fscache_n_acquires_oom;
39459 -
39460 -extern atomic_t fscache_n_updates;
39461 -extern atomic_t fscache_n_updates_null;
39462 -extern atomic_t fscache_n_updates_run;
39463 -
39464 -extern atomic_t fscache_n_relinquishes;
39465 -extern atomic_t fscache_n_relinquishes_null;
39466 -extern atomic_t fscache_n_relinquishes_waitcrt;
39467 -extern atomic_t fscache_n_relinquishes_retire;
39468 -
39469 -extern atomic_t fscache_n_cookie_index;
39470 -extern atomic_t fscache_n_cookie_data;
39471 -extern atomic_t fscache_n_cookie_special;
39472 -
39473 -extern atomic_t fscache_n_object_alloc;
39474 -extern atomic_t fscache_n_object_no_alloc;
39475 -extern atomic_t fscache_n_object_lookups;
39476 -extern atomic_t fscache_n_object_lookups_negative;
39477 -extern atomic_t fscache_n_object_lookups_positive;
39478 -extern atomic_t fscache_n_object_lookups_timed_out;
39479 -extern atomic_t fscache_n_object_created;
39480 -extern atomic_t fscache_n_object_avail;
39481 -extern atomic_t fscache_n_object_dead;
39482 -
39483 -extern atomic_t fscache_n_checkaux_none;
39484 -extern atomic_t fscache_n_checkaux_okay;
39485 -extern atomic_t fscache_n_checkaux_update;
39486 -extern atomic_t fscache_n_checkaux_obsolete;
39487 +extern atomic_unchecked_t fscache_n_op_pend;
39488 +extern atomic_unchecked_t fscache_n_op_run;
39489 +extern atomic_unchecked_t fscache_n_op_enqueue;
39490 +extern atomic_unchecked_t fscache_n_op_deferred_release;
39491 +extern atomic_unchecked_t fscache_n_op_release;
39492 +extern atomic_unchecked_t fscache_n_op_gc;
39493 +extern atomic_unchecked_t fscache_n_op_cancelled;
39494 +extern atomic_unchecked_t fscache_n_op_rejected;
39495 +
39496 +extern atomic_unchecked_t fscache_n_attr_changed;
39497 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
39498 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
39499 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
39500 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
39501 +
39502 +extern atomic_unchecked_t fscache_n_allocs;
39503 +extern atomic_unchecked_t fscache_n_allocs_ok;
39504 +extern atomic_unchecked_t fscache_n_allocs_wait;
39505 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
39506 +extern atomic_unchecked_t fscache_n_allocs_intr;
39507 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
39508 +extern atomic_unchecked_t fscache_n_alloc_ops;
39509 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
39510 +
39511 +extern atomic_unchecked_t fscache_n_retrievals;
39512 +extern atomic_unchecked_t fscache_n_retrievals_ok;
39513 +extern atomic_unchecked_t fscache_n_retrievals_wait;
39514 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
39515 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
39516 +extern atomic_unchecked_t fscache_n_retrievals_intr;
39517 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
39518 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
39519 +extern atomic_unchecked_t fscache_n_retrieval_ops;
39520 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
39521 +
39522 +extern atomic_unchecked_t fscache_n_stores;
39523 +extern atomic_unchecked_t fscache_n_stores_ok;
39524 +extern atomic_unchecked_t fscache_n_stores_again;
39525 +extern atomic_unchecked_t fscache_n_stores_nobufs;
39526 +extern atomic_unchecked_t fscache_n_stores_oom;
39527 +extern atomic_unchecked_t fscache_n_store_ops;
39528 +extern atomic_unchecked_t fscache_n_store_calls;
39529 +extern atomic_unchecked_t fscache_n_store_pages;
39530 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
39531 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
39532 +
39533 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39534 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
39535 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
39536 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39537 +
39538 +extern atomic_unchecked_t fscache_n_marks;
39539 +extern atomic_unchecked_t fscache_n_uncaches;
39540 +
39541 +extern atomic_unchecked_t fscache_n_acquires;
39542 +extern atomic_unchecked_t fscache_n_acquires_null;
39543 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
39544 +extern atomic_unchecked_t fscache_n_acquires_ok;
39545 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
39546 +extern atomic_unchecked_t fscache_n_acquires_oom;
39547 +
39548 +extern atomic_unchecked_t fscache_n_updates;
39549 +extern atomic_unchecked_t fscache_n_updates_null;
39550 +extern atomic_unchecked_t fscache_n_updates_run;
39551 +
39552 +extern atomic_unchecked_t fscache_n_relinquishes;
39553 +extern atomic_unchecked_t fscache_n_relinquishes_null;
39554 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39555 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
39556 +
39557 +extern atomic_unchecked_t fscache_n_cookie_index;
39558 +extern atomic_unchecked_t fscache_n_cookie_data;
39559 +extern atomic_unchecked_t fscache_n_cookie_special;
39560 +
39561 +extern atomic_unchecked_t fscache_n_object_alloc;
39562 +extern atomic_unchecked_t fscache_n_object_no_alloc;
39563 +extern atomic_unchecked_t fscache_n_object_lookups;
39564 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
39565 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
39566 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
39567 +extern atomic_unchecked_t fscache_n_object_created;
39568 +extern atomic_unchecked_t fscache_n_object_avail;
39569 +extern atomic_unchecked_t fscache_n_object_dead;
39570 +
39571 +extern atomic_unchecked_t fscache_n_checkaux_none;
39572 +extern atomic_unchecked_t fscache_n_checkaux_okay;
39573 +extern atomic_unchecked_t fscache_n_checkaux_update;
39574 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
39575  
39576  extern atomic_t fscache_n_cop_alloc_object;
39577  extern atomic_t fscache_n_cop_lookup_object;
39578 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
39579         atomic_inc(stat);
39580  }
39581  
39582 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
39583 +{
39584 +       atomic_inc_unchecked(stat);
39585 +}
39586 +
39587  static inline void fscache_stat_d(atomic_t *stat)
39588  {
39589         atomic_dec(stat);
39590 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
39591  
39592  #define __fscache_stat(stat) (NULL)
39593  #define fscache_stat(stat) do {} while (0)
39594 +#define fscache_stat_unchecked(stat) do {} while (0)
39595  #define fscache_stat_d(stat) do {} while (0)
39596  #endif
39597  
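The fs/fscache hunks above and below switch the FS-Cache statistics counters from atomic_t to atomic_unchecked_t and route their increments through fscache_stat_unchecked(). These counters are purely informational and are allowed to wrap, so they must not trip the PaX REFCOUNT overflow detection that this patch adds to the regular atomic_t operations. A minimal sketch of the assumed fallback shape of these helpers follows; the real definitions live in the arch and include/linux portions of the full patch, not in this excerpt.

/* Sketch only -- not part of the patch.  When CONFIG_PAX_REFCOUNT is off,
 * the *_unchecked helpers are assumed to collapse onto the plain atomic
 * ops; when it is on, atomic_inc() and friends gain an overflow trap while
 * the *_unchecked variants remain ordinary, wrap-tolerant arithmetic. */
#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)		atomic_read(v)
#define atomic_inc_unchecked(v)			atomic_inc(v)
#define atomic_add_unchecked(i, v)		atomic_add((i), (v))
#define atomic_inc_return_unchecked(v)		atomic_inc_return(v)
#endif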
39598 diff -urNp linux-3.0.4/fs/fscache/object.c linux-3.0.4/fs/fscache/object.c
39599 --- linux-3.0.4/fs/fscache/object.c     2011-07-21 22:17:23.000000000 -0400
39600 +++ linux-3.0.4/fs/fscache/object.c     2011-08-23 21:47:56.000000000 -0400
39601 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
39602                 /* update the object metadata on disk */
39603         case FSCACHE_OBJECT_UPDATING:
39604                 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
39605 -               fscache_stat(&fscache_n_updates_run);
39606 +               fscache_stat_unchecked(&fscache_n_updates_run);
39607                 fscache_stat(&fscache_n_cop_update_object);
39608                 object->cache->ops->update_object(object);
39609                 fscache_stat_d(&fscache_n_cop_update_object);
39610 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
39611                 spin_lock(&object->lock);
39612                 object->state = FSCACHE_OBJECT_DEAD;
39613                 spin_unlock(&object->lock);
39614 -               fscache_stat(&fscache_n_object_dead);
39615 +               fscache_stat_unchecked(&fscache_n_object_dead);
39616                 goto terminal_transit;
39617  
39618                 /* handle the parent cache of this object being withdrawn from
39619 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
39620                 spin_lock(&object->lock);
39621                 object->state = FSCACHE_OBJECT_DEAD;
39622                 spin_unlock(&object->lock);
39623 -               fscache_stat(&fscache_n_object_dead);
39624 +               fscache_stat_unchecked(&fscache_n_object_dead);
39625                 goto terminal_transit;
39626  
39627                 /* complain about the object being woken up once it is
39628 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
39629                parent->cookie->def->name, cookie->def->name,
39630                object->cache->tag->name);
39631  
39632 -       fscache_stat(&fscache_n_object_lookups);
39633 +       fscache_stat_unchecked(&fscache_n_object_lookups);
39634         fscache_stat(&fscache_n_cop_lookup_object);
39635         ret = object->cache->ops->lookup_object(object);
39636         fscache_stat_d(&fscache_n_cop_lookup_object);
39637 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
39638         if (ret == -ETIMEDOUT) {
39639                 /* probably stuck behind another object, so move this one to
39640                  * the back of the queue */
39641 -               fscache_stat(&fscache_n_object_lookups_timed_out);
39642 +               fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
39643                 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39644         }
39645  
39646 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
39647  
39648         spin_lock(&object->lock);
39649         if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39650 -               fscache_stat(&fscache_n_object_lookups_negative);
39651 +               fscache_stat_unchecked(&fscache_n_object_lookups_negative);
39652  
39653                 /* transit here to allow write requests to begin stacking up
39654                  * and read requests to begin returning ENODATA */
39655 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
39656          * result, in which case there may be data available */
39657         spin_lock(&object->lock);
39658         if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39659 -               fscache_stat(&fscache_n_object_lookups_positive);
39660 +               fscache_stat_unchecked(&fscache_n_object_lookups_positive);
39661  
39662                 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
39663  
39664 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
39665                 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39666         } else {
39667                 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
39668 -               fscache_stat(&fscache_n_object_created);
39669 +               fscache_stat_unchecked(&fscache_n_object_created);
39670  
39671                 object->state = FSCACHE_OBJECT_AVAILABLE;
39672                 spin_unlock(&object->lock);
39673 @@ -602,7 +602,7 @@ static void fscache_object_available(str
39674         fscache_enqueue_dependents(object);
39675  
39676         fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
39677 -       fscache_stat(&fscache_n_object_avail);
39678 +       fscache_stat_unchecked(&fscache_n_object_avail);
39679  
39680         _leave("");
39681  }
39682 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
39683         enum fscache_checkaux result;
39684  
39685         if (!object->cookie->def->check_aux) {
39686 -               fscache_stat(&fscache_n_checkaux_none);
39687 +               fscache_stat_unchecked(&fscache_n_checkaux_none);
39688                 return FSCACHE_CHECKAUX_OKAY;
39689         }
39690  
39691 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
39692         switch (result) {
39693                 /* entry okay as is */
39694         case FSCACHE_CHECKAUX_OKAY:
39695 -               fscache_stat(&fscache_n_checkaux_okay);
39696 +               fscache_stat_unchecked(&fscache_n_checkaux_okay);
39697                 break;
39698  
39699                 /* entry requires update */
39700         case FSCACHE_CHECKAUX_NEEDS_UPDATE:
39701 -               fscache_stat(&fscache_n_checkaux_update);
39702 +               fscache_stat_unchecked(&fscache_n_checkaux_update);
39703                 break;
39704  
39705                 /* entry requires deletion */
39706         case FSCACHE_CHECKAUX_OBSOLETE:
39707 -               fscache_stat(&fscache_n_checkaux_obsolete);
39708 +               fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
39709                 break;
39710  
39711         default:
39712 diff -urNp linux-3.0.4/fs/fscache/operation.c linux-3.0.4/fs/fscache/operation.c
39713 --- linux-3.0.4/fs/fscache/operation.c  2011-07-21 22:17:23.000000000 -0400
39714 +++ linux-3.0.4/fs/fscache/operation.c  2011-08-23 21:47:56.000000000 -0400
39715 @@ -17,7 +17,7 @@
39716  #include <linux/slab.h>
39717  #include "internal.h"
39718  
39719 -atomic_t fscache_op_debug_id;
39720 +atomic_unchecked_t fscache_op_debug_id;
39721  EXPORT_SYMBOL(fscache_op_debug_id);
39722  
39723  /**
39724 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
39725         ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
39726         ASSERTCMP(atomic_read(&op->usage), >, 0);
39727  
39728 -       fscache_stat(&fscache_n_op_enqueue);
39729 +       fscache_stat_unchecked(&fscache_n_op_enqueue);
39730         switch (op->flags & FSCACHE_OP_TYPE) {
39731         case FSCACHE_OP_ASYNC:
39732                 _debug("queue async");
39733 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
39734                 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
39735         if (op->processor)
39736                 fscache_enqueue_operation(op);
39737 -       fscache_stat(&fscache_n_op_run);
39738 +       fscache_stat_unchecked(&fscache_n_op_run);
39739  }
39740  
39741  /*
39742 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
39743                 if (object->n_ops > 1) {
39744                         atomic_inc(&op->usage);
39745                         list_add_tail(&op->pend_link, &object->pending_ops);
39746 -                       fscache_stat(&fscache_n_op_pend);
39747 +                       fscache_stat_unchecked(&fscache_n_op_pend);
39748                 } else if (!list_empty(&object->pending_ops)) {
39749                         atomic_inc(&op->usage);
39750                         list_add_tail(&op->pend_link, &object->pending_ops);
39751 -                       fscache_stat(&fscache_n_op_pend);
39752 +                       fscache_stat_unchecked(&fscache_n_op_pend);
39753                         fscache_start_operations(object);
39754                 } else {
39755                         ASSERTCMP(object->n_in_progress, ==, 0);
39756 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
39757                 object->n_exclusive++;  /* reads and writes must wait */
39758                 atomic_inc(&op->usage);
39759                 list_add_tail(&op->pend_link, &object->pending_ops);
39760 -               fscache_stat(&fscache_n_op_pend);
39761 +               fscache_stat_unchecked(&fscache_n_op_pend);
39762                 ret = 0;
39763         } else {
39764                 /* not allowed to submit ops in any other state */
39765 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
39766                 if (object->n_exclusive > 0) {
39767                         atomic_inc(&op->usage);
39768                         list_add_tail(&op->pend_link, &object->pending_ops);
39769 -                       fscache_stat(&fscache_n_op_pend);
39770 +                       fscache_stat_unchecked(&fscache_n_op_pend);
39771                 } else if (!list_empty(&object->pending_ops)) {
39772                         atomic_inc(&op->usage);
39773                         list_add_tail(&op->pend_link, &object->pending_ops);
39774 -                       fscache_stat(&fscache_n_op_pend);
39775 +                       fscache_stat_unchecked(&fscache_n_op_pend);
39776                         fscache_start_operations(object);
39777                 } else {
39778                         ASSERTCMP(object->n_exclusive, ==, 0);
39779 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
39780                 object->n_ops++;
39781                 atomic_inc(&op->usage);
39782                 list_add_tail(&op->pend_link, &object->pending_ops);
39783 -               fscache_stat(&fscache_n_op_pend);
39784 +               fscache_stat_unchecked(&fscache_n_op_pend);
39785                 ret = 0;
39786         } else if (object->state == FSCACHE_OBJECT_DYING ||
39787                    object->state == FSCACHE_OBJECT_LC_DYING ||
39788                    object->state == FSCACHE_OBJECT_WITHDRAWING) {
39789 -               fscache_stat(&fscache_n_op_rejected);
39790 +               fscache_stat_unchecked(&fscache_n_op_rejected);
39791                 ret = -ENOBUFS;
39792         } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39793                 fscache_report_unexpected_submission(object, op, ostate);
39794 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
39795  
39796         ret = -EBUSY;
39797         if (!list_empty(&op->pend_link)) {
39798 -               fscache_stat(&fscache_n_op_cancelled);
39799 +               fscache_stat_unchecked(&fscache_n_op_cancelled);
39800                 list_del_init(&op->pend_link);
39801                 object->n_ops--;
39802                 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39803 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
39804         if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39805                 BUG();
39806  
39807 -       fscache_stat(&fscache_n_op_release);
39808 +       fscache_stat_unchecked(&fscache_n_op_release);
39809  
39810         if (op->release) {
39811                 op->release(op);
39812 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
39813          * lock, and defer it otherwise */
39814         if (!spin_trylock(&object->lock)) {
39815                 _debug("defer put");
39816 -               fscache_stat(&fscache_n_op_deferred_release);
39817 +               fscache_stat_unchecked(&fscache_n_op_deferred_release);
39818  
39819                 cache = object->cache;
39820                 spin_lock(&cache->op_gc_list_lock);
39821 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
39822  
39823                 _debug("GC DEFERRED REL OBJ%x OP%x",
39824                        object->debug_id, op->debug_id);
39825 -               fscache_stat(&fscache_n_op_gc);
39826 +               fscache_stat_unchecked(&fscache_n_op_gc);
39827  
39828                 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39829  
39830 diff -urNp linux-3.0.4/fs/fscache/page.c linux-3.0.4/fs/fscache/page.c
39831 --- linux-3.0.4/fs/fscache/page.c       2011-07-21 22:17:23.000000000 -0400
39832 +++ linux-3.0.4/fs/fscache/page.c       2011-08-23 21:47:56.000000000 -0400
39833 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
39834         val = radix_tree_lookup(&cookie->stores, page->index);
39835         if (!val) {
39836                 rcu_read_unlock();
39837 -               fscache_stat(&fscache_n_store_vmscan_not_storing);
39838 +               fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39839                 __fscache_uncache_page(cookie, page);
39840                 return true;
39841         }
39842 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
39843         spin_unlock(&cookie->stores_lock);
39844  
39845         if (xpage) {
39846 -               fscache_stat(&fscache_n_store_vmscan_cancelled);
39847 -               fscache_stat(&fscache_n_store_radix_deletes);
39848 +               fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39849 +               fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39850                 ASSERTCMP(xpage, ==, page);
39851         } else {
39852 -               fscache_stat(&fscache_n_store_vmscan_gone);
39853 +               fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39854         }
39855  
39856         wake_up_bit(&cookie->flags, 0);
39857 @@ -107,7 +107,7 @@ page_busy:
39858         /* we might want to wait here, but that could deadlock the allocator as
39859          * the work threads writing to the cache may all end up sleeping
39860          * on memory allocation */
39861 -       fscache_stat(&fscache_n_store_vmscan_busy);
39862 +       fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39863         return false;
39864  }
39865  EXPORT_SYMBOL(__fscache_maybe_release_page);
39866 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
39867                                      FSCACHE_COOKIE_STORING_TAG);
39868                 if (!radix_tree_tag_get(&cookie->stores, page->index,
39869                                         FSCACHE_COOKIE_PENDING_TAG)) {
39870 -                       fscache_stat(&fscache_n_store_radix_deletes);
39871 +                       fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39872                         xpage = radix_tree_delete(&cookie->stores, page->index);
39873                 }
39874                 spin_unlock(&cookie->stores_lock);
39875 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
39876  
39877         _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39878  
39879 -       fscache_stat(&fscache_n_attr_changed_calls);
39880 +       fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39881  
39882         if (fscache_object_is_active(object)) {
39883                 fscache_stat(&fscache_n_cop_attr_changed);
39884 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
39885  
39886         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39887  
39888 -       fscache_stat(&fscache_n_attr_changed);
39889 +       fscache_stat_unchecked(&fscache_n_attr_changed);
39890  
39891         op = kzalloc(sizeof(*op), GFP_KERNEL);
39892         if (!op) {
39893 -               fscache_stat(&fscache_n_attr_changed_nomem);
39894 +               fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39895                 _leave(" = -ENOMEM");
39896                 return -ENOMEM;
39897         }
39898 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
39899         if (fscache_submit_exclusive_op(object, op) < 0)
39900                 goto nobufs;
39901         spin_unlock(&cookie->lock);
39902 -       fscache_stat(&fscache_n_attr_changed_ok);
39903 +       fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39904         fscache_put_operation(op);
39905         _leave(" = 0");
39906         return 0;
39907 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
39908  nobufs:
39909         spin_unlock(&cookie->lock);
39910         kfree(op);
39911 -       fscache_stat(&fscache_n_attr_changed_nobufs);
39912 +       fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39913         _leave(" = %d", -ENOBUFS);
39914         return -ENOBUFS;
39915  }
39916 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
39917         /* allocate a retrieval operation and attempt to submit it */
39918         op = kzalloc(sizeof(*op), GFP_NOIO);
39919         if (!op) {
39920 -               fscache_stat(&fscache_n_retrievals_nomem);
39921 +               fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39922                 return NULL;
39923         }
39924  
39925 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
39926                 return 0;
39927         }
39928  
39929 -       fscache_stat(&fscache_n_retrievals_wait);
39930 +       fscache_stat_unchecked(&fscache_n_retrievals_wait);
39931  
39932         jif = jiffies;
39933         if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39934                         fscache_wait_bit_interruptible,
39935                         TASK_INTERRUPTIBLE) != 0) {
39936 -               fscache_stat(&fscache_n_retrievals_intr);
39937 +               fscache_stat_unchecked(&fscache_n_retrievals_intr);
39938                 _leave(" = -ERESTARTSYS");
39939                 return -ERESTARTSYS;
39940         }
39941 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
39942   */
39943  static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39944                                                  struct fscache_retrieval *op,
39945 -                                                atomic_t *stat_op_waits,
39946 -                                                atomic_t *stat_object_dead)
39947 +                                                atomic_unchecked_t *stat_op_waits,
39948 +                                                atomic_unchecked_t *stat_object_dead)
39949  {
39950         int ret;
39951  
39952 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
39953                 goto check_if_dead;
39954  
39955         _debug(">>> WT");
39956 -       fscache_stat(stat_op_waits);
39957 +       fscache_stat_unchecked(stat_op_waits);
39958         if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39959                         fscache_wait_bit_interruptible,
39960                         TASK_INTERRUPTIBLE) < 0) {
39961 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
39962  
39963  check_if_dead:
39964         if (unlikely(fscache_object_is_dead(object))) {
39965 -               fscache_stat(stat_object_dead);
39966 +               fscache_stat_unchecked(stat_object_dead);
39967                 return -ENOBUFS;
39968         }
39969         return 0;
39970 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct 
39971  
39972         _enter("%p,%p,,,", cookie, page);
39973  
39974 -       fscache_stat(&fscache_n_retrievals);
39975 +       fscache_stat_unchecked(&fscache_n_retrievals);
39976  
39977         if (hlist_empty(&cookie->backing_objects))
39978                 goto nobufs;
39979 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct 
39980                 goto nobufs_unlock;
39981         spin_unlock(&cookie->lock);
39982  
39983 -       fscache_stat(&fscache_n_retrieval_ops);
39984 +       fscache_stat_unchecked(&fscache_n_retrieval_ops);
39985  
39986         /* pin the netfs read context in case we need to do the actual netfs
39987          * read because we've encountered a cache read failure */
39988 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct 
39989  
39990  error:
39991         if (ret == -ENOMEM)
39992 -               fscache_stat(&fscache_n_retrievals_nomem);
39993 +               fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39994         else if (ret == -ERESTARTSYS)
39995 -               fscache_stat(&fscache_n_retrievals_intr);
39996 +               fscache_stat_unchecked(&fscache_n_retrievals_intr);
39997         else if (ret == -ENODATA)
39998 -               fscache_stat(&fscache_n_retrievals_nodata);
39999 +               fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40000         else if (ret < 0)
40001 -               fscache_stat(&fscache_n_retrievals_nobufs);
40002 +               fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40003         else
40004 -               fscache_stat(&fscache_n_retrievals_ok);
40005 +               fscache_stat_unchecked(&fscache_n_retrievals_ok);
40006  
40007         fscache_put_retrieval(op);
40008         _leave(" = %d", ret);
40009 @@ -429,7 +429,7 @@ nobufs_unlock:
40010         spin_unlock(&cookie->lock);
40011         kfree(op);
40012  nobufs:
40013 -       fscache_stat(&fscache_n_retrievals_nobufs);
40014 +       fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40015         _leave(" = -ENOBUFS");
40016         return -ENOBUFS;
40017  }
40018 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
40019  
40020         _enter("%p,,%d,,,", cookie, *nr_pages);
40021  
40022 -       fscache_stat(&fscache_n_retrievals);
40023 +       fscache_stat_unchecked(&fscache_n_retrievals);
40024  
40025         if (hlist_empty(&cookie->backing_objects))
40026                 goto nobufs;
40027 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
40028                 goto nobufs_unlock;
40029         spin_unlock(&cookie->lock);
40030  
40031 -       fscache_stat(&fscache_n_retrieval_ops);
40032 +       fscache_stat_unchecked(&fscache_n_retrieval_ops);
40033  
40034         /* pin the netfs read context in case we need to do the actual netfs
40035          * read because we've encountered a cache read failure */
40036 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
40037  
40038  error:
40039         if (ret == -ENOMEM)
40040 -               fscache_stat(&fscache_n_retrievals_nomem);
40041 +               fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40042         else if (ret == -ERESTARTSYS)
40043 -               fscache_stat(&fscache_n_retrievals_intr);
40044 +               fscache_stat_unchecked(&fscache_n_retrievals_intr);
40045         else if (ret == -ENODATA)
40046 -               fscache_stat(&fscache_n_retrievals_nodata);
40047 +               fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40048         else if (ret < 0)
40049 -               fscache_stat(&fscache_n_retrievals_nobufs);
40050 +               fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40051         else
40052 -               fscache_stat(&fscache_n_retrievals_ok);
40053 +               fscache_stat_unchecked(&fscache_n_retrievals_ok);
40054  
40055         fscache_put_retrieval(op);
40056         _leave(" = %d", ret);
40057 @@ -545,7 +545,7 @@ nobufs_unlock:
40058         spin_unlock(&cookie->lock);
40059         kfree(op);
40060  nobufs:
40061 -       fscache_stat(&fscache_n_retrievals_nobufs);
40062 +       fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40063         _leave(" = -ENOBUFS");
40064         return -ENOBUFS;
40065  }
40066 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
40067  
40068         _enter("%p,%p,,,", cookie, page);
40069  
40070 -       fscache_stat(&fscache_n_allocs);
40071 +       fscache_stat_unchecked(&fscache_n_allocs);
40072  
40073         if (hlist_empty(&cookie->backing_objects))
40074                 goto nobufs;
40075 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
40076                 goto nobufs_unlock;
40077         spin_unlock(&cookie->lock);
40078  
40079 -       fscache_stat(&fscache_n_alloc_ops);
40080 +       fscache_stat_unchecked(&fscache_n_alloc_ops);
40081  
40082         ret = fscache_wait_for_retrieval_activation(
40083                 object, op,
40084 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
40085  
40086  error:
40087         if (ret == -ERESTARTSYS)
40088 -               fscache_stat(&fscache_n_allocs_intr);
40089 +               fscache_stat_unchecked(&fscache_n_allocs_intr);
40090         else if (ret < 0)
40091 -               fscache_stat(&fscache_n_allocs_nobufs);
40092 +               fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40093         else
40094 -               fscache_stat(&fscache_n_allocs_ok);
40095 +               fscache_stat_unchecked(&fscache_n_allocs_ok);
40096  
40097         fscache_put_retrieval(op);
40098         _leave(" = %d", ret);
40099 @@ -625,7 +625,7 @@ nobufs_unlock:
40100         spin_unlock(&cookie->lock);
40101         kfree(op);
40102  nobufs:
40103 -       fscache_stat(&fscache_n_allocs_nobufs);
40104 +       fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40105         _leave(" = -ENOBUFS");
40106         return -ENOBUFS;
40107  }
40108 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
40109  
40110         spin_lock(&cookie->stores_lock);
40111  
40112 -       fscache_stat(&fscache_n_store_calls);
40113 +       fscache_stat_unchecked(&fscache_n_store_calls);
40114  
40115         /* find a page to store */
40116         page = NULL;
40117 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
40118         page = results[0];
40119         _debug("gang %d [%lx]", n, page->index);
40120         if (page->index > op->store_limit) {
40121 -               fscache_stat(&fscache_n_store_pages_over_limit);
40122 +               fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
40123                 goto superseded;
40124         }
40125  
40126 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
40127         spin_unlock(&cookie->stores_lock);
40128         spin_unlock(&object->lock);
40129  
40130 -       fscache_stat(&fscache_n_store_pages);
40131 +       fscache_stat_unchecked(&fscache_n_store_pages);
40132         fscache_stat(&fscache_n_cop_write_page);
40133         ret = object->cache->ops->write_page(op, page);
40134         fscache_stat_d(&fscache_n_cop_write_page);
40135 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
40136         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40137         ASSERT(PageFsCache(page));
40138  
40139 -       fscache_stat(&fscache_n_stores);
40140 +       fscache_stat_unchecked(&fscache_n_stores);
40141  
40142         op = kzalloc(sizeof(*op), GFP_NOIO);
40143         if (!op)
40144 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
40145         spin_unlock(&cookie->stores_lock);
40146         spin_unlock(&object->lock);
40147  
40148 -       op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
40149 +       op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
40150         op->store_limit = object->store_limit;
40151  
40152         if (fscache_submit_op(object, &op->op) < 0)
40153 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
40154  
40155         spin_unlock(&cookie->lock);
40156         radix_tree_preload_end();
40157 -       fscache_stat(&fscache_n_store_ops);
40158 -       fscache_stat(&fscache_n_stores_ok);
40159 +       fscache_stat_unchecked(&fscache_n_store_ops);
40160 +       fscache_stat_unchecked(&fscache_n_stores_ok);
40161  
40162         /* the work queue now carries its own ref on the object */
40163         fscache_put_operation(&op->op);
40164 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
40165         return 0;
40166  
40167  already_queued:
40168 -       fscache_stat(&fscache_n_stores_again);
40169 +       fscache_stat_unchecked(&fscache_n_stores_again);
40170  already_pending:
40171         spin_unlock(&cookie->stores_lock);
40172         spin_unlock(&object->lock);
40173         spin_unlock(&cookie->lock);
40174         radix_tree_preload_end();
40175         kfree(op);
40176 -       fscache_stat(&fscache_n_stores_ok);
40177 +       fscache_stat_unchecked(&fscache_n_stores_ok);
40178         _leave(" = 0");
40179         return 0;
40180  
40181 @@ -851,14 +851,14 @@ nobufs:
40182         spin_unlock(&cookie->lock);
40183         radix_tree_preload_end();
40184         kfree(op);
40185 -       fscache_stat(&fscache_n_stores_nobufs);
40186 +       fscache_stat_unchecked(&fscache_n_stores_nobufs);
40187         _leave(" = -ENOBUFS");
40188         return -ENOBUFS;
40189  
40190  nomem_free:
40191         kfree(op);
40192  nomem:
40193 -       fscache_stat(&fscache_n_stores_oom);
40194 +       fscache_stat_unchecked(&fscache_n_stores_oom);
40195         _leave(" = -ENOMEM");
40196         return -ENOMEM;
40197  }
40198 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
40199         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40200         ASSERTCMP(page, !=, NULL);
40201  
40202 -       fscache_stat(&fscache_n_uncaches);
40203 +       fscache_stat_unchecked(&fscache_n_uncaches);
40204  
40205         /* cache withdrawal may beat us to it */
40206         if (!PageFsCache(page))
40207 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
40208         unsigned long loop;
40209  
40210  #ifdef CONFIG_FSCACHE_STATS
40211 -       atomic_add(pagevec->nr, &fscache_n_marks);
40212 +       atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
40213  #endif
40214  
40215         for (loop = 0; loop < pagevec->nr; loop++) {
40216 diff -urNp linux-3.0.4/fs/fscache/stats.c linux-3.0.4/fs/fscache/stats.c
40217 --- linux-3.0.4/fs/fscache/stats.c      2011-07-21 22:17:23.000000000 -0400
40218 +++ linux-3.0.4/fs/fscache/stats.c      2011-08-23 21:47:56.000000000 -0400
40219 @@ -18,95 +18,95 @@
40220  /*
40221   * operation counters
40222   */
40223 -atomic_t fscache_n_op_pend;
40224 -atomic_t fscache_n_op_run;
40225 -atomic_t fscache_n_op_enqueue;
40226 -atomic_t fscache_n_op_requeue;
40227 -atomic_t fscache_n_op_deferred_release;
40228 -atomic_t fscache_n_op_release;
40229 -atomic_t fscache_n_op_gc;
40230 -atomic_t fscache_n_op_cancelled;
40231 -atomic_t fscache_n_op_rejected;
40232 -
40233 -atomic_t fscache_n_attr_changed;
40234 -atomic_t fscache_n_attr_changed_ok;
40235 -atomic_t fscache_n_attr_changed_nobufs;
40236 -atomic_t fscache_n_attr_changed_nomem;
40237 -atomic_t fscache_n_attr_changed_calls;
40238 -
40239 -atomic_t fscache_n_allocs;
40240 -atomic_t fscache_n_allocs_ok;
40241 -atomic_t fscache_n_allocs_wait;
40242 -atomic_t fscache_n_allocs_nobufs;
40243 -atomic_t fscache_n_allocs_intr;
40244 -atomic_t fscache_n_allocs_object_dead;
40245 -atomic_t fscache_n_alloc_ops;
40246 -atomic_t fscache_n_alloc_op_waits;
40247 -
40248 -atomic_t fscache_n_retrievals;
40249 -atomic_t fscache_n_retrievals_ok;
40250 -atomic_t fscache_n_retrievals_wait;
40251 -atomic_t fscache_n_retrievals_nodata;
40252 -atomic_t fscache_n_retrievals_nobufs;
40253 -atomic_t fscache_n_retrievals_intr;
40254 -atomic_t fscache_n_retrievals_nomem;
40255 -atomic_t fscache_n_retrievals_object_dead;
40256 -atomic_t fscache_n_retrieval_ops;
40257 -atomic_t fscache_n_retrieval_op_waits;
40258 -
40259 -atomic_t fscache_n_stores;
40260 -atomic_t fscache_n_stores_ok;
40261 -atomic_t fscache_n_stores_again;
40262 -atomic_t fscache_n_stores_nobufs;
40263 -atomic_t fscache_n_stores_oom;
40264 -atomic_t fscache_n_store_ops;
40265 -atomic_t fscache_n_store_calls;
40266 -atomic_t fscache_n_store_pages;
40267 -atomic_t fscache_n_store_radix_deletes;
40268 -atomic_t fscache_n_store_pages_over_limit;
40269 -
40270 -atomic_t fscache_n_store_vmscan_not_storing;
40271 -atomic_t fscache_n_store_vmscan_gone;
40272 -atomic_t fscache_n_store_vmscan_busy;
40273 -atomic_t fscache_n_store_vmscan_cancelled;
40274 -
40275 -atomic_t fscache_n_marks;
40276 -atomic_t fscache_n_uncaches;
40277 -
40278 -atomic_t fscache_n_acquires;
40279 -atomic_t fscache_n_acquires_null;
40280 -atomic_t fscache_n_acquires_no_cache;
40281 -atomic_t fscache_n_acquires_ok;
40282 -atomic_t fscache_n_acquires_nobufs;
40283 -atomic_t fscache_n_acquires_oom;
40284 -
40285 -atomic_t fscache_n_updates;
40286 -atomic_t fscache_n_updates_null;
40287 -atomic_t fscache_n_updates_run;
40288 -
40289 -atomic_t fscache_n_relinquishes;
40290 -atomic_t fscache_n_relinquishes_null;
40291 -atomic_t fscache_n_relinquishes_waitcrt;
40292 -atomic_t fscache_n_relinquishes_retire;
40293 -
40294 -atomic_t fscache_n_cookie_index;
40295 -atomic_t fscache_n_cookie_data;
40296 -atomic_t fscache_n_cookie_special;
40297 -
40298 -atomic_t fscache_n_object_alloc;
40299 -atomic_t fscache_n_object_no_alloc;
40300 -atomic_t fscache_n_object_lookups;
40301 -atomic_t fscache_n_object_lookups_negative;
40302 -atomic_t fscache_n_object_lookups_positive;
40303 -atomic_t fscache_n_object_lookups_timed_out;
40304 -atomic_t fscache_n_object_created;
40305 -atomic_t fscache_n_object_avail;
40306 -atomic_t fscache_n_object_dead;
40307 -
40308 -atomic_t fscache_n_checkaux_none;
40309 -atomic_t fscache_n_checkaux_okay;
40310 -atomic_t fscache_n_checkaux_update;
40311 -atomic_t fscache_n_checkaux_obsolete;
40312 +atomic_unchecked_t fscache_n_op_pend;
40313 +atomic_unchecked_t fscache_n_op_run;
40314 +atomic_unchecked_t fscache_n_op_enqueue;
40315 +atomic_unchecked_t fscache_n_op_requeue;
40316 +atomic_unchecked_t fscache_n_op_deferred_release;
40317 +atomic_unchecked_t fscache_n_op_release;
40318 +atomic_unchecked_t fscache_n_op_gc;
40319 +atomic_unchecked_t fscache_n_op_cancelled;
40320 +atomic_unchecked_t fscache_n_op_rejected;
40321 +
40322 +atomic_unchecked_t fscache_n_attr_changed;
40323 +atomic_unchecked_t fscache_n_attr_changed_ok;
40324 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
40325 +atomic_unchecked_t fscache_n_attr_changed_nomem;
40326 +atomic_unchecked_t fscache_n_attr_changed_calls;
40327 +
40328 +atomic_unchecked_t fscache_n_allocs;
40329 +atomic_unchecked_t fscache_n_allocs_ok;
40330 +atomic_unchecked_t fscache_n_allocs_wait;
40331 +atomic_unchecked_t fscache_n_allocs_nobufs;
40332 +atomic_unchecked_t fscache_n_allocs_intr;
40333 +atomic_unchecked_t fscache_n_allocs_object_dead;
40334 +atomic_unchecked_t fscache_n_alloc_ops;
40335 +atomic_unchecked_t fscache_n_alloc_op_waits;
40336 +
40337 +atomic_unchecked_t fscache_n_retrievals;
40338 +atomic_unchecked_t fscache_n_retrievals_ok;
40339 +atomic_unchecked_t fscache_n_retrievals_wait;
40340 +atomic_unchecked_t fscache_n_retrievals_nodata;
40341 +atomic_unchecked_t fscache_n_retrievals_nobufs;
40342 +atomic_unchecked_t fscache_n_retrievals_intr;
40343 +atomic_unchecked_t fscache_n_retrievals_nomem;
40344 +atomic_unchecked_t fscache_n_retrievals_object_dead;
40345 +atomic_unchecked_t fscache_n_retrieval_ops;
40346 +atomic_unchecked_t fscache_n_retrieval_op_waits;
40347 +
40348 +atomic_unchecked_t fscache_n_stores;
40349 +atomic_unchecked_t fscache_n_stores_ok;
40350 +atomic_unchecked_t fscache_n_stores_again;
40351 +atomic_unchecked_t fscache_n_stores_nobufs;
40352 +atomic_unchecked_t fscache_n_stores_oom;
40353 +atomic_unchecked_t fscache_n_store_ops;
40354 +atomic_unchecked_t fscache_n_store_calls;
40355 +atomic_unchecked_t fscache_n_store_pages;
40356 +atomic_unchecked_t fscache_n_store_radix_deletes;
40357 +atomic_unchecked_t fscache_n_store_pages_over_limit;
40358 +
40359 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40360 +atomic_unchecked_t fscache_n_store_vmscan_gone;
40361 +atomic_unchecked_t fscache_n_store_vmscan_busy;
40362 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40363 +
40364 +atomic_unchecked_t fscache_n_marks;
40365 +atomic_unchecked_t fscache_n_uncaches;
40366 +
40367 +atomic_unchecked_t fscache_n_acquires;
40368 +atomic_unchecked_t fscache_n_acquires_null;
40369 +atomic_unchecked_t fscache_n_acquires_no_cache;
40370 +atomic_unchecked_t fscache_n_acquires_ok;
40371 +atomic_unchecked_t fscache_n_acquires_nobufs;
40372 +atomic_unchecked_t fscache_n_acquires_oom;
40373 +
40374 +atomic_unchecked_t fscache_n_updates;
40375 +atomic_unchecked_t fscache_n_updates_null;
40376 +atomic_unchecked_t fscache_n_updates_run;
40377 +
40378 +atomic_unchecked_t fscache_n_relinquishes;
40379 +atomic_unchecked_t fscache_n_relinquishes_null;
40380 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40381 +atomic_unchecked_t fscache_n_relinquishes_retire;
40382 +
40383 +atomic_unchecked_t fscache_n_cookie_index;
40384 +atomic_unchecked_t fscache_n_cookie_data;
40385 +atomic_unchecked_t fscache_n_cookie_special;
40386 +
40387 +atomic_unchecked_t fscache_n_object_alloc;
40388 +atomic_unchecked_t fscache_n_object_no_alloc;
40389 +atomic_unchecked_t fscache_n_object_lookups;
40390 +atomic_unchecked_t fscache_n_object_lookups_negative;
40391 +atomic_unchecked_t fscache_n_object_lookups_positive;
40392 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
40393 +atomic_unchecked_t fscache_n_object_created;
40394 +atomic_unchecked_t fscache_n_object_avail;
40395 +atomic_unchecked_t fscache_n_object_dead;
40396 +
40397 +atomic_unchecked_t fscache_n_checkaux_none;
40398 +atomic_unchecked_t fscache_n_checkaux_okay;
40399 +atomic_unchecked_t fscache_n_checkaux_update;
40400 +atomic_unchecked_t fscache_n_checkaux_obsolete;
40401  
40402  atomic_t fscache_n_cop_alloc_object;
40403  atomic_t fscache_n_cop_lookup_object;
40404 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
40405         seq_puts(m, "FS-Cache statistics\n");
40406  
40407         seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
40408 -                  atomic_read(&fscache_n_cookie_index),
40409 -                  atomic_read(&fscache_n_cookie_data),
40410 -                  atomic_read(&fscache_n_cookie_special));
40411 +                  atomic_read_unchecked(&fscache_n_cookie_index),
40412 +                  atomic_read_unchecked(&fscache_n_cookie_data),
40413 +                  atomic_read_unchecked(&fscache_n_cookie_special));
40414  
40415         seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
40416 -                  atomic_read(&fscache_n_object_alloc),
40417 -                  atomic_read(&fscache_n_object_no_alloc),
40418 -                  atomic_read(&fscache_n_object_avail),
40419 -                  atomic_read(&fscache_n_object_dead));
40420 +                  atomic_read_unchecked(&fscache_n_object_alloc),
40421 +                  atomic_read_unchecked(&fscache_n_object_no_alloc),
40422 +                  atomic_read_unchecked(&fscache_n_object_avail),
40423 +                  atomic_read_unchecked(&fscache_n_object_dead));
40424         seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
40425 -                  atomic_read(&fscache_n_checkaux_none),
40426 -                  atomic_read(&fscache_n_checkaux_okay),
40427 -                  atomic_read(&fscache_n_checkaux_update),
40428 -                  atomic_read(&fscache_n_checkaux_obsolete));
40429 +                  atomic_read_unchecked(&fscache_n_checkaux_none),
40430 +                  atomic_read_unchecked(&fscache_n_checkaux_okay),
40431 +                  atomic_read_unchecked(&fscache_n_checkaux_update),
40432 +                  atomic_read_unchecked(&fscache_n_checkaux_obsolete));
40433  
40434         seq_printf(m, "Pages  : mrk=%u unc=%u\n",
40435 -                  atomic_read(&fscache_n_marks),
40436 -                  atomic_read(&fscache_n_uncaches));
40437 +                  atomic_read_unchecked(&fscache_n_marks),
40438 +                  atomic_read_unchecked(&fscache_n_uncaches));
40439  
40440         seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
40441                    " oom=%u\n",
40442 -                  atomic_read(&fscache_n_acquires),
40443 -                  atomic_read(&fscache_n_acquires_null),
40444 -                  atomic_read(&fscache_n_acquires_no_cache),
40445 -                  atomic_read(&fscache_n_acquires_ok),
40446 -                  atomic_read(&fscache_n_acquires_nobufs),
40447 -                  atomic_read(&fscache_n_acquires_oom));
40448 +                  atomic_read_unchecked(&fscache_n_acquires),
40449 +                  atomic_read_unchecked(&fscache_n_acquires_null),
40450 +                  atomic_read_unchecked(&fscache_n_acquires_no_cache),
40451 +                  atomic_read_unchecked(&fscache_n_acquires_ok),
40452 +                  atomic_read_unchecked(&fscache_n_acquires_nobufs),
40453 +                  atomic_read_unchecked(&fscache_n_acquires_oom));
40454  
40455         seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
40456 -                  atomic_read(&fscache_n_object_lookups),
40457 -                  atomic_read(&fscache_n_object_lookups_negative),
40458 -                  atomic_read(&fscache_n_object_lookups_positive),
40459 -                  atomic_read(&fscache_n_object_created),
40460 -                  atomic_read(&fscache_n_object_lookups_timed_out));
40461 +                  atomic_read_unchecked(&fscache_n_object_lookups),
40462 +                  atomic_read_unchecked(&fscache_n_object_lookups_negative),
40463 +                  atomic_read_unchecked(&fscache_n_object_lookups_positive),
40464 +                  atomic_read_unchecked(&fscache_n_object_created),
40465 +                  atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
40466  
40467         seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
40468 -                  atomic_read(&fscache_n_updates),
40469 -                  atomic_read(&fscache_n_updates_null),
40470 -                  atomic_read(&fscache_n_updates_run));
40471 +                  atomic_read_unchecked(&fscache_n_updates),
40472 +                  atomic_read_unchecked(&fscache_n_updates_null),
40473 +                  atomic_read_unchecked(&fscache_n_updates_run));
40474  
40475         seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
40476 -                  atomic_read(&fscache_n_relinquishes),
40477 -                  atomic_read(&fscache_n_relinquishes_null),
40478 -                  atomic_read(&fscache_n_relinquishes_waitcrt),
40479 -                  atomic_read(&fscache_n_relinquishes_retire));
40480 +                  atomic_read_unchecked(&fscache_n_relinquishes),
40481 +                  atomic_read_unchecked(&fscache_n_relinquishes_null),
40482 +                  atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
40483 +                  atomic_read_unchecked(&fscache_n_relinquishes_retire));
40484  
40485         seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
40486 -                  atomic_read(&fscache_n_attr_changed),
40487 -                  atomic_read(&fscache_n_attr_changed_ok),
40488 -                  atomic_read(&fscache_n_attr_changed_nobufs),
40489 -                  atomic_read(&fscache_n_attr_changed_nomem),
40490 -                  atomic_read(&fscache_n_attr_changed_calls));
40491 +                  atomic_read_unchecked(&fscache_n_attr_changed),
40492 +                  atomic_read_unchecked(&fscache_n_attr_changed_ok),
40493 +                  atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
40494 +                  atomic_read_unchecked(&fscache_n_attr_changed_nomem),
40495 +                  atomic_read_unchecked(&fscache_n_attr_changed_calls));
40496  
40497         seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
40498 -                  atomic_read(&fscache_n_allocs),
40499 -                  atomic_read(&fscache_n_allocs_ok),
40500 -                  atomic_read(&fscache_n_allocs_wait),
40501 -                  atomic_read(&fscache_n_allocs_nobufs),
40502 -                  atomic_read(&fscache_n_allocs_intr));
40503 +                  atomic_read_unchecked(&fscache_n_allocs),
40504 +                  atomic_read_unchecked(&fscache_n_allocs_ok),
40505 +                  atomic_read_unchecked(&fscache_n_allocs_wait),
40506 +                  atomic_read_unchecked(&fscache_n_allocs_nobufs),
40507 +                  atomic_read_unchecked(&fscache_n_allocs_intr));
40508         seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
40509 -                  atomic_read(&fscache_n_alloc_ops),
40510 -                  atomic_read(&fscache_n_alloc_op_waits),
40511 -                  atomic_read(&fscache_n_allocs_object_dead));
40512 +                  atomic_read_unchecked(&fscache_n_alloc_ops),
40513 +                  atomic_read_unchecked(&fscache_n_alloc_op_waits),
40514 +                  atomic_read_unchecked(&fscache_n_allocs_object_dead));
40515  
40516         seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
40517                    " int=%u oom=%u\n",
40518 -                  atomic_read(&fscache_n_retrievals),
40519 -                  atomic_read(&fscache_n_retrievals_ok),
40520 -                  atomic_read(&fscache_n_retrievals_wait),
40521 -                  atomic_read(&fscache_n_retrievals_nodata),
40522 -                  atomic_read(&fscache_n_retrievals_nobufs),
40523 -                  atomic_read(&fscache_n_retrievals_intr),
40524 -                  atomic_read(&fscache_n_retrievals_nomem));
40525 +                  atomic_read_unchecked(&fscache_n_retrievals),
40526 +                  atomic_read_unchecked(&fscache_n_retrievals_ok),
40527 +                  atomic_read_unchecked(&fscache_n_retrievals_wait),
40528 +                  atomic_read_unchecked(&fscache_n_retrievals_nodata),
40529 +                  atomic_read_unchecked(&fscache_n_retrievals_nobufs),
40530 +                  atomic_read_unchecked(&fscache_n_retrievals_intr),
40531 +                  atomic_read_unchecked(&fscache_n_retrievals_nomem));
40532         seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
40533 -                  atomic_read(&fscache_n_retrieval_ops),
40534 -                  atomic_read(&fscache_n_retrieval_op_waits),
40535 -                  atomic_read(&fscache_n_retrievals_object_dead));
40536 +                  atomic_read_unchecked(&fscache_n_retrieval_ops),
40537 +                  atomic_read_unchecked(&fscache_n_retrieval_op_waits),
40538 +                  atomic_read_unchecked(&fscache_n_retrievals_object_dead));
40539  
40540         seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
40541 -                  atomic_read(&fscache_n_stores),
40542 -                  atomic_read(&fscache_n_stores_ok),
40543 -                  atomic_read(&fscache_n_stores_again),
40544 -                  atomic_read(&fscache_n_stores_nobufs),
40545 -                  atomic_read(&fscache_n_stores_oom));
40546 +                  atomic_read_unchecked(&fscache_n_stores),
40547 +                  atomic_read_unchecked(&fscache_n_stores_ok),
40548 +                  atomic_read_unchecked(&fscache_n_stores_again),
40549 +                  atomic_read_unchecked(&fscache_n_stores_nobufs),
40550 +                  atomic_read_unchecked(&fscache_n_stores_oom));
40551         seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
40552 -                  atomic_read(&fscache_n_store_ops),
40553 -                  atomic_read(&fscache_n_store_calls),
40554 -                  atomic_read(&fscache_n_store_pages),
40555 -                  atomic_read(&fscache_n_store_radix_deletes),
40556 -                  atomic_read(&fscache_n_store_pages_over_limit));
40557 +                  atomic_read_unchecked(&fscache_n_store_ops),
40558 +                  atomic_read_unchecked(&fscache_n_store_calls),
40559 +                  atomic_read_unchecked(&fscache_n_store_pages),
40560 +                  atomic_read_unchecked(&fscache_n_store_radix_deletes),
40561 +                  atomic_read_unchecked(&fscache_n_store_pages_over_limit));
40562  
40563         seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
40564 -                  atomic_read(&fscache_n_store_vmscan_not_storing),
40565 -                  atomic_read(&fscache_n_store_vmscan_gone),
40566 -                  atomic_read(&fscache_n_store_vmscan_busy),
40567 -                  atomic_read(&fscache_n_store_vmscan_cancelled));
40568 +                  atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
40569 +                  atomic_read_unchecked(&fscache_n_store_vmscan_gone),
40570 +                  atomic_read_unchecked(&fscache_n_store_vmscan_busy),
40571 +                  atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
40572  
40573         seq_printf(m, "Ops    : pend=%u run=%u enq=%u can=%u rej=%u\n",
40574 -                  atomic_read(&fscache_n_op_pend),
40575 -                  atomic_read(&fscache_n_op_run),
40576 -                  atomic_read(&fscache_n_op_enqueue),
40577 -                  atomic_read(&fscache_n_op_cancelled),
40578 -                  atomic_read(&fscache_n_op_rejected));
40579 +                  atomic_read_unchecked(&fscache_n_op_pend),
40580 +                  atomic_read_unchecked(&fscache_n_op_run),
40581 +                  atomic_read_unchecked(&fscache_n_op_enqueue),
40582 +                  atomic_read_unchecked(&fscache_n_op_cancelled),
40583 +                  atomic_read_unchecked(&fscache_n_op_rejected));
40584         seq_printf(m, "Ops    : dfr=%u rel=%u gc=%u\n",
40585 -                  atomic_read(&fscache_n_op_deferred_release),
40586 -                  atomic_read(&fscache_n_op_release),
40587 -                  atomic_read(&fscache_n_op_gc));
40588 +                  atomic_read_unchecked(&fscache_n_op_deferred_release),
40589 +                  atomic_read_unchecked(&fscache_n_op_release),
40590 +                  atomic_read_unchecked(&fscache_n_op_gc));
40591  
40592         seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
40593                    atomic_read(&fscache_n_cop_alloc_object),
40594 diff -urNp linux-3.0.4/fs/fs_struct.c linux-3.0.4/fs/fs_struct.c
40595 --- linux-3.0.4/fs/fs_struct.c  2011-07-21 22:17:23.000000000 -0400
40596 +++ linux-3.0.4/fs/fs_struct.c  2011-08-23 21:48:14.000000000 -0400
40597 @@ -4,6 +4,7 @@
40598  #include <linux/path.h>
40599  #include <linux/slab.h>
40600  #include <linux/fs_struct.h>
40601 +#include <linux/grsecurity.h>
40602  #include <linux/vserver/global.h>
40603  #include "internal.h"
40604  
40605 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
40606         old_root = fs->root;
40607         fs->root = *path;
40608         path_get_longterm(path);
40609 +       gr_set_chroot_entries(current, path);
40610         write_seqcount_end(&fs->seq);
40611         spin_unlock(&fs->lock);
40612         if (old_root.dentry)
40613 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
40614                             && fs->root.mnt == old_root->mnt) {
40615                                 path_get_longterm(new_root);
40616                                 fs->root = *new_root;
40617 +                               gr_set_chroot_entries(p, new_root);
40618                                 count++;
40619                         }
40620                         if (fs->pwd.dentry == old_root->dentry
40621 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
40622                 spin_lock(&fs->lock);
40623                 write_seqcount_begin(&fs->seq);
40624                 tsk->fs = NULL;
40625 -               kill = !--fs->users;
40626 +               gr_clear_chroot_entries(tsk);
40627 +               kill = !atomic_dec_return(&fs->users);
40628                 write_seqcount_end(&fs->seq);
40629                 spin_unlock(&fs->lock);
40630                 task_unlock(tsk);
40631 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct 
40632         struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
40633         /* We don't need to lock fs - think why ;-) */
40634         if (fs) {
40635 -               fs->users = 1;
40636 +               atomic_set(&fs->users, 1);
40637                 fs->in_exec = 0;
40638                 spin_lock_init(&fs->lock);
40639                 seqcount_init(&fs->seq);
40640 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct 
40641                 spin_lock(&old->lock);
40642                 fs->root = old->root;
40643                 path_get_longterm(&fs->root);
40644 +               /* instead of calling gr_set_chroot_entries here,
40645 +                  we call it from every caller of this function
40646 +               */
40647                 fs->pwd = old->pwd;
40648                 path_get_longterm(&fs->pwd);
40649                 spin_unlock(&old->lock);
40650 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
40651  
40652         task_lock(current);
40653         spin_lock(&fs->lock);
40654 -       kill = !--fs->users;
40655 +       kill = !atomic_dec_return(&fs->users);
40656         current->fs = new_fs;
40657 +       gr_set_chroot_entries(current, &new_fs->root);
40658         spin_unlock(&fs->lock);
40659         task_unlock(current);
40660  
40661 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
40662  
40663  /* to be mentioned only in INIT_TASK */
40664  struct fs_struct init_fs = {
40665 -       .users          = 1,
40666 +       .users          = ATOMIC_INIT(1),
40667         .lock           = __SPIN_LOCK_UNLOCKED(init_fs.lock),
40668         .seq            = SEQCNT_ZERO,
40669         .umask          = 0022,
40670 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
40671                 task_lock(current);
40672  
40673                 spin_lock(&init_fs.lock);
40674 -               init_fs.users++;
40675 +               atomic_inc(&init_fs.users);
40676                 spin_unlock(&init_fs.lock);
40677  
40678                 spin_lock(&fs->lock);
40679                 current->fs = &init_fs;
40680 -               kill = !--fs->users;
40681 +               gr_set_chroot_entries(current, &current->fs->root);
40682 +               kill = !atomic_dec_return(&fs->users);
40683                 spin_unlock(&fs->lock);
40684  
40685                 task_unlock(current);
40686 diff -urNp linux-3.0.4/fs/fuse/cuse.c linux-3.0.4/fs/fuse/cuse.c
40687 --- linux-3.0.4/fs/fuse/cuse.c  2011-07-21 22:17:23.000000000 -0400
40688 +++ linux-3.0.4/fs/fuse/cuse.c  2011-08-23 21:47:56.000000000 -0400
40689 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
40690                 INIT_LIST_HEAD(&cuse_conntbl[i]);
40691  
40692         /* inherit and extend fuse_dev_operations */
40693 -       cuse_channel_fops               = fuse_dev_operations;
40694 -       cuse_channel_fops.owner         = THIS_MODULE;
40695 -       cuse_channel_fops.open          = cuse_channel_open;
40696 -       cuse_channel_fops.release       = cuse_channel_release;
40697 +       pax_open_kernel();
40698 +       memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
40699 +       *(void **)&cuse_channel_fops.owner      = THIS_MODULE;
40700 +       *(void **)&cuse_channel_fops.open       = cuse_channel_open;
40701 +       *(void **)&cuse_channel_fops.release    = cuse_channel_release;
40702 +       pax_close_kernel();
40703  
40704         cuse_class = class_create(THIS_MODULE, "cuse");
40705         if (IS_ERR(cuse_class))
40706 diff -urNp linux-3.0.4/fs/fuse/dev.c linux-3.0.4/fs/fuse/dev.c
40707 --- linux-3.0.4/fs/fuse/dev.c   2011-08-29 23:26:14.000000000 -0400
40708 +++ linux-3.0.4/fs/fuse/dev.c   2011-08-29 23:26:27.000000000 -0400
40709 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
40710         ret = 0;
40711         pipe_lock(pipe);
40712  
40713 -       if (!pipe->readers) {
40714 +       if (!atomic_read(&pipe->readers)) {
40715                 send_sig(SIGPIPE, current, 0);
40716                 if (!ret)
40717                         ret = -EPIPE;
40718 diff -urNp linux-3.0.4/fs/fuse/dir.c linux-3.0.4/fs/fuse/dir.c
40719 --- linux-3.0.4/fs/fuse/dir.c   2011-07-21 22:17:23.000000000 -0400
40720 +++ linux-3.0.4/fs/fuse/dir.c   2011-08-23 21:47:56.000000000 -0400
40721 @@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
40722         return link;
40723  }
40724  
40725 -static void free_link(char *link)
40726 +static void free_link(const char *link)
40727  {
40728         if (!IS_ERR(link))
40729                 free_page((unsigned long) link);
40730 diff -urNp linux-3.0.4/fs/gfs2/inode.c linux-3.0.4/fs/gfs2/inode.c
40731 --- linux-3.0.4/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
40732 +++ linux-3.0.4/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
40733 @@ -1525,7 +1525,7 @@ out:
40734  
40735  static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
40736  {
40737 -       char *s = nd_get_link(nd);
40738 +       const char *s = nd_get_link(nd);
40739         if (!IS_ERR(s))
40740                 kfree(s);
40741  }
40742 diff -urNp linux-3.0.4/fs/hfsplus/catalog.c linux-3.0.4/fs/hfsplus/catalog.c
40743 --- linux-3.0.4/fs/hfsplus/catalog.c    2011-07-21 22:17:23.000000000 -0400
40744 +++ linux-3.0.4/fs/hfsplus/catalog.c    2011-08-23 21:48:14.000000000 -0400
40745 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block 
40746         int err;
40747         u16 type;
40748  
40749 +       pax_track_stack();
40750 +
40751         hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40752         err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40753         if (err)
40754 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct 
40755         int entry_size;
40756         int err;
40757  
40758 +       pax_track_stack();
40759 +
40760         dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
40761                 str->name, cnid, inode->i_nlink);
40762         hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
40763 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
40764         int entry_size, type;
40765         int err = 0;
40766  
40767 +       pax_track_stack();
40768 +
40769         dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
40770                 cnid, src_dir->i_ino, src_name->name,
40771                 dst_dir->i_ino, dst_name->name);
40772 diff -urNp linux-3.0.4/fs/hfsplus/dir.c linux-3.0.4/fs/hfsplus/dir.c
40773 --- linux-3.0.4/fs/hfsplus/dir.c        2011-07-21 22:17:23.000000000 -0400
40774 +++ linux-3.0.4/fs/hfsplus/dir.c        2011-08-23 21:48:14.000000000 -0400
40775 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
40776         struct hfsplus_readdir_data *rd;
40777         u16 type;
40778  
40779 +       pax_track_stack();
40780 +
40781         if (filp->f_pos >= inode->i_size)
40782                 return 0;
40783  
40784 diff -urNp linux-3.0.4/fs/hfsplus/inode.c linux-3.0.4/fs/hfsplus/inode.c
40785 --- linux-3.0.4/fs/hfsplus/inode.c      2011-07-21 22:17:23.000000000 -0400
40786 +++ linux-3.0.4/fs/hfsplus/inode.c      2011-08-23 21:48:14.000000000 -0400
40787 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode 
40788         int res = 0;
40789         u16 type;
40790  
40791 +       pax_track_stack();
40792 +
40793         type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40794  
40795         HFSPLUS_I(inode)->linkid = 0;
40796 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
40797         struct hfs_find_data fd;
40798         hfsplus_cat_entry entry;
40799  
40800 +       pax_track_stack();
40801 +
40802         if (HFSPLUS_IS_RSRC(inode))
40803                 main_inode = HFSPLUS_I(inode)->rsrc_inode;
40804  
40805 diff -urNp linux-3.0.4/fs/hfsplus/ioctl.c linux-3.0.4/fs/hfsplus/ioctl.c
40806 --- linux-3.0.4/fs/hfsplus/ioctl.c      2011-07-21 22:17:23.000000000 -0400
40807 +++ linux-3.0.4/fs/hfsplus/ioctl.c      2011-08-23 21:48:14.000000000 -0400
40808 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
40809         struct hfsplus_cat_file *file;
40810         int res;
40811  
40812 +       pax_track_stack();
40813 +
40814         if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40815                 return -EOPNOTSUPP;
40816  
40817 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40818         struct hfsplus_cat_file *file;
40819         ssize_t res = 0;
40820  
40821 +       pax_track_stack();
40822 +
40823         if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40824                 return -EOPNOTSUPP;
40825  
40826 diff -urNp linux-3.0.4/fs/hfsplus/super.c linux-3.0.4/fs/hfsplus/super.c
40827 --- linux-3.0.4/fs/hfsplus/super.c      2011-07-21 22:17:23.000000000 -0400
40828 +++ linux-3.0.4/fs/hfsplus/super.c      2011-08-23 21:48:14.000000000 -0400
40829 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
40830         struct nls_table *nls = NULL;
40831         int err;
40832  
40833 +       pax_track_stack();
40834 +
40835         err = -EINVAL;
40836         sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40837         if (!sbi)
40838 diff -urNp linux-3.0.4/fs/hugetlbfs/inode.c linux-3.0.4/fs/hugetlbfs/inode.c
40839 --- linux-3.0.4/fs/hugetlbfs/inode.c    2011-07-21 22:17:23.000000000 -0400
40840 +++ linux-3.0.4/fs/hugetlbfs/inode.c    2011-08-23 21:48:14.000000000 -0400
40841 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
40842         .kill_sb        = kill_litter_super,
40843  };
40844  
40845 -static struct vfsmount *hugetlbfs_vfsmount;
40846 +struct vfsmount *hugetlbfs_vfsmount;
40847  
40848  static int can_do_hugetlb_shm(void)
40849  {
40850 diff -urNp linux-3.0.4/fs/inode.c linux-3.0.4/fs/inode.c
40851 --- linux-3.0.4/fs/inode.c      2011-07-21 22:17:23.000000000 -0400
40852 +++ linux-3.0.4/fs/inode.c      2011-08-23 21:47:56.000000000 -0400
40853 @@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
40854  
40855  #ifdef CONFIG_SMP
40856         if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
40857 -               static atomic_t shared_last_ino;
40858 -               int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
40859 +               static atomic_unchecked_t shared_last_ino;
40860 +               int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
40861  
40862                 res = next - LAST_INO_BATCH;
40863         }
40864 diff -urNp linux-3.0.4/fs/jbd/checkpoint.c linux-3.0.4/fs/jbd/checkpoint.c
40865 --- linux-3.0.4/fs/jbd/checkpoint.c     2011-07-21 22:17:23.000000000 -0400
40866 +++ linux-3.0.4/fs/jbd/checkpoint.c     2011-08-23 21:48:14.000000000 -0400
40867 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
40868         tid_t this_tid;
40869         int result;
40870  
40871 +       pax_track_stack();
40872 +
40873         jbd_debug(1, "Start checkpoint\n");
40874  
40875         /*
40876 diff -urNp linux-3.0.4/fs/jffs2/compr_rtime.c linux-3.0.4/fs/jffs2/compr_rtime.c
40877 --- linux-3.0.4/fs/jffs2/compr_rtime.c  2011-07-21 22:17:23.000000000 -0400
40878 +++ linux-3.0.4/fs/jffs2/compr_rtime.c  2011-08-23 21:48:14.000000000 -0400
40879 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
40880         int outpos = 0;
40881         int pos=0;
40882  
40883 +       pax_track_stack();
40884 +
40885         memset(positions,0,sizeof(positions));
40886  
40887         while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
40888 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
40889         int outpos = 0;
40890         int pos=0;
40891  
40892 +       pax_track_stack();
40893 +
40894         memset(positions,0,sizeof(positions));
40895  
40896         while (outpos<destlen) {
40897 diff -urNp linux-3.0.4/fs/jffs2/compr_rubin.c linux-3.0.4/fs/jffs2/compr_rubin.c
40898 --- linux-3.0.4/fs/jffs2/compr_rubin.c  2011-07-21 22:17:23.000000000 -0400
40899 +++ linux-3.0.4/fs/jffs2/compr_rubin.c  2011-08-23 21:48:14.000000000 -0400
40900 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
40901         int ret;
40902         uint32_t mysrclen, mydstlen;
40903  
40904 +       pax_track_stack();
40905 +
40906         mysrclen = *sourcelen;
40907         mydstlen = *dstlen - 8;
40908  
40909 diff -urNp linux-3.0.4/fs/jffs2/erase.c linux-3.0.4/fs/jffs2/erase.c
40910 --- linux-3.0.4/fs/jffs2/erase.c        2011-07-21 22:17:23.000000000 -0400
40911 +++ linux-3.0.4/fs/jffs2/erase.c        2011-08-23 21:47:56.000000000 -0400
40912 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
40913                 struct jffs2_unknown_node marker = {
40914                         .magic =        cpu_to_je16(JFFS2_MAGIC_BITMASK),
40915                         .nodetype =     cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40916 -                       .totlen =       cpu_to_je32(c->cleanmarker_size)
40917 +                       .totlen =       cpu_to_je32(c->cleanmarker_size),
40918 +                       .hdr_crc =      cpu_to_je32(0)
40919                 };
40920  
40921                 jffs2_prealloc_raw_node_refs(c, jeb, 1);
40922 diff -urNp linux-3.0.4/fs/jffs2/wbuf.c linux-3.0.4/fs/jffs2/wbuf.c
40923 --- linux-3.0.4/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
40924 +++ linux-3.0.4/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
40925 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
40926  {
40927         .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
40928         .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40929 -       .totlen = constant_cpu_to_je32(8)
40930 +       .totlen = constant_cpu_to_je32(8),
40931 +       .hdr_crc = constant_cpu_to_je32(0)
40932  };
40933  
40934  /*
40935 diff -urNp linux-3.0.4/fs/jffs2/xattr.c linux-3.0.4/fs/jffs2/xattr.c
40936 --- linux-3.0.4/fs/jffs2/xattr.c        2011-07-21 22:17:23.000000000 -0400
40937 +++ linux-3.0.4/fs/jffs2/xattr.c        2011-08-23 21:48:14.000000000 -0400
40938 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct 
40939  
40940         BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
40941  
40942 +       pax_track_stack();
40943 +
40944         /* Phase.1 : Merge same xref */
40945         for (i=0; i < XREF_TMPHASH_SIZE; i++)
40946                 xref_tmphash[i] = NULL;
40947 diff -urNp linux-3.0.4/fs/jfs/super.c linux-3.0.4/fs/jfs/super.c
40948 --- linux-3.0.4/fs/jfs/super.c  2011-07-21 22:17:23.000000000 -0400
40949 +++ linux-3.0.4/fs/jfs/super.c  2011-08-23 21:47:56.000000000 -0400
40950 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
40951  
40952         jfs_inode_cachep =
40953             kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
40954 -                           SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
40955 +                           SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
40956                             init_once);
40957         if (jfs_inode_cachep == NULL)
40958                 return -ENOMEM;
40959 diff -urNp linux-3.0.4/fs/Kconfig.binfmt linux-3.0.4/fs/Kconfig.binfmt
40960 --- linux-3.0.4/fs/Kconfig.binfmt       2011-07-21 22:17:23.000000000 -0400
40961 +++ linux-3.0.4/fs/Kconfig.binfmt       2011-08-23 21:47:56.000000000 -0400
40962 @@ -86,7 +86,7 @@ config HAVE_AOUT
40963  
40964  config BINFMT_AOUT
40965         tristate "Kernel support for a.out and ECOFF binaries"
40966 -       depends on HAVE_AOUT
40967 +       depends on HAVE_AOUT && BROKEN
40968         ---help---
40969           A.out (Assembler.OUTput) is a set of formats for libraries and
40970           executables used in the earliest versions of UNIX.  Linux used
40971 diff -urNp linux-3.0.4/fs/libfs.c linux-3.0.4/fs/libfs.c
40972 --- linux-3.0.4/fs/libfs.c      2011-07-21 22:17:23.000000000 -0400
40973 +++ linux-3.0.4/fs/libfs.c      2011-08-23 21:47:56.000000000 -0400
40974 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
40975  
40976                         for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
40977                                 struct dentry *next;
40978 +                               char d_name[sizeof(next->d_iname)];
40979 +                               const unsigned char *name;
40980 +
40981                                 next = list_entry(p, struct dentry, d_u.d_child);
40982                                 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
40983                                 if (!simple_positive(next)) {
40984 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
40985  
40986                                 spin_unlock(&next->d_lock);
40987                                 spin_unlock(&dentry->d_lock);
40988 -                               if (filldir(dirent, next->d_name.name, 
40989 +                               name = next->d_name.name;
40990 +                               if (name == next->d_iname) {
40991 +                                       memcpy(d_name, name, next->d_name.len);
40992 +                                       name = d_name;
40993 +                               }
40994 +                               if (filldir(dirent, name, 
40995                                             next->d_name.len, filp->f_pos, 
40996                                             next->d_inode->i_ino, 
40997                                             dt_type(next->d_inode)) < 0)
40998 diff -urNp linux-3.0.4/fs/lockd/clntproc.c linux-3.0.4/fs/lockd/clntproc.c
40999 --- linux-3.0.4/fs/lockd/clntproc.c     2011-07-21 22:17:23.000000000 -0400
41000 +++ linux-3.0.4/fs/lockd/clntproc.c     2011-08-23 21:48:14.000000000 -0400
41001 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
41002  /*
41003   * Cookie counter for NLM requests
41004   */
41005 -static atomic_t        nlm_cookie = ATOMIC_INIT(0x1234);
41006 +static atomic_unchecked_t      nlm_cookie = ATOMIC_INIT(0x1234);
41007  
41008  void nlmclnt_next_cookie(struct nlm_cookie *c)
41009  {
41010 -       u32     cookie = atomic_inc_return(&nlm_cookie);
41011 +       u32     cookie = atomic_inc_return_unchecked(&nlm_cookie);
41012  
41013         memcpy(c->data, &cookie, 4);
41014         c->len=4;
41015 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
41016         struct nlm_rqst reqst, *req;
41017         int             status;
41018  
41019 +       pax_track_stack();
41020 +
41021         req = &reqst;
41022         memset(req, 0, sizeof(*req));
41023         locks_init_lock(&req->a_args.lock.fl);
41024 diff -urNp linux-3.0.4/fs/locks.c linux-3.0.4/fs/locks.c
41025 --- linux-3.0.4/fs/locks.c      2011-07-21 22:17:23.000000000 -0400
41026 +++ linux-3.0.4/fs/locks.c      2011-08-23 21:47:56.000000000 -0400
41027 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
41028                 return;
41029  
41030         if (filp->f_op && filp->f_op->flock) {
41031 -               struct file_lock fl = {
41032 +               struct file_lock flock = {
41033                         .fl_pid = current->tgid,
41034                         .fl_file = filp,
41035                         .fl_flags = FL_FLOCK,
41036                         .fl_type = F_UNLCK,
41037                         .fl_end = OFFSET_MAX,
41038                 };
41039 -               filp->f_op->flock(filp, F_SETLKW, &fl);
41040 -               if (fl.fl_ops && fl.fl_ops->fl_release_private)
41041 -                       fl.fl_ops->fl_release_private(&fl);
41042 +               filp->f_op->flock(filp, F_SETLKW, &flock);
41043 +               if (flock.fl_ops && flock.fl_ops->fl_release_private)
41044 +                       flock.fl_ops->fl_release_private(&flock);
41045         }
41046  
41047         lock_flocks();
41048 diff -urNp linux-3.0.4/fs/logfs/super.c linux-3.0.4/fs/logfs/super.c
41049 --- linux-3.0.4/fs/logfs/super.c        2011-07-21 22:17:23.000000000 -0400
41050 +++ linux-3.0.4/fs/logfs/super.c        2011-08-23 21:48:14.000000000 -0400
41051 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
41052         struct logfs_disk_super _ds1, *ds1 = &_ds1;
41053         int err, valid0, valid1;
41054  
41055 +       pax_track_stack();
41056 +
41057         /* read first superblock */
41058         err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
41059         if (err)
41060 diff -urNp linux-3.0.4/fs/namei.c linux-3.0.4/fs/namei.c
41061 --- linux-3.0.4/fs/namei.c      2011-07-21 22:17:23.000000000 -0400
41062 +++ linux-3.0.4/fs/namei.c      2011-08-23 21:48:14.000000000 -0400
41063 @@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
41064                 return ret;
41065  
41066         /*
41067 -        * Read/write DACs are always overridable.
41068 -        * Executable DACs are overridable for all directories and
41069 -        * for non-directories that have least one exec bit set.
41070 +        * Searching includes executable on directories, else just read.
41071          */
41072 -       if (!(mask & MAY_EXEC) || execute_ok(inode))
41073 -               if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
41074 +       mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41075 +       if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
41076 +#ifdef CONFIG_GRKERNSEC
41077 +               if (flags & IPERM_FLAG_RCU)
41078 +                       return -ECHILD;
41079 +#endif
41080 +               if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
41081                         return 0;
41082 +       }
41083  
41084         /*
41085 -        * Searching includes executable on directories, else just read.
41086 +        * Read/write DACs are always overridable.
41087 +        * Executable DACs are overridable for all directories and
41088 +        * for non-directories that have least one exec bit set.
41089          */
41090 -       mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41091 -       if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
41092 -               if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
41093 +       if (!(mask & MAY_EXEC) || execute_ok(inode)) {
41094 +#ifdef CONFIG_GRKERNSEC
41095 +               if (flags & IPERM_FLAG_RCU)
41096 +                       return -ECHILD;
41097 +#endif
41098 +               if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
41099                         return 0;
41100 +       }
41101  
41102         return -EACCES;
41103  }
41104 @@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
41105                 br_read_unlock(vfsmount_lock);
41106         }
41107  
41108 +       if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
41109 +               return -ENOENT;
41110 +
41111         if (likely(!(nd->flags & LOOKUP_JUMPED)))
41112                 return 0;
41113  
41114 @@ -593,9 +606,16 @@ static inline int exec_permission(struct
41115         if (ret == -ECHILD)
41116                 return ret;
41117  
41118 -       if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
41119 -                       ns_capable(ns, CAP_DAC_READ_SEARCH))
41120 +       if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
41121                 goto ok;
41122 +       else {
41123 +#ifdef CONFIG_GRKERNSEC
41124 +               if (flags & IPERM_FLAG_RCU)
41125 +                       return -ECHILD;
41126 +#endif
41127 +               if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
41128 +                       goto ok;
41129 +       }
41130  
41131         return ret;
41132  ok:
41133 @@ -703,11 +723,19 @@ follow_link(struct path *link, struct na
41134                 return error;
41135         }
41136  
41137 +       if (gr_handle_follow_link(dentry->d_parent->d_inode,
41138 +                                 dentry->d_inode, dentry, nd->path.mnt)) {
41139 +               error = -EACCES;
41140 +               *p = ERR_PTR(error); /* no ->put_link(), please */
41141 +               path_put(&nd->path);
41142 +               return error;
41143 +       }
41144 +
41145         nd->last_type = LAST_BIND;
41146         *p = dentry->d_inode->i_op->follow_link(dentry, nd);
41147         error = PTR_ERR(*p);
41148         if (!IS_ERR(*p)) {
41149 -               char *s = nd_get_link(nd);
41150 +               const char *s = nd_get_link(nd);
41151                 error = 0;
41152                 if (s)
41153                         error = __vfs_follow_link(nd, s);
41154 @@ -1625,6 +1653,9 @@ static int do_path_lookup(int dfd, const
41155                 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
41156  
41157         if (likely(!retval)) {
41158 +               if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
41159 +                       return -ENOENT;
41160 +
41161                 if (unlikely(!audit_dummy_context())) {
41162                         if (nd->path.dentry && nd->inode)
41163                                 audit_inode(name, nd->path.dentry);
41164 @@ -1935,6 +1966,30 @@ int vfs_create(struct inode *dir, struct
41165         return error;
41166  }
41167  
41168 +/*
41169 + * Note that while the flag value (low two bits) for sys_open means:
41170 + *     00 - read-only
41171 + *     01 - write-only
41172 + *     10 - read-write
41173 + *     11 - special
41174 + * it is changed into
41175 + *     00 - no permissions needed
41176 + *     01 - read-permission
41177 + *     10 - write-permission
41178 + *     11 - read-write
41179 + * for the internal routines (ie open_namei()/follow_link() etc)
41180 + * This is more logical, and also allows the 00 "no perm needed"
41181 + * to be used for symlinks (where the permissions are checked
41182 + * later).
41183 + *
41184 +*/
41185 +static inline int open_to_namei_flags(int flag)
41186 +{
41187 +       if ((flag+1) & O_ACCMODE)
41188 +               flag++;
41189 +       return flag;
41190 +}
41191 +
41192  static int may_open(struct path *path, int acc_mode, int flag)
41193  {
41194         struct dentry *dentry = path->dentry;
41195 @@ -1987,7 +2042,27 @@ static int may_open(struct path *path, i
41196         /*
41197          * Ensure there are no outstanding leases on the file.
41198          */
41199 -       return break_lease(inode, flag);
41200 +       error = break_lease(inode, flag);
41201 +
41202 +       if (error)
41203 +               return error;
41204 +
41205 +       if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
41206 +               error = -EPERM;
41207 +               goto exit;
41208 +       }
41209 +
41210 +       if (gr_handle_rawio(inode)) {
41211 +               error = -EPERM;
41212 +               goto exit;
41213 +       }
41214 +
41215 +       if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
41216 +               error = -EACCES;
41217 +               goto exit;
41218 +       }
41219 +exit:
41220 +       return error;
41221  }
41222  
41223  static int handle_truncate(struct file *filp)
41224 @@ -2013,30 +2088,6 @@ static int handle_truncate(struct file *
41225  }
41226  
41227  /*
41228 - * Note that while the flag value (low two bits) for sys_open means:
41229 - *     00 - read-only
41230 - *     01 - write-only
41231 - *     10 - read-write
41232 - *     11 - special
41233 - * it is changed into
41234 - *     00 - no permissions needed
41235 - *     01 - read-permission
41236 - *     10 - write-permission
41237 - *     11 - read-write
41238 - * for the internal routines (ie open_namei()/follow_link() etc)
41239 - * This is more logical, and also allows the 00 "no perm needed"
41240 - * to be used for symlinks (where the permissions are checked
41241 - * later).
41242 - *
41243 -*/
41244 -static inline int open_to_namei_flags(int flag)
41245 -{
41246 -       if ((flag+1) & O_ACCMODE)
41247 -               flag++;
41248 -       return flag;
41249 -}
41250 -
41251 -/*
41252   * Handle the last step of open()
41253   */
41254  static struct file *do_last(struct nameidata *nd, struct path *path,
41255 @@ -2045,6 +2096,7 @@ static struct file *do_last(struct namei
41256         struct dentry *dir = nd->path.dentry;
41257         struct dentry *dentry;
41258         int open_flag = op->open_flag;
41259 +       int flag = open_to_namei_flags(open_flag);
41260         int will_truncate = open_flag & O_TRUNC;
41261         int want_write = 0;
41262         int acc_mode = op->acc_mode;
41263 @@ -2132,6 +2184,12 @@ static struct file *do_last(struct namei
41264         /* Negative dentry, just create the file */
41265         if (!dentry->d_inode) {
41266                 int mode = op->mode;
41267 +
41268 +               if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
41269 +                       error = -EACCES;
41270 +                       goto exit_mutex_unlock;
41271 +               }
41272 +
41273                 if (!IS_POSIXACL(dir->d_inode))
41274                         mode &= ~current_umask();
41275                 /*
41276 @@ -2155,6 +2213,8 @@ static struct file *do_last(struct namei
41277                 error = vfs_create(dir->d_inode, dentry, mode, nd);
41278                 if (error)
41279                         goto exit_mutex_unlock;
41280 +               else
41281 +                       gr_handle_create(path->dentry, path->mnt);
41282                 mutex_unlock(&dir->d_inode->i_mutex);
41283                 dput(nd->path.dentry);
41284                 nd->path.dentry = dentry;
41285 @@ -2164,6 +2224,14 @@ static struct file *do_last(struct namei
41286         /*
41287          * It already exists.
41288          */
41289 +
41290 +       /* only check if O_CREAT is specified, all other checks need to go
41291 +          into may_open */
41292 +       if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
41293 +               error = -EACCES;
41294 +               goto exit_mutex_unlock;
41295 +       }
41296 +
41297         mutex_unlock(&dir->d_inode->i_mutex);
41298         audit_inode(pathname, path->dentry);
41299  
41300 @@ -2450,6 +2518,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41301         error = may_mknod(mode);
41302         if (error)
41303                 goto out_dput;
41304 +
41305 +       if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
41306 +               error = -EPERM;
41307 +               goto out_dput;
41308 +       }
41309 +
41310 +       if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
41311 +               error = -EACCES;
41312 +               goto out_dput;
41313 +       }
41314 +
41315         error = mnt_want_write(nd.path.mnt);
41316         if (error)
41317                 goto out_dput;
41318 @@ -2470,6 +2549,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41319         }
41320  out_drop_write:
41321         mnt_drop_write(nd.path.mnt);
41322 +
41323 +       if (!error)
41324 +               gr_handle_create(dentry, nd.path.mnt);
41325  out_dput:
41326         dput(dentry);
41327  out_unlock:
41328 @@ -2522,6 +2604,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41329         if (IS_ERR(dentry))
41330                 goto out_unlock;
41331  
41332 +       if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
41333 +               error = -EACCES;
41334 +               goto out_dput;
41335 +       }
41336 +
41337         if (!IS_POSIXACL(nd.path.dentry->d_inode))
41338                 mode &= ~current_umask();
41339         error = mnt_want_write(nd.path.mnt);
41340 @@ -2533,6 +2620,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41341         error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
41342  out_drop_write:
41343         mnt_drop_write(nd.path.mnt);
41344 +
41345 +       if (!error)
41346 +               gr_handle_create(dentry, nd.path.mnt);
41347 +
41348  out_dput:
41349         dput(dentry);
41350  out_unlock:
41351 @@ -2613,6 +2704,8 @@ static long do_rmdir(int dfd, const char
41352         char * name;
41353         struct dentry *dentry;
41354         struct nameidata nd;
41355 +       ino_t saved_ino = 0;
41356 +       dev_t saved_dev = 0;
41357  
41358         error = user_path_parent(dfd, pathname, &nd, &name);
41359         if (error)
41360 @@ -2641,6 +2734,17 @@ static long do_rmdir(int dfd, const char
41361                 error = -ENOENT;
41362                 goto exit3;
41363         }
41364 +
41365 +       if (dentry->d_inode->i_nlink <= 1) {
41366 +               saved_ino = dentry->d_inode->i_ino;
41367 +               saved_dev = gr_get_dev_from_dentry(dentry);
41368 +       }
41369 +
41370 +       if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
41371 +               error = -EACCES;
41372 +               goto exit3;
41373 +       }
41374 +
41375         error = mnt_want_write(nd.path.mnt);
41376         if (error)
41377                 goto exit3;
41378 @@ -2648,6 +2752,8 @@ static long do_rmdir(int dfd, const char
41379         if (error)
41380                 goto exit4;
41381         error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
41382 +       if (!error && (saved_dev || saved_ino))
41383 +               gr_handle_delete(saved_ino, saved_dev);
41384  exit4:
41385         mnt_drop_write(nd.path.mnt);
41386  exit3:
41387 @@ -2710,6 +2816,8 @@ static long do_unlinkat(int dfd, const c
41388         struct dentry *dentry;
41389         struct nameidata nd;
41390         struct inode *inode = NULL;
41391 +       ino_t saved_ino = 0;
41392 +       dev_t saved_dev = 0;
41393  
41394         error = user_path_parent(dfd, pathname, &nd, &name);
41395         if (error)
41396 @@ -2732,6 +2840,16 @@ static long do_unlinkat(int dfd, const c
41397                 if (!inode)
41398                         goto slashes;
41399                 ihold(inode);
41400 +
41401 +               if (inode->i_nlink <= 1) {
41402 +                       saved_ino = inode->i_ino;
41403 +                       saved_dev = gr_get_dev_from_dentry(dentry);
41404 +               }
41405 +               if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
41406 +                       error = -EACCES;
41407 +                       goto exit2;
41408 +               }
41409 +
41410                 error = mnt_want_write(nd.path.mnt);
41411                 if (error)
41412                         goto exit2;
41413 @@ -2739,6 +2857,8 @@ static long do_unlinkat(int dfd, const c
41414                 if (error)
41415                         goto exit3;
41416                 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
41417 +               if (!error && (saved_ino || saved_dev))
41418 +                       gr_handle_delete(saved_ino, saved_dev);
41419  exit3:
41420                 mnt_drop_write(nd.path.mnt);
41421         exit2:
41422 @@ -2816,6 +2936,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
41423         if (IS_ERR(dentry))
41424                 goto out_unlock;
41425  
41426 +       if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
41427 +               error = -EACCES;
41428 +               goto out_dput;
41429 +       }
41430 +
41431         error = mnt_want_write(nd.path.mnt);
41432         if (error)
41433                 goto out_dput;
41434 @@ -2823,6 +2948,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
41435         if (error)
41436                 goto out_drop_write;
41437         error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
41438 +       if (!error)
41439 +               gr_handle_create(dentry, nd.path.mnt);
41440  out_drop_write:
41441         mnt_drop_write(nd.path.mnt);
41442  out_dput:
41443 @@ -2931,6 +3058,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41444         error = PTR_ERR(new_dentry);
41445         if (IS_ERR(new_dentry))
41446                 goto out_unlock;
41447 +
41448 +       if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
41449 +                              old_path.dentry->d_inode,
41450 +                              old_path.dentry->d_inode->i_mode, to)) {
41451 +               error = -EACCES;
41452 +               goto out_dput;
41453 +       }
41454 +
41455 +       if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
41456 +                               old_path.dentry, old_path.mnt, to)) {
41457 +               error = -EACCES;
41458 +               goto out_dput;
41459 +       }
41460 +
41461         error = mnt_want_write(nd.path.mnt);
41462         if (error)
41463                 goto out_dput;
41464 @@ -2938,6 +3079,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41465         if (error)
41466                 goto out_drop_write;
41467         error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
41468 +       if (!error)
41469 +               gr_handle_create(new_dentry, nd.path.mnt);
41470  out_drop_write:
41471         mnt_drop_write(nd.path.mnt);
41472  out_dput:
41473 @@ -3113,6 +3256,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41474         char *to;
41475         int error;
41476  
41477 +       pax_track_stack();
41478 +
41479         error = user_path_parent(olddfd, oldname, &oldnd, &from);
41480         if (error)
41481                 goto exit;
41482 @@ -3169,6 +3314,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41483         if (new_dentry == trap)
41484                 goto exit5;
41485  
41486 +       error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
41487 +                                    old_dentry, old_dir->d_inode, oldnd.path.mnt,
41488 +                                    to);
41489 +       if (error)
41490 +               goto exit5;
41491 +
41492         error = mnt_want_write(oldnd.path.mnt);
41493         if (error)
41494                 goto exit5;
41495 @@ -3178,6 +3329,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41496                 goto exit6;
41497         error = vfs_rename(old_dir->d_inode, old_dentry,
41498                                    new_dir->d_inode, new_dentry);
41499 +       if (!error)
41500 +               gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
41501 +                                new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
41502  exit6:
41503         mnt_drop_write(oldnd.path.mnt);
41504  exit5:
41505 @@ -3203,6 +3357,8 @@ SYSCALL_DEFINE2(rename, const char __use
41506  
41507  int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
41508  {
41509 +       char tmpbuf[64];
41510 +       const char *newlink;
41511         int len;
41512  
41513         len = PTR_ERR(link);
41514 @@ -3212,7 +3368,14 @@ int vfs_readlink(struct dentry *dentry, 
41515         len = strlen(link);
41516         if (len > (unsigned) buflen)
41517                 len = buflen;
41518 -       if (copy_to_user(buffer, link, len))
41519 +
41520 +       if (len < sizeof(tmpbuf)) {
41521 +               memcpy(tmpbuf, link, len);
41522 +               newlink = tmpbuf;
41523 +       } else
41524 +               newlink = link;
41525 +
41526 +       if (copy_to_user(buffer, newlink, len))
41527                 len = -EFAULT;
41528  out:
41529         return len;
41530 diff -urNp linux-3.0.4/fs/namespace.c linux-3.0.4/fs/namespace.c
41531 --- linux-3.0.4/fs/namespace.c  2011-07-21 22:17:23.000000000 -0400
41532 +++ linux-3.0.4/fs/namespace.c  2011-08-23 21:48:14.000000000 -0400
41533 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
41534                 if (!(sb->s_flags & MS_RDONLY))
41535                         retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
41536                 up_write(&sb->s_umount);
41537 +
41538 +               gr_log_remount(mnt->mnt_devname, retval);
41539 +
41540                 return retval;
41541         }
41542  
41543 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
41544         br_write_unlock(vfsmount_lock);
41545         up_write(&namespace_sem);
41546         release_mounts(&umount_list);
41547 +
41548 +       gr_log_unmount(mnt->mnt_devname, retval);
41549 +
41550         return retval;
41551  }
41552  
41553 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
41554                    MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
41555                    MS_STRICTATIME);
41556  
41557 +       if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
41558 +               retval = -EPERM;
41559 +               goto dput_out;
41560 +       }
41561 +
41562 +       if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
41563 +               retval = -EPERM;
41564 +               goto dput_out;
41565 +       }
41566 +
41567         if (flags & MS_REMOUNT)
41568                 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
41569                                     data_page);
41570 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
41571                                       dev_name, data_page);
41572  dput_out:
41573         path_put(&path);
41574 +
41575 +       gr_log_mount(dev_name, dir_name, retval);
41576 +
41577         return retval;
41578  }
41579  
41580 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
41581         if (error)
41582                 goto out2;
41583  
41584 +       if (gr_handle_chroot_pivot()) {
41585 +               error = -EPERM;
41586 +               goto out2;
41587 +       }
41588 +
41589         get_fs_root(current->fs, &root);
41590         error = lock_mount(&old);
41591         if (error)
41592 diff -urNp linux-3.0.4/fs/ncpfs/dir.c linux-3.0.4/fs/ncpfs/dir.c
41593 --- linux-3.0.4/fs/ncpfs/dir.c  2011-07-21 22:17:23.000000000 -0400
41594 +++ linux-3.0.4/fs/ncpfs/dir.c  2011-08-23 21:48:14.000000000 -0400
41595 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
41596         int res, val = 0, len;
41597         __u8 __name[NCP_MAXPATHLEN + 1];
41598  
41599 +       pax_track_stack();
41600 +
41601         if (dentry == dentry->d_sb->s_root)
41602                 return 1;
41603  
41604 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct 
41605         int error, res, len;
41606         __u8 __name[NCP_MAXPATHLEN + 1];
41607  
41608 +       pax_track_stack();
41609 +
41610         error = -EIO;
41611         if (!ncp_conn_valid(server))
41612                 goto finished;
41613 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
41614         PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
41615                 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
41616  
41617 +       pax_track_stack();
41618 +
41619         ncp_age_dentry(server, dentry);
41620         len = sizeof(__name);
41621         error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
41622 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir, 
41623         int error, len;
41624         __u8 __name[NCP_MAXPATHLEN + 1];
41625  
41626 +       pax_track_stack();
41627 +
41628         DPRINTK("ncp_mkdir: making %s/%s\n",
41629                 dentry->d_parent->d_name.name, dentry->d_name.name);
41630  
41631 @@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
41632         int old_len, new_len;
41633         __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
41634  
41635 +       pax_track_stack();
41636 +
41637         DPRINTK("ncp_rename: %s/%s to %s/%s\n",
41638                 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
41639                 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
41640 diff -urNp linux-3.0.4/fs/ncpfs/inode.c linux-3.0.4/fs/ncpfs/inode.c
41641 --- linux-3.0.4/fs/ncpfs/inode.c        2011-07-21 22:17:23.000000000 -0400
41642 +++ linux-3.0.4/fs/ncpfs/inode.c        2011-08-23 21:48:14.000000000 -0400
41643 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
41644  #endif
41645         struct ncp_entry_info finfo;
41646  
41647 +       pax_track_stack();
41648 +
41649         memset(&data, 0, sizeof(data));
41650         server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41651         if (!server)
41652 diff -urNp linux-3.0.4/fs/nfs/inode.c linux-3.0.4/fs/nfs/inode.c
41653 --- linux-3.0.4/fs/nfs/inode.c  2011-07-21 22:17:23.000000000 -0400
41654 +++ linux-3.0.4/fs/nfs/inode.c  2011-08-23 21:47:56.000000000 -0400
41655 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
41656         nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
41657         nfsi->attrtimeo_timestamp = jiffies;
41658  
41659 -       memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
41660 +       memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
41661         if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
41662                 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
41663         else
41664 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
41665         return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41666  }
41667  
41668 -static atomic_long_t nfs_attr_generation_counter;
41669 +static atomic_long_unchecked_t nfs_attr_generation_counter;
41670  
41671  static unsigned long nfs_read_attr_generation_counter(void)
41672  {
41673 -       return atomic_long_read(&nfs_attr_generation_counter);
41674 +       return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41675  }
41676  
41677  unsigned long nfs_inc_attr_generation_counter(void)
41678  {
41679 -       return atomic_long_inc_return(&nfs_attr_generation_counter);
41680 +       return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41681  }
41682  
41683  void nfs_fattr_init(struct nfs_fattr *fattr)
41684 diff -urNp linux-3.0.4/fs/nfsd/nfs4state.c linux-3.0.4/fs/nfsd/nfs4state.c
41685 --- linux-3.0.4/fs/nfsd/nfs4state.c     2011-08-23 21:44:40.000000000 -0400
41686 +++ linux-3.0.4/fs/nfsd/nfs4state.c     2011-08-23 21:48:14.000000000 -0400
41687 @@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41688         unsigned int strhashval;
41689         int err;
41690  
41691 +       pax_track_stack();
41692 +
41693         dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41694                 (long long) lock->lk_offset,
41695                 (long long) lock->lk_length);
41696 diff -urNp linux-3.0.4/fs/nfsd/nfs4xdr.c linux-3.0.4/fs/nfsd/nfs4xdr.c
41697 --- linux-3.0.4/fs/nfsd/nfs4xdr.c       2011-07-21 22:17:23.000000000 -0400
41698 +++ linux-3.0.4/fs/nfsd/nfs4xdr.c       2011-08-23 21:48:14.000000000 -0400
41699 @@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41700                 .dentry = dentry,
41701         };
41702  
41703 +       pax_track_stack();
41704 +
41705         BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41706         BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41707         BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41708 diff -urNp linux-3.0.4/fs/nfsd/vfs.c linux-3.0.4/fs/nfsd/vfs.c
41709 --- linux-3.0.4/fs/nfsd/vfs.c   2011-07-21 22:17:23.000000000 -0400
41710 +++ linux-3.0.4/fs/nfsd/vfs.c   2011-08-23 21:47:56.000000000 -0400
41711 @@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41712         } else {
41713                 oldfs = get_fs();
41714                 set_fs(KERNEL_DS);
41715 -               host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41716 +               host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41717                 set_fs(oldfs);
41718         }
41719  
41720 @@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41721  
41722         /* Write the data. */
41723         oldfs = get_fs(); set_fs(KERNEL_DS);
41724 -       host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41725 +       host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41726         set_fs(oldfs);
41727         if (host_err < 0)
41728                 goto out_nfserr;
41729 @@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41730          */
41731  
41732         oldfs = get_fs(); set_fs(KERNEL_DS);
41733 -       host_err = inode->i_op->readlink(dentry, buf, *lenp);
41734 +       host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41735         set_fs(oldfs);
41736  
41737         if (host_err < 0)
41738 diff -urNp linux-3.0.4/fs/notify/fanotify/fanotify_user.c linux-3.0.4/fs/notify/fanotify/fanotify_user.c
41739 --- linux-3.0.4/fs/notify/fanotify/fanotify_user.c      2011-07-21 22:17:23.000000000 -0400
41740 +++ linux-3.0.4/fs/notify/fanotify/fanotify_user.c      2011-08-23 21:48:14.000000000 -0400
41741 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
41742                 goto out_close_fd;
41743  
41744         ret = -EFAULT;
41745 -       if (copy_to_user(buf, &fanotify_event_metadata,
41746 +       if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
41747 +           copy_to_user(buf, &fanotify_event_metadata,
41748                          fanotify_event_metadata.event_len))
41749                 goto out_kill_access_response;
41750  
41751 diff -urNp linux-3.0.4/fs/notify/notification.c linux-3.0.4/fs/notify/notification.c
41752 --- linux-3.0.4/fs/notify/notification.c        2011-07-21 22:17:23.000000000 -0400
41753 +++ linux-3.0.4/fs/notify/notification.c        2011-08-23 21:47:56.000000000 -0400
41754 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41755   * get set to 0 so it will never get 'freed'
41756   */
41757  static struct fsnotify_event *q_overflow_event;
41758 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41759 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41760  
41761  /**
41762   * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41763 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41764   */
41765  u32 fsnotify_get_cookie(void)
41766  {
41767 -       return atomic_inc_return(&fsnotify_sync_cookie);
41768 +       return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41769  }
41770  EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41771  
41772 diff -urNp linux-3.0.4/fs/ntfs/dir.c linux-3.0.4/fs/ntfs/dir.c
41773 --- linux-3.0.4/fs/ntfs/dir.c   2011-07-21 22:17:23.000000000 -0400
41774 +++ linux-3.0.4/fs/ntfs/dir.c   2011-08-23 21:47:56.000000000 -0400
41775 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
41776         ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41777                         ~(s64)(ndir->itype.index.block_size - 1)));
41778         /* Bounds checks. */
41779 -       if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41780 +       if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41781                 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41782                                 "inode 0x%lx or driver bug.", vdir->i_ino);
41783                 goto err_out;
41784 diff -urNp linux-3.0.4/fs/ntfs/file.c linux-3.0.4/fs/ntfs/file.c
41785 --- linux-3.0.4/fs/ntfs/file.c  2011-07-21 22:17:23.000000000 -0400
41786 +++ linux-3.0.4/fs/ntfs/file.c  2011-08-23 21:47:56.000000000 -0400
41787 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
41788  #endif /* NTFS_RW */
41789  };
41790  
41791 -const struct file_operations ntfs_empty_file_ops = {};
41792 +const struct file_operations ntfs_empty_file_ops __read_only;
41793  
41794 -const struct inode_operations ntfs_empty_inode_ops = {};
41795 +const struct inode_operations ntfs_empty_inode_ops __read_only;
41796 diff -urNp linux-3.0.4/fs/ocfs2/localalloc.c linux-3.0.4/fs/ocfs2/localalloc.c
41797 --- linux-3.0.4/fs/ocfs2/localalloc.c   2011-07-21 22:17:23.000000000 -0400
41798 +++ linux-3.0.4/fs/ocfs2/localalloc.c   2011-08-23 21:47:56.000000000 -0400
41799 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
41800                 goto bail;
41801         }
41802  
41803 -       atomic_inc(&osb->alloc_stats.moves);
41804 +       atomic_inc_unchecked(&osb->alloc_stats.moves);
41805  
41806  bail:
41807         if (handle)
41808 diff -urNp linux-3.0.4/fs/ocfs2/namei.c linux-3.0.4/fs/ocfs2/namei.c
41809 --- linux-3.0.4/fs/ocfs2/namei.c        2011-07-21 22:17:23.000000000 -0400
41810 +++ linux-3.0.4/fs/ocfs2/namei.c        2011-08-23 21:48:14.000000000 -0400
41811 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
41812         struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
41813         struct ocfs2_dir_lookup_result target_insert = { NULL, };
41814  
41815 +       pax_track_stack();
41816 +
41817         /* At some point it might be nice to break this function up a
41818          * bit. */
41819  
41820 diff -urNp linux-3.0.4/fs/ocfs2/ocfs2.h linux-3.0.4/fs/ocfs2/ocfs2.h
41821 --- linux-3.0.4/fs/ocfs2/ocfs2.h        2011-07-21 22:17:23.000000000 -0400
41822 +++ linux-3.0.4/fs/ocfs2/ocfs2.h        2011-08-23 21:47:56.000000000 -0400
41823 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
41824  
41825  struct ocfs2_alloc_stats
41826  {
41827 -       atomic_t moves;
41828 -       atomic_t local_data;
41829 -       atomic_t bitmap_data;
41830 -       atomic_t bg_allocs;
41831 -       atomic_t bg_extends;
41832 +       atomic_unchecked_t moves;
41833 +       atomic_unchecked_t local_data;
41834 +       atomic_unchecked_t bitmap_data;
41835 +       atomic_unchecked_t bg_allocs;
41836 +       atomic_unchecked_t bg_extends;
41837  };
41838  
41839  enum ocfs2_local_alloc_state
41840 diff -urNp linux-3.0.4/fs/ocfs2/suballoc.c linux-3.0.4/fs/ocfs2/suballoc.c
41841 --- linux-3.0.4/fs/ocfs2/suballoc.c     2011-07-21 22:17:23.000000000 -0400
41842 +++ linux-3.0.4/fs/ocfs2/suballoc.c     2011-08-23 21:47:56.000000000 -0400
41843 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
41844                                 mlog_errno(status);
41845                         goto bail;
41846                 }
41847 -               atomic_inc(&osb->alloc_stats.bg_extends);
41848 +               atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
41849  
41850                 /* You should never ask for this much metadata */
41851                 BUG_ON(bits_wanted >
41852 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
41853                 mlog_errno(status);
41854                 goto bail;
41855         }
41856 -       atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41857 +       atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41858  
41859         *suballoc_loc = res.sr_bg_blkno;
41860         *suballoc_bit_start = res.sr_bit_offset;
41861 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
41862         trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
41863                                            res->sr_bits);
41864  
41865 -       atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41866 +       atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41867  
41868         BUG_ON(res->sr_bits != 1);
41869  
41870 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
41871                 mlog_errno(status);
41872                 goto bail;
41873         }
41874 -       atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41875 +       atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
41876  
41877         BUG_ON(res.sr_bits != 1);
41878  
41879 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
41880                                                       cluster_start,
41881                                                       num_clusters);
41882                 if (!status)
41883 -                       atomic_inc(&osb->alloc_stats.local_data);
41884 +                       atomic_inc_unchecked(&osb->alloc_stats.local_data);
41885         } else {
41886                 if (min_clusters > (osb->bitmap_cpg - 1)) {
41887                         /* The only paths asking for contiguousness
41888 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
41889                                 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
41890                                                                  res.sr_bg_blkno,
41891                                                                  res.sr_bit_offset);
41892 -                       atomic_inc(&osb->alloc_stats.bitmap_data);
41893 +                       atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
41894                         *num_clusters = res.sr_bits;
41895                 }
41896         }
41897 diff -urNp linux-3.0.4/fs/ocfs2/super.c linux-3.0.4/fs/ocfs2/super.c
41898 --- linux-3.0.4/fs/ocfs2/super.c        2011-07-21 22:17:23.000000000 -0400
41899 +++ linux-3.0.4/fs/ocfs2/super.c        2011-08-23 21:47:56.000000000 -0400
41900 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
41901                         "%10s => GlobalAllocs: %d  LocalAllocs: %d  "
41902                         "SubAllocs: %d  LAWinMoves: %d  SAExtends: %d\n",
41903                         "Stats",
41904 -                       atomic_read(&osb->alloc_stats.bitmap_data),
41905 -                       atomic_read(&osb->alloc_stats.local_data),
41906 -                       atomic_read(&osb->alloc_stats.bg_allocs),
41907 -                       atomic_read(&osb->alloc_stats.moves),
41908 -                       atomic_read(&osb->alloc_stats.bg_extends));
41909 +                       atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
41910 +                       atomic_read_unchecked(&osb->alloc_stats.local_data),
41911 +                       atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
41912 +                       atomic_read_unchecked(&osb->alloc_stats.moves),
41913 +                       atomic_read_unchecked(&osb->alloc_stats.bg_extends));
41914  
41915         out += snprintf(buf + out, len - out,
41916                         "%10s => State: %u  Descriptor: %llu  Size: %u bits  "
41917 @@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
41918         spin_lock_init(&osb->osb_xattr_lock);
41919         ocfs2_init_steal_slots(osb);
41920  
41921 -       atomic_set(&osb->alloc_stats.moves, 0);
41922 -       atomic_set(&osb->alloc_stats.local_data, 0);
41923 -       atomic_set(&osb->alloc_stats.bitmap_data, 0);
41924 -       atomic_set(&osb->alloc_stats.bg_allocs, 0);
41925 -       atomic_set(&osb->alloc_stats.bg_extends, 0);
41926 +       atomic_set_unchecked(&osb->alloc_stats.moves, 0);
41927 +       atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
41928 +       atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
41929 +       atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
41930 +       atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
41931  
41932         /* Copy the blockcheck stats from the superblock probe */
41933         osb->osb_ecc_stats = *stats;
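
The ocfs2 hunks above only touch the per-superblock alloc_stats counters, switching them from atomic_inc/atomic_read/atomic_set to the *_unchecked variants this patch introduces; as I read the intent, those variants are exempt from PaX-style reference-count overflow checking because wrapping a statistics counter is harmless, while wrapping a real refcount is not. Below is a minimal userspace sketch of that distinction, using ordinary integers and the GCC/Clang overflow builtins rather than the kernel's atomic_t; checked_inc() and unchecked_inc() are made-up names.

#include <limits.h>
#include <stdio.h>

/* Refuses to wrap: roughly what a checked reference-count increment enforces. */
static int checked_inc(int *ctr)
{
        int next;

        if (__builtin_add_overflow(*ctr, 1, &next))
                return -1;              /* would overflow: reject the increment */
        *ctr = next;
        return 0;
}

/* Tolerates wraparound: fine for a counter that is only ever a statistic. */
static void unchecked_inc(unsigned int *ctr)
{
        *ctr += 1;
}

int main(void)
{
        int refcount = INT_MAX;                 /* wrap here would be a bug */
        unsigned int bg_allocs = UINT_MAX;      /* wrap here is merely cosmetic */

        if (checked_inc(&refcount))
                printf("checked increment refused at INT_MAX\n");
        unchecked_inc(&bg_allocs);
        printf("bg_allocs wrapped to %u\n", bg_allocs);
        return 0;
}

The same reasoning applies to the other *_unchecked conversions later in this patch, such as the quota netlink sequence number and the mce_bad_pages read in meminfo.
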
41934 diff -urNp linux-3.0.4/fs/ocfs2/symlink.c linux-3.0.4/fs/ocfs2/symlink.c
41935 --- linux-3.0.4/fs/ocfs2/symlink.c      2011-07-21 22:17:23.000000000 -0400
41936 +++ linux-3.0.4/fs/ocfs2/symlink.c      2011-08-23 21:47:56.000000000 -0400
41937 @@ -142,7 +142,7 @@ bail:
41938  
41939  static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41940  {
41941 -       char *link = nd_get_link(nd);
41942 +       const char *link = nd_get_link(nd);
41943         if (!IS_ERR(link))
41944                 kfree(link);
41945  }
41946 diff -urNp linux-3.0.4/fs/open.c linux-3.0.4/fs/open.c
41947 --- linux-3.0.4/fs/open.c       2011-07-21 22:17:23.000000000 -0400
41948 +++ linux-3.0.4/fs/open.c       2011-08-23 21:48:14.000000000 -0400
41949 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
41950         error = locks_verify_truncate(inode, NULL, length);
41951         if (!error)
41952                 error = security_path_truncate(&path);
41953 +
41954 +       if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
41955 +               error = -EACCES;
41956 +
41957         if (!error)
41958                 error = do_truncate(path.dentry, length, 0, NULL);
41959  
41960 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
41961         if (__mnt_is_readonly(path.mnt))
41962                 res = -EROFS;
41963  
41964 +       if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
41965 +               res = -EACCES;
41966 +
41967  out_path_release:
41968         path_put(&path);
41969  out:
41970 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
41971         if (error)
41972                 goto dput_and_out;
41973  
41974 +       gr_log_chdir(path.dentry, path.mnt);
41975 +
41976         set_fs_pwd(current->fs, &path);
41977  
41978  dput_and_out:
41979 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
41980                 goto out_putf;
41981  
41982         error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
41983 +
41984 +       if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
41985 +               error = -EPERM;
41986 +
41987 +       if (!error)
41988 +               gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
41989 +
41990         if (!error)
41991                 set_fs_pwd(current->fs, &file->f_path);
41992  out_putf:
41993 @@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
41994         if (error)
41995                 goto dput_and_out;
41996  
41997 +       if (gr_handle_chroot_chroot(path.dentry, path.mnt))
41998 +               goto dput_and_out;
41999 +
42000 +       if (gr_handle_chroot_caps(&path)) {
42001 +               error = -ENOMEM;
42002 +               goto dput_and_out;
42003 +       }
42004 +
42005         set_fs_root(current->fs, &path);
42006 +
42007 +       gr_handle_chroot_chdir(&path);
42008 +
42009         error = 0;
42010  dput_and_out:
42011         path_put(&path);
42012 @@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
42013         err = mnt_want_write_file(file);
42014         if (err)
42015                 goto out_putf;
42016 +
42017         mutex_lock(&inode->i_mutex);
42018 +
42019 +       if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
42020 +               err = -EACCES;
42021 +               goto out_unlock;
42022 +       }
42023 +
42024         err = security_path_chmod(dentry, file->f_vfsmnt, mode);
42025         if (err)
42026                 goto out_unlock;
42027         if (mode == (mode_t) -1)
42028                 mode = inode->i_mode;
42029 +
42030 +       if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
42031 +               err = -EACCES;
42032 +               goto out_unlock;
42033 +       }
42034 +
42035         newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42036         newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42037         err = notify_change(dentry, &newattrs);
42038 @@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
42039         error = mnt_want_write(path.mnt);
42040         if (error)
42041                 goto dput_and_out;
42042 +
42043         mutex_lock(&inode->i_mutex);
42044 +
42045 +       if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
42046 +               error = -EACCES;
42047 +               goto out_unlock;
42048 +       }
42049 +
42050         error = security_path_chmod(path.dentry, path.mnt, mode);
42051         if (error)
42052                 goto out_unlock;
42053         if (mode == (mode_t) -1)
42054                 mode = inode->i_mode;
42055 +
42056 +       if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
42057 +               error = -EACCES;
42058 +               goto out_unlock;
42059 +       }
42060 +
42061         newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42062         newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42063         error = notify_change(path.dentry, &newattrs);
42064 @@ -528,6 +581,9 @@ static int chown_common(struct path *pat
42065         int error;
42066         struct iattr newattrs;
42067  
42068 +       if (!gr_acl_handle_chown(path->dentry, path->mnt))
42069 +               return -EACCES;
42070 +
42071         newattrs.ia_valid =  ATTR_CTIME;
42072         if (user != (uid_t) -1) {
42073                 newattrs.ia_valid |= ATTR_UID;
42074 @@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
42075         if (!IS_ERR(tmp)) {
42076                 fd = get_unused_fd_flags(flags);
42077                 if (fd >= 0) {
42078 -                       struct file *f = do_filp_open(dfd, tmp, &op, lookup);
42079 +                       struct file *f;
42080 +                       /* don't allow FMODE_GREXEC to be set by userland */
42081 +                       flags &= ~FMODE_GREXEC;
42082 +                       f = do_filp_open(dfd, tmp, &op, lookup);
42083                         if (IS_ERR(f)) {
42084                                 put_unused_fd(fd);
42085                                 fd = PTR_ERR(f);
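
The fs/open.c hunks wire grsecurity policy hooks into the truncate, access, chdir, fchdir, chroot, chmod and chown paths. The recurring shape is that the hook returns non-zero to allow and 0 to deny, and the caller folds a denial into the error it is already tracking (-EACCES or -EPERM) before the operation proceeds. Below is a small userspace sketch of that shape, with acl_allows_truncate() as a hypothetical stand-in for gr_acl_handle_truncate(); placing the check behind "if (!error && ...)" mirrors the ordering above, so a policy denial never masks an earlier failure.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for gr_acl_handle_truncate(): non-zero = allow, 0 = deny. */
static int acl_allows_truncate(const char *path)
{
        return strncmp(path, "/etc/", 5) != 0;  /* toy policy: protect /etc */
}

static int do_truncate_checked(const char *path)
{
        int error = 0;                          /* earlier checks would have set this */

        if (!error && !acl_allows_truncate(path))
                error = -EACCES;                /* a denial never masks an earlier error */
        if (!error)
                printf("would truncate %s\n", path);
        return error;
}

int main(void)
{
        printf("/tmp/scratch -> %d\n", do_truncate_checked("/tmp/scratch"));
        printf("/etc/passwd  -> %d\n", do_truncate_checked("/etc/passwd"));
        return 0;
}
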
42086 diff -urNp linux-3.0.4/fs/partitions/ldm.c linux-3.0.4/fs/partitions/ldm.c
42087 --- linux-3.0.4/fs/partitions/ldm.c     2011-07-21 22:17:23.000000000 -0400
42088 +++ linux-3.0.4/fs/partitions/ldm.c     2011-08-23 21:48:14.000000000 -0400
42089 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
42090                 ldm_error ("A VBLK claims to have %d parts.", num);
42091                 return false;
42092         }
42093 +
42094         if (rec >= num) {
42095                 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
42096                 return false;
42097 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
42098                         goto found;
42099         }
42100  
42101 -       f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
42102 +       f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
42103         if (!f) {
42104                 ldm_crit ("Out of memory.");
42105                 return false;
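
The ldm.c change only swaps the operand order in the kmalloc() size expression, putting the attacker-influenced size*num product ahead of the constant header size; as far as I can tell this is to suit the patch's integer-overflow instrumentation of allocation sizes rather than to change behaviour. The underlying hazard is the classic header-plus-array sizing overflow, which the following userspace sketch makes explicit with the GCC/Clang overflow builtins; struct frag and frag_alloc() here are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in: a header followed by a flexible data[] array sized
 * as num fragments of size bytes each, like the kmalloc() call above. */
struct frag {
        size_t group, num, rec;
        unsigned char data[];
};

static struct frag *frag_alloc(size_t num, size_t size)
{
        size_t payload, total;

        if (__builtin_mul_overflow(size, num, &payload) ||
            __builtin_add_overflow(payload, sizeof(struct frag), &total))
                return NULL;                    /* the size computation would wrap */
        return calloc(1, total);
}

int main(void)
{
        struct frag *f = frag_alloc(4, 512);

        printf("normal request:    %s\n", f ? "allocated" : "failed");
        free(f);

        f = frag_alloc((size_t)-1, 512);        /* would wrap; refused, not under-allocated */
        printf("oversized request: %s\n", f ? "allocated" : "refused");
        free(f);
        return 0;
}
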
42106 diff -urNp linux-3.0.4/fs/pipe.c linux-3.0.4/fs/pipe.c
42107 --- linux-3.0.4/fs/pipe.c       2011-07-21 22:17:23.000000000 -0400
42108 +++ linux-3.0.4/fs/pipe.c       2011-08-23 21:48:14.000000000 -0400
42109 @@ -420,9 +420,9 @@ redo:
42110                 }
42111                 if (bufs)       /* More to do? */
42112                         continue;
42113 -               if (!pipe->writers)
42114 +               if (!atomic_read(&pipe->writers))
42115                         break;
42116 -               if (!pipe->waiting_writers) {
42117 +               if (!atomic_read(&pipe->waiting_writers)) {
42118                         /* syscall merging: Usually we must not sleep
42119                          * if O_NONBLOCK is set, or if we got some data.
42120                          * But if a writer sleeps in kernel space, then
42121 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
42122         mutex_lock(&inode->i_mutex);
42123         pipe = inode->i_pipe;
42124  
42125 -       if (!pipe->readers) {
42126 +       if (!atomic_read(&pipe->readers)) {
42127                 send_sig(SIGPIPE, current, 0);
42128                 ret = -EPIPE;
42129                 goto out;
42130 @@ -530,7 +530,7 @@ redo1:
42131         for (;;) {
42132                 int bufs;
42133  
42134 -               if (!pipe->readers) {
42135 +               if (!atomic_read(&pipe->readers)) {
42136                         send_sig(SIGPIPE, current, 0);
42137                         if (!ret)
42138                                 ret = -EPIPE;
42139 @@ -616,9 +616,9 @@ redo2:
42140                         kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42141                         do_wakeup = 0;
42142                 }
42143 -               pipe->waiting_writers++;
42144 +               atomic_inc(&pipe->waiting_writers);
42145                 pipe_wait(pipe);
42146 -               pipe->waiting_writers--;
42147 +               atomic_dec(&pipe->waiting_writers);
42148         }
42149  out:
42150         mutex_unlock(&inode->i_mutex);
42151 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table 
42152         mask = 0;
42153         if (filp->f_mode & FMODE_READ) {
42154                 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
42155 -               if (!pipe->writers && filp->f_version != pipe->w_counter)
42156 +               if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
42157                         mask |= POLLHUP;
42158         }
42159  
42160 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table 
42161                  * Most Unices do not set POLLERR for FIFOs but on Linux they
42162                  * behave exactly like pipes for poll().
42163                  */
42164 -               if (!pipe->readers)
42165 +               if (!atomic_read(&pipe->readers))
42166                         mask |= POLLERR;
42167         }
42168  
42169 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
42170  
42171         mutex_lock(&inode->i_mutex);
42172         pipe = inode->i_pipe;
42173 -       pipe->readers -= decr;
42174 -       pipe->writers -= decw;
42175 +       atomic_sub(decr, &pipe->readers);
42176 +       atomic_sub(decw, &pipe->writers);
42177  
42178 -       if (!pipe->readers && !pipe->writers) {
42179 +       if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
42180                 free_pipe_info(inode);
42181         } else {
42182                 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
42183 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
42184  
42185         if (inode->i_pipe) {
42186                 ret = 0;
42187 -               inode->i_pipe->readers++;
42188 +               atomic_inc(&inode->i_pipe->readers);
42189         }
42190  
42191         mutex_unlock(&inode->i_mutex);
42192 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
42193  
42194         if (inode->i_pipe) {
42195                 ret = 0;
42196 -               inode->i_pipe->writers++;
42197 +               atomic_inc(&inode->i_pipe->writers);
42198         }
42199  
42200         mutex_unlock(&inode->i_mutex);
42201 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
42202         if (inode->i_pipe) {
42203                 ret = 0;
42204                 if (filp->f_mode & FMODE_READ)
42205 -                       inode->i_pipe->readers++;
42206 +                       atomic_inc(&inode->i_pipe->readers);
42207                 if (filp->f_mode & FMODE_WRITE)
42208 -                       inode->i_pipe->writers++;
42209 +                       atomic_inc(&inode->i_pipe->writers);
42210         }
42211  
42212         mutex_unlock(&inode->i_mutex);
42213 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
42214         inode->i_pipe = NULL;
42215  }
42216  
42217 -static struct vfsmount *pipe_mnt __read_mostly;
42218 +struct vfsmount *pipe_mnt __read_mostly;
42219  
42220  /*
42221   * pipefs_dname() is called from d_path().
42222 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
42223                 goto fail_iput;
42224         inode->i_pipe = pipe;
42225  
42226 -       pipe->readers = pipe->writers = 1;
42227 +       atomic_set(&pipe->readers, 1);
42228 +       atomic_set(&pipe->writers, 1);
42229         inode->i_fop = &rdwr_pipefifo_fops;
42230  
42231         /*
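
The fs/pipe.c hunks convert the readers, writers and waiting_writers fields from plain ints into atomic_t, with matching atomic_inc/atomic_dec/atomic_sub/atomic_read accessors, which under this patch also brings them under PaX's reference-count overflow checking. The userspace sketch below reproduces only the API shape using C11 atomics; it models the counting, not pipe locking or wakeups, and toy_pipe/toy_open/toy_release are made-up names.

#include <stdatomic.h>
#include <stdio.h>

struct toy_pipe {
        atomic_int readers;
        atomic_int writers;
};

static void toy_open(struct toy_pipe *p, int rd, int wr)
{
        if (rd)
                atomic_fetch_add(&p->readers, 1);       /* was: pipe->readers++ */
        if (wr)
                atomic_fetch_add(&p->writers, 1);       /* was: pipe->writers++ */
}

static void toy_release(struct toy_pipe *p, int decr, int decw)
{
        atomic_fetch_sub(&p->readers, decr);            /* was: pipe->readers -= decr */
        atomic_fetch_sub(&p->writers, decw);
        if (!atomic_load(&p->readers) && !atomic_load(&p->writers))
                printf("last user gone: this is where free_pipe_info() would run\n");
}

int main(void)
{
        struct toy_pipe p;

        atomic_init(&p.readers, 0);
        atomic_init(&p.writers, 0);
        toy_open(&p, 1, 1);                             /* an O_RDWR open */
        printf("readers=%d writers=%d\n",
               atomic_load(&p.readers), atomic_load(&p.writers));
        toy_release(&p, 1, 1);
        return 0;
}
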
42232 diff -urNp linux-3.0.4/fs/proc/array.c linux-3.0.4/fs/proc/array.c
42233 --- linux-3.0.4/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
42234 +++ linux-3.0.4/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
42235 @@ -60,6 +60,7 @@
42236  #include <linux/tty.h>
42237  #include <linux/string.h>
42238  #include <linux/mman.h>
42239 +#include <linux/grsecurity.h>
42240  #include <linux/proc_fs.h>
42241  #include <linux/ioport.h>
42242  #include <linux/uaccess.h>
42243 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
42244         seq_putc(m, '\n');
42245  }
42246  
42247 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42248 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
42249 +{
42250 +       if (p->mm)
42251 +               seq_printf(m, "PaX:\t%c%c%c%c%c\n",
42252 +                          p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
42253 +                          p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
42254 +                          p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
42255 +                          p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
42256 +                          p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
42257 +       else
42258 +               seq_printf(m, "PaX:\t-----\n");
42259 +}
42260 +#endif
42261 +
42262  int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
42263                         struct pid *pid, struct task_struct *task)
42264  {
42265 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, 
42266         task_cpus_allowed(m, task);
42267         cpuset_task_status_allowed(m, task);
42268         task_context_switch_counts(m, task);
42269 +
42270 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42271 +       task_pax(m, task);
42272 +#endif
42273 +
42274 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
42275 +       task_grsec_rbac(m, task);
42276 +#endif
42277 +
42278         return 0;
42279  }
42280  
42281 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42282 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42283 +                            (_mm->pax_flags & MF_PAX_RANDMMAP || \
42284 +                             _mm->pax_flags & MF_PAX_SEGMEXEC))
42285 +#endif
42286 +
42287  static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
42288                         struct pid *pid, struct task_struct *task, int whole)
42289  {
42290 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file 
42291         cputime_t cutime, cstime, utime, stime;
42292         cputime_t cgtime, gtime;
42293         unsigned long rsslim = 0;
42294 -       char tcomm[sizeof(task->comm)];
42295 +       char tcomm[sizeof(task->comm)] = { 0 };
42296         unsigned long flags;
42297  
42298 +       pax_track_stack();
42299 +
42300         state = *get_task_state(task);
42301         vsize = eip = esp = 0;
42302         permitted = ptrace_may_access(task, PTRACE_MODE_READ);
42303 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file 
42304                 gtime = task->gtime;
42305         }
42306  
42307 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42308 +       if (PAX_RAND_FLAGS(mm)) {
42309 +               eip = 0;
42310 +               esp = 0;
42311 +               wchan = 0;
42312 +       }
42313 +#endif
42314 +#ifdef CONFIG_GRKERNSEC_HIDESYM
42315 +       wchan = 0;
42316 +       eip = 0;
42317 +       esp = 0;
42318 +#endif
42319 +
42320         /* scale priority and nice values from timeslices to -20..20 */
42321         /* to make it look like a "normal" Unix priority/nice value  */
42322         priority = task_prio(task);
42323 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file 
42324                 vsize,
42325                 mm ? get_mm_rss(mm) : 0,
42326                 rsslim,
42327 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42328 +               PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
42329 +               PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
42330 +               PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
42331 +#else
42332                 mm ? (permitted ? mm->start_code : 1) : 0,
42333                 mm ? (permitted ? mm->end_code : 1) : 0,
42334                 (permitted && mm) ? mm->start_stack : 0,
42335 +#endif
42336                 esp,
42337                 eip,
42338                 /* The signal information here is obsolete.
42339 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
42340  
42341         return 0;
42342  }
42343 +
42344 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42345 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
42346 +{
42347 +       u32 curr_ip = 0;
42348 +       unsigned long flags;
42349 +
42350 +       if (lock_task_sighand(task, &flags)) {
42351 +               curr_ip = task->signal->curr_ip;
42352 +               unlock_task_sighand(task, &flags);
42353 +       }
42354 +
42355 +       return sprintf(buffer, "%pI4\n", &curr_ip);
42356 +}
42357 +#endif
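
The proc/array.c hunk adds a "PaX:" line to /proc/<pid>/status: five flag characters for PAGEEXEC, EMUTRAMP, MPROTECT, RANDMMAP and SEGMEXEC, uppercase when the flag is set, and "-----" for tasks without an mm. The small userspace reader below looks for that line; on a kernel without this patch the line simply is not present, and the program says so.

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/status", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                if (strncmp(line, "PaX:", 4) == 0) {
                        /* five characters follow the tab: P/E/M/R/S, uppercase = active */
                        fputs(line, stdout);
                        fclose(f);
                        return 0;
                }
        }
        fclose(f);
        puts("no PaX: line (kernel not carrying this patch)");
        return 0;
}
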
42358 diff -urNp linux-3.0.4/fs/proc/base.c linux-3.0.4/fs/proc/base.c
42359 --- linux-3.0.4/fs/proc/base.c  2011-08-23 21:44:40.000000000 -0400
42360 +++ linux-3.0.4/fs/proc/base.c  2011-08-23 21:48:14.000000000 -0400
42361 @@ -107,6 +107,22 @@ struct pid_entry {
42362         union proc_op op;
42363  };
42364  
42365 +struct getdents_callback {
42366 +       struct linux_dirent __user * current_dir;
42367 +       struct linux_dirent __user * previous;
42368 +       struct file * file;
42369 +       int count;
42370 +       int error;
42371 +};
42372 +
42373 +static int gr_fake_filldir(void * __buf, const char *name, int namlen, 
42374 +                          loff_t offset, u64 ino, unsigned int d_type)
42375 +{
42376 +       struct getdents_callback * buf = (struct getdents_callback *) __buf;
42377 +       buf->error = -EINVAL;
42378 +       return 0;
42379 +}
42380 +
42381  #define NOD(NAME, MODE, IOP, FOP, OP) {                        \
42382         .name = (NAME),                                 \
42383         .len  = sizeof(NAME) - 1,                       \
42384 @@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
42385         if (task == current)
42386                 return mm;
42387  
42388 +       if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
42389 +               return ERR_PTR(-EPERM);
42390 +
42391         /*
42392          * If current is actively ptrace'ing, and would also be
42393          * permitted to freshly attach with ptrace now, permit it.
42394 @@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
42395         if (!mm->arg_end)
42396                 goto out_mm;    /* Shh! No looking before we're done */
42397  
42398 +       if (gr_acl_handle_procpidmem(task))
42399 +               goto out_mm;
42400 +
42401         len = mm->arg_end - mm->arg_start;
42402   
42403         if (len > PAGE_SIZE)
42404 @@ -309,12 +331,28 @@ out:
42405         return res;
42406  }
42407  
42408 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42409 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42410 +                            (_mm->pax_flags & MF_PAX_RANDMMAP || \
42411 +                             _mm->pax_flags & MF_PAX_SEGMEXEC))
42412 +#endif
42413 +
42414  static int proc_pid_auxv(struct task_struct *task, char *buffer)
42415  {
42416         struct mm_struct *mm = mm_for_maps(task);
42417         int res = PTR_ERR(mm);
42418         if (mm && !IS_ERR(mm)) {
42419                 unsigned int nwords = 0;
42420 +
42421 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42422 +               /* allow if we're currently ptracing this task */
42423 +               if (PAX_RAND_FLAGS(mm) &&
42424 +                   (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
42425 +                       mmput(mm);
42426 +                       return res;
42427 +               }
42428 +#endif
42429 +
42430                 do {
42431                         nwords += 2;
42432                 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
42433 @@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
42434  }
42435  
42436  
42437 -#ifdef CONFIG_KALLSYMS
42438 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42439  /*
42440   * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42441   * Returns the resolved symbol.  If that fails, simply return the address.
42442 @@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
42443         mutex_unlock(&task->signal->cred_guard_mutex);
42444  }
42445  
42446 -#ifdef CONFIG_STACKTRACE
42447 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42448  
42449  #define MAX_STACK_TRACE_DEPTH  64
42450  
42451 @@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
42452         return count;
42453  }
42454  
42455 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42456 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42457  static int proc_pid_syscall(struct task_struct *task, char *buffer)
42458  {
42459         long nr;
42460 @@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
42461  /************************************************************************/
42462  
42463  /* permission checks */
42464 -static int proc_fd_access_allowed(struct inode *inode)
42465 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
42466  {
42467         struct task_struct *task;
42468         int allowed = 0;
42469 @@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
42470          */
42471         task = get_proc_task(inode);
42472         if (task) {
42473 -               allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42474 +               if (log)
42475 +                       allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
42476 +               else
42477 +                       allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42478                 put_task_struct(task);
42479         }
42480         return allowed;
42481 @@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file 
42482         if (!task)
42483                 goto out_no_task;
42484  
42485 +       if (gr_acl_handle_procpidmem(task))
42486 +               goto out;
42487 +
42488         ret = -ENOMEM;
42489         page = (char *)__get_free_page(GFP_TEMPORARY);
42490         if (!page)
42491 @@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
42492         path_put(&nd->path);
42493  
42494         /* Are we allowed to snoop on the tasks file descriptors? */
42495 -       if (!proc_fd_access_allowed(inode))
42496 +       if (!proc_fd_access_allowed(inode,0))
42497                 goto out;
42498  
42499         error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
42500 @@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
42501         struct path path;
42502  
42503         /* Are we allowed to snoop on the tasks file descriptors? */
42504 -       if (!proc_fd_access_allowed(inode))
42505 -               goto out;
42506 +       /* logging this is needed for learning mode on chromium to work properly,
42507 +          but we don't want to flood the logs from 'ps', which does a readlink
42508 +          on /proc/fd/2 of every task in the listing, nor do we want 'ps' to
42509 +          learn CAP_SYS_PTRACE, as that isn't necessary for its basic functionality
42510 +        */
42511 +       if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
42512 +               if (!proc_fd_access_allowed(inode,0))
42513 +                       goto out;
42514 +       } else {
42515 +               if (!proc_fd_access_allowed(inode,1))
42516 +                       goto out;
42517 +       }
42518  
42519         error = PROC_I(inode)->op.proc_get_link(inode, &path);
42520         if (error)
42521 @@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
42522                 rcu_read_lock();
42523                 cred = __task_cred(task);
42524                 inode->i_uid = cred->euid;
42525 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42526 +               inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42527 +#else
42528                 inode->i_gid = cred->egid;
42529 +#endif
42530                 rcu_read_unlock();
42531         }
42532         security_task_to_inode(task, inode);
42533 @@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
42534         struct inode *inode = dentry->d_inode;
42535         struct task_struct *task;
42536         const struct cred *cred;
42537 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42538 +       const struct cred *tmpcred = current_cred();
42539 +#endif
42540  
42541         generic_fillattr(inode, stat);
42542  
42543 @@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
42544         stat->uid = 0;
42545         stat->gid = 0;
42546         task = pid_task(proc_pid(inode), PIDTYPE_PID);
42547 +
42548 +       if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
42549 +               rcu_read_unlock();
42550 +               return -ENOENT;
42551 +       }
42552 +
42553         if (task) {
42554 +               cred = __task_cred(task);
42555 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42556 +               if (!tmpcred->uid || (tmpcred->uid == cred->uid)
42557 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42558 +                   || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42559 +#endif
42560 +               ) {
42561 +#endif
42562                 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42563 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42564 +                   (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42565 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42566 +                   (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42567 +#endif
42568                     task_dumpable(task)) {
42569 -                       cred = __task_cred(task);
42570                         stat->uid = cred->euid;
42571 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42572 +                       stat->gid = CONFIG_GRKERNSEC_PROC_GID;
42573 +#else
42574                         stat->gid = cred->egid;
42575 +#endif
42576                 }
42577 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42578 +               } else {
42579 +                       rcu_read_unlock();
42580 +                       return -ENOENT;
42581 +               }
42582 +#endif
42583         }
42584         rcu_read_unlock();
42585         return 0;
42586 @@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
42587  
42588         if (task) {
42589                 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42590 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42591 +                   (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42592 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42593 +                   (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42594 +#endif
42595                     task_dumpable(task)) {
42596                         rcu_read_lock();
42597                         cred = __task_cred(task);
42598                         inode->i_uid = cred->euid;
42599 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42600 +                       inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42601 +#else
42602                         inode->i_gid = cred->egid;
42603 +#endif
42604                         rcu_read_unlock();
42605                 } else {
42606                         inode->i_uid = 0;
42607 @@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
42608         int fd = proc_fd(inode);
42609  
42610         if (task) {
42611 -               files = get_files_struct(task);
42612 +               if (!gr_acl_handle_procpidmem(task))
42613 +                       files = get_files_struct(task);
42614                 put_task_struct(task);
42615         }
42616         if (files) {
42617 @@ -2169,11 +2268,21 @@ static const struct file_operations proc
42618   */
42619  static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
42620  {
42621 +       struct task_struct *task;
42622         int rv = generic_permission(inode, mask, flags, NULL);
42623 -       if (rv == 0)
42624 -               return 0;
42625 +
42626         if (task_pid(current) == proc_pid(inode))
42627                 rv = 0;
42628 +
42629 +       task = get_proc_task(inode);
42630 +       if (task == NULL)
42631 +               return rv;
42632 +
42633 +       if (gr_acl_handle_procpidmem(task))
42634 +               rv = -EACCES;
42635 +
42636 +       put_task_struct(task);
42637 +
42638         return rv;
42639  }
42640  
42641 @@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
42642         if (!task)
42643                 goto out_no_task;
42644  
42645 +       if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42646 +               goto out;
42647 +
42648         /*
42649          * Yes, it does not scale. And it should not. Don't add
42650          * new entries into /proc/<tgid>/ without very good reasons.
42651 @@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
42652         if (!task)
42653                 goto out_no_task;
42654  
42655 +       if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42656 +               goto out;
42657 +
42658         ret = 0;
42659         i = filp->f_pos;
42660         switch (i) {
42661 @@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
42662  static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42663                                 void *cookie)
42664  {
42665 -       char *s = nd_get_link(nd);
42666 +       const char *s = nd_get_link(nd);
42667         if (!IS_ERR(s))
42668                 __putname(s);
42669  }
42670 @@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
42671         REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
42672  #endif
42673         REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
42674 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42675 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42676         INF("syscall",    S_IRUGO, proc_pid_syscall),
42677  #endif
42678         INF("cmdline",    S_IRUGO, proc_pid_cmdline),
42679 @@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
42680  #ifdef CONFIG_SECURITY
42681         DIR("attr",       S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42682  #endif
42683 -#ifdef CONFIG_KALLSYMS
42684 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42685         INF("wchan",      S_IRUGO, proc_pid_wchan),
42686  #endif
42687 -#ifdef CONFIG_STACKTRACE
42688 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42689         ONE("stack",      S_IRUGO, proc_pid_stack),
42690  #endif
42691  #ifdef CONFIG_SCHEDSTATS
42692 @@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
42693         INF("hardwall",   S_IRUGO, proc_pid_hardwall),
42694  #endif
42695         ONE("nsproxy",  S_IRUGO, proc_pid_nsproxy),
42696 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42697 +       INF("ipaddr",     S_IRUSR, proc_pid_ipaddr),
42698 +#endif
42699  };
42700  
42701  static int proc_tgid_base_readdir(struct file * filp,
42702 @@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
42703         if (!inode)
42704                 goto out;
42705  
42706 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42707 +       inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42708 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42709 +       inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42710 +       inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42711 +#else
42712         inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42713 +#endif
42714         inode->i_op = &proc_tgid_base_inode_operations;
42715         inode->i_fop = &proc_tgid_base_operations;
42716         inode->i_flags|=S_IMMUTABLE;
42717 @@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
42718         if (!task)
42719                 goto out;
42720  
42721 +       if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42722 +               goto out_put_task;
42723 +
42724         result = proc_pid_instantiate(dir, dentry, task, NULL);
42725 +out_put_task:
42726         put_task_struct(task);
42727  out:
42728         return result;
42729 @@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
42730  {
42731         unsigned int nr;
42732         struct task_struct *reaper;
42733 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42734 +       const struct cred *tmpcred = current_cred();
42735 +       const struct cred *itercred;
42736 +#endif
42737 +       filldir_t __filldir = filldir;
42738         struct tgid_iter iter;
42739         struct pid_namespace *ns;
42740  
42741 @@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
42742         for (iter = next_tgid(ns, iter);
42743              iter.task;
42744              iter.tgid += 1, iter = next_tgid(ns, iter)) {
42745 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42746 +               rcu_read_lock();
42747 +               itercred = __task_cred(iter.task);
42748 +#endif
42749 +               if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
42750 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42751 +                   || (tmpcred->uid && (itercred->uid != tmpcred->uid)
42752 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42753 +                       && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42754 +#endif
42755 +                       )
42756 +#endif
42757 +               )
42758 +                       __filldir = &gr_fake_filldir;
42759 +               else
42760 +                       __filldir = filldir;
42761 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42762 +       rcu_read_unlock();
42763 +#endif
42764                 filp->f_pos = iter.tgid + TGID_OFFSET;
42765                 if (!vx_proc_task_visible(iter.task))
42766                         continue;
42767 -               if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
42768 +               if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
42769                         put_task_struct(iter.task);
42770 @@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
42771         REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42772  #endif
42773         REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
42774 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42775 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42776         INF("syscall",   S_IRUGO, proc_pid_syscall),
42777  #endif
42778         INF("cmdline",   S_IRUGO, proc_pid_cmdline),
42779 @@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
42780  #ifdef CONFIG_SECURITY
42781         DIR("attr",      S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42782  #endif
42783 -#ifdef CONFIG_KALLSYMS
42784 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42785         INF("wchan",     S_IRUGO, proc_pid_wchan),
42786  #endif
42787 -#ifdef CONFIG_STACKTRACE
42788 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42789         ONE("stack",      S_IRUGO, proc_pid_stack),
42790  #endif
42791  #ifdef CONFIG_SCHEDSTATS
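
Taken together, the fs/proc/base.c changes hide other users' /proc/<pid> entries when CONFIG_GRKERNSEC_PROC_USER or CONFIG_GRKERNSEC_PROC_USERGROUP is set: pid_getattr and proc_pid_lookup return -ENOENT for tasks the caller may not see, and proc_pid_readdir swaps in gr_fake_filldir so filtered tasks are skipped without an error. The userspace sketch below approximates the readdir side only, by walking /proc and keeping the pid directories owned by the caller; it is an analogue of the visible effect, not the kernel mechanism.

#include <ctype.h>
#include <dirent.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        DIR *d = opendir("/proc");
        struct dirent *de;
        uid_t me = getuid();

        if (!d) {
                perror("opendir");
                return 1;
        }
        while ((de = readdir(d)) != NULL) {
                char path[280];
                struct stat st;

                if (!isdigit((unsigned char)de->d_name[0]))
                        continue;                       /* not a pid directory */
                snprintf(path, sizeof(path), "/proc/%s", de->d_name);
                if (stat(path, &st) == 0 && st.st_uid == me)
                        printf("%s\n", de->d_name);     /* visible: owned by the caller */
                /* other users' pids are dropped, as gr_fake_filldir does in-kernel */
        }
        closedir(d);
        return 0;
}
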
42792 diff -urNp linux-3.0.4/fs/proc/cmdline.c linux-3.0.4/fs/proc/cmdline.c
42793 --- linux-3.0.4/fs/proc/cmdline.c       2011-07-21 22:17:23.000000000 -0400
42794 +++ linux-3.0.4/fs/proc/cmdline.c       2011-08-23 21:48:14.000000000 -0400
42795 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
42796  
42797  static int __init proc_cmdline_init(void)
42798  {
42799 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42800 +       proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
42801 +#else
42802         proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
42803 +#endif
42804         return 0;
42805  }
42806  module_init(proc_cmdline_init);
42807 diff -urNp linux-3.0.4/fs/proc/devices.c linux-3.0.4/fs/proc/devices.c
42808 --- linux-3.0.4/fs/proc/devices.c       2011-07-21 22:17:23.000000000 -0400
42809 +++ linux-3.0.4/fs/proc/devices.c       2011-08-23 21:48:14.000000000 -0400
42810 @@ -64,7 +64,11 @@ static const struct file_operations proc
42811  
42812  static int __init proc_devices_init(void)
42813  {
42814 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42815 +       proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
42816 +#else
42817         proc_create("devices", 0, NULL, &proc_devinfo_operations);
42818 +#endif
42819         return 0;
42820  }
42821  module_init(proc_devices_init);
42822 diff -urNp linux-3.0.4/fs/proc/inode.c linux-3.0.4/fs/proc/inode.c
42823 --- linux-3.0.4/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
42824 +++ linux-3.0.4/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
42825 @@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
42826                 if (de->mode) {
42827                         inode->i_mode = de->mode;
42828                         inode->i_uid = de->uid;
42829 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42830 +                       inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42831 +#else
42832                         inode->i_gid = de->gid;
42833 +#endif
42834                 }
42835                 if (de->size)
42836                         inode->i_size = de->size;
42837 diff -urNp linux-3.0.4/fs/proc/internal.h linux-3.0.4/fs/proc/internal.h
42838 --- linux-3.0.4/fs/proc/internal.h      2011-07-21 22:17:23.000000000 -0400
42839 +++ linux-3.0.4/fs/proc/internal.h      2011-08-23 21:48:14.000000000 -0400
42840 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
42841                                 struct pid *pid, struct task_struct *task);
42842  extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
42843                                 struct pid *pid, struct task_struct *task);
42844 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42845 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
42846 +#endif
42847  extern int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns,
42848                                 struct pid *pid, struct task_struct *task);
42849  
42850 diff -urNp linux-3.0.4/fs/proc/Kconfig linux-3.0.4/fs/proc/Kconfig
42851 --- linux-3.0.4/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
42852 +++ linux-3.0.4/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
42853 @@ -30,12 +30,12 @@ config PROC_FS
42854  
42855  config PROC_KCORE
42856         bool "/proc/kcore support" if !ARM
42857 -       depends on PROC_FS && MMU
42858 +       depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
42859  
42860  config PROC_VMCORE
42861         bool "/proc/vmcore support"
42862 -       depends on PROC_FS && CRASH_DUMP
42863 -       default y
42864 +       depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
42865 +       default n
42866          help
42867          Exports the dump image of crashed kernel in ELF format.
42868  
42869 @@ -59,8 +59,8 @@ config PROC_SYSCTL
42870           limited in memory.
42871  
42872  config PROC_PAGE_MONITOR
42873 -       default y
42874 -       depends on PROC_FS && MMU
42875 +       default n
42876 +       depends on PROC_FS && MMU && !GRKERNSEC
42877         bool "Enable /proc page monitoring" if EXPERT
42878         help
42879           Various /proc files exist to monitor process memory utilization:
42880 diff -urNp linux-3.0.4/fs/proc/kcore.c linux-3.0.4/fs/proc/kcore.c
42881 --- linux-3.0.4/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
42882 +++ linux-3.0.4/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
42883 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
42884         off_t offset = 0;
42885         struct kcore_list *m;
42886  
42887 +       pax_track_stack();
42888 +
42889         /* setup ELF header */
42890         elf = (struct elfhdr *) bufp;
42891         bufp += sizeof(struct elfhdr);
42892 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
42893          * the addresses in the elf_phdr on our list.
42894          */
42895         start = kc_offset_to_vaddr(*fpos - elf_buflen);
42896 -       if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
42897 +       tsz = PAGE_SIZE - (start & ~PAGE_MASK);
42898 +       if (tsz > buflen)
42899                 tsz = buflen;
42900 -               
42901 +
42902         while (buflen) {
42903                 struct kcore_list *m;
42904  
42905 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
42906                         kfree(elf_buf);
42907                 } else {
42908                         if (kern_addr_valid(start)) {
42909 -                               unsigned long n;
42910 +                               char *elf_buf;
42911 +                               mm_segment_t oldfs;
42912  
42913 -                               n = copy_to_user(buffer, (char *)start, tsz);
42914 -                               /*
42915 -                                * We cannot distingush between fault on source
42916 -                                * and fault on destination. When this happens
42917 -                                * we clear too and hope it will trigger the
42918 -                                * EFAULT again.
42919 -                                */
42920 -                               if (n) { 
42921 -                                       if (clear_user(buffer + tsz - n,
42922 -                                                               n))
42923 +                               elf_buf = kmalloc(tsz, GFP_KERNEL);
42924 +                               if (!elf_buf)
42925 +                                       return -ENOMEM;
42926 +                               oldfs = get_fs();
42927 +                               set_fs(KERNEL_DS);
42928 +                               if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
42929 +                                       set_fs(oldfs);
42930 +                                       if (copy_to_user(buffer, elf_buf, tsz)) {
42931 +                                               kfree(elf_buf);
42932                                                 return -EFAULT;
42933 +                                       }
42934                                 }
42935 +                               set_fs(oldfs);
42936 +                               kfree(elf_buf);
42937                         } else {
42938                                 if (clear_user(buffer, tsz))
42939                                         return -EFAULT;
42940 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
42941  
42942  static int open_kcore(struct inode *inode, struct file *filp)
42943  {
42944 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
42945 +       return -EPERM;
42946 +#endif
42947         if (!capable(CAP_SYS_RAWIO))
42948                 return -EPERM;
42949         if (kcore_need_update)
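
The read_kcore() rewrite above replaces the single copy_to_user() of a raw kernel address with a bounce buffer: the data is first pulled into a kmalloc'd buffer under set_fs(KERNEL_DS), and only then copied out to the caller, so a fault while reading the source is no longer conflated with a fault on the user destination (the problem the deleted comment describes). Below is a loose userspace sketch of the same staging pattern, using read()/write() in place of the two copy steps; copy_chunk() is an illustrative name.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Stage the data in a temporary buffer, then deliver it, so the two failure
 * modes stay distinguishable. */
static int copy_chunk(int src_fd, int dst_fd, size_t tsz)
{
        char *buf = malloc(tsz);
        ssize_t got;

        if (!buf)
                return -1;                      /* the -ENOMEM case above */
        got = read(src_fd, buf, tsz);           /* stage 1: producing the data */
        if (got > 0 && write(dst_fd, buf, (size_t)got) != got) {
                free(buf);
                return -1;                      /* stage 2: delivering it (-EFAULT analogue) */
        }
        free(buf);
        return got < 0 ? -1 : 0;
}

int main(void)
{
        FILE *src = fopen("/proc/self/maps", "r");

        if (!src)
                return 1;
        copy_chunk(fileno(src), STDOUT_FILENO, 4096);   /* copy one page-sized chunk */
        fclose(src);
        return 0;
}
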
42950 diff -urNp linux-3.0.4/fs/proc/meminfo.c linux-3.0.4/fs/proc/meminfo.c
42951 --- linux-3.0.4/fs/proc/meminfo.c       2011-07-21 22:17:23.000000000 -0400
42952 +++ linux-3.0.4/fs/proc/meminfo.c       2011-08-23 21:48:14.000000000 -0400
42953 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
42954         unsigned long pages[NR_LRU_LISTS];
42955         int lru;
42956  
42957 +       pax_track_stack();
42958 +
42959  /*
42960   * display in kilobytes.
42961   */
42962 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
42963                 vmi.used >> 10,
42964                 vmi.largest_chunk >> 10
42965  #ifdef CONFIG_MEMORY_FAILURE
42966 -               ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
42967 +               ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
42968  #endif
42969  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
42970                 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
42971 diff -urNp linux-3.0.4/fs/proc/nommu.c linux-3.0.4/fs/proc/nommu.c
42972 --- linux-3.0.4/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
42973 +++ linux-3.0.4/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
42974 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
42975                 if (len < 1)
42976                         len = 1;
42977                 seq_printf(m, "%*c", len, ' ');
42978 -               seq_path(m, &file->f_path, "");
42979 +               seq_path(m, &file->f_path, "\n\\");
42980         }
42981  
42982         seq_putc(m, '\n');
42983 diff -urNp linux-3.0.4/fs/proc/proc_net.c linux-3.0.4/fs/proc/proc_net.c
42984 --- linux-3.0.4/fs/proc/proc_net.c      2011-07-21 22:17:23.000000000 -0400
42985 +++ linux-3.0.4/fs/proc/proc_net.c      2011-08-23 21:48:14.000000000 -0400
42986 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
42987         struct task_struct *task;
42988         struct nsproxy *ns;
42989         struct net *net = NULL;
42990 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42991 +       const struct cred *cred = current_cred();
42992 +#endif
42993 +
42994 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42995 +       if (cred->fsuid)
42996 +               return net;
42997 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42998 +       if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
42999 +               return net;
43000 +#endif
43001  
43002         rcu_read_lock();
43003         task = pid_task(proc_pid(dir), PIDTYPE_PID);
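
The proc_net.c hunk gates the per-task /proc/net view: with CONFIG_GRKERNSEC_PROC_USER any non-zero fsuid gets nothing back, and with CONFIG_GRKERNSEC_PROC_USERGROUP membership in CONFIG_GRKERNSEC_PROC_GID is accepted as well. The sketch below is a rough userspace analogue of that check, using the effective uid and the supplementary group list instead of the in-kernel fsuid/in_group_p() test; PROC_GID is a made-up constant standing in for the Kconfig value.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define PROC_GID ((gid_t)1001)  /* made-up stand-in for CONFIG_GRKERNSEC_PROC_GID */

static int in_proc_group(gid_t gid)
{
        int n = getgroups(0, NULL);     /* number of supplementary groups */
        gid_t *groups;
        int i, found = 0;

        if (getegid() == gid)
                return 1;
        if (n <= 0)
                return 0;
        groups = calloc((size_t)n, sizeof(*groups));
        if (!groups)
                return 0;
        n = getgroups(n, groups);
        for (i = 0; i < n; i++)
                if (groups[i] == gid)
                        found = 1;
        free(groups);
        return found;
}

int main(void)
{
        if (geteuid() == 0 || in_proc_group(PROC_GID))
                puts("per-task /proc/net would be populated for this caller");
        else
                puts("per-task /proc/net would come back empty for this caller");
        return 0;
}
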
43004 diff -urNp linux-3.0.4/fs/proc/proc_sysctl.c linux-3.0.4/fs/proc/proc_sysctl.c
43005 --- linux-3.0.4/fs/proc/proc_sysctl.c   2011-07-21 22:17:23.000000000 -0400
43006 +++ linux-3.0.4/fs/proc/proc_sysctl.c   2011-08-23 21:48:14.000000000 -0400
43007 @@ -8,6 +8,8 @@
43008  #include <linux/namei.h>
43009  #include "internal.h"
43010  
43011 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
43012 +
43013  static const struct dentry_operations proc_sys_dentry_operations;
43014  static const struct file_operations proc_sys_file_operations;
43015  static const struct inode_operations proc_sys_inode_operations;
43016 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
43017         if (!p)
43018                 goto out;
43019  
43020 +       if (gr_handle_sysctl(p, MAY_EXEC))
43021 +               goto out;
43022 +
43023         err = ERR_PTR(-ENOMEM);
43024         inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
43025         if (h)
43026 @@ -230,6 +235,9 @@ static int scan(struct ctl_table_header 
43027                 if (*pos < file->f_pos)
43028                         continue;
43029  
43030 +               if (gr_handle_sysctl(table, 0))
43031 +                       continue;
43032 +
43033                 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
43034                 if (res)
43035                         return res;
43036 @@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
43037         if (IS_ERR(head))
43038                 return PTR_ERR(head);
43039  
43040 +       if (table && gr_handle_sysctl(table, MAY_EXEC))
43041 +               return -ENOENT;
43042 +
43043         generic_fillattr(inode, stat);
43044         if (table)
43045                 stat->mode = (stat->mode & S_IFMT) | table->mode;
43046 diff -urNp linux-3.0.4/fs/proc/root.c linux-3.0.4/fs/proc/root.c
43047 --- linux-3.0.4/fs/proc/root.c  2011-07-21 22:17:23.000000000 -0400
43048 +++ linux-3.0.4/fs/proc/root.c  2011-08-23 21:48:14.000000000 -0400
43049 @@ -123,7 +123,15 @@ void __init proc_root_init(void)
43050  #ifdef CONFIG_PROC_DEVICETREE
43051         proc_device_tree_init();
43052  #endif
43053 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
43054 +#ifdef CONFIG_GRKERNSEC_PROC_USER
43055 +       proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
43056 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43057 +       proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
43058 +#endif
43059 +#else
43060         proc_mkdir("bus", NULL);
43061 +#endif
43062         proc_sys_init();
43063  }
43064  
43065 diff -urNp linux-3.0.4/fs/proc/task_mmu.c linux-3.0.4/fs/proc/task_mmu.c
43066 --- linux-3.0.4/fs/proc/task_mmu.c      2011-07-21 22:17:23.000000000 -0400
43067 +++ linux-3.0.4/fs/proc/task_mmu.c      2011-08-23 21:48:14.000000000 -0400
43068 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
43069                 "VmExe:\t%8lu kB\n"
43070                 "VmLib:\t%8lu kB\n"
43071                 "VmPTE:\t%8lu kB\n"
43072 -               "VmSwap:\t%8lu kB\n",
43073 -               hiwater_vm << (PAGE_SHIFT-10),
43074 +               "VmSwap:\t%8lu kB\n"
43075 +
43076 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43077 +               "CsBase:\t%8lx\nCsLim:\t%8lx\n"
43078 +#endif
43079 +
43080 +               ,hiwater_vm << (PAGE_SHIFT-10),
43081                 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
43082                 mm->locked_vm << (PAGE_SHIFT-10),
43083                 hiwater_rss << (PAGE_SHIFT-10),
43084 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
43085                 data << (PAGE_SHIFT-10),
43086                 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
43087                 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
43088 -               swap << (PAGE_SHIFT-10));
43089 +               swap << (PAGE_SHIFT-10)
43090 +
43091 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43092 +               , mm->context.user_cs_base, mm->context.user_cs_limit
43093 +#endif
43094 +
43095 +       );
43096  }
43097  
43098  unsigned long task_vsize(struct mm_struct *mm)
43099 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
43100         return ret;
43101  }
43102  
43103 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43104 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43105 +                            (_mm->pax_flags & MF_PAX_RANDMMAP || \
43106 +                             _mm->pax_flags & MF_PAX_SEGMEXEC))
43107 +#endif
43108 +
43109  static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
43110  {
43111         struct mm_struct *mm = vma->vm_mm;
43112 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
43113                 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
43114         }
43115  
43116 -       /* We don't show the stack guard page in /proc/maps */
43117 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43118 +       start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
43119 +       end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
43120 +#else
43121         start = vma->vm_start;
43122 -       if (stack_guard_page_start(vma, start))
43123 -               start += PAGE_SIZE;
43124         end = vma->vm_end;
43125 -       if (stack_guard_page_end(vma, end))
43126 -               end -= PAGE_SIZE;
43127 +#endif
43128  
43129         seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
43130                         start,
43131 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
43132                         flags & VM_WRITE ? 'w' : '-',
43133                         flags & VM_EXEC ? 'x' : '-',
43134                         flags & VM_MAYSHARE ? 's' : 'p',
43135 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43136 +                       PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
43137 +#else
43138                         pgoff,
43139 +#endif
43140                         MAJOR(dev), MINOR(dev), ino, &len);
43141  
43142         /*
43143 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
43144          */
43145         if (file) {
43146                 pad_len_spaces(m, len);
43147 -               seq_path(m, &file->f_path, "\n");
43148 +               seq_path(m, &file->f_path, "\n\\");
43149         } else {
43150                 const char *name = arch_vma_name(vma);
43151                 if (!name) {
43152 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
43153                                 if (vma->vm_start <= mm->brk &&
43154                                                 vma->vm_end >= mm->start_brk) {
43155                                         name = "[heap]";
43156 -                               } else if (vma->vm_start <= mm->start_stack &&
43157 -                                          vma->vm_end >= mm->start_stack) {
43158 +                               } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
43159 +                                          (vma->vm_start <= mm->start_stack &&
43160 +                                           vma->vm_end >= mm->start_stack)) {
43161                                         name = "[stack]";
43162                                 }
43163                         } else {
43164 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
43165         };
43166  
43167         memset(&mss, 0, sizeof mss);
43168 -       mss.vma = vma;
43169 -       /* mmap_sem is held in m_start */
43170 -       if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43171 -               walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43172 -
43173 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43174 +       if (!PAX_RAND_FLAGS(vma->vm_mm)) {
43175 +#endif
43176 +               mss.vma = vma;
43177 +               /* mmap_sem is held in m_start */
43178 +               if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43179 +                       walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43180 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43181 +       }
43182 +#endif
43183         show_map_vma(m, vma);
43184  
43185         seq_printf(m,
43186 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
43187                    "KernelPageSize: %8lu kB\n"
43188                    "MMUPageSize:    %8lu kB\n"
43189                    "Locked:         %8lu kB\n",
43190 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43191 +                  PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
43192 +#else
43193                    (vma->vm_end - vma->vm_start) >> 10,
43194 +#endif
43195                    mss.resident >> 10,
43196                    (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
43197                    mss.shared_clean  >> 10,
43198 @@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
43199  
43200         if (file) {
43201                 seq_printf(m, " file=");
43202 -               seq_path(m, &file->f_path, "\n\t= ");
43203 +               seq_path(m, &file->f_path, "\n\t\\= ");
43204         } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
43205                 seq_printf(m, " heap");
43206         } else if (vma->vm_start <= mm->start_stack &&
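
When CONFIG_GRKERNSEC_PROC_MEMMAP is enabled, the hunks above blank the file offset in show_map_vma() and the mapping size in show_smap() (and skip the page-table walk entirely) whenever PAX_RAND_FLAGS() reports a randomized mm, so /proc/<pid>/maps and smaps stop leaking the address-space layout. As an illustrative aside (not part of the patch), a minimal userspace reader of the fields being hidden:

/* Illustrative sketch: parse the address range and file offset that
 * /proc/<pid>/maps exposes -- the fields the hunks above blank out
 * when GRKERNSEC_PROC_MEMMAP hides a randomized mm's layout. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/self/maps", "r");
        char line[512];

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                unsigned long start, end, off;
                char perms[8];

                if (sscanf(line, "%lx-%lx %7s %lx", &start, &end, perms, &off) == 4)
                        printf("start=%#lx end=%#lx perms=%s offset=%#lx\n",
                               start, end, perms, off);
        }
        fclose(f);
        return 0;
}
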
43207 diff -urNp linux-3.0.4/fs/proc/task_nommu.c linux-3.0.4/fs/proc/task_nommu.c
43208 --- linux-3.0.4/fs/proc/task_nommu.c    2011-07-21 22:17:23.000000000 -0400
43209 +++ linux-3.0.4/fs/proc/task_nommu.c    2011-08-23 21:47:56.000000000 -0400
43210 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
43211         else
43212                 bytes += kobjsize(mm);
43213         
43214 -       if (current->fs && current->fs->users > 1)
43215 +       if (current->fs && atomic_read(&current->fs->users) > 1)
43216                 sbytes += kobjsize(current->fs);
43217         else
43218                 bytes += kobjsize(current->fs);
43219 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
43220  
43221         if (file) {
43222                 pad_len_spaces(m, len);
43223 -               seq_path(m, &file->f_path, "");
43224 +               seq_path(m, &file->f_path, "\n\\");
43225         } else if (mm) {
43226                 if (vma->vm_start <= mm->start_stack &&
43227                         vma->vm_end >= mm->start_stack) {
43228 diff -urNp linux-3.0.4/fs/quota/netlink.c linux-3.0.4/fs/quota/netlink.c
43229 --- linux-3.0.4/fs/quota/netlink.c      2011-07-21 22:17:23.000000000 -0400
43230 +++ linux-3.0.4/fs/quota/netlink.c      2011-08-23 21:47:56.000000000 -0400
43231 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
43232  void quota_send_warning(short type, unsigned int id, dev_t dev,
43233                         const char warntype)
43234  {
43235 -       static atomic_t seq;
43236 +       static atomic_unchecked_t seq;
43237         struct sk_buff *skb;
43238         void *msg_head;
43239         int ret;
43240 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
43241                   "VFS: Not enough memory to send quota warning.\n");
43242                 return;
43243         }
43244 -       msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
43245 +       msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
43246                         &quota_genl_family, 0, QUOTA_NL_C_WARNING);
43247         if (!msg_head) {
43248                 printk(KERN_ERR
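
In the quota netlink hunk, the sequence counter becomes atomic_unchecked_t, the PaX variant of atomic_t that is exempt from REFCOUNT overflow trapping; wrapping a message sequence number is harmless, unlike overflowing a reference count. A rough userspace analogue of such a wrap-tolerant counter, sketched with C11 atomics (assumption: plain unsigned wraparound stands in for the "unchecked" behaviour):

/* Sketch of the idea behind atomic_unchecked_t: a sequence counter
 * whose unsigned wraparound is deliberately allowed, as opposed to a
 * reference count where overflow would indicate a bug. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;                  /* wrap-tolerant sequence number */

static unsigned int next_seq(void)
{
        /* mirrors atomic_add_return_unchecked(1, &seq): returns the
         * post-increment value and silently wraps at UINT_MAX */
        return atomic_fetch_add(&seq, 1) + 1;
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                printf("seq=%u\n", next_seq());
        return 0;
}
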
43249 diff -urNp linux-3.0.4/fs/readdir.c linux-3.0.4/fs/readdir.c
43250 --- linux-3.0.4/fs/readdir.c    2011-07-21 22:17:23.000000000 -0400
43251 +++ linux-3.0.4/fs/readdir.c    2011-08-23 21:48:14.000000000 -0400
43252 @@ -17,6 +17,7 @@
43253  #include <linux/security.h>
43254  #include <linux/syscalls.h>
43255  #include <linux/unistd.h>
43256 +#include <linux/namei.h>
43257  
43258  #include <asm/uaccess.h>
43259  
43260 @@ -67,6 +68,7 @@ struct old_linux_dirent {
43261  
43262  struct readdir_callback {
43263         struct old_linux_dirent __user * dirent;
43264 +       struct file * file;
43265         int result;
43266  };
43267  
43268 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
43269                 buf->result = -EOVERFLOW;
43270                 return -EOVERFLOW;
43271         }
43272 +
43273 +       if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43274 +               return 0;
43275 +
43276         buf->result++;
43277         dirent = buf->dirent;
43278         if (!access_ok(VERIFY_WRITE, dirent,
43279 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
43280  
43281         buf.result = 0;
43282         buf.dirent = dirent;
43283 +       buf.file = file;
43284  
43285         error = vfs_readdir(file, fillonedir, &buf);
43286         if (buf.result)
43287 @@ -142,6 +149,7 @@ struct linux_dirent {
43288  struct getdents_callback {
43289         struct linux_dirent __user * current_dir;
43290         struct linux_dirent __user * previous;
43291 +       struct file * file;
43292         int count;
43293         int error;
43294  };
43295 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
43296                 buf->error = -EOVERFLOW;
43297                 return -EOVERFLOW;
43298         }
43299 +
43300 +       if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43301 +               return 0;
43302 +
43303         dirent = buf->previous;
43304         if (dirent) {
43305                 if (__put_user(offset, &dirent->d_off))
43306 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, 
43307         buf.previous = NULL;
43308         buf.count = count;
43309         buf.error = 0;
43310 +       buf.file = file;
43311  
43312         error = vfs_readdir(file, filldir, &buf);
43313         if (error >= 0)
43314 @@ -229,6 +242,7 @@ out:
43315  struct getdents_callback64 {
43316         struct linux_dirent64 __user * current_dir;
43317         struct linux_dirent64 __user * previous;
43318 +       struct file *file;
43319         int count;
43320         int error;
43321  };
43322 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
43323         buf->error = -EINVAL;   /* only used if we fail.. */
43324         if (reclen > buf->count)
43325                 return -EINVAL;
43326 +
43327 +       if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43328 +               return 0;
43329 +
43330         dirent = buf->previous;
43331         if (dirent) {
43332                 if (__put_user(offset, &dirent->d_off))
43333 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
43334  
43335         buf.current_dir = dirent;
43336         buf.previous = NULL;
43337 +       buf.file = file;
43338         buf.count = count;
43339         buf.error = 0;
43340  
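
The readdir hunks thread a struct file pointer into each callback structure so gr_acl_handle_filldir() can consult the RBAC policy and return 0 for entries the opener may not see, silently dropping them while iteration continues. A userspace sketch of the same filter-inside-the-callback idea, with a hypothetical allow() predicate standing in for the policy check (illustrative only, not the kernel API):

/* Sketch: hide selected names while listing a directory, mirroring how
 * the patched filldir callbacks skip filtered entries but keep going. */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

/* hypothetical policy hook standing in for gr_acl_handle_filldir() */
static int allow(const char *name)
{
        return strncmp(name, ".", 1) != 0;   /* e.g. hide dotfiles */
}

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : ".";
        DIR *d = opendir(path);
        struct dirent *de;

        if (!d) {
                perror("opendir");
                return 1;
        }
        while ((de = readdir(d)) != NULL) {
                if (!allow(de->d_name))
                        continue;            /* filtered out, keep iterating */
                printf("%s\n", de->d_name);
        }
        closedir(d);
        return 0;
}
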
43341 diff -urNp linux-3.0.4/fs/reiserfs/dir.c linux-3.0.4/fs/reiserfs/dir.c
43342 --- linux-3.0.4/fs/reiserfs/dir.c       2011-07-21 22:17:23.000000000 -0400
43343 +++ linux-3.0.4/fs/reiserfs/dir.c       2011-08-23 21:48:14.000000000 -0400
43344 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
43345         struct reiserfs_dir_entry de;
43346         int ret = 0;
43347  
43348 +       pax_track_stack();
43349 +
43350         reiserfs_write_lock(inode->i_sb);
43351  
43352         reiserfs_check_lock_depth(inode->i_sb, "readdir");
43353 diff -urNp linux-3.0.4/fs/reiserfs/do_balan.c linux-3.0.4/fs/reiserfs/do_balan.c
43354 --- linux-3.0.4/fs/reiserfs/do_balan.c  2011-07-21 22:17:23.000000000 -0400
43355 +++ linux-3.0.4/fs/reiserfs/do_balan.c  2011-08-23 21:47:56.000000000 -0400
43356 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
43357                 return;
43358         }
43359  
43360 -       atomic_inc(&(fs_generation(tb->tb_sb)));
43361 +       atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
43362         do_balance_starts(tb);
43363  
43364         /* balance leaf returns 0 except if combining L R and S into
43365 diff -urNp linux-3.0.4/fs/reiserfs/journal.c linux-3.0.4/fs/reiserfs/journal.c
43366 --- linux-3.0.4/fs/reiserfs/journal.c   2011-07-21 22:17:23.000000000 -0400
43367 +++ linux-3.0.4/fs/reiserfs/journal.c   2011-08-23 21:48:14.000000000 -0400
43368 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
43369         struct buffer_head *bh;
43370         int i, j;
43371  
43372 +       pax_track_stack();
43373 +
43374         bh = __getblk(dev, block, bufsize);
43375         if (buffer_uptodate(bh))
43376                 return (bh);
43377 diff -urNp linux-3.0.4/fs/reiserfs/namei.c linux-3.0.4/fs/reiserfs/namei.c
43378 --- linux-3.0.4/fs/reiserfs/namei.c     2011-07-21 22:17:23.000000000 -0400
43379 +++ linux-3.0.4/fs/reiserfs/namei.c     2011-08-23 21:48:14.000000000 -0400
43380 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode 
43381         unsigned long savelink = 1;
43382         struct timespec ctime;
43383  
43384 +       pax_track_stack();
43385 +
43386         /* three balancings: (1) old name removal, (2) new name insertion
43387            and (3) maybe "save" link insertion
43388            stat data updates: (1) old directory,
43389 diff -urNp linux-3.0.4/fs/reiserfs/procfs.c linux-3.0.4/fs/reiserfs/procfs.c
43390 --- linux-3.0.4/fs/reiserfs/procfs.c    2011-07-21 22:17:23.000000000 -0400
43391 +++ linux-3.0.4/fs/reiserfs/procfs.c    2011-08-23 21:48:14.000000000 -0400
43392 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
43393                    "SMALL_TAILS " : "NO_TAILS ",
43394                    replay_only(sb) ? "REPLAY_ONLY " : "",
43395                    convert_reiserfs(sb) ? "CONV " : "",
43396 -                  atomic_read(&r->s_generation_counter),
43397 +                  atomic_read_unchecked(&r->s_generation_counter),
43398                    SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43399                    SF(s_do_balance), SF(s_unneeded_left_neighbor),
43400                    SF(s_good_search_by_key_reada), SF(s_bmaps),
43401 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file 
43402         struct journal_params *jp = &rs->s_v1.s_journal;
43403         char b[BDEVNAME_SIZE];
43404  
43405 +       pax_track_stack();
43406 +
43407         seq_printf(m,           /* on-disk fields */
43408                    "jp_journal_1st_block: \t%i\n"
43409                    "jp_journal_dev: \t%s[%x]\n"
43410 diff -urNp linux-3.0.4/fs/reiserfs/stree.c linux-3.0.4/fs/reiserfs/stree.c
43411 --- linux-3.0.4/fs/reiserfs/stree.c     2011-07-21 22:17:23.000000000 -0400
43412 +++ linux-3.0.4/fs/reiserfs/stree.c     2011-08-23 21:48:14.000000000 -0400
43413 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
43414         int iter = 0;
43415  #endif
43416  
43417 +       pax_track_stack();
43418 +
43419         BUG_ON(!th->t_trans_id);
43420  
43421         init_tb_struct(th, &s_del_balance, sb, path,
43422 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
43423         int retval;
43424         int quota_cut_bytes = 0;
43425  
43426 +       pax_track_stack();
43427 +
43428         BUG_ON(!th->t_trans_id);
43429  
43430         le_key2cpu_key(&cpu_key, key);
43431 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
43432         int quota_cut_bytes;
43433         loff_t tail_pos = 0;
43434  
43435 +       pax_track_stack();
43436 +
43437         BUG_ON(!th->t_trans_id);
43438  
43439         init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43440 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
43441         int retval;
43442         int fs_gen;
43443  
43444 +       pax_track_stack();
43445 +
43446         BUG_ON(!th->t_trans_id);
43447  
43448         fs_gen = get_generation(inode->i_sb);
43449 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
43450         int fs_gen = 0;
43451         int quota_bytes = 0;
43452  
43453 +       pax_track_stack();
43454 +
43455         BUG_ON(!th->t_trans_id);
43456  
43457         if (inode) {            /* Do we count quotas for item? */
43458 diff -urNp linux-3.0.4/fs/reiserfs/super.c linux-3.0.4/fs/reiserfs/super.c
43459 --- linux-3.0.4/fs/reiserfs/super.c     2011-07-21 22:17:23.000000000 -0400
43460 +++ linux-3.0.4/fs/reiserfs/super.c     2011-08-23 21:48:14.000000000 -0400
43461 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
43462                 {.option_name = NULL}
43463         };
43464  
43465 +       pax_track_stack();
43466 +
43467         *blocks = 0;
43468         if (!options || !*options)
43469                 /* use default configuration: create tails, journaling on, no
43470 diff -urNp linux-3.0.4/fs/select.c linux-3.0.4/fs/select.c
43471 --- linux-3.0.4/fs/select.c     2011-07-21 22:17:23.000000000 -0400
43472 +++ linux-3.0.4/fs/select.c     2011-08-23 21:48:14.000000000 -0400
43473 @@ -20,6 +20,7 @@
43474  #include <linux/module.h>
43475  #include <linux/slab.h>
43476  #include <linux/poll.h>
43477 +#include <linux/security.h>
43478  #include <linux/personality.h> /* for STICKY_TIMEOUTS */
43479  #include <linux/file.h>
43480  #include <linux/fdtable.h>
43481 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
43482         int retval, i, timed_out = 0;
43483         unsigned long slack = 0;
43484  
43485 +       pax_track_stack();
43486 +
43487         rcu_read_lock();
43488         retval = max_select_fd(n, fds);
43489         rcu_read_unlock();
43490 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
43491         /* Allocate small arguments on the stack to save memory and be faster */
43492         long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43493  
43494 +       pax_track_stack();
43495 +
43496         ret = -EINVAL;
43497         if (n < 0)
43498                 goto out_nofds;
43499 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
43500         struct poll_list *walk = head;
43501         unsigned long todo = nfds;
43502  
43503 +       pax_track_stack();
43504 +
43505 +       gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
43506         if (nfds > rlimit(RLIMIT_NOFILE))
43507                 return -EINVAL;
43508  
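
Alongside the stack-tracking call, do_sys_poll() gains gr_learn_resource(), which (when grsecurity's resource learning is in use) records the requested nfds against RLIMIT_NOFILE before the existing limit comparison rejects oversized requests. Purely as an illustration of that limit check, not of the learning hook itself, a userspace sketch using getrlimit():

/* Sketch: compare a requested descriptor count against RLIMIT_NOFILE,
 * the check do_sys_poll() performs before allocating its poll table. */
#include <stdio.h>
#include <sys/resource.h>

static int nfds_allowed(unsigned long nfds)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
                return 0;
        return nfds <= rl.rlim_cur;
}

int main(void)
{
        printf("1024 fds allowed: %d\n", nfds_allowed(1024));
        printf("10M fds allowed:  %d\n", nfds_allowed(10UL * 1024 * 1024));
        return 0;
}
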
43509 diff -urNp linux-3.0.4/fs/seq_file.c linux-3.0.4/fs/seq_file.c
43510 --- linux-3.0.4/fs/seq_file.c   2011-07-21 22:17:23.000000000 -0400
43511 +++ linux-3.0.4/fs/seq_file.c   2011-08-23 21:47:56.000000000 -0400
43512 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, 
43513                 return 0;
43514         }
43515         if (!m->buf) {
43516 -               m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43517 +               m->size = PAGE_SIZE;
43518 +               m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43519                 if (!m->buf)
43520                         return -ENOMEM;
43521         }
43522 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, 
43523  Eoverflow:
43524         m->op->stop(m, p);
43525         kfree(m->buf);
43526 -       m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43527 +       m->size <<= 1;
43528 +       m->buf = kmalloc(m->size, GFP_KERNEL);
43529         return !m->buf ? -ENOMEM : -EAGAIN;
43530  }
43531  
43532 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43533         m->version = file->f_version;
43534         /* grab buffer if we didn't have one */
43535         if (!m->buf) {
43536 -               m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43537 +               m->size = PAGE_SIZE;
43538 +               m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43539                 if (!m->buf)
43540                         goto Enomem;
43541         }
43542 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43543                         goto Fill;
43544                 m->op->stop(m, p);
43545                 kfree(m->buf);
43546 -               m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43547 +               m->size <<= 1;
43548 +               m->buf = kmalloc(m->size, GFP_KERNEL);
43549                 if (!m->buf)
43550                         goto Enomem;
43551                 m->count = 0;
43552 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file 
43553  int single_open(struct file *file, int (*show)(struct seq_file *, void *),
43554                 void *data)
43555  {
43556 -       struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
43557 +       seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
43558         int res = -ENOMEM;
43559  
43560         if (op) {
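
The seq_file changes are behaviour-preserving: the size update that used to sit inside the kmalloc() argument (m->size = PAGE_SIZE, m->size <<= 1) is moved into its own statement, which the grsecurity tree appears to prefer so the computed size is visible as a plain expression; the allocation logic itself is unchanged. A userspace sketch of the same grow-by-doubling pattern with the size kept as an explicit statement (illustrative only):

/* Sketch: grow a buffer by doubling, with the new size computed in its
 * own statement; old contents are discarded and regenerated, much as
 * the seq_file read path retries after enlarging its buffer. */
#include <stdio.h>
#include <stdlib.h>

struct buf {
        char   *data;
        size_t  size;
};

static int buf_grow(struct buf *b)
{
        free(b->data);
        b->size <<= 1;                 /* size update is a separate statement */
        b->data = malloc(b->size);
        return b->data ? 0 : -1;       /* caller retries or bails out */
}

int main(void)
{
        struct buf b = { .data = malloc(4096), .size = 4096 };

        if (!b.data || buf_grow(&b) != 0) {
                fprintf(stderr, "allocation failed\n");
                free(b.data);
                return 1;
        }
        printf("new size: %zu\n", b.size);
        free(b.data);
        return 0;
}
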
43561 diff -urNp linux-3.0.4/fs/splice.c linux-3.0.4/fs/splice.c
43562 --- linux-3.0.4/fs/splice.c     2011-07-21 22:17:23.000000000 -0400
43563 +++ linux-3.0.4/fs/splice.c     2011-08-23 21:48:14.000000000 -0400
43564 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43565         pipe_lock(pipe);
43566  
43567         for (;;) {
43568 -               if (!pipe->readers) {
43569 +               if (!atomic_read(&pipe->readers)) {
43570                         send_sig(SIGPIPE, current, 0);
43571                         if (!ret)
43572                                 ret = -EPIPE;
43573 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43574                         do_wakeup = 0;
43575                 }
43576  
43577 -               pipe->waiting_writers++;
43578 +               atomic_inc(&pipe->waiting_writers);
43579                 pipe_wait(pipe);
43580 -               pipe->waiting_writers--;
43581 +               atomic_dec(&pipe->waiting_writers);
43582         }
43583  
43584         pipe_unlock(pipe);
43585 @@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
43586                 .spd_release = spd_release_page,
43587         };
43588  
43589 +       pax_track_stack();
43590 +
43591         if (splice_grow_spd(pipe, &spd))
43592                 return -ENOMEM;
43593  
43594 @@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file 
43595         old_fs = get_fs();
43596         set_fs(get_ds());
43597         /* The cast to a user pointer is valid due to the set_fs() */
43598 -       res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43599 +       res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43600         set_fs(old_fs);
43601  
43602         return res;
43603 @@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file 
43604         old_fs = get_fs();
43605         set_fs(get_ds());
43606         /* The cast to a user pointer is valid due to the set_fs() */
43607 -       res = vfs_write(file, (const char __user *)buf, count, &pos);
43608 +       res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43609         set_fs(old_fs);
43610  
43611         return res;
43612 @@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct 
43613                 .spd_release = spd_release_page,
43614         };
43615  
43616 +       pax_track_stack();
43617 +
43618         if (splice_grow_spd(pipe, &spd))
43619                 return -ENOMEM;
43620  
43621 @@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct 
43622                         goto err;
43623  
43624                 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43625 -               vec[i].iov_base = (void __user *) page_address(page);
43626 +               vec[i].iov_base = (__force void __user *) page_address(page);
43627                 vec[i].iov_len = this_len;
43628                 spd.pages[i] = page;
43629                 spd.nr_pages++;
43630 @@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43631  int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43632  {
43633         while (!pipe->nrbufs) {
43634 -               if (!pipe->writers)
43635 +               if (!atomic_read(&pipe->writers))
43636                         return 0;
43637  
43638 -               if (!pipe->waiting_writers && sd->num_spliced)
43639 +               if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43640                         return 0;
43641  
43642                 if (sd->flags & SPLICE_F_NONBLOCK)
43643 @@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
43644                  * out of the pipe right after the splice_to_pipe(). So set
43645                  * PIPE_READERS appropriately.
43646                  */
43647 -               pipe->readers = 1;
43648 +               atomic_set(&pipe->readers, 1);
43649  
43650                 current->splice_pipe = pipe;
43651         }
43652 @@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
43653         };
43654         long ret;
43655  
43656 +       pax_track_stack();
43657 +
43658         pipe = get_pipe_info(file);
43659         if (!pipe)
43660                 return -EBADF;
43661 @@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
43662                         ret = -ERESTARTSYS;
43663                         break;
43664                 }
43665 -               if (!pipe->writers)
43666 +               if (!atomic_read(&pipe->writers))
43667                         break;
43668 -               if (!pipe->waiting_writers) {
43669 +               if (!atomic_read(&pipe->waiting_writers)) {
43670                         if (flags & SPLICE_F_NONBLOCK) {
43671                                 ret = -EAGAIN;
43672                                 break;
43673 @@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
43674         pipe_lock(pipe);
43675  
43676         while (pipe->nrbufs >= pipe->buffers) {
43677 -               if (!pipe->readers) {
43678 +               if (!atomic_read(&pipe->readers)) {
43679                         send_sig(SIGPIPE, current, 0);
43680                         ret = -EPIPE;
43681                         break;
43682 @@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
43683                         ret = -ERESTARTSYS;
43684                         break;
43685                 }
43686 -               pipe->waiting_writers++;
43687 +               atomic_inc(&pipe->waiting_writers);
43688                 pipe_wait(pipe);
43689 -               pipe->waiting_writers--;
43690 +               atomic_dec(&pipe->waiting_writers);
43691         }
43692  
43693         pipe_unlock(pipe);
43694 @@ -1819,14 +1825,14 @@ retry:
43695         pipe_double_lock(ipipe, opipe);
43696  
43697         do {
43698 -               if (!opipe->readers) {
43699 +               if (!atomic_read(&opipe->readers)) {
43700                         send_sig(SIGPIPE, current, 0);
43701                         if (!ret)
43702                                 ret = -EPIPE;
43703                         break;
43704                 }
43705  
43706 -               if (!ipipe->nrbufs && !ipipe->writers)
43707 +               if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
43708                         break;
43709  
43710                 /*
43711 @@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
43712         pipe_double_lock(ipipe, opipe);
43713  
43714         do {
43715 -               if (!opipe->readers) {
43716 +               if (!atomic_read(&opipe->readers)) {
43717                         send_sig(SIGPIPE, current, 0);
43718                         if (!ret)
43719                                 ret = -EPIPE;
43720 @@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
43721          * return EAGAIN if we have the potential of some data in the
43722          * future, otherwise just return 0
43723          */
43724 -       if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
43725 +       if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
43726                 ret = -EAGAIN;
43727  
43728         pipe_unlock(ipipe);
43729 diff -urNp linux-3.0.4/fs/sysfs/file.c linux-3.0.4/fs/sysfs/file.c
43730 --- linux-3.0.4/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
43731 +++ linux-3.0.4/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
43732 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
43733  
43734  struct sysfs_open_dirent {
43735         atomic_t                refcnt;
43736 -       atomic_t                event;
43737 +       atomic_unchecked_t      event;
43738         wait_queue_head_t       poll;
43739         struct list_head        buffers; /* goes through sysfs_buffer.list */
43740  };
43741 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
43742         if (!sysfs_get_active(attr_sd))
43743                 return -ENODEV;
43744  
43745 -       buffer->event = atomic_read(&attr_sd->s_attr.open->event);
43746 +       buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
43747         count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
43748  
43749         sysfs_put_active(attr_sd);
43750 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct 
43751                 return -ENOMEM;
43752  
43753         atomic_set(&new_od->refcnt, 0);
43754 -       atomic_set(&new_od->event, 1);
43755 +       atomic_set_unchecked(&new_od->event, 1);
43756         init_waitqueue_head(&new_od->poll);
43757         INIT_LIST_HEAD(&new_od->buffers);
43758         goto retry;
43759 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
43760  
43761         sysfs_put_active(attr_sd);
43762  
43763 -       if (buffer->event != atomic_read(&od->event))
43764 +       if (buffer->event != atomic_read_unchecked(&od->event))
43765                 goto trigger;
43766  
43767         return DEFAULT_POLLMASK;
43768 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
43769  
43770         od = sd->s_attr.open;
43771         if (od) {
43772 -               atomic_inc(&od->event);
43773 +               atomic_inc_unchecked(&od->event);
43774                 wake_up_interruptible(&od->poll);
43775         }
43776  
43777 diff -urNp linux-3.0.4/fs/sysfs/mount.c linux-3.0.4/fs/sysfs/mount.c
43778 --- linux-3.0.4/fs/sysfs/mount.c        2011-07-21 22:17:23.000000000 -0400
43779 +++ linux-3.0.4/fs/sysfs/mount.c        2011-08-23 21:48:14.000000000 -0400
43780 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
43781         .s_name         = "",
43782         .s_count        = ATOMIC_INIT(1),
43783         .s_flags        = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
43784 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43785 +       .s_mode         = S_IFDIR | S_IRWXU,
43786 +#else
43787         .s_mode         = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43788 +#endif
43789         .s_ino          = 1,
43790  };
43791  
43792 diff -urNp linux-3.0.4/fs/sysfs/symlink.c linux-3.0.4/fs/sysfs/symlink.c
43793 --- linux-3.0.4/fs/sysfs/symlink.c      2011-07-21 22:17:23.000000000 -0400
43794 +++ linux-3.0.4/fs/sysfs/symlink.c      2011-08-23 21:47:56.000000000 -0400
43795 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
43796  
43797  static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43798  {
43799 -       char *page = nd_get_link(nd);
43800 +       const char *page = nd_get_link(nd);
43801         if (!IS_ERR(page))
43802                 free_page((unsigned long)page);
43803  }
43804 diff -urNp linux-3.0.4/fs/udf/inode.c linux-3.0.4/fs/udf/inode.c
43805 --- linux-3.0.4/fs/udf/inode.c  2011-07-21 22:17:23.000000000 -0400
43806 +++ linux-3.0.4/fs/udf/inode.c  2011-08-23 21:48:14.000000000 -0400
43807 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
43808         int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
43809         int lastblock = 0;
43810  
43811 +       pax_track_stack();
43812 +
43813         prev_epos.offset = udf_file_entry_alloc_offset(inode);
43814         prev_epos.block = iinfo->i_location;
43815         prev_epos.bh = NULL;
43816 diff -urNp linux-3.0.4/fs/udf/misc.c linux-3.0.4/fs/udf/misc.c
43817 --- linux-3.0.4/fs/udf/misc.c   2011-07-21 22:17:23.000000000 -0400
43818 +++ linux-3.0.4/fs/udf/misc.c   2011-08-23 21:47:56.000000000 -0400
43819 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
43820  
43821  u8 udf_tag_checksum(const struct tag *t)
43822  {
43823 -       u8 *data = (u8 *)t;
43824 +       const u8 *data = (const u8 *)t;
43825         u8 checksum = 0;
43826         int i;
43827         for (i = 0; i < sizeof(struct tag); ++i)
43828 diff -urNp linux-3.0.4/fs/utimes.c linux-3.0.4/fs/utimes.c
43829 --- linux-3.0.4/fs/utimes.c     2011-07-21 22:17:23.000000000 -0400
43830 +++ linux-3.0.4/fs/utimes.c     2011-08-23 21:48:14.000000000 -0400
43831 @@ -1,6 +1,7 @@
43832  #include <linux/compiler.h>
43833  #include <linux/file.h>
43834  #include <linux/fs.h>
43835 +#include <linux/security.h>
43836  #include <linux/linkage.h>
43837  #include <linux/mount.h>
43838  #include <linux/namei.h>
43839 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
43840                                 goto mnt_drop_write_and_out;
43841                 }
43842         }
43843 +
43844 +       if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
43845 +               error = -EACCES;
43846 +               goto mnt_drop_write_and_out;
43847 +       }
43848 +
43849         mutex_lock(&inode->i_mutex);
43850         error = notify_change(path->dentry, &newattrs);
43851         mutex_unlock(&inode->i_mutex);
43852 diff -urNp linux-3.0.4/fs/xattr_acl.c linux-3.0.4/fs/xattr_acl.c
43853 --- linux-3.0.4/fs/xattr_acl.c  2011-07-21 22:17:23.000000000 -0400
43854 +++ linux-3.0.4/fs/xattr_acl.c  2011-08-23 21:47:56.000000000 -0400
43855 @@ -17,8 +17,8 @@
43856  struct posix_acl *
43857  posix_acl_from_xattr(const void *value, size_t size)
43858  {
43859 -       posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
43860 -       posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
43861 +       const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
43862 +       const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
43863         int count;
43864         struct posix_acl *acl;
43865         struct posix_acl_entry *acl_e;
43866 diff -urNp linux-3.0.4/fs/xattr.c linux-3.0.4/fs/xattr.c
43867 --- linux-3.0.4/fs/xattr.c      2011-07-21 22:17:23.000000000 -0400
43868 +++ linux-3.0.4/fs/xattr.c      2011-08-23 21:48:14.000000000 -0400
43869 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
43870   * Extended attribute SET operations
43871   */
43872  static long
43873 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
43874 +setxattr(struct path *path, const char __user *name, const void __user *value,
43875          size_t size, int flags)
43876  {
43877         int error;
43878 @@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
43879                         return PTR_ERR(kvalue);
43880         }
43881  
43882 -       error = vfs_setxattr(d, kname, kvalue, size, flags);
43883 +       if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
43884 +               error = -EACCES;
43885 +               goto out;
43886 +       }
43887 +
43888 +       error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
43889 +out:
43890         kfree(kvalue);
43891         return error;
43892  }
43893 @@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
43894                 return error;
43895         error = mnt_want_write(path.mnt);
43896         if (!error) {
43897 -               error = setxattr(path.dentry, name, value, size, flags);
43898 +               error = setxattr(&path, name, value, size, flags);
43899                 mnt_drop_write(path.mnt);
43900         }
43901         path_put(&path);
43902 @@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
43903                 return error;
43904         error = mnt_want_write(path.mnt);
43905         if (!error) {
43906 -               error = setxattr(path.dentry, name, value, size, flags);
43907 +               error = setxattr(&path, name, value, size, flags);
43908                 mnt_drop_write(path.mnt);
43909         }
43910         path_put(&path);
43911 @@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
43912                 const void __user *,value, size_t, size, int, flags)
43913  {
43914         struct file *f;
43915 -       struct dentry *dentry;
43916         int error = -EBADF;
43917  
43918         f = fget(fd);
43919         if (!f)
43920                 return error;
43921 -       dentry = f->f_path.dentry;
43922 -       audit_inode(NULL, dentry);
43923 +       audit_inode(NULL, f->f_path.dentry);
43924         error = mnt_want_write_file(f);
43925         if (!error) {
43926 -               error = setxattr(dentry, name, value, size, flags);
43927 +               error = setxattr(&f->f_path, name, value, size, flags);
43928                 mnt_drop_write(f->f_path.mnt);
43929         }
43930         fput(f);
43931 diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c
43932 --- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c  2011-07-21 22:17:23.000000000 -0400
43933 +++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c  2011-08-23 21:48:14.000000000 -0400
43934 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
43935         xfs_fsop_geom_t           fsgeo;
43936         int                       error;
43937  
43938 +       memset(&fsgeo, 0, sizeof(fsgeo));
43939         error = xfs_fs_geometry(mp, &fsgeo, 3);
43940         if (error)
43941                 return -error;
43942 diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c
43943 --- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c    2011-07-21 22:17:23.000000000 -0400
43944 +++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c    2011-08-23 21:47:56.000000000 -0400
43945 @@ -128,7 +128,7 @@ xfs_find_handle(
43946         }
43947  
43948         error = -EFAULT;
43949 -       if (copy_to_user(hreq->ohandle, &handle, hsize) ||
43950 +       if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
43951             copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
43952                 goto out_put;
43953  
43954 diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c
43955 --- linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c     2011-07-21 22:17:23.000000000 -0400
43956 +++ linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c     2011-08-23 21:47:56.000000000 -0400
43957 @@ -437,7 +437,7 @@ xfs_vn_put_link(
43958         struct nameidata *nd,
43959         void            *p)
43960  {
43961 -       char            *s = nd_get_link(nd);
43962 +       const char      *s = nd_get_link(nd);
43963  
43964         if (!IS_ERR(s))
43965                 kfree(s);
43966 diff -urNp linux-3.0.4/fs/xfs/xfs_bmap.c linux-3.0.4/fs/xfs/xfs_bmap.c
43967 --- linux-3.0.4/fs/xfs/xfs_bmap.c       2011-07-21 22:17:23.000000000 -0400
43968 +++ linux-3.0.4/fs/xfs/xfs_bmap.c       2011-08-23 21:47:56.000000000 -0400
43969 @@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
43970         int                     nmap,
43971         int                     ret_nmap);
43972  #else
43973 -#define        xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
43974 +#define        xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
43975  #endif /* DEBUG */
43976  
43977  STATIC int
43978 diff -urNp linux-3.0.4/fs/xfs/xfs_dir2_sf.c linux-3.0.4/fs/xfs/xfs_dir2_sf.c
43979 --- linux-3.0.4/fs/xfs/xfs_dir2_sf.c    2011-07-21 22:17:23.000000000 -0400
43980 +++ linux-3.0.4/fs/xfs/xfs_dir2_sf.c    2011-08-23 21:47:56.000000000 -0400
43981 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
43982                 }
43983  
43984                 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
43985 -               if (filldir(dirent, (char *)sfep->name, sfep->namelen,
43986 +               if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
43987 +                       char name[sfep->namelen];
43988 +                       memcpy(name, sfep->name, sfep->namelen);
43989 +                       if (filldir(dirent, name, sfep->namelen,
43990 +                           off & 0x7fffffff, ino, DT_UNKNOWN)) {
43991 +                               *offset = off & 0x7fffffff;
43992 +                               return 0;
43993 +                       }
43994 +               } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
43995                             off & 0x7fffffff, ino, DT_UNKNOWN)) {
43996                         *offset = off & 0x7fffffff;
43997                         return 0;
43998 diff -urNp linux-3.0.4/grsecurity/gracl_alloc.c linux-3.0.4/grsecurity/gracl_alloc.c
43999 --- linux-3.0.4/grsecurity/gracl_alloc.c        1969-12-31 19:00:00.000000000 -0500
44000 +++ linux-3.0.4/grsecurity/gracl_alloc.c        2011-08-23 21:48:14.000000000 -0400
44001 @@ -0,0 +1,105 @@
44002 +#include <linux/kernel.h>
44003 +#include <linux/mm.h>
44004 +#include <linux/slab.h>
44005 +#include <linux/vmalloc.h>
44006 +#include <linux/gracl.h>
44007 +#include <linux/grsecurity.h>
44008 +
44009 +static unsigned long alloc_stack_next = 1;
44010 +static unsigned long alloc_stack_size = 1;
44011 +static void **alloc_stack;
44012 +
44013 +static __inline__ int
44014 +alloc_pop(void)
44015 +{
44016 +       if (alloc_stack_next == 1)
44017 +               return 0;
44018 +
44019 +       kfree(alloc_stack[alloc_stack_next - 2]);
44020 +
44021 +       alloc_stack_next--;
44022 +
44023 +       return 1;
44024 +}
44025 +
44026 +static __inline__ int
44027 +alloc_push(void *buf)
44028 +{
44029 +       if (alloc_stack_next >= alloc_stack_size)
44030 +               return 1;
44031 +
44032 +       alloc_stack[alloc_stack_next - 1] = buf;
44033 +
44034 +       alloc_stack_next++;
44035 +
44036 +       return 0;
44037 +}
44038 +
44039 +void *
44040 +acl_alloc(unsigned long len)
44041 +{
44042 +       void *ret = NULL;
44043 +
44044 +       if (!len || len > PAGE_SIZE)
44045 +               goto out;
44046 +
44047 +       ret = kmalloc(len, GFP_KERNEL);
44048 +
44049 +       if (ret) {
44050 +               if (alloc_push(ret)) {
44051 +                       kfree(ret);
44052 +                       ret = NULL;
44053 +               }
44054 +       }
44055 +
44056 +out:
44057 +       return ret;
44058 +}
44059 +
44060 +void *
44061 +acl_alloc_num(unsigned long num, unsigned long len)
44062 +{
44063 +       if (!len || (num > (PAGE_SIZE / len)))
44064 +               return NULL;
44065 +
44066 +       return acl_alloc(num * len);
44067 +}
44068 +
44069 +void
44070 +acl_free_all(void)
44071 +{
44072 +       if (gr_acl_is_enabled() || !alloc_stack)
44073 +               return;
44074 +
44075 +       while (alloc_pop()) ;
44076 +
44077 +       if (alloc_stack) {
44078 +               if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
44079 +                       kfree(alloc_stack);
44080 +               else
44081 +                       vfree(alloc_stack);
44082 +       }
44083 +
44084 +       alloc_stack = NULL;
44085 +       alloc_stack_size = 1;
44086 +       alloc_stack_next = 1;
44087 +
44088 +       return;
44089 +}
44090 +
44091 +int
44092 +acl_alloc_stack_init(unsigned long size)
44093 +{
44094 +       if ((size * sizeof (void *)) <= PAGE_SIZE)
44095 +               alloc_stack =
44096 +                   (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
44097 +       else
44098 +               alloc_stack = (void **) vmalloc(size * sizeof (void *));
44099 +
44100 +       alloc_stack_size = size;
44101 +
44102 +       if (!alloc_stack)
44103 +               return 0;
44104 +       else
44105 +               return 1;
44106 +}
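
gracl_alloc.c above implements a small tracked allocator: acl_alloc() caps requests at PAGE_SIZE and pushes every returned pointer onto a preallocated stack, so acl_free_all() can release every tracked allocation in one sweep once the RBAC system is no longer active. A userspace analogue of that pattern (hypothetical names, illustrative only):

/* Sketch of the tracked-allocation pattern used by gracl_alloc.c:
 * remember every allocation in a fixed-size pointer stack so that
 * everything can be released with a single free_all() call. */
#include <stdio.h>
#include <stdlib.h>

#define TRACK_MAX 128

static void  *track_stack[TRACK_MAX];
static size_t track_next;

static void *tracked_alloc(size_t len)
{
        void *p;

        if (track_next >= TRACK_MAX)
                return NULL;              /* stack full: refuse, like alloc_push() */
        p = malloc(len);
        if (p)
                track_stack[track_next++] = p;
        return p;
}

static void tracked_free_all(void)
{
        while (track_next > 0)
                free(track_stack[--track_next]);
}

int main(void)
{
        char *a = tracked_alloc(32);
        char *b = tracked_alloc(64);

        printf("a=%p b=%p\n", (void *)a, (void *)b);
        tracked_free_all();               /* releases both in one sweep */
        return 0;
}
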
44107 diff -urNp linux-3.0.4/grsecurity/gracl.c linux-3.0.4/grsecurity/gracl.c
44108 --- linux-3.0.4/grsecurity/gracl.c      1969-12-31 19:00:00.000000000 -0500
44109 +++ linux-3.0.4/grsecurity/gracl.c      2011-08-23 21:48:14.000000000 -0400
44110 @@ -0,0 +1,4106 @@
44111 +#include <linux/kernel.h>
44112 +#include <linux/module.h>
44113 +#include <linux/sched.h>
44114 +#include <linux/mm.h>
44115 +#include <linux/file.h>
44116 +#include <linux/fs.h>
44117 +#include <linux/namei.h>
44118 +#include <linux/mount.h>
44119 +#include <linux/tty.h>
44120 +#include <linux/proc_fs.h>
44121 +#include <linux/lglock.h>
44122 +#include <linux/slab.h>
44123 +#include <linux/vmalloc.h>
44124 +#include <linux/types.h>
44125 +#include <linux/sysctl.h>
44126 +#include <linux/netdevice.h>
44127 +#include <linux/ptrace.h>
44128 +#include <linux/gracl.h>
44129 +#include <linux/gralloc.h>
44130 +#include <linux/grsecurity.h>
44131 +#include <linux/grinternal.h>
44132 +#include <linux/pid_namespace.h>
44133 +#include <linux/fdtable.h>
44134 +#include <linux/percpu.h>
44135 +
44136 +#include <asm/uaccess.h>
44137 +#include <asm/errno.h>
44138 +#include <asm/mman.h>
44139 +
44140 +static struct acl_role_db acl_role_set;
44141 +static struct name_db name_set;
44142 +static struct inodev_db inodev_set;
44143 +
44144 +/* for keeping track of userspace pointers used for subjects, so we
44145 +   can share references in the kernel as well
44146 +*/
44147 +
44148 +static struct path real_root;
44149 +
44150 +static struct acl_subj_map_db subj_map_set;
44151 +
44152 +static struct acl_role_label *default_role;
44153 +
44154 +static struct acl_role_label *role_list;
44155 +
44156 +static u16 acl_sp_role_value;
44157 +
44158 +extern char *gr_shared_page[4];
44159 +static DEFINE_MUTEX(gr_dev_mutex);
44160 +DEFINE_RWLOCK(gr_inode_lock);
44161 +
44162 +struct gr_arg *gr_usermode;
44163 +
44164 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
44165 +
44166 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
44167 +extern void gr_clear_learn_entries(void);
44168 +
44169 +#ifdef CONFIG_GRKERNSEC_RESLOG
44170 +extern void gr_log_resource(const struct task_struct *task,
44171 +                           const int res, const unsigned long wanted, const int gt);
44172 +#endif
44173 +
44174 +unsigned char *gr_system_salt;
44175 +unsigned char *gr_system_sum;
44176 +
44177 +static struct sprole_pw **acl_special_roles = NULL;
44178 +static __u16 num_sprole_pws = 0;
44179 +
44180 +static struct acl_role_label *kernel_role = NULL;
44181 +
44182 +static unsigned int gr_auth_attempts = 0;
44183 +static unsigned long gr_auth_expires = 0UL;
44184 +
44185 +#ifdef CONFIG_NET
44186 +extern struct vfsmount *sock_mnt;
44187 +#endif
44188 +
44189 +extern struct vfsmount *pipe_mnt;
44190 +extern struct vfsmount *shm_mnt;
44191 +#ifdef CONFIG_HUGETLBFS
44192 +extern struct vfsmount *hugetlbfs_vfsmount;
44193 +#endif
44194 +
44195 +static struct acl_object_label *fakefs_obj_rw;
44196 +static struct acl_object_label *fakefs_obj_rwx;
44197 +
44198 +extern int gr_init_uidset(void);
44199 +extern void gr_free_uidset(void);
44200 +extern void gr_remove_uid(uid_t uid);
44201 +extern int gr_find_uid(uid_t uid);
44202 +
44203 +DECLARE_BRLOCK(vfsmount_lock);
44204 +
44205 +__inline__ int
44206 +gr_acl_is_enabled(void)
44207 +{
44208 +       return (gr_status & GR_READY);
44209 +}
44210 +
44211 +#ifdef CONFIG_BTRFS_FS
44212 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
44213 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
44214 +#endif
44215 +
44216 +static inline dev_t __get_dev(const struct dentry *dentry)
44217 +{
44218 +#ifdef CONFIG_BTRFS_FS
44219 +       if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
44220 +               return get_btrfs_dev_from_inode(dentry->d_inode);
44221 +       else
44222 +#endif
44223 +               return dentry->d_inode->i_sb->s_dev;
44224 +}
44225 +
44226 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
44227 +{
44228 +       return __get_dev(dentry);
44229 +}
44230 +
44231 +static char gr_task_roletype_to_char(struct task_struct *task)
44232 +{
44233 +       switch (task->role->roletype &
44234 +               (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
44235 +                GR_ROLE_SPECIAL)) {
44236 +       case GR_ROLE_DEFAULT:
44237 +               return 'D';
44238 +       case GR_ROLE_USER:
44239 +               return 'U';
44240 +       case GR_ROLE_GROUP:
44241 +               return 'G';
44242 +       case GR_ROLE_SPECIAL:
44243 +               return 'S';
44244 +       }
44245 +
44246 +       return 'X';
44247 +}
44248 +
44249 +char gr_roletype_to_char(void)
44250 +{
44251 +       return gr_task_roletype_to_char(current);
44252 +}
44253 +
44254 +__inline__ int
44255 +gr_acl_tpe_check(void)
44256 +{
44257 +       if (unlikely(!(gr_status & GR_READY)))
44258 +               return 0;
44259 +       if (current->role->roletype & GR_ROLE_TPE)
44260 +               return 1;
44261 +       else
44262 +               return 0;
44263 +}
44264 +
44265 +int
44266 +gr_handle_rawio(const struct inode *inode)
44267 +{
44268 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
44269 +       if (inode && S_ISBLK(inode->i_mode) &&
44270 +           grsec_enable_chroot_caps && proc_is_chrooted(current) &&
44271 +           !capable(CAP_SYS_RAWIO))
44272 +               return 1;
44273 +#endif
44274 +       return 0;
44275 +}
44276 +
44277 +static int
44278 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
44279 +{
44280 +       if (likely(lena != lenb))
44281 +               return 0;
44282 +
44283 +       return !memcmp(a, b, lena);
44284 +}
44285 +
44286 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
44287 +{
44288 +       *buflen -= namelen;
44289 +       if (*buflen < 0)
44290 +               return -ENAMETOOLONG;
44291 +       *buffer -= namelen;
44292 +       memcpy(*buffer, str, namelen);
44293 +       return 0;
44294 +}
44295 +
44296 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
44297 +{
44298 +       return prepend(buffer, buflen, name->name, name->len);
44299 +}
44300 +
44301 +static int prepend_path(const struct path *path, struct path *root,
44302 +                       char **buffer, int *buflen)
44303 +{
44304 +       struct dentry *dentry = path->dentry;
44305 +       struct vfsmount *vfsmnt = path->mnt;
44306 +       bool slash = false;
44307 +       int error = 0;
44308 +
44309 +       while (dentry != root->dentry || vfsmnt != root->mnt) {
44310 +               struct dentry * parent;
44311 +
44312 +               if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
44313 +                       /* Global root? */
44314 +                       if (vfsmnt->mnt_parent == vfsmnt) {
44315 +                               goto out;
44316 +                       }
44317 +                       dentry = vfsmnt->mnt_mountpoint;
44318 +                       vfsmnt = vfsmnt->mnt_parent;
44319 +                       continue;
44320 +               }
44321 +               parent = dentry->d_parent;
44322 +               prefetch(parent);
44323 +               spin_lock(&dentry->d_lock);
44324 +               error = prepend_name(buffer, buflen, &dentry->d_name);
44325 +               spin_unlock(&dentry->d_lock);
44326 +               if (!error)
44327 +                       error = prepend(buffer, buflen, "/", 1);
44328 +               if (error)
44329 +                       break;
44330 +
44331 +               slash = true;
44332 +               dentry = parent;
44333 +       }
44334 +
44335 +out:
44336 +       if (!error && !slash)
44337 +               error = prepend(buffer, buflen, "/", 1);
44338 +
44339 +       return error;
44340 +}
44341 +
44342 +/* this must be called with vfsmount_lock and rename_lock held */
44343 +
44344 +static char *__our_d_path(const struct path *path, struct path *root,
44345 +                       char *buf, int buflen)
44346 +{
44347 +       char *res = buf + buflen;
44348 +       int error;
44349 +
44350 +       prepend(&res, &buflen, "\0", 1);
44351 +       error = prepend_path(path, root, &res, &buflen);
44352 +       if (error)
44353 +               return ERR_PTR(error);
44354 +
44355 +       return res;
44356 +}
44357 +
44358 +static char *
44359 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
44360 +{
44361 +       char *retval;
44362 +
44363 +       retval = __our_d_path(path, root, buf, buflen);
44364 +       if (unlikely(IS_ERR(retval)))
44365 +               retval = strcpy(buf, "<path too long>");
44366 +       else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44367 +               retval[1] = '\0';
44368 +
44369 +       return retval;
44370 +}
44371 +
44372 +static char *
44373 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44374 +               char *buf, int buflen)
44375 +{
44376 +       struct path path;
44377 +       char *res;
44378 +
44379 +       path.dentry = (struct dentry *)dentry;
44380 +       path.mnt = (struct vfsmount *)vfsmnt;
44381 +
44382 +       /* we can use real_root.dentry, real_root.mnt, because this is only called
44383 +          by the RBAC system */
44384 +       res = gen_full_path(&path, &real_root, buf, buflen);
44385 +
44386 +       return res;
44387 +}
44388 +
44389 +static char *
44390 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44391 +           char *buf, int buflen)
44392 +{
44393 +       char *res;
44394 +       struct path path;
44395 +       struct path root;
44396 +       struct task_struct *reaper = &init_task;
44397 +
44398 +       path.dentry = (struct dentry *)dentry;
44399 +       path.mnt = (struct vfsmount *)vfsmnt;
44400 +
44401 +       /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
44402 +       get_fs_root(reaper->fs, &root);
44403 +
44404 +       write_seqlock(&rename_lock);
44405 +       br_read_lock(vfsmount_lock);
44406 +       res = gen_full_path(&path, &root, buf, buflen);
44407 +       br_read_unlock(vfsmount_lock);
44408 +       write_sequnlock(&rename_lock);
44409 +
44410 +       path_put(&root);
44411 +       return res;
44412 +}
44413 +
44414 +static char *
44415 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44416 +{
44417 +       char *ret;
44418 +       write_seqlock(&rename_lock);
44419 +       br_read_lock(vfsmount_lock);
44420 +       ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44421 +                            PAGE_SIZE);
44422 +       br_read_unlock(vfsmount_lock);
44423 +       write_sequnlock(&rename_lock);
44424 +       return ret;
44425 +}
44426 +
44427 +char *
44428 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44429 +{
44430 +       return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44431 +                            PAGE_SIZE);
44432 +}
44433 +
44434 +char *
44435 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44436 +{
44437 +       return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44438 +                          PAGE_SIZE);
44439 +}
44440 +
44441 +char *
44442 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44443 +{
44444 +       return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44445 +                          PAGE_SIZE);
44446 +}
44447 +
44448 +char *
44449 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44450 +{
44451 +       return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44452 +                          PAGE_SIZE);
44453 +}
44454 +
44455 +char *
44456 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
44457 +{
44458 +       return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
44459 +                          PAGE_SIZE);
44460 +}
44461 +
44462 +__inline__ __u32
44463 +to_gr_audit(const __u32 reqmode)
44464 +{
44465 +       /* masks off auditable permission flags, then shifts them to create
44466 +          auditing flags, and adds the special case of append auditing if
44467 +          we're requesting write */
44468 +       return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44469 +}
44470 +
44471 +struct acl_subject_label *
44472 +lookup_subject_map(const struct acl_subject_label *userp)
44473 +{
44474 +       unsigned int index = shash(userp, subj_map_set.s_size);
44475 +       struct subject_map *match;
44476 +
44477 +       match = subj_map_set.s_hash[index];
44478 +
44479 +       while (match && match->user != userp)
44480 +               match = match->next;
44481 +
44482 +       if (match != NULL)
44483 +               return match->kernel;
44484 +       else
44485 +               return NULL;
44486 +}
44487 +
44488 +static void
44489 +insert_subj_map_entry(struct subject_map *subjmap)
44490 +{
44491 +       unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44492 +       struct subject_map **curr;
44493 +
44494 +       subjmap->prev = NULL;
44495 +
44496 +       curr = &subj_map_set.s_hash[index];
44497 +       if (*curr != NULL)
44498 +               (*curr)->prev = subjmap;
44499 +
44500 +       subjmap->next = *curr;
44501 +       *curr = subjmap;
44502 +
44503 +       return;
44504 +}
44505 +
44506 +static struct acl_role_label *
44507 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44508 +                     const gid_t gid)
44509 +{
44510 +       unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44511 +       struct acl_role_label *match;
44512 +       struct role_allowed_ip *ipp;
44513 +       unsigned int x;
44514 +       u32 curr_ip = task->signal->curr_ip;
44515 +
44516 +       task->signal->saved_ip = curr_ip;
44517 +
44518 +       match = acl_role_set.r_hash[index];
44519 +
44520 +       while (match) {
44521 +               if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44522 +                       for (x = 0; x < match->domain_child_num; x++) {
44523 +                               if (match->domain_children[x] == uid)
44524 +                                       goto found;
44525 +                       }
44526 +               } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44527 +                       break;
44528 +               match = match->next;
44529 +       }
44530 +found:
44531 +       if (match == NULL) {
44532 +             try_group:
44533 +               index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44534 +               match = acl_role_set.r_hash[index];
44535 +
44536 +               while (match) {
44537 +                       if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44538 +                               for (x = 0; x < match->domain_child_num; x++) {
44539 +                                       if (match->domain_children[x] == gid)
44540 +                                               goto found2;
44541 +                               }
44542 +                       } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44543 +                               break;
44544 +                       match = match->next;
44545 +               }
44546 +found2:
44547 +               if (match == NULL)
44548 +                       match = default_role;
44549 +               if (match->allowed_ips == NULL)
44550 +                       return match;
44551 +               else {
44552 +                       for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44553 +                               if (likely
44554 +                                   ((ntohl(curr_ip) & ipp->netmask) ==
44555 +                                    (ntohl(ipp->addr) & ipp->netmask)))
44556 +                                       return match;
44557 +                       }
44558 +                       match = default_role;
44559 +               }
44560 +       } else if (match->allowed_ips == NULL) {
44561 +               return match;
44562 +       } else {
44563 +               for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44564 +                       if (likely
44565 +                           ((ntohl(curr_ip) & ipp->netmask) ==
44566 +                            (ntohl(ipp->addr) & ipp->netmask)))
44567 +                               return match;
44568 +               }
44569 +               goto try_group;
44570 +       }
44571 +
44572 +       return match;
44573 +}
44574 +
44575 +struct acl_subject_label *
44576 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44577 +                     const struct acl_role_label *role)
44578 +{
44579 +       unsigned int index = fhash(ino, dev, role->subj_hash_size);
44580 +       struct acl_subject_label *match;
44581 +
44582 +       match = role->subj_hash[index];
44583 +
44584 +       while (match && (match->inode != ino || match->device != dev ||
44585 +              (match->mode & GR_DELETED))) {
44586 +               match = match->next;
44587 +       }
44588 +
44589 +       if (match && !(match->mode & GR_DELETED))
44590 +               return match;
44591 +       else
44592 +               return NULL;
44593 +}
44594 +
44595 +struct acl_subject_label *
44596 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44597 +                         const struct acl_role_label *role)
44598 +{
44599 +       unsigned int index = fhash(ino, dev, role->subj_hash_size);
44600 +       struct acl_subject_label *match;
44601 +
44602 +       match = role->subj_hash[index];
44603 +
44604 +       while (match && (match->inode != ino || match->device != dev ||
44605 +              !(match->mode & GR_DELETED))) {
44606 +               match = match->next;
44607 +       }
44608 +
44609 +       if (match && (match->mode & GR_DELETED))
44610 +               return match;
44611 +       else
44612 +               return NULL;
44613 +}
44614 +
44615 +static struct acl_object_label *
44616 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44617 +                    const struct acl_subject_label *subj)
44618 +{
44619 +       unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44620 +       struct acl_object_label *match;
44621 +
44622 +       match = subj->obj_hash[index];
44623 +
44624 +       while (match && (match->inode != ino || match->device != dev ||
44625 +              (match->mode & GR_DELETED))) {
44626 +               match = match->next;
44627 +       }
44628 +
44629 +       if (match && !(match->mode & GR_DELETED))
44630 +               return match;
44631 +       else
44632 +               return NULL;
44633 +}
44634 +
44635 +static struct acl_object_label *
44636 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
44637 +                    const struct acl_subject_label *subj)
44638 +{
44639 +       unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44640 +       struct acl_object_label *match;
44641 +
44642 +       match = subj->obj_hash[index];
44643 +
44644 +       while (match && (match->inode != ino || match->device != dev ||
44645 +              !(match->mode & GR_DELETED))) {
44646 +               match = match->next;
44647 +       }
44648 +
44649 +       if (match && (match->mode & GR_DELETED))
44650 +               return match;
44651 +
44652 +       match = subj->obj_hash[index];
44653 +
44654 +       while (match && (match->inode != ino || match->device != dev ||
44655 +              (match->mode & GR_DELETED))) {
44656 +               match = match->next;
44657 +       }
44658 +
44659 +       if (match && !(match->mode & GR_DELETED))
44660 +               return match;
44661 +       else
44662 +               return NULL;
44663 +}
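lookup_acl_obj_label_create() (like lookup_name_entry_create() further down) makes two passes over the bucket: it first returns an entry flagged GR_DELETED and only then falls back to a live one, apparently so that a label recorded for a since-deleted path is preferred when that path is being created again. A small userspace sketch of that two-pass preference; struct obj, find_for_create() and the sample bucket are invented for illustration.

#include <stdio.h>

struct obj { unsigned long inode; int deleted; };

/* Two-pass scan over one bucket: deleted placeholders win over live
 * entries, matching the order used by the *_create() lookups above. */
static const struct obj *find_for_create(const struct obj *b, int n, unsigned long ino)
{
    int i;

    for (i = 0; i < n; i++)            /* pass 1: deleted entries */
        if (b[i].inode == ino && b[i].deleted)
            return &b[i];
    for (i = 0; i < n; i++)            /* pass 2: live entries */
        if (b[i].inode == ino && !b[i].deleted)
            return &b[i];
    return NULL;
}

int main(void)
{
    struct obj bucket[] = { { 10, 0 }, { 10, 1 } };
    printf("deleted=%d\n", find_for_create(bucket, 2, 10)->deleted); /* 1 */
    return 0;
}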
44664 +
44665 +static struct name_entry *
44666 +lookup_name_entry(const char *name)
44667 +{
44668 +       unsigned int len = strlen(name);
44669 +       unsigned int key = full_name_hash(name, len);
44670 +       unsigned int index = key % name_set.n_size;
44671 +       struct name_entry *match;
44672 +
44673 +       match = name_set.n_hash[index];
44674 +
44675 +       while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
44676 +               match = match->next;
44677 +
44678 +       return match;
44679 +}
44680 +
44681 +static struct name_entry *
44682 +lookup_name_entry_create(const char *name)
44683 +{
44684 +       unsigned int len = strlen(name);
44685 +       unsigned int key = full_name_hash(name, len);
44686 +       unsigned int index = key % name_set.n_size;
44687 +       struct name_entry *match;
44688 +
44689 +       match = name_set.n_hash[index];
44690 +
44691 +       while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44692 +                        !match->deleted))
44693 +               match = match->next;
44694 +
44695 +       if (match && match->deleted)
44696 +               return match;
44697 +
44698 +       match = name_set.n_hash[index];
44699 +
44700 +       while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44701 +                        match->deleted))
44702 +               match = match->next;
44703 +
44704 +       if (match && !match->deleted)
44705 +               return match;
44706 +       else
44707 +               return NULL;
44708 +}
44709 +
44710 +static struct inodev_entry *
44711 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
44712 +{
44713 +       unsigned int index = fhash(ino, dev, inodev_set.i_size);
44714 +       struct inodev_entry *match;
44715 +
44716 +       match = inodev_set.i_hash[index];
44717 +
44718 +       while (match && (match->nentry->inode != ino || match->nentry->device != dev))
44719 +               match = match->next;
44720 +
44721 +       return match;
44722 +}
44723 +
44724 +static void
44725 +insert_inodev_entry(struct inodev_entry *entry)
44726 +{
44727 +       unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
44728 +                                   inodev_set.i_size);
44729 +       struct inodev_entry **curr;
44730 +
44731 +       entry->prev = NULL;
44732 +
44733 +       curr = &inodev_set.i_hash[index];
44734 +       if (*curr != NULL)
44735 +               (*curr)->prev = entry;
44736 +       
44737 +       entry->next = *curr;
44738 +       *curr = entry;
44739 +
44740 +       return;
44741 +}
44742 +
44743 +static void
44744 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
44745 +{
44746 +       unsigned int index =
44747 +           rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
44748 +       struct acl_role_label **curr;
44749 +       struct acl_role_label *tmp;
44750 +
44751 +       curr = &acl_role_set.r_hash[index];
44752 +
44753 +       /* if the role was already inserted due to domains and already has
44754 +          a chain of roles attached to it, then we need to splice that
44755 +          chain into this bucket's existing chain
44756 +       */
44757 +       if (role->next) {
44758 +               tmp = role->next;
44759 +               while (tmp->next)
44760 +                       tmp = tmp->next;
44761 +               tmp->next = *curr;
44762 +       } else
44763 +               role->next = *curr;
44764 +       *curr = role;
44765 +
44766 +       return;
44767 +}
44768 +
44769 +static void
44770 +insert_acl_role_label(struct acl_role_label *role)
44771 +{
44772 +       int i;
44773 +
44774 +       if (role_list == NULL) {
44775 +               role_list = role;
44776 +               role->prev = NULL;
44777 +       } else {
44778 +               role->prev = role_list;
44779 +               role_list = role;
44780 +       }
44781 +       
44782 +       /* used for hash chains */
44783 +       role->next = NULL;
44784 +
44785 +       if (role->roletype & GR_ROLE_DOMAIN) {
44786 +               for (i = 0; i < role->domain_child_num; i++)
44787 +                       __insert_acl_role_label(role, role->domain_children[i]);
44788 +       } else
44789 +               __insert_acl_role_label(role, role->uidgid);
44790 +}
44791 +                                       
44792 +static int
44793 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
44794 +{
44795 +       struct name_entry **curr, *nentry;
44796 +       struct inodev_entry *ientry;
44797 +       unsigned int len = strlen(name);
44798 +       unsigned int key = full_name_hash(name, len);
44799 +       unsigned int index = key % name_set.n_size;
44800 +
44801 +       curr = &name_set.n_hash[index];
44802 +
44803 +       while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
44804 +               curr = &((*curr)->next);
44805 +
44806 +       if (*curr != NULL)
44807 +               return 1;
44808 +
44809 +       nentry = acl_alloc(sizeof (struct name_entry));
44810 +       if (nentry == NULL)
44811 +               return 0;
44812 +       ientry = acl_alloc(sizeof (struct inodev_entry));
44813 +       if (ientry == NULL)
44814 +               return 0;
44815 +       ientry->nentry = nentry;
44816 +
44817 +       nentry->key = key;
44818 +       nentry->name = name;
44819 +       nentry->inode = inode;
44820 +       nentry->device = device;
44821 +       nentry->len = len;
44822 +       nentry->deleted = deleted;
44823 +
44824 +       nentry->prev = NULL;
44825 +       curr = &name_set.n_hash[index];
44826 +       if (*curr != NULL)
44827 +               (*curr)->prev = nentry;
44828 +       nentry->next = *curr;
44829 +       *curr = nentry;
44830 +
44831 +       /* insert us into the table searchable by inode/dev */
44832 +       insert_inodev_entry(ientry);
44833 +
44834 +       return 1;
44835 +}
44836 +
44837 +static void
44838 +insert_acl_obj_label(struct acl_object_label *obj,
44839 +                    struct acl_subject_label *subj)
44840 +{
44841 +       unsigned int index =
44842 +           fhash(obj->inode, obj->device, subj->obj_hash_size);
44843 +       struct acl_object_label **curr;
44844 +
44845 +       
44846 +       obj->prev = NULL;
44847 +
44848 +       curr = &subj->obj_hash[index];
44849 +       if (*curr != NULL)
44850 +               (*curr)->prev = obj;
44851 +
44852 +       obj->next = *curr;
44853 +       *curr = obj;
44854 +
44855 +       return;
44856 +}
44857 +
44858 +static void
44859 +insert_acl_subj_label(struct acl_subject_label *obj,
44860 +                     struct acl_role_label *role)
44861 +{
44862 +       unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
44863 +       struct acl_subject_label **curr;
44864 +
44865 +       obj->prev = NULL;
44866 +
44867 +       curr = &role->subj_hash[index];
44868 +       if (*curr != NULL)
44869 +               (*curr)->prev = obj;
44870 +
44871 +       obj->next = *curr;
44872 +       *curr = obj;
44873 +
44874 +       return;
44875 +}
44876 +
44877 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
44878 +
44879 +static void *
44880 +create_table(__u32 * len, int elementsize)
44881 +{
44882 +       unsigned int table_sizes[] = {
44883 +               7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
44884 +               32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
44885 +               4194301, 8388593, 16777213, 33554393, 67108859
44886 +       };
44887 +       void *newtable = NULL;
44888 +       unsigned int pwr = 0;
44889 +
44890 +       while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
44891 +              table_sizes[pwr] <= *len)
44892 +               pwr++;
44893 +
44894 +       if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
44895 +               return newtable;
44896 +
44897 +       if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
44898 +               newtable =
44899 +                   kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
44900 +       else
44901 +               newtable = vmalloc(table_sizes[pwr] * elementsize);
44902 +
44903 +       *len = table_sizes[pwr];
44904 +
44905 +       return newtable;
44906 +}
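create_table() sizes each chained hash table by stepping through a fixed list of primes until it finds one strictly larger than the requested element count, which keeps the expected load factor (the lambda in the comment above) just under 1, and then picks kmalloc() or vmalloc() depending on whether the table fits in a page. A standalone sketch of the size selection only; pick_size() and the deliberately truncated prime list are illustrative, not part of the patch.

#include <stdio.h>

static const unsigned int table_sizes[] = {
    7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381
};

/* Smallest prime from the list that exceeds the requested count,
 * or 0 when the request is larger than the biggest listed prime. */
static unsigned int pick_size(unsigned int len)
{
    const unsigned int n = sizeof(table_sizes) / sizeof(table_sizes[0]);
    unsigned int pwr = 0;

    while (pwr < n - 1 && table_sizes[pwr] <= len)
        pwr++;
    return table_sizes[pwr] > len ? table_sizes[pwr] : 0;
}

int main(void)
{
    printf("%u\n", pick_size(1000));   /* 1021 -> load factor ~0.98 */
    printf("%u\n", pick_size(100000)); /* 0: exceeds this truncated list */
    return 0;
}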
44907 +
44908 +static int
44909 +init_variables(const struct gr_arg *arg)
44910 +{
44911 +       struct task_struct *reaper = &init_task;
44912 +       unsigned int stacksize;
44913 +
44914 +       subj_map_set.s_size = arg->role_db.num_subjects;
44915 +       acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
44916 +       name_set.n_size = arg->role_db.num_objects;
44917 +       inodev_set.i_size = arg->role_db.num_objects;
44918 +
44919 +       if (!subj_map_set.s_size || !acl_role_set.r_size ||
44920 +           !name_set.n_size || !inodev_set.i_size)
44921 +               return 1;
44922 +
44923 +       if (!gr_init_uidset())
44924 +               return 1;
44925 +
44926 +       /* set up the stack that holds allocation info */
44927 +
44928 +       stacksize = arg->role_db.num_pointers + 5;
44929 +
44930 +       if (!acl_alloc_stack_init(stacksize))
44931 +               return 1;
44932 +
44933 +       /* grab reference for the real root dentry and vfsmount */
44934 +       get_fs_root(reaper->fs, &real_root);
44935 +       
44936 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44937 +       printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
44938 +#endif
44939 +
44940 +       fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
44941 +       if (fakefs_obj_rw == NULL)
44942 +               return 1;
44943 +       fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
44944 +
44945 +       fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
44946 +       if (fakefs_obj_rwx == NULL)
44947 +               return 1;
44948 +       fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
44949 +
44950 +       subj_map_set.s_hash =
44951 +           (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
44952 +       acl_role_set.r_hash =
44953 +           (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
44954 +       name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
44955 +       inodev_set.i_hash =
44956 +           (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
44957 +
44958 +       if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
44959 +           !name_set.n_hash || !inodev_set.i_hash)
44960 +               return 1;
44961 +
44962 +       memset(subj_map_set.s_hash, 0,
44963 +              sizeof(struct subject_map *) * subj_map_set.s_size);
44964 +       memset(acl_role_set.r_hash, 0,
44965 +              sizeof (struct acl_role_label *) * acl_role_set.r_size);
44966 +       memset(name_set.n_hash, 0,
44967 +              sizeof (struct name_entry *) * name_set.n_size);
44968 +       memset(inodev_set.i_hash, 0,
44969 +              sizeof (struct inodev_entry *) * inodev_set.i_size);
44970 +
44971 +       return 0;
44972 +}
44973 +
44974 +/* free information not needed after startup
44975 +   currently contains user->kernel pointer mappings for subjects
44976 +*/
44977 +
44978 +static void
44979 +free_init_variables(void)
44980 +{
44981 +       __u32 i;
44982 +
44983 +       if (subj_map_set.s_hash) {
44984 +               for (i = 0; i < subj_map_set.s_size; i++) {
44985 +                       if (subj_map_set.s_hash[i]) {
44986 +                               kfree(subj_map_set.s_hash[i]);
44987 +                               subj_map_set.s_hash[i] = NULL;
44988 +                       }
44989 +               }
44990 +
44991 +               if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
44992 +                   PAGE_SIZE)
44993 +                       kfree(subj_map_set.s_hash);
44994 +               else
44995 +                       vfree(subj_map_set.s_hash);
44996 +       }
44997 +
44998 +       return;
44999 +}
45000 +
45001 +static void
45002 +free_variables(void)
45003 +{
45004 +       struct acl_subject_label *s;
45005 +       struct acl_role_label *r;
45006 +       struct task_struct *task, *task2;
45007 +       unsigned int x;
45008 +
45009 +       gr_clear_learn_entries();
45010 +
45011 +       read_lock(&tasklist_lock);
45012 +       do_each_thread(task2, task) {
45013 +               task->acl_sp_role = 0;
45014 +               task->acl_role_id = 0;
45015 +               task->acl = NULL;
45016 +               task->role = NULL;
45017 +       } while_each_thread(task2, task);
45018 +       read_unlock(&tasklist_lock);
45019 +
45020 +       /* release the reference to the real root dentry and vfsmount */
45021 +       path_put(&real_root);
45022 +
45023 +       /* free all object hash tables */
45024 +
45025 +       FOR_EACH_ROLE_START(r)
45026 +               if (r->subj_hash == NULL)
45027 +                       goto next_role;
45028 +               FOR_EACH_SUBJECT_START(r, s, x)
45029 +                       if (s->obj_hash == NULL)
45030 +                               break;
45031 +                       if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45032 +                               kfree(s->obj_hash);
45033 +                       else
45034 +                               vfree(s->obj_hash);
45035 +               FOR_EACH_SUBJECT_END(s, x)
45036 +               FOR_EACH_NESTED_SUBJECT_START(r, s)
45037 +                       if (s->obj_hash == NULL)
45038 +                               break;
45039 +                       if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45040 +                               kfree(s->obj_hash);
45041 +                       else
45042 +                               vfree(s->obj_hash);
45043 +               FOR_EACH_NESTED_SUBJECT_END(s)
45044 +               if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
45045 +                       kfree(r->subj_hash);
45046 +               else
45047 +                       vfree(r->subj_hash);
45048 +               r->subj_hash = NULL;
45049 +next_role:
45050 +       FOR_EACH_ROLE_END(r)
45051 +
45052 +       acl_free_all();
45053 +
45054 +       if (acl_role_set.r_hash) {
45055 +               if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
45056 +                   PAGE_SIZE)
45057 +                       kfree(acl_role_set.r_hash);
45058 +               else
45059 +                       vfree(acl_role_set.r_hash);
45060 +       }
45061 +       if (name_set.n_hash) {
45062 +               if ((name_set.n_size * sizeof (struct name_entry *)) <=
45063 +                   PAGE_SIZE)
45064 +                       kfree(name_set.n_hash);
45065 +               else
45066 +                       vfree(name_set.n_hash);
45067 +       }
45068 +
45069 +       if (inodev_set.i_hash) {
45070 +               if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
45071 +                   PAGE_SIZE)
45072 +                       kfree(inodev_set.i_hash);
45073 +               else
45074 +                       vfree(inodev_set.i_hash);
45075 +       }
45076 +
45077 +       gr_free_uidset();
45078 +
45079 +       memset(&name_set, 0, sizeof (struct name_db));
45080 +       memset(&inodev_set, 0, sizeof (struct inodev_db));
45081 +       memset(&acl_role_set, 0, sizeof (struct acl_role_db));
45082 +       memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
45083 +
45084 +       default_role = NULL;
45085 +       role_list = NULL;
45086 +
45087 +       return;
45088 +}
45089 +
45090 +static __u32
45091 +count_user_objs(struct acl_object_label *userp)
45092 +{
45093 +       struct acl_object_label o_tmp;
45094 +       __u32 num = 0;
45095 +
45096 +       while (userp) {
45097 +               if (copy_from_user(&o_tmp, userp,
45098 +                                  sizeof (struct acl_object_label)))
45099 +                       break;
45100 +
45101 +               userp = o_tmp.prev;
45102 +               num++;
45103 +       }
45104 +
45105 +       return num;
45106 +}
45107 +
45108 +static struct acl_subject_label *
45109 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
45110 +
45111 +static int
45112 +copy_user_glob(struct acl_object_label *obj)
45113 +{
45114 +       struct acl_object_label *g_tmp, **guser;
45115 +       unsigned int len;
45116 +       char *tmp;
45117 +
45118 +       if (obj->globbed == NULL)
45119 +               return 0;
45120 +
45121 +       guser = &obj->globbed;
45122 +       while (*guser) {
45123 +               g_tmp = (struct acl_object_label *)
45124 +                       acl_alloc(sizeof (struct acl_object_label));
45125 +               if (g_tmp == NULL)
45126 +                       return -ENOMEM;
45127 +
45128 +               if (copy_from_user(g_tmp, *guser,
45129 +                                  sizeof (struct acl_object_label)))
45130 +                       return -EFAULT;
45131 +
45132 +               len = strnlen_user(g_tmp->filename, PATH_MAX);
45133 +
45134 +               if (!len || len >= PATH_MAX)
45135 +                       return -EINVAL;
45136 +
45137 +               if ((tmp = (char *) acl_alloc(len)) == NULL)
45138 +                       return -ENOMEM;
45139 +
45140 +               if (copy_from_user(tmp, g_tmp->filename, len))
45141 +                       return -EFAULT;
45142 +               tmp[len-1] = '\0';
45143 +               g_tmp->filename = tmp;
45144 +
45145 +               *guser = g_tmp;
45146 +               guser = &(g_tmp->next);
45147 +       }
45148 +
45149 +       return 0;
45150 +}
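copy_user_glob() and the other copy_user_* helpers below pull variable-length strings out of userspace with the same three-step pattern: strnlen_user() to bound the length, an allocation of exactly that many bytes, then copy_from_user() followed by forcing a trailing NUL. A userspace stand-in for that pattern, with strnlen()/memcpy() playing the roles of strnlen_user()/copy_from_user(); copy_bounded() and the sample cap are hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Bounded copy of an untrusted string: measure with a hard cap, reject
 * oversized input, copy, then force NUL-termination. */
static char *copy_bounded(const char *src, size_t cap)
{
    size_t len = strnlen(src, cap) + 1;   /* includes the NUL, like strnlen_user() */
    char *dst;

    if (len > cap)                        /* like the "len >= PATH_MAX" rejection */
        return NULL;
    dst = malloc(len);
    if (!dst)
        return NULL;
    memcpy(dst, src, len);
    dst[len - 1] = '\0';                  /* force termination, like tmp[len-1] = '\0' */
    return dst;
}

int main(void)
{
    char *s = copy_bounded("/bin/sh", 4096);
    printf("%s\n", s ? s : "(rejected)");
    free(s);
    return 0;
}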
45151 +
45152 +static int
45153 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
45154 +              struct acl_role_label *role)
45155 +{
45156 +       struct acl_object_label *o_tmp;
45157 +       unsigned int len;
45158 +       int ret;
45159 +       char *tmp;
45160 +
45161 +       while (userp) {
45162 +               if ((o_tmp = (struct acl_object_label *)
45163 +                    acl_alloc(sizeof (struct acl_object_label))) == NULL)
45164 +                       return -ENOMEM;
45165 +
45166 +               if (copy_from_user(o_tmp, userp,
45167 +                                  sizeof (struct acl_object_label)))
45168 +                       return -EFAULT;
45169 +
45170 +               userp = o_tmp->prev;
45171 +
45172 +               len = strnlen_user(o_tmp->filename, PATH_MAX);
45173 +
45174 +               if (!len || len >= PATH_MAX)
45175 +                       return -EINVAL;
45176 +
45177 +               if ((tmp = (char *) acl_alloc(len)) == NULL)
45178 +                       return -ENOMEM;
45179 +
45180 +               if (copy_from_user(tmp, o_tmp->filename, len))
45181 +                       return -EFAULT;
45182 +               tmp[len-1] = '\0';
45183 +               o_tmp->filename = tmp;
45184 +
45185 +               insert_acl_obj_label(o_tmp, subj);
45186 +               if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
45187 +                                      o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
45188 +                       return -ENOMEM;
45189 +
45190 +               ret = copy_user_glob(o_tmp);
45191 +               if (ret)
45192 +                       return ret;
45193 +
45194 +               if (o_tmp->nested) {
45195 +                       o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
45196 +                       if (IS_ERR(o_tmp->nested))
45197 +                               return PTR_ERR(o_tmp->nested);
45198 +
45199 +                       /* insert into nested subject list */
45200 +                       o_tmp->nested->next = role->hash->first;
45201 +                       role->hash->first = o_tmp->nested;
45202 +               }
45203 +       }
45204 +
45205 +       return 0;
45206 +}
45207 +
45208 +static __u32
45209 +count_user_subjs(struct acl_subject_label *userp)
45210 +{
45211 +       struct acl_subject_label s_tmp;
45212 +       __u32 num = 0;
45213 +
45214 +       while (userp) {
45215 +               if (copy_from_user(&s_tmp, userp,
45216 +                                  sizeof (struct acl_subject_label)))
45217 +                       break;
45218 +
45219 +               userp = s_tmp.prev;
45220 +               /* do not count nested subjects against this count, since
45221 +                  they are not included in the hash table, but are
45222 +                  attached to objects.  We have already counted
45223 +                  the subjects in userspace for the allocation 
45224 +                  stack
45225 +               */
45226 +               if (!(s_tmp.mode & GR_NESTED))
45227 +                       num++;
45228 +       }
45229 +
45230 +       return num;
45231 +}
45232 +
45233 +static int
45234 +copy_user_allowedips(struct acl_role_label *rolep)
45235 +{
45236 +       struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
45237 +
45238 +       ruserip = rolep->allowed_ips;
45239 +
45240 +       while (ruserip) {
45241 +               rlast = rtmp;
45242 +
45243 +               if ((rtmp = (struct role_allowed_ip *)
45244 +                    acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
45245 +                       return -ENOMEM;
45246 +
45247 +               if (copy_from_user(rtmp, ruserip,
45248 +                                  sizeof (struct role_allowed_ip)))
45249 +                       return -EFAULT;
45250 +
45251 +               ruserip = rtmp->prev;
45252 +
45253 +               if (!rlast) {
45254 +                       rtmp->prev = NULL;
45255 +                       rolep->allowed_ips = rtmp;
45256 +               } else {
45257 +                       rlast->next = rtmp;
45258 +                       rtmp->prev = rlast;
45259 +               }
45260 +
45261 +               if (!ruserip)
45262 +                       rtmp->next = NULL;
45263 +       }
45264 +
45265 +       return 0;
45266 +}
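copy_user_allowedips() and copy_user_transitions() below share the same relinking idiom: the userspace chain is walked through ->prev, each element is copied into a freshly allocated node, and the rlast/next fix-ups rebuild a properly terminated kernel-side list in the same traversal order. A userspace sketch of that idiom, with plain malloc() standing in for acl_alloc() and struct assignment for copy_from_user(); struct node and copy_chain() are made-up names.

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *prev; struct node *next; };

/* Walk the source through ->prev and append each copy after the
 * previous one, so the rebuilt list preserves the traversal order. */
static struct node *copy_chain(const struct node *src)
{
    struct node *head = NULL, *last = NULL, *tmp;

    while (src) {
        tmp = malloc(sizeof(*tmp));
        if (!tmp)
            exit(1);
        *tmp = *src;
        src = tmp->prev;          /* keep walking the source chain */

        if (!last) {
            tmp->prev = NULL;
            head = tmp;
        } else {
            last->next = tmp;
            tmp->prev = last;
        }
        if (!src)
            tmp->next = NULL;     /* terminate the rebuilt list */
        last = tmp;
    }
    return head;
}

int main(void)
{
    struct node c = { 3, NULL, NULL }, b = { 2, &c, NULL }, a = { 1, &b, NULL };
    for (struct node *n = copy_chain(&a); n; n = n->next)
        printf("%d ", n->v);      /* 1 2 3 */
    printf("\n");
    return 0;
}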
45267 +
45268 +static int
45269 +copy_user_transitions(struct acl_role_label *rolep)
45270 +{
45271 +       struct role_transition *rusertp, *rtmp = NULL, *rlast;
45272 +       
45273 +       unsigned int len;
45274 +       char *tmp;
45275 +
45276 +       rusertp = rolep->transitions;
45277 +
45278 +       while (rusertp) {
45279 +               rlast = rtmp;
45280 +
45281 +               if ((rtmp = (struct role_transition *)
45282 +                    acl_alloc(sizeof (struct role_transition))) == NULL)
45283 +                       return -ENOMEM;
45284 +
45285 +               if (copy_from_user(rtmp, rusertp,
45286 +                                  sizeof (struct role_transition)))
45287 +                       return -EFAULT;
45288 +
45289 +               rusertp = rtmp->prev;
45290 +
45291 +               len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
45292 +
45293 +               if (!len || len >= GR_SPROLE_LEN)
45294 +                       return -EINVAL;
45295 +
45296 +               if ((tmp = (char *) acl_alloc(len)) == NULL)
45297 +                       return -ENOMEM;
45298 +
45299 +               if (copy_from_user(tmp, rtmp->rolename, len))
45300 +                       return -EFAULT;
45301 +               tmp[len-1] = '\0';
45302 +               rtmp->rolename = tmp;
45303 +
45304 +               if (!rlast) {
45305 +                       rtmp->prev = NULL;
45306 +                       rolep->transitions = rtmp;
45307 +               } else {
45308 +                       rlast->next = rtmp;
45309 +                       rtmp->prev = rlast;
45310 +               }
45311 +
45312 +               if (!rusertp)
45313 +                       rtmp->next = NULL;
45314 +       }
45315 +
45316 +       return 0;
45317 +}
45318 +
45319 +static struct acl_subject_label *
45320 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45321 +{
45322 +       struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45323 +       unsigned int len;
45324 +       char *tmp;
45325 +       __u32 num_objs;
45326 +       struct acl_ip_label **i_tmp, *i_utmp2;
45327 +       struct gr_hash_struct ghash;
45328 +       struct subject_map *subjmap;
45329 +       unsigned int i_num;
45330 +       int err;
45331 +
45332 +       s_tmp = lookup_subject_map(userp);
45333 +
45334 +       /* we've already copied this subject into the kernel, just return
45335 +          the reference to it, and don't copy it over again
45336 +       */
45337 +       if (s_tmp)
45338 +               return(s_tmp);
45339 +
45340 +       if ((s_tmp = (struct acl_subject_label *)
45341 +           acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45342 +               return ERR_PTR(-ENOMEM);
45343 +
45344 +       subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45345 +       if (subjmap == NULL)
45346 +               return ERR_PTR(-ENOMEM);
45347 +
45348 +       subjmap->user = userp;
45349 +       subjmap->kernel = s_tmp;
45350 +       insert_subj_map_entry(subjmap);
45351 +
45352 +       if (copy_from_user(s_tmp, userp,
45353 +                          sizeof (struct acl_subject_label)))
45354 +               return ERR_PTR(-EFAULT);
45355 +
45356 +       len = strnlen_user(s_tmp->filename, PATH_MAX);
45357 +
45358 +       if (!len || len >= PATH_MAX)
45359 +               return ERR_PTR(-EINVAL);
45360 +
45361 +       if ((tmp = (char *) acl_alloc(len)) == NULL)
45362 +               return ERR_PTR(-ENOMEM);
45363 +
45364 +       if (copy_from_user(tmp, s_tmp->filename, len))
45365 +               return ERR_PTR(-EFAULT);
45366 +       tmp[len-1] = '\0';
45367 +       s_tmp->filename = tmp;
45368 +
45369 +       if (!strcmp(s_tmp->filename, "/"))
45370 +               role->root_label = s_tmp;
45371 +
45372 +       if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45373 +               return ERR_PTR(-EFAULT);
45374 +
45375 +       /* copy user and group transition tables */
45376 +
45377 +       if (s_tmp->user_trans_num) {
45378 +               uid_t *uidlist;
45379 +
45380 +               uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45381 +               if (uidlist == NULL)
45382 +                       return ERR_PTR(-ENOMEM);
45383 +               if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45384 +                       return ERR_PTR(-EFAULT);
45385 +
45386 +               s_tmp->user_transitions = uidlist;
45387 +       }
45388 +
45389 +       if (s_tmp->group_trans_num) {
45390 +               gid_t *gidlist;
45391 +
45392 +               gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45393 +               if (gidlist == NULL)
45394 +                       return ERR_PTR(-ENOMEM);
45395 +               if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45396 +                       return ERR_PTR(-EFAULT);
45397 +
45398 +               s_tmp->group_transitions = gidlist;
45399 +       }
45400 +
45401 +       /* set up object hash table */
45402 +       num_objs = count_user_objs(ghash.first);
45403 +
45404 +       s_tmp->obj_hash_size = num_objs;
45405 +       s_tmp->obj_hash =
45406 +           (struct acl_object_label **)
45407 +           create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45408 +
45409 +       if (!s_tmp->obj_hash)
45410 +               return ERR_PTR(-ENOMEM);
45411 +
45412 +       memset(s_tmp->obj_hash, 0,
45413 +              s_tmp->obj_hash_size *
45414 +              sizeof (struct acl_object_label *));
45415 +
45416 +       /* add in objects */
45417 +       err = copy_user_objs(ghash.first, s_tmp, role);
45418 +
45419 +       if (err)
45420 +               return ERR_PTR(err);
45421 +
45422 +       /* set pointer for parent subject */
45423 +       if (s_tmp->parent_subject) {
45424 +               s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45425 +
45426 +               if (IS_ERR(s_tmp2))
45427 +                       return s_tmp2;
45428 +
45429 +               s_tmp->parent_subject = s_tmp2;
45430 +       }
45431 +
45432 +       /* add in ip acls */
45433 +
45434 +       if (!s_tmp->ip_num) {
45435 +               s_tmp->ips = NULL;
45436 +               goto insert;
45437 +       }
45438 +
45439 +       i_tmp =
45440 +           (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45441 +                                              sizeof (struct acl_ip_label *));
45442 +
45443 +       if (!i_tmp)
45444 +               return ERR_PTR(-ENOMEM);
45445 +
45446 +       for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45447 +               *(i_tmp + i_num) =
45448 +                   (struct acl_ip_label *)
45449 +                   acl_alloc(sizeof (struct acl_ip_label));
45450 +               if (!*(i_tmp + i_num))
45451 +                       return ERR_PTR(-ENOMEM);
45452 +
45453 +               if (copy_from_user
45454 +                   (&i_utmp2, s_tmp->ips + i_num,
45455 +                    sizeof (struct acl_ip_label *)))
45456 +                       return ERR_PTR(-EFAULT);
45457 +
45458 +               if (copy_from_user
45459 +                   (*(i_tmp + i_num), i_utmp2,
45460 +                    sizeof (struct acl_ip_label)))
45461 +                       return ERR_PTR(-EFAULT);
45462 +               
45463 +               if ((*(i_tmp + i_num))->iface == NULL)
45464 +                       continue;
45465 +
45466 +               len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45467 +               if (!len || len >= IFNAMSIZ)
45468 +                       return ERR_PTR(-EINVAL);
45469 +               tmp = acl_alloc(len);
45470 +               if (tmp == NULL)
45471 +                       return ERR_PTR(-ENOMEM);
45472 +               if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45473 +                       return ERR_PTR(-EFAULT);
45474 +               (*(i_tmp + i_num))->iface = tmp;
45475 +       }
45476 +
45477 +       s_tmp->ips = i_tmp;
45478 +
45479 +insert:
45480 +       if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45481 +                              s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45482 +               return ERR_PTR(-ENOMEM);
45483 +
45484 +       return s_tmp;
45485 +}
45486 +
45487 +static int
45488 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45489 +{
45490 +       struct acl_subject_label s_pre;
45491 +       struct acl_subject_label * ret;
45492 +       int err;
45493 +
45494 +       while (userp) {
45495 +               if (copy_from_user(&s_pre, userp,
45496 +                                  sizeof (struct acl_subject_label)))
45497 +                       return -EFAULT;
45498 +               
45499 +               /* do not add nested subjects here, add
45500 +                  while parsing objects
45501 +               */
45502 +
45503 +               if (s_pre.mode & GR_NESTED) {
45504 +                       userp = s_pre.prev;
45505 +                       continue;
45506 +               }
45507 +
45508 +               ret = do_copy_user_subj(userp, role);
45509 +
45510 +               err = PTR_ERR(ret);
45511 +               if (IS_ERR(ret))
45512 +                       return err;
45513 +
45514 +               insert_acl_subj_label(ret, role);
45515 +
45516 +               userp = s_pre.prev;
45517 +       }
45518 +
45519 +       return 0;
45520 +}
45521 +
45522 +static int
45523 +copy_user_acl(struct gr_arg *arg)
45524 +{
45525 +       struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45526 +       struct sprole_pw *sptmp;
45527 +       struct gr_hash_struct *ghash;
45528 +       uid_t *domainlist;
45529 +       unsigned int r_num;
45530 +       unsigned int len;
45531 +       char *tmp;
45532 +       int err = 0;
45533 +       __u16 i;
45534 +       __u32 num_subjs;
45535 +
45536 +       /* we need a default and kernel role */
45537 +       if (arg->role_db.num_roles < 2)
45538 +               return -EINVAL;
45539 +
45540 +       /* copy special role authentication info from userspace */
45541 +
45542 +       num_sprole_pws = arg->num_sprole_pws;
45543 +       acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45544 +
45545 +       if (!acl_special_roles) {
45546 +               err = -ENOMEM;
45547 +               goto cleanup;
45548 +       }
45549 +
45550 +       for (i = 0; i < num_sprole_pws; i++) {
45551 +               sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45552 +               if (!sptmp) {
45553 +                       err = -ENOMEM;
45554 +                       goto cleanup;
45555 +               }
45556 +               if (copy_from_user(sptmp, arg->sprole_pws + i,
45557 +                                  sizeof (struct sprole_pw))) {
45558 +                       err = -EFAULT;
45559 +                       goto cleanup;
45560 +               }
45561 +
45562 +               len =
45563 +                   strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45564 +
45565 +               if (!len || len >= GR_SPROLE_LEN) {
45566 +                       err = -EINVAL;
45567 +                       goto cleanup;
45568 +               }
45569 +
45570 +               if ((tmp = (char *) acl_alloc(len)) == NULL) {
45571 +                       err = -ENOMEM;
45572 +                       goto cleanup;
45573 +               }
45574 +
45575 +               if (copy_from_user(tmp, sptmp->rolename, len)) {
45576 +                       err = -EFAULT;
45577 +                       goto cleanup;
45578 +               }
45579 +               tmp[len-1] = '\0';
45580 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45581 +               printk(KERN_ALERT "Copying special role %s\n", tmp);
45582 +#endif
45583 +               sptmp->rolename = tmp;
45584 +               acl_special_roles[i] = sptmp;
45585 +       }
45586 +
45587 +       r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45588 +
45589 +       for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45590 +               r_tmp = acl_alloc(sizeof (struct acl_role_label));
45591 +
45592 +               if (!r_tmp) {
45593 +                       err = -ENOMEM;
45594 +                       goto cleanup;
45595 +               }
45596 +
45597 +               if (copy_from_user(&r_utmp2, r_utmp + r_num,
45598 +                                  sizeof (struct acl_role_label *))) {
45599 +                       err = -EFAULT;
45600 +                       goto cleanup;
45601 +               }
45602 +
45603 +               if (copy_from_user(r_tmp, r_utmp2,
45604 +                                  sizeof (struct acl_role_label))) {
45605 +                       err = -EFAULT;
45606 +                       goto cleanup;
45607 +               }
45608 +
45609 +               len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45610 +
45611 +               if (!len || len >= GR_SPROLE_LEN) {
45612 +                       err = -EINVAL;
45613 +                       goto cleanup;
45614 +               }
45615 +
45616 +               if ((tmp = (char *) acl_alloc(len)) == NULL) {
45617 +                       err = -ENOMEM;
45618 +                       goto cleanup;
45619 +               }
45620 +               if (copy_from_user(tmp, r_tmp->rolename, len)) {
45621 +                       err = -EFAULT;
45622 +                       goto cleanup;
45623 +               }
45624 +               tmp[len-1] = '\0';
45625 +               r_tmp->rolename = tmp;
45626 +
45627 +               if (!strcmp(r_tmp->rolename, "default")
45628 +                   && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
45629 +                       default_role = r_tmp;
45630 +               } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
45631 +                       kernel_role = r_tmp;
45632 +               }
45633 +
45634 +               if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
45635 +                       err = -ENOMEM;
45636 +                       goto cleanup;
45637 +               }
45638 +               if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
45639 +                       err = -EFAULT;
45640 +                       goto cleanup;
45641 +               }
45642 +
45643 +               r_tmp->hash = ghash;
45644 +
45645 +               num_subjs = count_user_subjs(r_tmp->hash->first);
45646 +
45647 +               r_tmp->subj_hash_size = num_subjs;
45648 +               r_tmp->subj_hash =
45649 +                   (struct acl_subject_label **)
45650 +                   create_table(&(r_tmp->subj_hash_size), sizeof(void *));
45651 +
45652 +               if (!r_tmp->subj_hash) {
45653 +                       err = -ENOMEM;
45654 +                       goto cleanup;
45655 +               }
45656 +
45657 +               err = copy_user_allowedips(r_tmp);
45658 +               if (err)
45659 +                       goto cleanup;
45660 +
45661 +               /* copy domain info */
45662 +               if (r_tmp->domain_children != NULL) {
45663 +                       domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
45664 +                       if (domainlist == NULL) {
45665 +                               err = -ENOMEM;
45666 +                               goto cleanup;
45667 +                       }
45668 +                       if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
45669 +                               err = -EFAULT;
45670 +                               goto cleanup;
45671 +                       }
45672 +                       r_tmp->domain_children = domainlist;
45673 +               }
45674 +
45675 +               err = copy_user_transitions(r_tmp);
45676 +               if (err)
45677 +                       goto cleanup;
45678 +
45679 +               memset(r_tmp->subj_hash, 0,
45680 +                      r_tmp->subj_hash_size *
45681 +                      sizeof (struct acl_subject_label *));
45682 +
45683 +               err = copy_user_subjs(r_tmp->hash->first, r_tmp);
45684 +
45685 +               if (err)
45686 +                       goto cleanup;
45687 +
45688 +               /* set nested subject list to null */
45689 +               r_tmp->hash->first = NULL;
45690 +
45691 +               insert_acl_role_label(r_tmp);
45692 +       }
45693 +
45694 +       goto return_err;
45695 +      cleanup:
45696 +       free_variables();
45697 +      return_err:
45698 +       return err;
45699 +
45700 +}
45701 +
45702 +static int
45703 +gracl_init(struct gr_arg *args)
45704 +{
45705 +       int error = 0;
45706 +
45707 +       memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
45708 +       memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
45709 +
45710 +       if (init_variables(args)) {
45711 +               gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
45712 +               error = -ENOMEM;
45713 +               free_variables();
45714 +               goto out;
45715 +       }
45716 +
45717 +       error = copy_user_acl(args);
45718 +       free_init_variables();
45719 +       if (error) {
45720 +               free_variables();
45721 +               goto out;
45722 +       }
45723 +
45724 +       if ((error = gr_set_acls(0))) {
45725 +               free_variables();
45726 +               goto out;
45727 +       }
45728 +
45729 +       pax_open_kernel();
45730 +       gr_status |= GR_READY;
45731 +       pax_close_kernel();
45732 +
45733 +      out:
45734 +       return error;
45735 +}
45736 +
45737 +/* derived from glibc fnmatch(); returns 0: match, 1: no match */
45738 +
45739 +static int
45740 +glob_match(const char *p, const char *n)
45741 +{
45742 +       char c;
45743 +
45744 +       while ((c = *p++) != '\0') {
45745 +       switch (c) {
45746 +               case '?':
45747 +                       if (*n == '\0')
45748 +                               return 1;
45749 +                       else if (*n == '/')
45750 +                               return 1;
45751 +                       break;
45752 +               case '\\':
45753 +                       if (*n != c)
45754 +                               return 1;
45755 +                       break;
45756 +               case '*':
45757 +                       for (c = *p++; c == '?' || c == '*'; c = *p++) {
45758 +                               if (*n == '/')
45759 +                                       return 1;
45760 +                               else if (c == '?') {
45761 +                                       if (*n == '\0')
45762 +                                               return 1;
45763 +                                       else
45764 +                                               ++n;
45765 +                               }
45766 +                       }
45767 +                       if (c == '\0') {
45768 +                               return 0;
45769 +                       } else {
45770 +                               const char *endp;
45771 +
45772 +                               if ((endp = strchr(n, '/')) == NULL)
45773 +                                       endp = n + strlen(n);
45774 +
45775 +                               if (c == '[') {
45776 +                                       for (--p; n < endp; ++n)
45777 +                                               if (!glob_match(p, n))
45778 +                                                       return 0;
45779 +                               } else if (c == '/') {
45780 +                                       while (*n != '\0' && *n != '/')
45781 +                                               ++n;
45782 +                                       if (*n == '/' && !glob_match(p, n + 1))
45783 +                                               return 0;
45784 +                               } else {
45785 +                                       for (--p; n < endp; ++n)
45786 +                                               if (*n == c && !glob_match(p, n))
45787 +                                                       return 0;
45788 +                               }
45789 +
45790 +                               return 1;
45791 +                       }
45792 +               case '[':
45793 +                       {
45794 +                       int not;
45795 +                       char cold;
45796 +
45797 +                       if (*n == '\0' || *n == '/')
45798 +                               return 1;
45799 +
45800 +                       not = (*p == '!' || *p == '^');
45801 +                       if (not)
45802 +                               ++p;
45803 +
45804 +                       c = *p++;
45805 +                       for (;;) {
45806 +                               unsigned char fn = (unsigned char)*n;
45807 +
45808 +                               if (c == '\0')
45809 +                                       return 1;
45810 +                               else {
45811 +                                       if (c == fn)
45812 +                                               goto matched;
45813 +                                       cold = c;
45814 +                                       c = *p++;
45815 +
45816 +                                       if (c == '-' && *p != ']') {
45817 +                                               unsigned char cend = *p++;
45818 +
45819 +                                               if (cend == '\0')
45820 +                                                       return 1;
45821 +
45822 +                                               if (cold <= fn && fn <= cend)
45823 +                                                       goto matched;
45824 +
45825 +                                               c = *p++;
45826 +                                       }
45827 +                               }
45828 +
45829 +                               if (c == ']')
45830 +                                       break;
45831 +                       }
45832 +                       if (!not)
45833 +                               return 1;
45834 +                       break;
45835 +               matched:
45836 +                       while (c != ']') {
45837 +                               if (c == '\0')
45838 +                                       return 1;
45839 +
45840 +                               c = *p++;
45841 +                       }
45842 +                       if (not)
45843 +                               return 1;
45844 +               }
45845 +               break;
45846 +       default:
45847 +               if (c != *n)
45848 +                       return 1;
45849 +       }
45850 +
45851 +       ++n;
45852 +       }
45853 +
45854 +       if (*n == '\0')
45855 +               return 0;
45856 +
45857 +       if (*n == '/')
45858 +               return 0;
45859 +
45860 +       return 1;
45861 +}
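glob_match() keeps glibc fnmatch()'s return convention (0 on match, nonzero otherwise) and, much like fnmatch() with FNM_PATHNAME, refuses to let '?' or '*' cross a '/', so a wildcard stays within one path component. The sketch below demonstrates that convention with the real userspace fnmatch(3); it is an analogy only, not the kernel routine above, and the sample patterns are arbitrary.

#include <stdio.h>
#include <fnmatch.h>

int main(void)
{
    /* 0 means "match", just like glob_match() above. */
    printf("%d\n", fnmatch("/tmp/*", "/tmp/file", FNM_PATHNAME));     /* 0 */
    printf("%d\n", fnmatch("/tmp/*", "/tmp/dir/file", FNM_PATHNAME)); /* nonzero: '*' stops at '/' */
    printf("%d\n", fnmatch("/tmp/[ab]?", "/tmp/a1", FNM_PATHNAME));   /* 0 */
    return 0;
}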
45862 +
45863 +static struct acl_object_label *
45864 +chk_glob_label(struct acl_object_label *globbed,
45865 +       struct dentry *dentry, struct vfsmount *mnt, char **path)
45866 +{
45867 +       struct acl_object_label *tmp;
45868 +
45869 +       if (*path == NULL)
45870 +               *path = gr_to_filename_nolock(dentry, mnt);
45871 +
45872 +       tmp = globbed;
45873 +
45874 +       while (tmp) {
45875 +               if (!glob_match(tmp->filename, *path))
45876 +                       return tmp;
45877 +               tmp = tmp->next;
45878 +       }
45879 +
45880 +       return NULL;
45881 +}
45882 +
45883 +static struct acl_object_label *
45884 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45885 +           const ino_t curr_ino, const dev_t curr_dev,
45886 +           const struct acl_subject_label *subj, char **path, const int checkglob)
45887 +{
45888 +       struct acl_subject_label *tmpsubj;
45889 +       struct acl_object_label *retval;
45890 +       struct acl_object_label *retval2;
45891 +
45892 +       tmpsubj = (struct acl_subject_label *) subj;
45893 +       read_lock(&gr_inode_lock);
45894 +       do {
45895 +               retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
45896 +               if (retval) {
45897 +                       if (checkglob && retval->globbed) {
45898 +                               retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
45899 +                                               (struct vfsmount *)orig_mnt, path);
45900 +                               if (retval2)
45901 +                                       retval = retval2;
45902 +                       }
45903 +                       break;
45904 +               }
45905 +       } while ((tmpsubj = tmpsubj->parent_subject));
45906 +       read_unlock(&gr_inode_lock);
45907 +
45908 +       return retval;
45909 +}
45910 +
45911 +static __inline__ struct acl_object_label *
45912 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45913 +           struct dentry *curr_dentry,
45914 +           const struct acl_subject_label *subj, char **path, const int checkglob)
45915 +{
45916 +       int newglob = checkglob;
45917 +       ino_t inode;
45918 +       dev_t device;
45919 +
45920 +       /* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
45921 +          as we don't want a / * rule to match instead of the / object.
45922 +          Don't do this for create lookups that call this function, though, since they're looking up
45923 +          on the parent and thus need globbing checks on all paths.
45924 +       */
45925 +       if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
45926 +               newglob = GR_NO_GLOB;
45927 +
45928 +       spin_lock(&curr_dentry->d_lock);
45929 +       inode = curr_dentry->d_inode->i_ino;
45930 +       device = __get_dev(curr_dentry);
45931 +       spin_unlock(&curr_dentry->d_lock);
45932 +
45933 +       return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
45934 +}
45935 +
45936 +static struct acl_object_label *
45937 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45938 +             const struct acl_subject_label *subj, char *path, const int checkglob)
45939 +{
45940 +       struct dentry *dentry = (struct dentry *) l_dentry;
45941 +       struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45942 +       struct acl_object_label *retval;
45943 +       struct dentry *parent;
45944 +
45945 +       write_seqlock(&rename_lock);
45946 +       br_read_lock(vfsmount_lock);
45947 +
45948 +       if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
45949 +#ifdef CONFIG_NET
45950 +           mnt == sock_mnt ||
45951 +#endif
45952 +#ifdef CONFIG_HUGETLBFS
45953 +           (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
45954 +#endif
45955 +               /* ignore Eric Biederman */
45956 +           IS_PRIVATE(l_dentry->d_inode))) {
45957 +               retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
45958 +               goto out;
45959 +       }
45960 +
45961 +       for (;;) {
45962 +               if (dentry == real_root.dentry && mnt == real_root.mnt)
45963 +                       break;
45964 +
45965 +               if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45966 +                       if (mnt->mnt_parent == mnt)
45967 +                               break;
45968 +
45969 +                       retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45970 +                       if (retval != NULL)
45971 +                               goto out;
45972 +
45973 +                       dentry = mnt->mnt_mountpoint;
45974 +                       mnt = mnt->mnt_parent;
45975 +                       continue;
45976 +               }
45977 +
45978 +               parent = dentry->d_parent;
45979 +               retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45980 +               if (retval != NULL)
45981 +                       goto out;
45982 +
45983 +               dentry = parent;
45984 +       }
45985 +
45986 +       retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45987 +
45988 +       /* real_root is pinned so we don't have to hold a reference */
45989 +       if (retval == NULL)
45990 +               retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
45991 +out:
45992 +       br_read_unlock(vfsmount_lock);
45993 +       write_sequnlock(&rename_lock);
45994 +
45995 +       BUG_ON(retval == NULL);
45996 +
45997 +       return retval;
45998 +}
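__chk_obj_label() climbs from the target dentry toward real_root (crossing mountpoints via mnt_mountpoint/mnt_parent) and returns the first object label found, so the most specific path wins and the "/" object acts as the catch-all. A toy userspace sketch of that most-specific-first resolution over plain strings; the policy table, lookup() and resolve() are invented stand-ins for the subject's object hash and the dentry walk.

#include <stdio.h>
#include <string.h>

struct label { const char *path; const char *mode; };

/* Hypothetical policy standing in for a subject's object hash. */
static const struct label policy[] = {
    { "/home/user/secret", "h"  },
    { "/home",             "r"  },
    { "/",                 "rw" },   /* catch-all, like the "/" object */
};

static const char *lookup(const char *path)
{
    size_t i;

    for (i = 0; i < sizeof(policy) / sizeof(policy[0]); i++)
        if (!strcmp(policy[i].path, path))
            return policy[i].mode;
    return NULL;
}

/* Strip one trailing component at a time and return the first label hit,
 * i.e. the most specific match, falling back to "/". */
static const char *resolve(const char *path)
{
    char buf[256];
    const char *mode;
    char *slash;

    strncpy(buf, path, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';

    for (;;) {
        mode = lookup(buf);
        if (mode)
            return mode;
        slash = strrchr(buf, '/');
        if (!slash || slash == buf)
            return lookup("/");
        *slash = '\0';
    }
}

int main(void)
{
    printf("%s\n", resolve("/home/user/secret/key")); /* h  (from /home/user/secret) */
    printf("%s\n", resolve("/etc/passwd"));           /* rw (falls back to "/") */
    return 0;
}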
45999 +
46000 +static __inline__ struct acl_object_label *
46001 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46002 +             const struct acl_subject_label *subj)
46003 +{
46004 +       char *path = NULL;
46005 +       return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
46006 +}
46007 +
46008 +static __inline__ struct acl_object_label *
46009 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46010 +             const struct acl_subject_label *subj)
46011 +{
46012 +       char *path = NULL;
46013 +       return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
46014 +}
46015 +
46016 +static __inline__ struct acl_object_label *
46017 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46018 +                    const struct acl_subject_label *subj, char *path)
46019 +{
46020 +       return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
46021 +}
46022 +
46023 +static struct acl_subject_label *
46024 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46025 +              const struct acl_role_label *role)
46026 +{
46027 +       struct dentry *dentry = (struct dentry *) l_dentry;
46028 +       struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46029 +       struct acl_subject_label *retval;
46030 +       struct dentry *parent;
46031 +
46032 +       write_seqlock(&rename_lock);
46033 +       br_read_lock(vfsmount_lock);
46034 +
46035 +       for (;;) {
46036 +               if (dentry == real_root.dentry && mnt == real_root.mnt)
46037 +                       break;
46038 +               if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46039 +                       if (mnt->mnt_parent == mnt)
46040 +                               break;
46041 +
46042 +                       spin_lock(&dentry->d_lock);
46043 +                       read_lock(&gr_inode_lock);
46044 +                       retval =
46045 +                               lookup_acl_subj_label(dentry->d_inode->i_ino,
46046 +                                               __get_dev(dentry), role);
46047 +                       read_unlock(&gr_inode_lock);
46048 +                       spin_unlock(&dentry->d_lock);
46049 +                       if (retval != NULL)
46050 +                               goto out;
46051 +
46052 +                       dentry = mnt->mnt_mountpoint;
46053 +                       mnt = mnt->mnt_parent;
46054 +                       continue;
46055 +               }
46056 +
46057 +               spin_lock(&dentry->d_lock);
46058 +               read_lock(&gr_inode_lock);
46059 +               retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46060 +                                         __get_dev(dentry), role);
46061 +               read_unlock(&gr_inode_lock);
46062 +               parent = dentry->d_parent;
46063 +               spin_unlock(&dentry->d_lock);
46064 +
46065 +               if (retval != NULL)
46066 +                       goto out;
46067 +
46068 +               dentry = parent;
46069 +       }
46070 +
46071 +       spin_lock(&dentry->d_lock);
46072 +       read_lock(&gr_inode_lock);
46073 +       retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46074 +                                 __get_dev(dentry), role);
46075 +       read_unlock(&gr_inode_lock);
46076 +       spin_unlock(&dentry->d_lock);
46077 +
46078 +       if (unlikely(retval == NULL)) {
46079 +               /* real_root is pinned, we don't need to hold a reference */
46080 +               read_lock(&gr_inode_lock);
46081 +               retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
46082 +                                         __get_dev(real_root.dentry), role);
46083 +               read_unlock(&gr_inode_lock);
46084 +       }
46085 +out:
46086 +       br_read_unlock(vfsmount_lock);
46087 +       write_sequnlock(&rename_lock);
46088 +
46089 +       BUG_ON(retval == NULL);
46090 +
46091 +       return retval;
46092 +}
46093 +
46094 +static void
46095 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
46096 +{
46097 +       struct task_struct *task = current;
46098 +       const struct cred *cred = current_cred();
46099 +
46100 +       security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46101 +                      cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46102 +                      task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46103 +                      1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
46104 +
46105 +       return;
46106 +}
46107 +
46108 +static void
46109 +gr_log_learn_sysctl(const char *path, const __u32 mode)
46110 +{
46111 +       struct task_struct *task = current;
46112 +       const struct cred *cred = current_cred();
46113 +
46114 +       security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46115 +                      cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46116 +                      task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46117 +                      1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
46118 +
46119 +       return;
46120 +}
46121 +
46122 +static void
46123 +gr_log_learn_id_change(const char type, const unsigned int real, 
46124 +                      const unsigned int effective, const unsigned int fs)
46125 +{
46126 +       struct task_struct *task = current;
46127 +       const struct cred *cred = current_cred();
46128 +
46129 +       security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
46130 +                      cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46131 +                      task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46132 +                      type, real, effective, fs, &task->signal->saved_ip);
46133 +
46134 +       return;
46135 +}
46136 +
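+/* hard link creation check: ensure the new link cannot give the subject
+   more access to the target object than it already has through the
+   existing path */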
46137 +__u32
46138 +gr_check_link(const struct dentry * new_dentry,
46139 +             const struct dentry * parent_dentry,
46140 +             const struct vfsmount * parent_mnt,
46141 +             const struct dentry * old_dentry, const struct vfsmount * old_mnt)
46142 +{
46143 +       struct acl_object_label *obj;
46144 +       __u32 oldmode, newmode;
46145 +       __u32 needmode;
46146 +
46147 +       if (unlikely(!(gr_status & GR_READY)))
46148 +               return (GR_CREATE | GR_LINK);
46149 +
46150 +       obj = chk_obj_label(old_dentry, old_mnt, current->acl);
46151 +       oldmode = obj->mode;
46152 +
46153 +       if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46154 +               oldmode |= (GR_CREATE | GR_LINK);
46155 +
46156 +       needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
46157 +       if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46158 +               needmode |= GR_SETID | GR_AUDIT_SETID;
46159 +
46160 +       newmode =
46161 +           gr_check_create(new_dentry, parent_dentry, parent_mnt,
46162 +                           oldmode | needmode);
46163 +
46164 +       needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
46165 +                             GR_SETID | GR_READ | GR_FIND | GR_DELETE |
46166 +                             GR_INHERIT | GR_AUDIT_INHERIT);
46167 +
46168 +       if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
46169 +               goto bad;
46170 +
46171 +       if ((oldmode & needmode) != needmode)
46172 +               goto bad;
46173 +
46174 +       needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
46175 +       if ((newmode & needmode) != needmode)
46176 +               goto bad;
46177 +
46178 +       if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
46179 +               return newmode;
46180 +bad:
46181 +       needmode = oldmode;
46182 +       if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46183 +               needmode |= GR_SETID;
46184 +       
46185 +       if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46186 +               gr_log_learn(old_dentry, old_mnt, needmode);
46187 +               return (GR_CREATE | GR_LINK);
46188 +       } else if (newmode & GR_SUPPRESS)
46189 +               return GR_SUPPRESS;
46190 +       else
46191 +               return 0;
46192 +}
46193 +
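+/* access decision for an existing object: returns the subset of the
+   requested mode bits allowed by the current subject's label for this
+   file, with special handling for learning mode and the init role
+   transfer case below */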
46194 +__u32
46195 +gr_search_file(const struct dentry * dentry, const __u32 mode,
46196 +              const struct vfsmount * mnt)
46197 +{
46198 +       __u32 retval = mode;
46199 +       struct acl_subject_label *curracl;
46200 +       struct acl_object_label *currobj;
46201 +
46202 +       if (unlikely(!(gr_status & GR_READY)))
46203 +               return (mode & ~GR_AUDITS);
46204 +
46205 +       curracl = current->acl;
46206 +
46207 +       currobj = chk_obj_label(dentry, mnt, curracl);
46208 +       retval = currobj->mode & mode;
46209 +
46210 +       /* if we're opening a specified transfer file for writing
46211 +          (e.g. /dev/initctl), then transfer our role to init
46212 +       */
46213 +       if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
46214 +                    current->role->roletype & GR_ROLE_PERSIST)) {
46215 +               struct task_struct *task = init_pid_ns.child_reaper;
46216 +
46217 +               if (task->role != current->role) {
46218 +                       task->acl_sp_role = 0;
46219 +                       task->acl_role_id = current->acl_role_id;
46220 +                       task->role = current->role;
46221 +                       rcu_read_lock();
46222 +                       read_lock(&grsec_exec_file_lock);
46223 +                       gr_apply_subject_to_task(task);
46224 +                       read_unlock(&grsec_exec_file_lock);
46225 +                       rcu_read_unlock();
46226 +                       gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
46227 +               }
46228 +       }
46229 +
46230 +       if (unlikely
46231 +           ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
46232 +            && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
46233 +               __u32 new_mode = mode;
46234 +
46235 +               new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46236 +
46237 +               retval = new_mode;
46238 +
46239 +               if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
46240 +                       new_mode |= GR_INHERIT;
46241 +
46242 +               if (!(mode & GR_NOLEARN))
46243 +                       gr_log_learn(dentry, mnt, new_mode);
46244 +       }
46245 +
46246 +       return retval;
46247 +}
46248 +
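+/* access decision for creating a new object at new_dentry: use the exact
+   policy entry for the target path if one exists, otherwise fall back to
+   the parent directory's object label */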
46249 +__u32
46250 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
46251 +               const struct vfsmount * mnt, const __u32 mode)
46252 +{
46253 +       struct name_entry *match;
46254 +       struct acl_object_label *matchpo;
46255 +       struct acl_subject_label *curracl;
46256 +       char *path;
46257 +       __u32 retval;
46258 +
46259 +       if (unlikely(!(gr_status & GR_READY)))
46260 +               return (mode & ~GR_AUDITS);
46261 +
46262 +       preempt_disable();
46263 +       path = gr_to_filename_rbac(new_dentry, mnt);
46264 +       match = lookup_name_entry_create(path);
46265 +
46266 +       if (!match)
46267 +               goto check_parent;
46268 +
46269 +       curracl = current->acl;
46270 +
46271 +       read_lock(&gr_inode_lock);
46272 +       matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
46273 +       read_unlock(&gr_inode_lock);
46274 +
46275 +       if (matchpo) {
46276 +               if ((matchpo->mode & mode) !=
46277 +                   (mode & ~(GR_AUDITS | GR_SUPPRESS))
46278 +                   && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46279 +                       __u32 new_mode = mode;
46280 +
46281 +                       new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46282 +
46283 +                       gr_log_learn(new_dentry, mnt, new_mode);
46284 +
46285 +                       preempt_enable();
46286 +                       return new_mode;
46287 +               }
46288 +               preempt_enable();
46289 +               return (matchpo->mode & mode);
46290 +       }
46291 +
46292 +      check_parent:
46293 +       curracl = current->acl;
46294 +
46295 +       matchpo = chk_obj_create_label(parent, mnt, curracl, path);
46296 +       retval = matchpo->mode & mode;
46297 +
46298 +       if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
46299 +           && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
46300 +               __u32 new_mode = mode;
46301 +
46302 +               new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46303 +
46304 +               gr_log_learn(new_dentry, mnt, new_mode);
46305 +               preempt_enable();
46306 +               return new_mode;
46307 +       }
46308 +
46309 +       preempt_enable();
46310 +       return retval;
46311 +}
46312 +
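+/* returns 1 if task should be hidden (e.g. in /proc) from the current
+   process, 0 otherwise */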
46313 +int
46314 +gr_check_hidden_task(const struct task_struct *task)
46315 +{
46316 +       if (unlikely(!(gr_status & GR_READY)))
46317 +               return 0;
46318 +
46319 +       if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
46320 +               return 1;
46321 +
46322 +       return 0;
46323 +}
46324 +
46325 +int
46326 +gr_check_protected_task(const struct task_struct *task)
46327 +{
46328 +       if (unlikely(!(gr_status & GR_READY) || !task))
46329 +               return 0;
46330 +
46331 +       if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46332 +           task->acl != current->acl)
46333 +               return 1;
46334 +
46335 +       return 0;
46336 +}
46337 +
46338 +int
46339 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46340 +{
46341 +       struct task_struct *p;
46342 +       int ret = 0;
46343 +
46344 +       if (unlikely(!(gr_status & GR_READY) || !pid))
46345 +               return ret;
46346 +
46347 +       read_lock(&tasklist_lock);
46348 +       do_each_pid_task(pid, type, p) {
46349 +               if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46350 +                   p->acl != current->acl) {
46351 +                       ret = 1;
46352 +                       goto out;
46353 +               }
46354 +       } while_each_pid_task(pid, type, p);
46355 +out:
46356 +       read_unlock(&tasklist_lock);
46357 +
46358 +       return ret;
46359 +}
46360 +
46361 +void
46362 +gr_copy_label(struct task_struct *tsk)
46363 +{
46364 +       tsk->signal->used_accept = 0;
46365 +       tsk->acl_sp_role = 0;
46366 +       tsk->acl_role_id = current->acl_role_id;
46367 +       tsk->acl = current->acl;
46368 +       tsk->role = current->role;
46369 +       tsk->signal->curr_ip = current->signal->curr_ip;
46370 +       tsk->signal->saved_ip = current->signal->saved_ip;
46371 +       if (current->exec_file)
46372 +               get_file(current->exec_file);
46373 +       tsk->exec_file = current->exec_file;
46374 +       tsk->is_writable = current->is_writable;
46375 +       if (unlikely(current->signal->used_accept)) {
46376 +               current->signal->curr_ip = 0;
46377 +               current->signal->saved_ip = 0;
46378 +       }
46379 +
46380 +       return;
46381 +}
46382 +
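+/* apply the subject's configured resource limits to the task; skipped for
+   learning subjects, whose limits are still being gathered */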
46383 +static void
46384 +gr_set_proc_res(struct task_struct *task)
46385 +{
46386 +       struct acl_subject_label *proc;
46387 +       unsigned short i;
46388 +
46389 +       proc = task->acl;
46390 +
46391 +       if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46392 +               return;
46393 +
46394 +       for (i = 0; i < RLIM_NLIMITS; i++) {
46395 +               if (!(proc->resmask & (1 << i)))
46396 +                       continue;
46397 +
46398 +               task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46399 +               task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46400 +       }
46401 +
46402 +       return;
46403 +}
46404 +
46405 +extern int __gr_process_user_ban(struct user_struct *user);
46406 +
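+/* validate a uid transition against the subject's user transition list
+   (also honoring active user bans when kernel lockout/brute-force
+   protection is enabled); returns 1 to deny, 0 to allow */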
46407 +int
46408 +gr_check_user_change(int real, int effective, int fs)
46409 +{
46410 +       unsigned int i;
46411 +       __u16 num;
46412 +       uid_t *uidlist;
46413 +       int curuid;
46414 +       int realok = 0;
46415 +       int effectiveok = 0;
46416 +       int fsok = 0;
46417 +
46418 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46419 +       struct user_struct *user;
46420 +
46421 +       if (real == -1)
46422 +               goto skipit;
46423 +
46424 +       user = find_user(real);
46425 +       if (user == NULL)
46426 +               goto skipit;
46427 +
46428 +       if (__gr_process_user_ban(user)) {
46429 +               /* for find_user */
46430 +               free_uid(user);
46431 +               return 1;
46432 +       }
46433 +
46434 +       /* for find_user */
46435 +       free_uid(user);
46436 +
46437 +skipit:
46438 +#endif
46439 +
46440 +       if (unlikely(!(gr_status & GR_READY)))
46441 +               return 0;
46442 +
46443 +       if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46444 +               gr_log_learn_id_change('u', real, effective, fs);
46445 +
46446 +       num = current->acl->user_trans_num;
46447 +       uidlist = current->acl->user_transitions;
46448 +
46449 +       if (uidlist == NULL)
46450 +               return 0;
46451 +
46452 +       if (real == -1)
46453 +               realok = 1;
46454 +       if (effective == -1)
46455 +               effectiveok = 1;
46456 +       if (fs == -1)
46457 +               fsok = 1;
46458 +
46459 +       if (current->acl->user_trans_type & GR_ID_ALLOW) {
46460 +               for (i = 0; i < num; i++) {
46461 +                       curuid = (int)uidlist[i];
46462 +                       if (real == curuid)
46463 +                               realok = 1;
46464 +                       if (effective == curuid)
46465 +                               effectiveok = 1;
46466 +                       if (fs == curuid)
46467 +                               fsok = 1;
46468 +               }
46469 +       } else if (current->acl->user_trans_type & GR_ID_DENY) {
46470 +               for (i = 0; i < num; i++) {
46471 +                       curuid = (int)uidlist[i];
46472 +                       if (real == curuid)
46473 +                               break;
46474 +                       if (effective == curuid)
46475 +                               break;
46476 +                       if (fs == curuid)
46477 +                               break;
46478 +               }
46479 +               /* not in deny list */
46480 +               if (i == num) {
46481 +                       realok = 1;
46482 +                       effectiveok = 1;
46483 +                       fsok = 1;
46484 +               }
46485 +       }
46486 +
46487 +       if (realok && effectiveok && fsok)
46488 +               return 0;
46489 +       else {
46490 +               gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46491 +               return 1;
46492 +       }
46493 +}
46494 +
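+/* as gr_check_user_change(), but for gid transitions against the
+   subject's group transition list */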
46495 +int
46496 +gr_check_group_change(int real, int effective, int fs)
46497 +{
46498 +       unsigned int i;
46499 +       __u16 num;
46500 +       gid_t *gidlist;
46501 +       int curgid;
46502 +       int realok = 0;
46503 +       int effectiveok = 0;
46504 +       int fsok = 0;
46505 +
46506 +       if (unlikely(!(gr_status & GR_READY)))
46507 +               return 0;
46508 +
46509 +       if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46510 +               gr_log_learn_id_change('g', real, effective, fs);
46511 +
46512 +       num = current->acl->group_trans_num;
46513 +       gidlist = current->acl->group_transitions;
46514 +
46515 +       if (gidlist == NULL)
46516 +               return 0;
46517 +
46518 +       if (real == -1)
46519 +               realok = 1;
46520 +       if (effective == -1)
46521 +               effectiveok = 1;
46522 +       if (fs == -1)
46523 +               fsok = 1;
46524 +
46525 +       if (current->acl->group_trans_type & GR_ID_ALLOW) {
46526 +               for (i = 0; i < num; i++) {
46527 +                       curgid = (int)gidlist[i];
46528 +                       if (real == curgid)
46529 +                               realok = 1;
46530 +                       if (effective == curgid)
46531 +                               effectiveok = 1;
46532 +                       if (fs == curgid)
46533 +                               fsok = 1;
46534 +               }
46535 +       } else if (current->acl->group_trans_type & GR_ID_DENY) {
46536 +               for (i = 0; i < num; i++) {
46537 +                       curgid = (int)gidlist[i];
46538 +                       if (real == curgid)
46539 +                               break;
46540 +                       if (effective == curgid)
46541 +                               break;
46542 +                       if (fs == curgid)
46543 +                               break;
46544 +               }
46545 +               /* not in deny list */
46546 +               if (i == num) {
46547 +                       realok = 1;
46548 +                       effectiveok = 1;
46549 +                       fsok = 1;
46550 +               }
46551 +       }
46552 +
46553 +       if (realok && effectiveok && fsok)
46554 +               return 0;
46555 +       else {
46556 +               gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46557 +               return 1;
46558 +       }
46559 +}
46560 +
46561 +void
46562 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46563 +{
46564 +       struct acl_role_label *role = task->role;
46565 +       struct acl_subject_label *subj = NULL;
46566 +       struct acl_object_label *obj;
46567 +       struct file *filp;
46568 +
46569 +       if (unlikely(!(gr_status & GR_READY)))
46570 +               return;
46571 +
46572 +       filp = task->exec_file;
46573 +
46574 +       /* kernel process, we'll give them the kernel role */
46575 +       if (unlikely(!filp)) {
46576 +               task->role = kernel_role;
46577 +               task->acl = kernel_role->root_label;
46578 +               return;
46579 +       } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46580 +               role = lookup_acl_role_label(task, uid, gid);
46581 +
46582 +       /* perform the subject lookup in the possibly new role;
46583 +          we can reuse this result below in the case where role == task->role
46584 +       */
46585 +       subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46586 +
46587 +       /* if we changed uid/gid but ended up in the same role
46588 +          and are using inheritance, don't lose the inherited subject:
46589 +          if the current subject differs from what a normal lookup
46590 +          would return, we arrived here via inheritance, so keep
46591 +          that subject
46592 +       */
46593 +       if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46594 +                                  (subj == task->acl)))
46595 +               task->acl = subj;
46596 +
46597 +       task->role = role;
46598 +
46599 +       task->is_writable = 0;
46600 +
46601 +       /* ignore additional mmap checks for processes that are writable 
46602 +          by the default ACL */
46603 +       obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46604 +       if (unlikely(obj->mode & GR_WRITE))
46605 +               task->is_writable = 1;
46606 +       obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46607 +       if (unlikely(obj->mode & GR_WRITE))
46608 +               task->is_writable = 1;
46609 +
46610 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46611 +       printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46612 +#endif
46613 +
46614 +       gr_set_proc_res(task);
46615 +
46616 +       return;
46617 +}
46618 +
46619 +int
46620 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46621 +                 const int unsafe_share)
46622 +{
46623 +       struct task_struct *task = current;
46624 +       struct acl_subject_label *newacl;
46625 +       struct acl_object_label *obj;
46626 +       __u32 retmode;
46627 +
46628 +       if (unlikely(!(gr_status & GR_READY)))
46629 +               return 0;
46630 +
46631 +       newacl = chk_subj_label(dentry, mnt, task->role);
46632 +
46633 +       task_lock(task);
46634 +       if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46635 +            !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46636 +            !(task->role->roletype & GR_ROLE_GOD) &&
46637 +            !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46638 +            !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46639 +                task_unlock(task);
46640 +               if (unsafe_share)
46641 +                       gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
46642 +               else
46643 +                       gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
46644 +               return -EACCES;
46645 +       }
46646 +       task_unlock(task);
46647 +
46648 +       obj = chk_obj_label(dentry, mnt, task->acl);
46649 +       retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
46650 +
46651 +       if (!(task->acl->mode & GR_INHERITLEARN) &&
46652 +           ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
46653 +               if (obj->nested)
46654 +                       task->acl = obj->nested;
46655 +               else
46656 +                       task->acl = newacl;
46657 +       } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
46658 +               gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
46659 +
46660 +       task->is_writable = 0;
46661 +
46662 +       /* ignore additional mmap checks for processes that are writable 
46663 +          by the default ACL */
46664 +       obj = chk_obj_label(dentry, mnt, default_role->root_label);
46665 +       if (unlikely(obj->mode & GR_WRITE))
46666 +               task->is_writable = 1;
46667 +       obj = chk_obj_label(dentry, mnt, task->role->root_label);
46668 +       if (unlikely(obj->mode & GR_WRITE))
46669 +               task->is_writable = 1;
46670 +
46671 +       gr_set_proc_res(task);
46672 +
46673 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46674 +       printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46675 +#endif
46676 +       return 0;
46677 +}
46678 +
46679 +/* always called with valid inodev ptr */
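+/* mark every object and subject label referring to ino/dev as deleted,
+   across all roles, and flag the name entry itself as deleted */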
46680 +static void
46681 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
46682 +{
46683 +       struct acl_object_label *matchpo;
46684 +       struct acl_subject_label *matchps;
46685 +       struct acl_subject_label *subj;
46686 +       struct acl_role_label *role;
46687 +       unsigned int x;
46688 +
46689 +       FOR_EACH_ROLE_START(role)
46690 +               FOR_EACH_SUBJECT_START(role, subj, x)
46691 +                       if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
46692 +                               matchpo->mode |= GR_DELETED;
46693 +               FOR_EACH_SUBJECT_END(subj,x)
46694 +               FOR_EACH_NESTED_SUBJECT_START(role, subj)
46695 +                       if (subj->inode == ino && subj->device == dev)
46696 +                               subj->mode |= GR_DELETED;
46697 +               FOR_EACH_NESTED_SUBJECT_END(subj)
46698 +               if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
46699 +                       matchps->mode |= GR_DELETED;
46700 +       FOR_EACH_ROLE_END(role)
46701 +
46702 +       inodev->nentry->deleted = 1;
46703 +
46704 +       return;
46705 +}
46706 +
46707 +void
46708 +gr_handle_delete(const ino_t ino, const dev_t dev)
46709 +{
46710 +       struct inodev_entry *inodev;
46711 +
46712 +       if (unlikely(!(gr_status & GR_READY)))
46713 +               return;
46714 +
46715 +       write_lock(&gr_inode_lock);
46716 +       inodev = lookup_inodev_entry(ino, dev);
46717 +       if (inodev != NULL)
46718 +               do_handle_delete(inodev, ino, dev);
46719 +       write_unlock(&gr_inode_lock);
46720 +
46721 +       return;
46722 +}
46723 +
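+/* a path known to the policy has been re-created: if an object label
+   matching the old inode/device was marked GR_DELETED, move it to the
+   new inode/device and re-insert it into the subject's object hash
+   (update_acl_subj_label and update_inodev_entry below follow the same
+   pattern) */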
46724 +static void
46725 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
46726 +                    const ino_t newinode, const dev_t newdevice,
46727 +                    struct acl_subject_label *subj)
46728 +{
46729 +       unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
46730 +       struct acl_object_label *match;
46731 +
46732 +       match = subj->obj_hash[index];
46733 +
46734 +       while (match && (match->inode != oldinode ||
46735 +              match->device != olddevice ||
46736 +              !(match->mode & GR_DELETED)))
46737 +               match = match->next;
46738 +
46739 +       if (match && (match->inode == oldinode)
46740 +           && (match->device == olddevice)
46741 +           && (match->mode & GR_DELETED)) {
46742 +               if (match->prev == NULL) {
46743 +                       subj->obj_hash[index] = match->next;
46744 +                       if (match->next != NULL)
46745 +                               match->next->prev = NULL;
46746 +               } else {
46747 +                       match->prev->next = match->next;
46748 +                       if (match->next != NULL)
46749 +                               match->next->prev = match->prev;
46750 +               }
46751 +               match->prev = NULL;
46752 +               match->next = NULL;
46753 +               match->inode = newinode;
46754 +               match->device = newdevice;
46755 +               match->mode &= ~GR_DELETED;
46756 +
46757 +               insert_acl_obj_label(match, subj);
46758 +       }
46759 +
46760 +       return;
46761 +}
46762 +
46763 +static void
46764 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
46765 +                     const ino_t newinode, const dev_t newdevice,
46766 +                     struct acl_role_label *role)
46767 +{
46768 +       unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
46769 +       struct acl_subject_label *match;
46770 +
46771 +       match = role->subj_hash[index];
46772 +
46773 +       while (match && (match->inode != oldinode ||
46774 +              match->device != olddevice ||
46775 +              !(match->mode & GR_DELETED)))
46776 +               match = match->next;
46777 +
46778 +       if (match && (match->inode == oldinode)
46779 +           && (match->device == olddevice)
46780 +           && (match->mode & GR_DELETED)) {
46781 +               if (match->prev == NULL) {
46782 +                       role->subj_hash[index] = match->next;
46783 +                       if (match->next != NULL)
46784 +                               match->next->prev = NULL;
46785 +               } else {
46786 +                       match->prev->next = match->next;
46787 +                       if (match->next != NULL)
46788 +                               match->next->prev = match->prev;
46789 +               }
46790 +               match->prev = NULL;
46791 +               match->next = NULL;
46792 +               match->inode = newinode;
46793 +               match->device = newdevice;
46794 +               match->mode &= ~GR_DELETED;
46795 +
46796 +               insert_acl_subj_label(match, role);
46797 +       }
46798 +
46799 +       return;
46800 +}
46801 +
46802 +static void
46803 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
46804 +                   const ino_t newinode, const dev_t newdevice)
46805 +{
46806 +       unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
46807 +       struct inodev_entry *match;
46808 +
46809 +       match = inodev_set.i_hash[index];
46810 +
46811 +       while (match && (match->nentry->inode != oldinode ||
46812 +              match->nentry->device != olddevice || !match->nentry->deleted))
46813 +               match = match->next;
46814 +
46815 +       if (match && (match->nentry->inode == oldinode)
46816 +           && (match->nentry->device == olddevice) &&
46817 +           match->nentry->deleted) {
46818 +               if (match->prev == NULL) {
46819 +                       inodev_set.i_hash[index] = match->next;
46820 +                       if (match->next != NULL)
46821 +                               match->next->prev = NULL;
46822 +               } else {
46823 +                       match->prev->next = match->next;
46824 +                       if (match->next != NULL)
46825 +                               match->next->prev = match->prev;
46826 +               }
46827 +               match->prev = NULL;
46828 +               match->next = NULL;
46829 +               match->nentry->inode = newinode;
46830 +               match->nentry->device = newdevice;
46831 +               match->nentry->deleted = 0;
46832 +
46833 +               insert_inodev_entry(match);
46834 +       }
46835 +
46836 +       return;
46837 +}
46838 +
46839 +static void
46840 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
46841 +                const struct vfsmount *mnt)
46842 +{
46843 +       struct acl_subject_label *subj;
46844 +       struct acl_role_label *role;
46845 +       unsigned int x;
46846 +       ino_t ino = dentry->d_inode->i_ino;
46847 +       dev_t dev = __get_dev(dentry);
46848 +       
46849 +       FOR_EACH_ROLE_START(role)
46850 +               update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
46851 +
46852 +               FOR_EACH_NESTED_SUBJECT_START(role, subj)
46853 +                       if ((subj->inode == ino) && (subj->device == dev)) {
46854 +                               subj->inode = ino;
46855 +                               subj->device = dev;
46856 +                       }
46857 +               FOR_EACH_NESTED_SUBJECT_END(subj)
46858 +               FOR_EACH_SUBJECT_START(role, subj, x)
46859 +                       update_acl_obj_label(matchn->inode, matchn->device,
46860 +                                            ino, dev, subj);
46861 +               FOR_EACH_SUBJECT_END(subj,x)
46862 +       FOR_EACH_ROLE_END(role)
46863 +
46864 +       update_inodev_entry(matchn->inode, matchn->device, ino, dev);
46865 +
46866 +       return;
46867 +}
46868 +
46869 +void
46870 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
46871 +{
46872 +       struct name_entry *matchn;
46873 +
46874 +       if (unlikely(!(gr_status & GR_READY)))
46875 +               return;
46876 +
46877 +       preempt_disable();
46878 +       matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
46879 +
46880 +       if (unlikely((unsigned long)matchn)) {
46881 +               write_lock(&gr_inode_lock);
46882 +               do_handle_create(matchn, dentry, mnt);
46883 +               write_unlock(&gr_inode_lock);
46884 +       }
46885 +       preempt_enable();
46886 +
46887 +       return;
46888 +}
46889 +
46890 +void
46891 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
46892 +                struct dentry *old_dentry,
46893 +                struct dentry *new_dentry,
46894 +                struct vfsmount *mnt, const __u8 replace)
46895 +{
46896 +       struct name_entry *matchn;
46897 +       struct inodev_entry *inodev;
46898 +       ino_t old_ino = old_dentry->d_inode->i_ino;
46899 +       dev_t old_dev = __get_dev(old_dentry);
46900 +
46901 +       /* vfs_rename swaps the name and parent link for old_dentry and
46902 +          new_dentry.
46903 +          At this point, old_dentry has the new name, parent link, and inode
46904 +          for the renamed file.
46905 +          If a file is being replaced by a rename, new_dentry has the inode
46906 +          and name for the replaced file.
46907 +       */
46908 +
46909 +       if (unlikely(!(gr_status & GR_READY)))
46910 +               return;
46911 +
46912 +       preempt_disable();
46913 +       matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
46914 +
46915 +       /* we wouldn't have to check d_inode if it weren't for
46916 +          NFS silly-renaming
46917 +        */
46918 +
46919 +       write_lock(&gr_inode_lock);
46920 +       if (unlikely(replace && new_dentry->d_inode)) {
46921 +               ino_t new_ino = new_dentry->d_inode->i_ino;
46922 +               dev_t new_dev = __get_dev(new_dentry);
46923 +
46924 +               inodev = lookup_inodev_entry(new_ino, new_dev);
46925 +               if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
46926 +                       do_handle_delete(inodev, new_ino, new_dev);
46927 +       }
46928 +
46929 +       inodev = lookup_inodev_entry(old_ino, old_dev);
46930 +       if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
46931 +               do_handle_delete(inodev, old_ino, old_dev);
46932 +
46933 +       if (unlikely((unsigned long)matchn))
46934 +               do_handle_create(matchn, old_dentry, mnt);
46935 +
46936 +       write_unlock(&gr_inode_lock);
46937 +       preempt_enable();
46938 +
46939 +       return;
46940 +}
46941 +
46942 +static int
46943 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
46944 +                        unsigned char **sum)
46945 +{
46946 +       struct acl_role_label *r;
46947 +       struct role_allowed_ip *ipp;
46948 +       struct role_transition *trans;
46949 +       unsigned int i;
46950 +       int found = 0;
46951 +       u32 curr_ip = current->signal->curr_ip;
46952 +
46953 +       current->signal->saved_ip = curr_ip;
46954 +
46955 +       /* check transition table */
46956 +
46957 +       for (trans = current->role->transitions; trans; trans = trans->next) {
46958 +               if (!strcmp(rolename, trans->rolename)) {
46959 +                       found = 1;
46960 +                       break;
46961 +               }
46962 +       }
46963 +
46964 +       if (!found)
46965 +               return 0;
46966 +
46967 +       /* handle special roles that do not require authentication
46968 +          and check ip */
46969 +
46970 +       FOR_EACH_ROLE_START(r)
46971 +               if (!strcmp(rolename, r->rolename) &&
46972 +                   (r->roletype & GR_ROLE_SPECIAL)) {
46973 +                       found = 0;
46974 +                       if (r->allowed_ips != NULL) {
46975 +                               for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
46976 +                                       if ((ntohl(curr_ip) & ipp->netmask) ==
46977 +                                            (ntohl(ipp->addr) & ipp->netmask))
46978 +                                               found = 1;
46979 +                               }
46980 +                       } else
46981 +                               found = 2;
46982 +                       if (!found)
46983 +                               return 0;
46984 +
46985 +                       if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
46986 +                           ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
46987 +                               *salt = NULL;
46988 +                               *sum = NULL;
46989 +                               return 1;
46990 +                       }
46991 +               }
46992 +       FOR_EACH_ROLE_END(r)
46993 +
46994 +       for (i = 0; i < num_sprole_pws; i++) {
46995 +               if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
46996 +                       *salt = acl_special_roles[i]->salt;
46997 +                       *sum = acl_special_roles[i]->sum;
46998 +                       return 1;
46999 +               }
47000 +       }
47001 +
47002 +       return 0;
47003 +}
47004 +
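+/* give the authenticating process's parent (typically the admin's shell)
+   the requested special role and its matching subject; no-op if the named
+   role does not exist or is not special */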
47005 +static void
47006 +assign_special_role(char *rolename)
47007 +{
47008 +       struct acl_object_label *obj;
47009 +       struct acl_role_label *r;
47010 +       struct acl_role_label *assigned = NULL;
47011 +       struct task_struct *tsk;
47012 +       struct file *filp;
47013 +
47014 +       FOR_EACH_ROLE_START(r)
47015 +               if (!strcmp(rolename, r->rolename) &&
47016 +                   (r->roletype & GR_ROLE_SPECIAL)) {
47017 +                       assigned = r;
47018 +                       break;
47019 +               }
47020 +       FOR_EACH_ROLE_END(r)
47021 +
47022 +       if (!assigned)
47023 +               return;
47024 +
47025 +       read_lock(&tasklist_lock);
47026 +       read_lock(&grsec_exec_file_lock);
47027 +
47028 +       tsk = current->real_parent;
47029 +       if (tsk == NULL)
47030 +               goto out_unlock;
47031 +
47032 +       filp = tsk->exec_file;
47033 +       if (filp == NULL)
47034 +               goto out_unlock;
47035 +
47036 +       tsk->is_writable = 0;
47037 +
47038 +       tsk->acl_sp_role = 1;
47039 +       tsk->acl_role_id = ++acl_sp_role_value;
47040 +       tsk->role = assigned;
47041 +       tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
47042 +
47043 +       /* ignore additional mmap checks for processes that are writable 
47044 +          by the default ACL */
47045 +       obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47046 +       if (unlikely(obj->mode & GR_WRITE))
47047 +               tsk->is_writable = 1;
47048 +       obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
47049 +       if (unlikely(obj->mode & GR_WRITE))
47050 +               tsk->is_writable = 1;
47051 +
47052 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47053 +       printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
47054 +#endif
47055 +
47056 +out_unlock:
47057 +       read_unlock(&grsec_exec_file_lock);
47058 +       read_unlock(&tasklist_lock);
47059 +       return;
47060 +}
47061 +
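+/* returns 0 if a process outside our ancestry (and not sharing our tty)
+   holds an open fd on the same character device as our controlling tty,
+   i.e. possible tty sniffing; returns 1 otherwise */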
47062 +int gr_check_secure_terminal(struct task_struct *task)
47063 +{
47064 +       struct task_struct *p, *p2, *p3;
47065 +       struct files_struct *files;
47066 +       struct fdtable *fdt;
47067 +       struct file *our_file = NULL, *file;
47068 +       int i;
47069 +
47070 +       if (task->signal->tty == NULL)
47071 +               return 1;
47072 +
47073 +       files = get_files_struct(task);
47074 +       if (files != NULL) {
47075 +               rcu_read_lock();
47076 +               fdt = files_fdtable(files);
47077 +               for (i=0; i < fdt->max_fds; i++) {
47078 +                       file = fcheck_files(files, i);
47079 +                       if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
47080 +                               get_file(file);
47081 +                               our_file = file;
47082 +                       }
47083 +               }
47084 +               rcu_read_unlock();
47085 +               put_files_struct(files);
47086 +       }
47087 +
47088 +       if (our_file == NULL)
47089 +               return 1;
47090 +
47091 +       read_lock(&tasklist_lock);
47092 +       do_each_thread(p2, p) {
47093 +               files = get_files_struct(p);
47094 +               if (files == NULL ||
47095 +                   (p->signal && p->signal->tty == task->signal->tty)) {
47096 +                       if (files != NULL)
47097 +                               put_files_struct(files);
47098 +                       continue;
47099 +               }
47100 +               rcu_read_lock();
47101 +               fdt = files_fdtable(files);
47102 +               for (i=0; i < fdt->max_fds; i++) {
47103 +                       file = fcheck_files(files, i);
47104 +                       if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
47105 +                           file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
47106 +                               p3 = task;
47107 +                               while (p3->pid > 0) {
47108 +                                       if (p3 == p)
47109 +                                               break;
47110 +                                       p3 = p3->real_parent;
47111 +                               }
47112 +                               if (p3 == p)
47113 +                                       break;
47114 +                               gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
47115 +                               gr_handle_alertkill(p);
47116 +                               rcu_read_unlock();
47117 +                               put_files_struct(files);
47118 +                               read_unlock(&tasklist_lock);
47119 +                               fput(our_file);
47120 +                               return 0;
47121 +                       }
47122 +               }
47123 +               rcu_read_unlock();
47124 +               put_files_struct(files);
47125 +       } while_each_thread(p2, p);
47126 +       read_unlock(&tasklist_lock);
47127 +
47128 +       fput(our_file);
47129 +       return 1;
47130 +}
47131 +
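+/* write handler for the RBAC control device (/dev/grsec): verifies the
+   authentication info passed in from userspace (gradm) and dispatches the
+   requested operation (enable, shutdown, reload, special role handling,
+   segvmod, status) */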
47132 +ssize_t
47133 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
47134 +{
47135 +       struct gr_arg_wrapper uwrap;
47136 +       unsigned char *sprole_salt = NULL;
47137 +       unsigned char *sprole_sum = NULL;
47138 +       int error = sizeof (struct gr_arg_wrapper);
47139 +       int error2 = 0;
47140 +
47141 +       mutex_lock(&gr_dev_mutex);
47142 +
47143 +       if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
47144 +               error = -EPERM;
47145 +               goto out;
47146 +       }
47147 +
47148 +       if (count != sizeof (struct gr_arg_wrapper)) {
47149 +               gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
47150 +               error = -EINVAL;
47151 +               goto out;
47152 +       }
47153 +
47154 +       
47155 +       if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
47156 +               gr_auth_expires = 0;
47157 +               gr_auth_attempts = 0;
47158 +       }
47159 +
47160 +       if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
47161 +               error = -EFAULT;
47162 +               goto out;
47163 +       }
47164 +
47165 +       if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
47166 +               error = -EINVAL;
47167 +               goto out;
47168 +       }
47169 +
47170 +       if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
47171 +               error = -EFAULT;
47172 +               goto out;
47173 +       }
47174 +
47175 +       if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47176 +           gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47177 +           time_after(gr_auth_expires, get_seconds())) {
47178 +               error = -EBUSY;
47179 +               goto out;
47180 +       }
47181 +
47182 +       /* if a non-root user is trying to do anything other than use a
47183 +          special role, do not attempt authentication and do not count it
47184 +          towards authentication locking
47185 +        */
47186 +
47187 +       if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
47188 +           gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47189 +           current_uid()) {
47190 +               error = -EPERM;
47191 +               goto out;
47192 +       }
47193 +
47194 +       /* ensure pw and special role name are null terminated */
47195 +
47196 +       gr_usermode->pw[GR_PW_LEN - 1] = '\0';
47197 +       gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
47198 +
47199 +       /* Okay.
47200 +        * We have enough of the argument structure (we have yet
47201 +        * to copy_from_user the tables themselves).  Copy the tables
47202 +        * only if we need them, i.e. for loading operations. */
47203 +
47204 +       switch (gr_usermode->mode) {
47205 +       case GR_STATUS:
47206 +                       if (gr_status & GR_READY) {
47207 +                               error = 1;
47208 +                               if (!gr_check_secure_terminal(current))
47209 +                                       error = 3;
47210 +                       } else
47211 +                               error = 2;
47212 +                       goto out;
47213 +       case GR_SHUTDOWN:
47214 +               if ((gr_status & GR_READY)
47215 +                   && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47216 +                       pax_open_kernel();
47217 +                       gr_status &= ~GR_READY;
47218 +                       pax_close_kernel();
47219 +
47220 +                       gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
47221 +                       free_variables();
47222 +                       memset(gr_usermode, 0, sizeof (struct gr_arg));
47223 +                       memset(gr_system_salt, 0, GR_SALT_LEN);
47224 +                       memset(gr_system_sum, 0, GR_SHA_LEN);
47225 +               } else if (gr_status & GR_READY) {
47226 +                       gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
47227 +                       error = -EPERM;
47228 +               } else {
47229 +                       gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
47230 +                       error = -EAGAIN;
47231 +               }
47232 +               break;
47233 +       case GR_ENABLE:
47234 +               if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
47235 +                       gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
47236 +               else {
47237 +                       if (gr_status & GR_READY)
47238 +                               error = -EAGAIN;
47239 +                       else
47240 +                               error = error2;
47241 +                       gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
47242 +               }
47243 +               break;
47244 +       case GR_RELOAD:
47245 +               if (!(gr_status & GR_READY)) {
47246 +                       gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
47247 +                       error = -EAGAIN;
47248 +               } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47249 +                       preempt_disable();
47250 +
47251 +                       pax_open_kernel();
47252 +                       gr_status &= ~GR_READY;
47253 +                       pax_close_kernel();
47254 +
47255 +                       free_variables();
47256 +                       if (!(error2 = gracl_init(gr_usermode))) {
47257 +                               preempt_enable();
47258 +                               gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
47259 +                       } else {
47260 +                               preempt_enable();
47261 +                               error = error2;
47262 +                               gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47263 +                       }
47264 +               } else {
47265 +                       gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47266 +                       error = -EPERM;
47267 +               }
47268 +               break;
47269 +       case GR_SEGVMOD:
47270 +               if (unlikely(!(gr_status & GR_READY))) {
47271 +                       gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
47272 +                       error = -EAGAIN;
47273 +                       break;
47274 +               }
47275 +
47276 +               if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47277 +                       gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
47278 +                       if (gr_usermode->segv_device && gr_usermode->segv_inode) {
47279 +                               struct acl_subject_label *segvacl;
47280 +                               segvacl =
47281 +                                   lookup_acl_subj_label(gr_usermode->segv_inode,
47282 +                                                         gr_usermode->segv_device,
47283 +                                                         current->role);
47284 +                               if (segvacl) {
47285 +                                       segvacl->crashes = 0;
47286 +                                       segvacl->expires = 0;
47287 +                               }
47288 +                       } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
47289 +                               gr_remove_uid(gr_usermode->segv_uid);
47290 +                       }
47291 +               } else {
47292 +                       gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
47293 +                       error = -EPERM;
47294 +               }
47295 +               break;
47296 +       case GR_SPROLE:
47297 +       case GR_SPROLEPAM:
47298 +               if (unlikely(!(gr_status & GR_READY))) {
47299 +                       gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
47300 +                       error = -EAGAIN;
47301 +                       break;
47302 +               }
47303 +
47304 +               if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
47305 +                       current->role->expires = 0;
47306 +                       current->role->auth_attempts = 0;
47307 +               }
47308 +
47309 +               if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47310 +                   time_after(current->role->expires, get_seconds())) {
47311 +                       error = -EBUSY;
47312 +                       goto out;
47313 +               }
47314 +
47315 +               if (lookup_special_role_auth
47316 +                   (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
47317 +                   && ((!sprole_salt && !sprole_sum)
47318 +                       || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
47319 +                       char *p = "";
47320 +                       assign_special_role(gr_usermode->sp_role);
47321 +                       read_lock(&tasklist_lock);
47322 +                       if (current->real_parent)
47323 +                               p = current->real_parent->role->rolename;
47324 +                       read_unlock(&tasklist_lock);
47325 +                       gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
47326 +                                       p, acl_sp_role_value);
47327 +               } else {
47328 +                       gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
47329 +                       error = -EPERM;
47330 +                       if(!(current->role->auth_attempts++))
47331 +                               current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47332 +
47333 +                       goto out;
47334 +               }
47335 +               break;
47336 +       case GR_UNSPROLE:
47337 +               if (unlikely(!(gr_status & GR_READY))) {
47338 +                       gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47339 +                       error = -EAGAIN;
47340 +                       break;
47341 +               }
47342 +
47343 +               if (current->role->roletype & GR_ROLE_SPECIAL) {
47344 +                       char *p = "";
47345 +                       int i = 0;
47346 +
47347 +                       read_lock(&tasklist_lock);
47348 +                       if (current->real_parent) {
47349 +                               p = current->real_parent->role->rolename;
47350 +                               i = current->real_parent->acl_role_id;
47351 +                       }
47352 +                       read_unlock(&tasklist_lock);
47353 +
47354 +                       gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47355 +                       gr_set_acls(1);
47356 +               } else {
47357 +                       error = -EPERM;
47358 +                       goto out;
47359 +               }
47360 +               break;
47361 +       default:
47362 +               gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47363 +               error = -EINVAL;
47364 +               break;
47365 +       }
47366 +
47367 +       if (error != -EPERM)
47368 +               goto out;
47369 +
47370 +       if(!(gr_auth_attempts++))
47371 +               gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47372 +
47373 +      out:
47374 +       mutex_unlock(&gr_dev_mutex);
47375 +       return error;
47376 +}
47377 +
47378 +/* must be called with
47379 +       rcu_read_lock();
47380 +       read_lock(&tasklist_lock);
47381 +       read_lock(&grsec_exec_file_lock);
47382 +*/
47383 +int gr_apply_subject_to_task(struct task_struct *task)
47384 +{
47385 +       struct acl_object_label *obj;
47386 +       char *tmpname;
47387 +       struct acl_subject_label *tmpsubj;
47388 +       struct file *filp;
47389 +       struct name_entry *nmatch;
47390 +
47391 +       filp = task->exec_file;
47392 +       if (filp == NULL)
47393 +               return 0;
47394 +
47395 +       /* the following applies the correct subject to
47396 +          binaries that were already running when the RBAC
47397 +          system was enabled and that have been replaced or
47398 +          deleted since they were executed
47399 +          -----
47400 +          when the RBAC system starts, the inode/dev taken
47401 +          from exec_file will be one the RBAC system
47402 +          is unaware of; it only knows the inode/dev
47403 +          of the file currently on disk, or the absence
47404 +          of it.
47405 +       */
47406 +       preempt_disable();
47407 +       tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47408 +                       
47409 +       nmatch = lookup_name_entry(tmpname);
47410 +       preempt_enable();
47411 +       tmpsubj = NULL;
47412 +       if (nmatch) {
47413 +               if (nmatch->deleted)
47414 +                       tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47415 +               else
47416 +                       tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47417 +               if (tmpsubj != NULL)
47418 +                       task->acl = tmpsubj;
47419 +       }
47420 +       if (tmpsubj == NULL)
47421 +               task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47422 +                                          task->role);
47423 +       if (task->acl) {
47424 +               task->is_writable = 0;
47425 +               /* ignore additional mmap checks for processes that are writable 
47426 +                  by the default ACL */
47427 +               obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47428 +               if (unlikely(obj->mode & GR_WRITE))
47429 +                       task->is_writable = 1;
47430 +               obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47431 +               if (unlikely(obj->mode & GR_WRITE))
47432 +                       task->is_writable = 1;
47433 +
47434 +               gr_set_proc_res(task);
47435 +
47436 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47437 +               printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47438 +#endif
47439 +       } else {
47440 +               return 1;
47441 +       }
47442 +
47443 +       return 0;
47444 +}
47445 +
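+/* reapply role and subject labels to all tasks; when type is non-zero
+   (leaving a special role) only tasks that inherited the current admin
+   role are relabeled */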
47446 +int
47447 +gr_set_acls(const int type)
47448 +{
47449 +       struct task_struct *task, *task2;
47450 +       struct acl_role_label *role = current->role;
47451 +       __u16 acl_role_id = current->acl_role_id;
47452 +       const struct cred *cred;
47453 +       int ret;
47454 +
47455 +       rcu_read_lock();
47456 +       read_lock(&tasklist_lock);
47457 +       read_lock(&grsec_exec_file_lock);
47458 +       do_each_thread(task2, task) {
47459 +               /* check to see if we're called from the exit handler;
47460 +                  if so, only replace ACLs that have inherited the admin
47461 +                  ACL */
47462 +
47463 +               if (type && (task->role != role ||
47464 +                            task->acl_role_id != acl_role_id))
47465 +                       continue;
47466 +
47467 +               task->acl_role_id = 0;
47468 +               task->acl_sp_role = 0;
47469 +
47470 +               if (task->exec_file) {
47471 +                       cred = __task_cred(task);
47472 +                       task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47473 +                       ret = gr_apply_subject_to_task(task);
47474 +                       if (ret) {
47475 +                               read_unlock(&grsec_exec_file_lock);
47476 +                               read_unlock(&tasklist_lock);
47477 +                               rcu_read_unlock();
47478 +                               gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47479 +                               return ret;
47480 +                       }
47481 +               } else {
47482 +                       // it's a kernel process
47483 +                       task->role = kernel_role;
47484 +                       task->acl = kernel_role->root_label;
47485 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47486 +                       task->acl->mode &= ~GR_PROCFIND;
47487 +#endif
47488 +               }
47489 +       } while_each_thread(task2, task);
47490 +       read_unlock(&grsec_exec_file_lock);
47491 +       read_unlock(&tasklist_lock);
47492 +       rcu_read_unlock();
47493 +
47494 +       return 0;
47495 +}
47496 +
47497 +void
47498 +gr_learn_resource(const struct task_struct *task,
47499 +                 const int res, const unsigned long wanted, const int gt)
47500 +{
47501 +       struct acl_subject_label *acl;
47502 +       const struct cred *cred;
47503 +
47504 +       if (unlikely((gr_status & GR_READY) &&
47505 +                    task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47506 +               goto skip_reslog;
47507 +
47508 +#ifdef CONFIG_GRKERNSEC_RESLOG
47509 +       gr_log_resource(task, res, wanted, gt);
47510 +#endif
47511 +      skip_reslog:
47512 +
47513 +       if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47514 +               return;
47515 +
47516 +       acl = task->acl;
47517 +
47518 +       if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47519 +                  !(acl->resmask & (1 << (unsigned short) res))))
47520 +               return;
47521 +
47522 +       if (wanted >= acl->res[res].rlim_cur) {
47523 +               unsigned long res_add;
47524 +
47525 +               res_add = wanted;
47526 +               switch (res) {
47527 +               case RLIMIT_CPU:
47528 +                       res_add += GR_RLIM_CPU_BUMP;
47529 +                       break;
47530 +               case RLIMIT_FSIZE:
47531 +                       res_add += GR_RLIM_FSIZE_BUMP;
47532 +                       break;
47533 +               case RLIMIT_DATA:
47534 +                       res_add += GR_RLIM_DATA_BUMP;
47535 +                       break;
47536 +               case RLIMIT_STACK:
47537 +                       res_add += GR_RLIM_STACK_BUMP;
47538 +                       break;
47539 +               case RLIMIT_CORE:
47540 +                       res_add += GR_RLIM_CORE_BUMP;
47541 +                       break;
47542 +               case RLIMIT_RSS:
47543 +                       res_add += GR_RLIM_RSS_BUMP;
47544 +                       break;
47545 +               case RLIMIT_NPROC:
47546 +                       res_add += GR_RLIM_NPROC_BUMP;
47547 +                       break;
47548 +               case RLIMIT_NOFILE:
47549 +                       res_add += GR_RLIM_NOFILE_BUMP;
47550 +                       break;
47551 +               case RLIMIT_MEMLOCK:
47552 +                       res_add += GR_RLIM_MEMLOCK_BUMP;
47553 +                       break;
47554 +               case RLIMIT_AS:
47555 +                       res_add += GR_RLIM_AS_BUMP;
47556 +                       break;
47557 +               case RLIMIT_LOCKS:
47558 +                       res_add += GR_RLIM_LOCKS_BUMP;
47559 +                       break;
47560 +               case RLIMIT_SIGPENDING:
47561 +                       res_add += GR_RLIM_SIGPENDING_BUMP;
47562 +                       break;
47563 +               case RLIMIT_MSGQUEUE:
47564 +                       res_add += GR_RLIM_MSGQUEUE_BUMP;
47565 +                       break;
47566 +               case RLIMIT_NICE:
47567 +                       res_add += GR_RLIM_NICE_BUMP;
47568 +                       break;
47569 +               case RLIMIT_RTPRIO:
47570 +                       res_add += GR_RLIM_RTPRIO_BUMP;
47571 +                       break;
47572 +               case RLIMIT_RTTIME:
47573 +                       res_add += GR_RLIM_RTTIME_BUMP;
47574 +                       break;
47575 +               }
47576 +
47577 +               acl->res[res].rlim_cur = res_add;
47578 +
47579 +               if (wanted > acl->res[res].rlim_max)
47580 +                       acl->res[res].rlim_max = res_add;
47581 +
47582 +               /* only log the subject filename, since resource logging is supported for
47583 +                  single-subject learning only */
47584 +               rcu_read_lock();
47585 +               cred = __task_cred(task);
47586 +               security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47587 +                              task->role->roletype, cred->uid, cred->gid, acl->filename,
47588 +                              acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47589 +                              "", (unsigned long) res, &task->signal->saved_ip);
47590 +               rcu_read_unlock();
47591 +       }
47592 +
47593 +       return;
47594 +}
47595 +
47596 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47597 +void
47598 +pax_set_initial_flags(struct linux_binprm *bprm)
47599 +{
47600 +       struct task_struct *task = current;
47601 +        struct acl_subject_label *proc;
47602 +       unsigned long flags;
47603 +
47604 +        if (unlikely(!(gr_status & GR_READY)))
47605 +                return;
47606 +
47607 +       flags = pax_get_flags(task);
47608 +
47609 +        proc = task->acl;
47610 +
47611 +       if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47612 +               flags &= ~MF_PAX_PAGEEXEC;
47613 +       if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47614 +               flags &= ~MF_PAX_SEGMEXEC;
47615 +       if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47616 +               flags &= ~MF_PAX_RANDMMAP;
47617 +       if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47618 +               flags &= ~MF_PAX_EMUTRAMP;
47619 +       if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47620 +               flags &= ~MF_PAX_MPROTECT;
47621 +
47622 +       if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47623 +               flags |= MF_PAX_PAGEEXEC;
47624 +       if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47625 +               flags |= MF_PAX_SEGMEXEC;
47626 +       if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47627 +               flags |= MF_PAX_RANDMMAP;
47628 +       if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47629 +               flags |= MF_PAX_EMUTRAMP;
47630 +       if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47631 +               flags |= MF_PAX_MPROTECT;
47632 +
47633 +       pax_set_flags(task, flags);
47634 +
47635 +        return;
47636 +}
47637 +#endif
47638 +
47639 +#ifdef CONFIG_SYSCTL
47640 +/* Eric Biederman likes breaking userland ABI and every inode-based security
47641 +   system to save 35kb of memory */
47642 +
47643 +/* we modify the passed in filename, but adjust it back before returning */
47644 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
47645 +{
47646 +       struct name_entry *nmatch;
47647 +       char *p, *lastp = NULL;
47648 +       struct acl_object_label *obj = NULL, *tmp;
47649 +       struct acl_subject_label *tmpsubj;
47650 +       char c = '\0';
47651 +
47652 +       read_lock(&gr_inode_lock);
47653 +
47654 +       p = name + len - 1;
47655 +       do {
47656 +               nmatch = lookup_name_entry(name);
47657 +               if (lastp != NULL)
47658 +                       *lastp = c;
47659 +
47660 +               if (nmatch == NULL)
47661 +                       goto next_component;
47662 +               tmpsubj = current->acl;
47663 +               do {
47664 +                       obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
47665 +                       if (obj != NULL) {
47666 +                               tmp = obj->globbed;
47667 +                               while (tmp) {
47668 +                                       if (!glob_match(tmp->filename, name)) {
47669 +                                               obj = tmp;
47670 +                                               goto found_obj;
47671 +                                       }
47672 +                                       tmp = tmp->next;
47673 +                               }
47674 +                               goto found_obj;
47675 +                       }
47676 +               } while ((tmpsubj = tmpsubj->parent_subject));
47677 +next_component:
47678 +               /* end case */
47679 +               if (p == name)
47680 +                       break;
47681 +
47682 +               while (*p != '/')
47683 +                       p--;
47684 +               if (p == name)
47685 +                       lastp = p + 1;
47686 +               else {
47687 +                       lastp = p;
47688 +                       p--;
47689 +               }
47690 +               c = *lastp;
47691 +               *lastp = '\0';
47692 +       } while (1);
47693 +found_obj:
47694 +       read_unlock(&gr_inode_lock);
47695 +       /* obj returned will always be non-null */
47696 +       return obj;
47697 +}
47698 +
47699 +/* returns 0 when allowing, non-zero on error
47700 +   op of 0 is used for readdir, so we don't log the names of hidden files
47701 +*/
47702 +__u32
47703 +gr_handle_sysctl(const struct ctl_table *table, const int op)
47704 +{
47705 +       struct ctl_table *tmp;
47706 +       const char *proc_sys = "/proc/sys";
47707 +       char *path;
47708 +       struct acl_object_label *obj;
47709 +       unsigned short len = 0, pos = 0, depth = 0, i;
47710 +       __u32 err = 0;
47711 +       __u32 mode = 0;
47712 +
47713 +       if (unlikely(!(gr_status & GR_READY)))
47714 +               return 0;
47715 +
47716 +       /* for now, ignore operations on non-sysctl entries if it's not a
47717 +          readdir */
47718 +       if (table->child != NULL && op != 0)
47719 +               return 0;
47720 +
47721 +       mode |= GR_FIND;
47722 +       /* it's only a read if it's an entry, read on dirs is for readdir */
47723 +       if (op & MAY_READ)
47724 +               mode |= GR_READ;
47725 +       if (op & MAY_WRITE)
47726 +               mode |= GR_WRITE;
47727 +
47728 +       preempt_disable();
47729 +
47730 +       path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47731 +
47732 +       /* it's only a read/write if it's an actual entry, not a dir
47733 +          (which are opened for readdir)
47734 +       */
47735 +
47736 +       /* convert the requested sysctl entry into a pathname */
47737 +
47738 +       for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47739 +               len += strlen(tmp->procname);
47740 +               len++;
47741 +               depth++;
47742 +       }
47743 +
47744 +       if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
47745 +               /* deny */
47746 +               goto out;
47747 +       }
47748 +
47749 +       memset(path, 0, PAGE_SIZE);
47750 +
47751 +       memcpy(path, proc_sys, strlen(proc_sys));
47752 +
47753 +       pos += strlen(proc_sys);
47754 +
47755 +       for (; depth > 0; depth--) {
47756 +               path[pos] = '/';
47757 +               pos++;
47758 +               for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47759 +                       if (depth == i) {
47760 +                               memcpy(path + pos, tmp->procname,
47761 +                                      strlen(tmp->procname));
47762 +                               pos += strlen(tmp->procname);
47763 +                       }
47764 +                       i++;
47765 +               }
47766 +       }
47767 +
47768 +       obj = gr_lookup_by_name(path, pos);
47769 +       err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
47770 +
47771 +       if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
47772 +                    ((err & mode) != mode))) {
47773 +               __u32 new_mode = mode;
47774 +
47775 +               new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47776 +
47777 +               err = 0;
47778 +               gr_log_learn_sysctl(path, new_mode);
47779 +       } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
47780 +               gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
47781 +               err = -ENOENT;
47782 +       } else if (!(err & GR_FIND)) {
47783 +               err = -ENOENT;
47784 +       } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
47785 +               gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
47786 +                              path, (mode & GR_READ) ? " reading" : "",
47787 +                              (mode & GR_WRITE) ? " writing" : "");
47788 +               err = -EACCES;
47789 +       } else if ((err & mode) != mode) {
47790 +               err = -EACCES;
47791 +       } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
47792 +               gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
47793 +                              path, (mode & GR_READ) ? " reading" : "",
47794 +                              (mode & GR_WRITE) ? " writing" : "");
47795 +               err = 0;
47796 +       } else
47797 +               err = 0;
47798 +
47799 +      out:
47800 +       preempt_enable();
47801 +
47802 +       return err;
47803 +}
47804 +#endif
47805 +
47806 +int
47807 +gr_handle_proc_ptrace(struct task_struct *task)
47808 +{
47809 +       struct file *filp;
47810 +       struct task_struct *tmp = task;
47811 +       struct task_struct *curtemp = current;
47812 +       __u32 retmode;
47813 +
47814 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47815 +       if (unlikely(!(gr_status & GR_READY)))
47816 +               return 0;
47817 +#endif
47818 +
47819 +       read_lock(&tasklist_lock);
47820 +       read_lock(&grsec_exec_file_lock);
47821 +       filp = task->exec_file;
47822 +
47823 +       while (tmp->pid > 0) {
47824 +               if (tmp == curtemp)
47825 +                       break;
47826 +               tmp = tmp->real_parent;
47827 +       }
47828 +
47829 +       if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47830 +                               ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
47831 +               read_unlock(&grsec_exec_file_lock);
47832 +               read_unlock(&tasklist_lock);
47833 +               return 1;
47834 +       }
47835 +
47836 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47837 +       if (!(gr_status & GR_READY)) {
47838 +               read_unlock(&grsec_exec_file_lock);
47839 +               read_unlock(&tasklist_lock);
47840 +               return 0;
47841 +       }
47842 +#endif
47843 +
47844 +       retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
47845 +       read_unlock(&grsec_exec_file_lock);
47846 +       read_unlock(&tasklist_lock);
47847 +
47848 +       if (retmode & GR_NOPTRACE)
47849 +               return 1;
47850 +
47851 +       if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
47852 +           && (current->acl != task->acl || (current->acl != current->role->root_label
47853 +           && current->pid != task->pid)))
47854 +               return 1;
47855 +
47856 +       return 0;
47857 +}
47858 +
47859 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
47860 +{
47861 +       if (unlikely(!(gr_status & GR_READY)))
47862 +               return;
47863 +
47864 +       if (!(current->role->roletype & GR_ROLE_GOD))
47865 +               return;
47866 +
47867 +       seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
47868 +                       p->role->rolename, gr_task_roletype_to_char(p),
47869 +                       p->acl->filename);
47870 +}
47871 +
47872 +int
47873 +gr_handle_ptrace(struct task_struct *task, const long request)
47874 +{
47875 +       struct task_struct *tmp = task;
47876 +       struct task_struct *curtemp = current;
47877 +       __u32 retmode;
47878 +
47879 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47880 +       if (unlikely(!(gr_status & GR_READY)))
47881 +               return 0;
47882 +#endif
47883 +
47884 +       read_lock(&tasklist_lock);
47885 +       while (tmp->pid > 0) {
47886 +               if (tmp == curtemp)
47887 +                       break;
47888 +               tmp = tmp->real_parent;
47889 +       }
47890 +
47891 +       if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47892 +                               ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
47893 +               read_unlock(&tasklist_lock);
47894 +               gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47895 +               return 1;
47896 +       }
47897 +       read_unlock(&tasklist_lock);
47898 +
47899 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47900 +       if (!(gr_status & GR_READY))
47901 +               return 0;
47902 +#endif
47903 +
47904 +       read_lock(&grsec_exec_file_lock);
47905 +       if (unlikely(!task->exec_file)) {
47906 +               read_unlock(&grsec_exec_file_lock);
47907 +               return 0;
47908 +       }
47909 +
47910 +       retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
47911 +       read_unlock(&grsec_exec_file_lock);
47912 +
47913 +       if (retmode & GR_NOPTRACE) {
47914 +               gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47915 +               return 1;
47916 +       }
47917 +               
47918 +       if (retmode & GR_PTRACERD) {
47919 +               switch (request) {
47920 +               case PTRACE_POKETEXT:
47921 +               case PTRACE_POKEDATA:
47922 +               case PTRACE_POKEUSR:
47923 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
47924 +               case PTRACE_SETREGS:
47925 +               case PTRACE_SETFPREGS:
47926 +#endif
47927 +#ifdef CONFIG_X86
47928 +               case PTRACE_SETFPXREGS:
47929 +#endif
47930 +#ifdef CONFIG_ALTIVEC
47931 +               case PTRACE_SETVRREGS:
47932 +#endif
47933 +                       return 1;
47934 +               default:
47935 +                       return 0;
47936 +               }
47937 +       } else if (!(current->acl->mode & GR_POVERRIDE) &&
47938 +                  !(current->role->roletype & GR_ROLE_GOD) &&
47939 +                  (current->acl != task->acl)) {
47940 +               gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47941 +               return 1;
47942 +       }
47943 +
47944 +       return 0;
47945 +}
47946 +
47947 +static int is_writable_mmap(const struct file *filp)
47948 +{
47949 +       struct task_struct *task = current;
47950 +       struct acl_object_label *obj, *obj2;
47951 +
47952 +       if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
47953 +           !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
47954 +               obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47955 +               obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
47956 +                                    task->role->root_label);
47957 +               if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
47958 +                       gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
47959 +                       return 1;
47960 +               }
47961 +       }
47962 +       return 0;
47963 +}
47964 +
47965 +int
47966 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
47967 +{
47968 +       __u32 mode;
47969 +
47970 +       if (unlikely(!file || !(prot & PROT_EXEC)))
47971 +               return 1;
47972 +
47973 +       if (is_writable_mmap(file))
47974 +               return 0;
47975 +
47976 +       mode =
47977 +           gr_search_file(file->f_path.dentry,
47978 +                          GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47979 +                          file->f_path.mnt);
47980 +
47981 +       if (!gr_tpe_allow(file))
47982 +               return 0;
47983 +
47984 +       if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47985 +               gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47986 +               return 0;
47987 +       } else if (unlikely(!(mode & GR_EXEC))) {
47988 +               return 0;
47989 +       } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47990 +               gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47991 +               return 1;
47992 +       }
47993 +
47994 +       return 1;
47995 +}
47996 +
47997 +int
47998 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47999 +{
48000 +       __u32 mode;
48001 +
48002 +       if (unlikely(!file || !(prot & PROT_EXEC)))
48003 +               return 1;
48004 +
48005 +       if (is_writable_mmap(file))
48006 +               return 0;
48007 +
48008 +       mode =
48009 +           gr_search_file(file->f_path.dentry,
48010 +                          GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48011 +                          file->f_path.mnt);
48012 +
48013 +       if (!gr_tpe_allow(file))
48014 +               return 0;
48015 +
48016 +       if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48017 +               gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48018 +               return 0;
48019 +       } else if (unlikely(!(mode & GR_EXEC))) {
48020 +               return 0;
48021 +       } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48022 +               gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48023 +               return 1;
48024 +       }
48025 +
48026 +       return 1;
48027 +}
48028 +
48029 +void
48030 +gr_acl_handle_psacct(struct task_struct *task, const long code)
48031 +{
48032 +       unsigned long runtime;
48033 +       unsigned long cputime;
48034 +       unsigned int wday, cday;
48035 +       __u8 whr, chr;
48036 +       __u8 wmin, cmin;
48037 +       __u8 wsec, csec;
48038 +       struct timespec timeval;
48039 +
48040 +       if (unlikely(!(gr_status & GR_READY) || !task->acl ||
48041 +                    !(task->acl->mode & GR_PROCACCT)))
48042 +               return;
48043 +
48044 +       do_posix_clock_monotonic_gettime(&timeval);
48045 +       runtime = timeval.tv_sec - task->start_time.tv_sec;
48046 +       wday = runtime / (3600 * 24);
48047 +       runtime -= wday * (3600 * 24);
48048 +       whr = runtime / 3600;
48049 +       runtime -= whr * 3600;
48050 +       wmin = runtime / 60;
48051 +       runtime -= wmin * 60;
48052 +       wsec = runtime;
48053 +
48054 +       cputime = (task->utime + task->stime) / HZ;
48055 +       cday = cputime / (3600 * 24);
48056 +       cputime -= cday * (3600 * 24);
48057 +       chr = cputime / 3600;
48058 +       cputime -= chr * 3600;
48059 +       cmin = cputime / 60;
48060 +       cputime -= cmin * 60;
48061 +       csec = cputime;
48062 +
48063 +       gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
48064 +
48065 +       return;
48066 +}
48067 +
48068 +void gr_set_kernel_label(struct task_struct *task)
48069 +{
48070 +       if (gr_status & GR_READY) {
48071 +               task->role = kernel_role;
48072 +               task->acl = kernel_role->root_label;
48073 +       }
48074 +       return;
48075 +}
48076 +
48077 +#ifdef CONFIG_TASKSTATS
48078 +int gr_is_taskstats_denied(int pid)
48079 +{
48080 +       struct task_struct *task;
48081 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48082 +       const struct cred *cred;
48083 +#endif
48084 +       int ret = 0;
48085 +
48086 +       /* restrict taskstats viewing to un-chrooted root users
48087 +          who have the 'view' subject flag if the RBAC system is enabled
48088 +       */
48089 +
48090 +       rcu_read_lock();
48091 +       read_lock(&tasklist_lock);
48092 +       task = find_task_by_vpid(pid);
48093 +       if (task) {
48094 +#ifdef CONFIG_GRKERNSEC_CHROOT
48095 +               if (proc_is_chrooted(task))
48096 +                       ret = -EACCES;
48097 +#endif
48098 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48099 +               cred = __task_cred(task);
48100 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48101 +               if (cred->uid != 0)
48102 +                       ret = -EACCES;
48103 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48104 +               if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
48105 +                       ret = -EACCES;
48106 +#endif
48107 +#endif
48108 +               if (gr_status & GR_READY) {
48109 +                       if (!(task->acl->mode & GR_VIEW))
48110 +                               ret = -EACCES;
48111 +               }
48112 +       } else
48113 +               ret = -ENOENT;
48114 +
48115 +       read_unlock(&tasklist_lock);
48116 +       rcu_read_unlock();
48117 +
48118 +       return ret;
48119 +}
48120 +#endif
48121 +
48122 +/* AUXV entries are filled via a descendant of search_binary_handler
48123 +   after we've already applied the subject for the target
48124 +*/
48125 +int gr_acl_enable_at_secure(void)
48126 +{
48127 +       if (unlikely(!(gr_status & GR_READY)))
48128 +               return 0;
48129 +
48130 +       if (current->acl->mode & GR_ATSECURE)
48131 +               return 1;
48132 +
48133 +       return 0;
48134 +}
48135 +       
48136 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
48137 +{
48138 +       struct task_struct *task = current;
48139 +       struct dentry *dentry = file->f_path.dentry;
48140 +       struct vfsmount *mnt = file->f_path.mnt;
48141 +       struct acl_object_label *obj, *tmp;
48142 +       struct acl_subject_label *subj;
48143 +       unsigned int bufsize;
48144 +       int is_not_root;
48145 +       char *path;
48146 +       dev_t dev = __get_dev(dentry);
48147 +
48148 +       if (unlikely(!(gr_status & GR_READY)))
48149 +               return 1;
48150 +
48151 +       if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48152 +               return 1;
48153 +
48154 +       /* ignore Eric Biederman */
48155 +       if (IS_PRIVATE(dentry->d_inode))
48156 +               return 1;
48157 +
48158 +       subj = task->acl;
48159 +       do {
48160 +               obj = lookup_acl_obj_label(ino, dev, subj);
48161 +               if (obj != NULL)
48162 +                       return (obj->mode & GR_FIND) ? 1 : 0;
48163 +       } while ((subj = subj->parent_subject));
48164 +       
48165 +       /* this is purely an optimization since we're looking for an object
48166 +          for the directory we're doing a readdir on.
48167 +          if it's possible for any globbed object to match the entry we're
48168 +          filling into the directory, then the object we find here will be
48169 +          an anchor point with attached globbed objects
48170 +       */
48171 +       obj = chk_obj_label_noglob(dentry, mnt, task->acl);
48172 +       if (obj->globbed == NULL)
48173 +               return (obj->mode & GR_FIND) ? 1 : 0;
48174 +
48175 +       is_not_root = ((obj->filename[0] == '/') &&
48176 +                  (obj->filename[1] == '\0')) ? 0 : 1;
48177 +       bufsize = PAGE_SIZE - namelen - is_not_root;
48178 +
48179 +       /* check bufsize > PAGE_SIZE || bufsize == 0 */
48180 +       if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
48181 +               return 1;
48182 +
48183 +       preempt_disable();
48184 +       path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48185 +                          bufsize);
48186 +
48187 +       bufsize = strlen(path);
48188 +
48189 +       /* if base is "/", don't append an additional slash */
48190 +       if (is_not_root)
48191 +               *(path + bufsize) = '/';
48192 +       memcpy(path + bufsize + is_not_root, name, namelen);
48193 +       *(path + bufsize + namelen + is_not_root) = '\0';
48194 +
48195 +       tmp = obj->globbed;
48196 +       while (tmp) {
48197 +               if (!glob_match(tmp->filename, path)) {
48198 +                       preempt_enable();
48199 +                       return (tmp->mode & GR_FIND) ? 1 : 0;
48200 +               }
48201 +               tmp = tmp->next;
48202 +       }
48203 +       preempt_enable();
48204 +       return (obj->mode & GR_FIND) ? 1 : 0;
48205 +}
48206 +
48207 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
48208 +EXPORT_SYMBOL(gr_acl_is_enabled);
48209 +#endif
48210 +EXPORT_SYMBOL(gr_learn_resource);
48211 +EXPORT_SYMBOL(gr_set_kernel_label);
48212 +#ifdef CONFIG_SECURITY
48213 +EXPORT_SYMBOL(gr_check_user_change);
48214 +EXPORT_SYMBOL(gr_check_group_change);
48215 +#endif
48216 +
48217 diff -urNp linux-3.0.4/grsecurity/gracl_cap.c linux-3.0.4/grsecurity/gracl_cap.c
48218 --- linux-3.0.4/grsecurity/gracl_cap.c  1969-12-31 19:00:00.000000000 -0500
48219 +++ linux-3.0.4/grsecurity/gracl_cap.c  2011-08-23 21:48:14.000000000 -0400
48220 @@ -0,0 +1,139 @@
48221 +#include <linux/kernel.h>
48222 +#include <linux/module.h>
48223 +#include <linux/sched.h>
48224 +#include <linux/gracl.h>
48225 +#include <linux/grsecurity.h>
48226 +#include <linux/grinternal.h>
48227 +
48228 +static const char *captab_log[] = {
48229 +       "CAP_CHOWN",
48230 +       "CAP_DAC_OVERRIDE",
48231 +       "CAP_DAC_READ_SEARCH",
48232 +       "CAP_FOWNER",
48233 +       "CAP_FSETID",
48234 +       "CAP_KILL",
48235 +       "CAP_SETGID",
48236 +       "CAP_SETUID",
48237 +       "CAP_SETPCAP",
48238 +       "CAP_LINUX_IMMUTABLE",
48239 +       "CAP_NET_BIND_SERVICE",
48240 +       "CAP_NET_BROADCAST",
48241 +       "CAP_NET_ADMIN",
48242 +       "CAP_NET_RAW",
48243 +       "CAP_IPC_LOCK",
48244 +       "CAP_IPC_OWNER",
48245 +       "CAP_SYS_MODULE",
48246 +       "CAP_SYS_RAWIO",
48247 +       "CAP_SYS_CHROOT",
48248 +       "CAP_SYS_PTRACE",
48249 +       "CAP_SYS_PACCT",
48250 +       "CAP_SYS_ADMIN",
48251 +       "CAP_SYS_BOOT",
48252 +       "CAP_SYS_NICE",
48253 +       "CAP_SYS_RESOURCE",
48254 +       "CAP_SYS_TIME",
48255 +       "CAP_SYS_TTY_CONFIG",
48256 +       "CAP_MKNOD",
48257 +       "CAP_LEASE",
48258 +       "CAP_AUDIT_WRITE",
48259 +       "CAP_AUDIT_CONTROL",
48260 +       "CAP_SETFCAP",
48261 +       "CAP_MAC_OVERRIDE",
48262 +       "CAP_MAC_ADMIN",
48263 +       "CAP_SYSLOG"
48264 +};
48265 +
48266 +EXPORT_SYMBOL(gr_is_capable);
48267 +EXPORT_SYMBOL(gr_is_capable_nolog);
48268 +
48269 +int
48270 +gr_is_capable(const int cap)
48271 +{
48272 +       struct task_struct *task = current;
48273 +       const struct cred *cred = current_cred();
48274 +       struct acl_subject_label *curracl;
48275 +       kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48276 +       kernel_cap_t cap_audit = __cap_empty_set;
48277 +
48278 +       if (!gr_acl_is_enabled())
48279 +               return 1;
48280 +
48281 +       curracl = task->acl;
48282 +
48283 +       cap_drop = curracl->cap_lower;
48284 +       cap_mask = curracl->cap_mask;
48285 +       cap_audit = curracl->cap_invert_audit;
48286 +
48287 +       while ((curracl = curracl->parent_subject)) {
48288 +               /* if the cap isn't specified in the current computed mask but is specified in the
48289 +                  current level subject, and is lowered in the current level subject, then add
48290 +                  it to the set of dropped capabilities;
48291 +                  otherwise, add the current level subject's mask to the current computed mask
48292 +                */
48293 +               if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48294 +                       cap_raise(cap_mask, cap);
48295 +                       if (cap_raised(curracl->cap_lower, cap))
48296 +                               cap_raise(cap_drop, cap);
48297 +                       if (cap_raised(curracl->cap_invert_audit, cap))
48298 +                               cap_raise(cap_audit, cap);
48299 +               }
48300 +       }
48301 +
48302 +       if (!cap_raised(cap_drop, cap)) {
48303 +               if (cap_raised(cap_audit, cap))
48304 +                       gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
48305 +               return 1;
48306 +       }
48307 +
48308 +       curracl = task->acl;
48309 +
48310 +       if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
48311 +           && cap_raised(cred->cap_effective, cap)) {
48312 +               security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48313 +                              task->role->roletype, cred->uid,
48314 +                              cred->gid, task->exec_file ?
48315 +                              gr_to_filename(task->exec_file->f_path.dentry,
48316 +                              task->exec_file->f_path.mnt) : curracl->filename,
48317 +                              curracl->filename, 0UL,
48318 +                              0UL, "", (unsigned long) cap, &task->signal->saved_ip);
48319 +               return 1;
48320 +       }
48321 +
48322 +       if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
48323 +               gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
48324 +       return 0;
48325 +}
48326 +
48327 +int
48328 +gr_is_capable_nolog(const int cap)
48329 +{
48330 +       struct acl_subject_label *curracl;
48331 +       kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48332 +
48333 +       if (!gr_acl_is_enabled())
48334 +               return 1;
48335 +
48336 +       curracl = current->acl;
48337 +
48338 +       cap_drop = curracl->cap_lower;
48339 +       cap_mask = curracl->cap_mask;
48340 +
48341 +       while ((curracl = curracl->parent_subject)) {
48342 +               /* if the cap isn't specified in the current computed mask but is specified in the
48343 +                  current level subject, and is lowered in the current level subject, then add
48344 +                  it to the set of dropped capabilities;
48345 +                  otherwise, add the current level subject's mask to the current computed mask
48346 +                */
48347 +               if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48348 +                       cap_raise(cap_mask, cap);
48349 +                       if (cap_raised(curracl->cap_lower, cap))
48350 +                               cap_raise(cap_drop, cap);
48351 +               }
48352 +       }
48353 +
48354 +       if (!cap_raised(cap_drop, cap))
48355 +               return 1;
48356 +
48357 +       return 0;
48358 +}
48359 +
48360 diff -urNp linux-3.0.4/grsecurity/gracl_fs.c linux-3.0.4/grsecurity/gracl_fs.c
48361 --- linux-3.0.4/grsecurity/gracl_fs.c   1969-12-31 19:00:00.000000000 -0500
48362 +++ linux-3.0.4/grsecurity/gracl_fs.c   2011-08-23 21:48:14.000000000 -0400
48363 @@ -0,0 +1,431 @@
48364 +#include <linux/kernel.h>
48365 +#include <linux/sched.h>
48366 +#include <linux/types.h>
48367 +#include <linux/fs.h>
48368 +#include <linux/file.h>
48369 +#include <linux/stat.h>
48370 +#include <linux/grsecurity.h>
48371 +#include <linux/grinternal.h>
48372 +#include <linux/gracl.h>
48373 +
48374 +__u32
48375 +gr_acl_handle_hidden_file(const struct dentry * dentry,
48376 +                         const struct vfsmount * mnt)
48377 +{
48378 +       __u32 mode;
48379 +
48380 +       if (unlikely(!dentry->d_inode))
48381 +               return GR_FIND;
48382 +
48383 +       mode =
48384 +           gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48385 +
48386 +       if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48387 +               gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48388 +               return mode;
48389 +       } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48390 +               gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48391 +               return 0;
48392 +       } else if (unlikely(!(mode & GR_FIND)))
48393 +               return 0;
48394 +
48395 +       return GR_FIND;
48396 +}
48397 +
48398 +__u32
48399 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48400 +                  const int fmode)
48401 +{
48402 +       __u32 reqmode = GR_FIND;
48403 +       __u32 mode;
48404 +
48405 +       if (unlikely(!dentry->d_inode))
48406 +               return reqmode;
48407 +
48408 +       if (unlikely(fmode & O_APPEND))
48409 +               reqmode |= GR_APPEND;
48410 +       else if (unlikely(fmode & FMODE_WRITE))
48411 +               reqmode |= GR_WRITE;
48412 +       if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48413 +               reqmode |= GR_READ;
48414 +       if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
48415 +               reqmode &= ~GR_READ;
48416 +       mode =
48417 +           gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48418 +                          mnt);
48419 +
48420 +       if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48421 +               gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48422 +                              reqmode & GR_READ ? " reading" : "",
48423 +                              reqmode & GR_WRITE ? " writing" : reqmode &
48424 +                              GR_APPEND ? " appending" : "");
48425 +               return reqmode;
48426 +       } else
48427 +           if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48428 +       {
48429 +               gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48430 +                              reqmode & GR_READ ? " reading" : "",
48431 +                              reqmode & GR_WRITE ? " writing" : reqmode &
48432 +                              GR_APPEND ? " appending" : "");
48433 +               return 0;
48434 +       } else if (unlikely((mode & reqmode) != reqmode))
48435 +               return 0;
48436 +
48437 +       return reqmode;
48438 +}
48439 +
48440 +__u32
48441 +gr_acl_handle_creat(const struct dentry * dentry,
48442 +                   const struct dentry * p_dentry,
48443 +                   const struct vfsmount * p_mnt, const int fmode,
48444 +                   const int imode)
48445 +{
48446 +       __u32 reqmode = GR_WRITE | GR_CREATE;
48447 +       __u32 mode;
48448 +
48449 +       if (unlikely(fmode & O_APPEND))
48450 +               reqmode |= GR_APPEND;
48451 +       if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48452 +               reqmode |= GR_READ;
48453 +       if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48454 +               reqmode |= GR_SETID;
48455 +
48456 +       mode =
48457 +           gr_check_create(dentry, p_dentry, p_mnt,
48458 +                           reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48459 +
48460 +       if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48461 +               gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48462 +                              reqmode & GR_READ ? " reading" : "",
48463 +                              reqmode & GR_WRITE ? " writing" : reqmode &
48464 +                              GR_APPEND ? " appending" : "");
48465 +               return reqmode;
48466 +       } else
48467 +           if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48468 +       {
48469 +               gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48470 +                              reqmode & GR_READ ? " reading" : "",
48471 +                              reqmode & GR_WRITE ? " writing" : reqmode &
48472 +                              GR_APPEND ? " appending" : "");
48473 +               return 0;
48474 +       } else if (unlikely((mode & reqmode) != reqmode))
48475 +               return 0;
48476 +
48477 +       return reqmode;
48478 +}
48479 +
48480 +__u32
48481 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48482 +                    const int fmode)
48483 +{
48484 +       __u32 mode, reqmode = GR_FIND;
48485 +
48486 +       if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48487 +               reqmode |= GR_EXEC;
48488 +       if (fmode & S_IWOTH)
48489 +               reqmode |= GR_WRITE;
48490 +       if (fmode & S_IROTH)
48491 +               reqmode |= GR_READ;
48492 +
48493 +       mode =
48494 +           gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48495 +                          mnt);
48496 +
48497 +       if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48498 +               gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48499 +                              reqmode & GR_READ ? " reading" : "",
48500 +                              reqmode & GR_WRITE ? " writing" : "",
48501 +                              reqmode & GR_EXEC ? " executing" : "");
48502 +               return reqmode;
48503 +       } else
48504 +           if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48505 +       {
48506 +               gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48507 +                              reqmode & GR_READ ? " reading" : "",
48508 +                              reqmode & GR_WRITE ? " writing" : "",
48509 +                              reqmode & GR_EXEC ? " executing" : "");
48510 +               return 0;
48511 +       } else if (unlikely((mode & reqmode) != reqmode))
48512 +               return 0;
48513 +
48514 +       return reqmode;
48515 +}
48516 +
48517 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48518 +{
48519 +       __u32 mode;
48520 +
48521 +       mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48522 +
48523 +       if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48524 +               gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48525 +               return mode;
48526 +       } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48527 +               gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48528 +               return 0;
48529 +       } else if (unlikely((mode & (reqmode)) != (reqmode)))
48530 +               return 0;
48531 +
48532 +       return (reqmode);
48533 +}
48534 +
48535 +__u32
48536 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48537 +{
48538 +       return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
48539 +}
48540 +
48541 +__u32
48542 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48543 +{
48544 +       return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
48545 +}
48546 +
48547 +__u32
48548 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48549 +{
48550 +       return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48551 +}
48552 +
48553 +__u32
48554 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48555 +{
48556 +       return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48557 +}
48558 +
48559 +__u32
48560 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48561 +                    mode_t mode)
48562 +{
48563 +       if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48564 +               return 1;
48565 +
48566 +       if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48567 +               return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48568 +                                  GR_FCHMOD_ACL_MSG);
48569 +       } else {
48570 +               return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48571 +       }
48572 +}
48573 +
48574 +__u32
48575 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48576 +                   mode_t mode)
48577 +{
48578 +       if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48579 +               return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48580 +                                  GR_CHMOD_ACL_MSG);
48581 +       } else {
48582 +               return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48583 +       }
48584 +}
48585 +
48586 +__u32
48587 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48588 +{
48589 +       return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48590 +}
48591 +
48592 +__u32
48593 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48594 +{
48595 +       return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48596 +}
48597 +
48598 +__u32
48599 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48600 +{
48601 +       return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48602 +}
48603 +
48604 +__u32
48605 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48606 +{
48607 +       return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48608 +                          GR_UNIXCONNECT_ACL_MSG);
48609 +}
48610 +
48611 +/* hardlinks require at minimum create permission,
48612 +   any additional privilege required is based on the
48613 +   privilege of the file being linked to
48614 +*/
48615 +__u32
48616 +gr_acl_handle_link(const struct dentry * new_dentry,
48617 +                  const struct dentry * parent_dentry,
48618 +                  const struct vfsmount * parent_mnt,
48619 +                  const struct dentry * old_dentry,
48620 +                  const struct vfsmount * old_mnt, const char *to)
48621 +{
48622 +       __u32 mode;
48623 +       __u32 needmode = GR_CREATE | GR_LINK;
48624 +       __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48625 +
48626 +       mode =
48627 +           gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48628 +                         old_mnt);
48629 +
48630 +       if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48631 +               gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48632 +               return mode;
48633 +       } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48634 +               gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48635 +               return 0;
48636 +       } else if (unlikely((mode & needmode) != needmode))
48637 +               return 0;
48638 +
48639 +       return 1;
48640 +}
48641 +
48642 +__u32
48643 +gr_acl_handle_symlink(const struct dentry * new_dentry,
48644 +                     const struct dentry * parent_dentry,
48645 +                     const struct vfsmount * parent_mnt, const char *from)
48646 +{
48647 +       __u32 needmode = GR_WRITE | GR_CREATE;
48648 +       __u32 mode;
48649 +
48650 +       mode =
48651 +           gr_check_create(new_dentry, parent_dentry, parent_mnt,
48652 +                           GR_CREATE | GR_AUDIT_CREATE |
48653 +                           GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
48654 +
48655 +       if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
48656 +               gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48657 +               return mode;
48658 +       } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48659 +               gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48660 +               return 0;
48661 +       } else if (unlikely((mode & needmode) != needmode))
48662 +               return 0;
48663 +
48664 +       return (GR_WRITE | GR_CREATE);
48665 +}
48666 +
48667 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
48668 +{
48669 +       __u32 mode;
48670 +
48671 +       mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48672 +
48673 +       if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48674 +               gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
48675 +               return mode;
48676 +       } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48677 +               gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
48678 +               return 0;
48679 +       } else if (unlikely((mode & (reqmode)) != (reqmode)))
48680 +               return 0;
48681 +
48682 +       return (reqmode);
48683 +}
48684 +
48685 +__u32
48686 +gr_acl_handle_mknod(const struct dentry * new_dentry,
48687 +                   const struct dentry * parent_dentry,
48688 +                   const struct vfsmount * parent_mnt,
48689 +                   const int mode)
48690 +{
48691 +       __u32 reqmode = GR_WRITE | GR_CREATE;
48692 +       if (unlikely(mode & (S_ISUID | S_ISGID)))
48693 +               reqmode |= GR_SETID;
48694 +
48695 +       return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48696 +                                 reqmode, GR_MKNOD_ACL_MSG);
48697 +}
48698 +
48699 +__u32
48700 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
48701 +                   const struct dentry *parent_dentry,
48702 +                   const struct vfsmount *parent_mnt)
48703 +{
48704 +       return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48705 +                                 GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
48706 +}
48707 +
48708 +#define RENAME_CHECK_SUCCESS(old, new) \
48709 +       (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
48710 +        ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
48711 +
48712 +int
48713 +gr_acl_handle_rename(struct dentry *new_dentry,
48714 +                    struct dentry *parent_dentry,
48715 +                    const struct vfsmount *parent_mnt,
48716 +                    struct dentry *old_dentry,
48717 +                    struct inode *old_parent_inode,
48718 +                    struct vfsmount *old_mnt, const char *newname)
48719 +{
48720 +       __u32 comp1, comp2;
48721 +       int error = 0;
48722 +
48723 +       if (unlikely(!gr_acl_is_enabled()))
48724 +               return 0;
48725 +
48726 +       if (!new_dentry->d_inode) {
48727 +               comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
48728 +                                       GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
48729 +                                       GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
48730 +               comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
48731 +                                      GR_DELETE | GR_AUDIT_DELETE |
48732 +                                      GR_AUDIT_READ | GR_AUDIT_WRITE |
48733 +                                      GR_SUPPRESS, old_mnt);
48734 +       } else {
48735 +               comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
48736 +                                      GR_CREATE | GR_DELETE |
48737 +                                      GR_AUDIT_CREATE | GR_AUDIT_DELETE |
48738 +                                      GR_AUDIT_READ | GR_AUDIT_WRITE |
48739 +                                      GR_SUPPRESS, parent_mnt);
48740 +               comp2 =
48741 +                   gr_search_file(old_dentry,
48742 +                                  GR_READ | GR_WRITE | GR_AUDIT_READ |
48743 +                                  GR_DELETE | GR_AUDIT_DELETE |
48744 +                                  GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
48745 +       }
48746 +
48747 +       if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
48748 +           ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
48749 +               gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48750 +       else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
48751 +                && !(comp2 & GR_SUPPRESS)) {
48752 +               gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48753 +               error = -EACCES;
48754 +       } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
48755 +               error = -EACCES;
48756 +
48757 +       return error;
48758 +}
48759 +
48760 +void
48761 +gr_acl_handle_exit(void)
48762 +{
48763 +       u16 id;
48764 +       char *rolename;
48765 +       struct file *exec_file;
48766 +
48767 +       if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
48768 +           !(current->role->roletype & GR_ROLE_PERSIST))) {
48769 +               id = current->acl_role_id;
48770 +               rolename = current->role->rolename;
48771 +               gr_set_acls(1);
48772 +               gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
48773 +       }
48774 +
48775 +       write_lock(&grsec_exec_file_lock);
48776 +       exec_file = current->exec_file;
48777 +       current->exec_file = NULL;
48778 +       write_unlock(&grsec_exec_file_lock);
48779 +
48780 +       if (exec_file)
48781 +               fput(exec_file);
48782 +}
48783 +
48784 +int
48785 +gr_acl_handle_procpidmem(const struct task_struct *task)
48786 +{
48787 +       if (unlikely(!gr_acl_is_enabled()))
48788 +               return 0;
48789 +
48790 +       if (task != current && task->acl->mode & GR_PROTPROCFD)
48791 +               return -EACCES;
48792 +
48793 +       return 0;
48794 +}
48795 diff -urNp linux-3.0.4/grsecurity/gracl_ip.c linux-3.0.4/grsecurity/gracl_ip.c
48796 --- linux-3.0.4/grsecurity/gracl_ip.c   1969-12-31 19:00:00.000000000 -0500
48797 +++ linux-3.0.4/grsecurity/gracl_ip.c   2011-08-23 21:48:14.000000000 -0400
48798 @@ -0,0 +1,381 @@
48799 +#include <linux/kernel.h>
48800 +#include <asm/uaccess.h>
48801 +#include <asm/errno.h>
48802 +#include <net/sock.h>
48803 +#include <linux/file.h>
48804 +#include <linux/fs.h>
48805 +#include <linux/net.h>
48806 +#include <linux/in.h>
48807 +#include <linux/skbuff.h>
48808 +#include <linux/ip.h>
48809 +#include <linux/udp.h>
48810 +#include <linux/types.h>
48811 +#include <linux/sched.h>
48812 +#include <linux/netdevice.h>
48813 +#include <linux/inetdevice.h>
48814 +#include <linux/gracl.h>
48815 +#include <linux/grsecurity.h>
48816 +#include <linux/grinternal.h>
48817 +
48818 +#define GR_BIND                        0x01
48819 +#define GR_CONNECT             0x02
48820 +#define GR_INVERT              0x04
48821 +#define GR_BINDOVERRIDE                0x08
48822 +#define GR_CONNECTOVERRIDE     0x10
48823 +#define GR_SOCK_FAMILY         0x20
48824 +
48825 +static const char * gr_protocols[IPPROTO_MAX] = {
48826 +       "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
48827 +       "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
48828 +       "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
48829 +       "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
48830 +       "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
48831 +       "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
48832 +       "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
48833 +       "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
48834 +       "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
48835 +       "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", 
48836 +       "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", 
48837 +       "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
48838 +       "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
48839 +       "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
48840 +       "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
48841 +       "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
48842 +       "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
48843 +       "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
48844 +       "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
48845 +       "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
48846 +       "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
48847 +       "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
48848 +       "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
48849 +       "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
48850 +       "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
48851 +       "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
48852 +       "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
48853 +       "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
48854 +       "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
48855 +       "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
48856 +       "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
48857 +       "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
48858 +       };
48859 +
48860 +static const char * gr_socktypes[SOCK_MAX] = {
48861 +       "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", 
48862 +       "unknown:7", "unknown:8", "unknown:9", "packet"
48863 +       };
48864 +
48865 +static const char * gr_sockfamilies[AF_MAX+1] = {
48866 +       "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
48867 +       "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
48868 +       "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
48869 +       "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
48870 +       };
48871 +
48872 +const char *
48873 +gr_proto_to_name(unsigned char proto)
48874 +{
48875 +       return gr_protocols[proto];
48876 +}
48877 +
48878 +const char *
48879 +gr_socktype_to_name(unsigned char type)
48880 +{
48881 +       return gr_socktypes[type];
48882 +}
48883 +
48884 +const char *
48885 +gr_sockfamily_to_name(unsigned char family)
48886 +{
48887 +       return gr_sockfamilies[family];
48888 +}
48889 +
48890 +int
48891 +gr_search_socket(const int domain, const int type, const int protocol)
48892 +{
48893 +       struct acl_subject_label *curr;
48894 +       const struct cred *cred = current_cred();
48895 +
48896 +       if (unlikely(!gr_acl_is_enabled()))
48897 +               goto exit;
48898 +
48899 +       if ((domain < 0) || (type < 0) || (protocol < 0) ||
48900 +           (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
48901 +               goto exit;      // let the kernel handle it
48902 +
48903 +       curr = current->acl;
48904 +
48905 +       if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
48906 +               /* the family is allowed, if this is PF_INET allow it only if
48907 +                  the extra sock type/protocol checks pass */
48908 +               if (domain == PF_INET)
48909 +                       goto inet_check;
48910 +               goto exit;
48911 +       } else {
48912 +               if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48913 +                       __u32 fakeip = 0;
48914 +                       security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48915 +                                      current->role->roletype, cred->uid,
48916 +                                      cred->gid, current->exec_file ?
48917 +                                      gr_to_filename(current->exec_file->f_path.dentry,
48918 +                                      current->exec_file->f_path.mnt) :
48919 +                                      curr->filename, curr->filename,
48920 +                                      &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
48921 +                                      &current->signal->saved_ip);
48922 +                       goto exit;
48923 +               }
48924 +               goto exit_fail;
48925 +       }
48926 +
48927 +inet_check:
48928 +       /* the rest of this checking is for IPv4 only */
48929 +       if (!curr->ips)
48930 +               goto exit;
48931 +
48932 +       if ((curr->ip_type & (1 << type)) &&
48933 +           (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
48934 +               goto exit;
48935 +
48936 +       if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48937 +               /* we don't place acls on raw sockets, and sometimes
48938 +                  dgram/ip sockets are opened for ioctl and not
48939 +                  bind/connect, so we'll fake a bind learn log */
48940 +               if (type == SOCK_RAW || type == SOCK_PACKET) {
48941 +                       __u32 fakeip = 0;
48942 +                       security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48943 +                                      current->role->roletype, cred->uid,
48944 +                                      cred->gid, current->exec_file ?
48945 +                                      gr_to_filename(current->exec_file->f_path.dentry,
48946 +                                      current->exec_file->f_path.mnt) :
48947 +                                      curr->filename, curr->filename,
48948 +                                      &fakeip, 0, type,
48949 +                                      protocol, GR_CONNECT, &current->signal->saved_ip);
48950 +               } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
48951 +                       __u32 fakeip = 0;
48952 +                       security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48953 +                                      current->role->roletype, cred->uid,
48954 +                                      cred->gid, current->exec_file ?
48955 +                                      gr_to_filename(current->exec_file->f_path.dentry,
48956 +                                      current->exec_file->f_path.mnt) :
48957 +                                      curr->filename, curr->filename,
48958 +                                      &fakeip, 0, type,
48959 +                                      protocol, GR_BIND, &current->signal->saved_ip);
48960 +               }
48961 +               /* we'll log when they use connect or bind */
48962 +               goto exit;
48963 +       }
48964 +
48965 +exit_fail:
48966 +       if (domain == PF_INET)
48967 +               gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain), 
48968 +                           gr_socktype_to_name(type), gr_proto_to_name(protocol));
48969 +       else
48970 +               gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain), 
48971 +                           gr_socktype_to_name(type), protocol);
48972 +
48973 +       return 0;
48974 +exit:
48975 +       return 1;
48976 +}
48977 +
48978 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
48979 +{
48980 +       if ((ip->mode & mode) &&
48981 +           (ip_port >= ip->low) &&
48982 +           (ip_port <= ip->high) &&
48983 +           ((ntohl(ip_addr) & our_netmask) ==
48984 +            (ntohl(our_addr) & our_netmask))
48985 +           && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
48986 +           && (ip->type & (1 << type))) {
48987 +               if (ip->mode & GR_INVERT)
48988 +                       return 2; // specifically denied
48989 +               else
48990 +                       return 1; // allowed
48991 +       }
48992 +
48993 +       return 0; // not specifically allowed, may continue parsing
48994 +}
48995 +
48996 +static int
48997 +gr_search_connectbind(const int full_mode, struct sock *sk,
48998 +                     struct sockaddr_in *addr, const int type)
48999 +{
49000 +       char iface[IFNAMSIZ] = {0};
49001 +       struct acl_subject_label *curr;
49002 +       struct acl_ip_label *ip;
49003 +       struct inet_sock *isk;
49004 +       struct net_device *dev;
49005 +       struct in_device *idev;
49006 +       unsigned long i;
49007 +       int ret;
49008 +       int mode = full_mode & (GR_BIND | GR_CONNECT);
49009 +       __u32 ip_addr = 0;
49010 +       __u32 our_addr;
49011 +       __u32 our_netmask;
49012 +       char *p;
49013 +       __u16 ip_port = 0;
49014 +       const struct cred *cred = current_cred();
49015 +
49016 +       if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
49017 +               return 0;
49018 +
49019 +       curr = current->acl;
49020 +       isk = inet_sk(sk);
49021 +
49022 +       /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
49023 +       if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
49024 +               addr->sin_addr.s_addr = curr->inaddr_any_override;
49025 +       if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
49026 +               struct sockaddr_in saddr;
49027 +               int err;
49028 +
49029 +               saddr.sin_family = AF_INET;
49030 +               saddr.sin_addr.s_addr = curr->inaddr_any_override;
49031 +               saddr.sin_port = isk->inet_sport;
49032 +
49033 +               err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49034 +               if (err)
49035 +                       return err;
49036 +
49037 +               err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49038 +               if (err)
49039 +                       return err;
49040 +       }
49041 +
49042 +       if (!curr->ips)
49043 +               return 0;
49044 +
49045 +       ip_addr = addr->sin_addr.s_addr;
49046 +       ip_port = ntohs(addr->sin_port);
49047 +
49048 +       if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49049 +               security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49050 +                              current->role->roletype, cred->uid,
49051 +                              cred->gid, current->exec_file ?
49052 +                              gr_to_filename(current->exec_file->f_path.dentry,
49053 +                              current->exec_file->f_path.mnt) :
49054 +                              curr->filename, curr->filename,
49055 +                              &ip_addr, ip_port, type,
49056 +                              sk->sk_protocol, mode, &current->signal->saved_ip);
49057 +               return 0;
49058 +       }
49059 +
49060 +       for (i = 0; i < curr->ip_num; i++) {
49061 +               ip = *(curr->ips + i);
49062 +               if (ip->iface != NULL) {
49063 +                       strncpy(iface, ip->iface, IFNAMSIZ - 1);
49064 +                       p = strchr(iface, ':');
49065 +                       if (p != NULL)
49066 +                               *p = '\0';
49067 +                       dev = dev_get_by_name(sock_net(sk), iface);
49068 +                       if (dev == NULL)
49069 +                               continue;
49070 +                       idev = in_dev_get(dev);
49071 +                       if (idev == NULL) {
49072 +                               dev_put(dev);
49073 +                               continue;
49074 +                       }
49075 +                       rcu_read_lock();
49076 +                       for_ifa(idev) {
49077 +                               if (!strcmp(ip->iface, ifa->ifa_label)) {
49078 +                                       our_addr = ifa->ifa_address;
49079 +                                       our_netmask = 0xffffffff;
49080 +                                       ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49081 +                                       if (ret == 1) {
49082 +                                               rcu_read_unlock();
49083 +                                               in_dev_put(idev);
49084 +                                               dev_put(dev);
49085 +                                               return 0;
49086 +                                       } else if (ret == 2) {
49087 +                                               rcu_read_unlock();
49088 +                                               in_dev_put(idev);
49089 +                                               dev_put(dev);
49090 +                                               goto denied;
49091 +                                       }
49092 +                               }
49093 +                       } endfor_ifa(idev);
49094 +                       rcu_read_unlock();
49095 +                       in_dev_put(idev);
49096 +                       dev_put(dev);
49097 +               } else {
49098 +                       our_addr = ip->addr;
49099 +                       our_netmask = ip->netmask;
49100 +                       ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49101 +                       if (ret == 1)
49102 +                               return 0;
49103 +                       else if (ret == 2)
49104 +                               goto denied;
49105 +               }
49106 +       }
49107 +
49108 +denied:
49109 +       if (mode == GR_BIND)
49110 +               gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49111 +       else if (mode == GR_CONNECT)
49112 +               gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49113 +
49114 +       return -EACCES;
49115 +}
49116 +
49117 +int
49118 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
49119 +{
49120 +       return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
49121 +}
49122 +
49123 +int
49124 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
49125 +{
49126 +       return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
49127 +}
49128 +
49129 +int gr_search_listen(struct socket *sock)
49130 +{
49131 +       struct sock *sk = sock->sk;
49132 +       struct sockaddr_in addr;
49133 +
49134 +       addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
49135 +       addr.sin_port = inet_sk(sk)->inet_sport;
49136 +
49137 +       return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49138 +}
49139 +
49140 +int gr_search_accept(struct socket *sock)
49141 +{
49142 +       struct sock *sk = sock->sk;
49143 +       struct sockaddr_in addr;
49144 +
49145 +       addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
49146 +       addr.sin_port = inet_sk(sk)->inet_sport;
49147 +
49148 +       return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49149 +}
49150 +
49151 +int
49152 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
49153 +{
49154 +       if (addr)
49155 +               return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
49156 +       else {
49157 +               struct sockaddr_in sin;
49158 +               const struct inet_sock *inet = inet_sk(sk);
49159 +
49160 +               sin.sin_addr.s_addr = inet->inet_daddr;
49161 +               sin.sin_port = inet->inet_dport;
49162 +
49163 +               return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49164 +       }
49165 +}
49166 +
49167 +int
49168 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
49169 +{
49170 +       struct sockaddr_in sin;
49171 +
49172 +       if (unlikely(skb->len < sizeof (struct udphdr)))
49173 +               return 0;       // skip this packet
49174 +
49175 +       sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
49176 +       sin.sin_port = udp_hdr(skb)->source;
49177 +
49178 +       return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49179 +}
49180 diff -urNp linux-3.0.4/grsecurity/gracl_learn.c linux-3.0.4/grsecurity/gracl_learn.c
49181 --- linux-3.0.4/grsecurity/gracl_learn.c        1969-12-31 19:00:00.000000000 -0500
49182 +++ linux-3.0.4/grsecurity/gracl_learn.c        2011-08-23 21:48:14.000000000 -0400
49183 @@ -0,0 +1,207 @@
49184 +#include <linux/kernel.h>
49185 +#include <linux/mm.h>
49186 +#include <linux/sched.h>
49187 +#include <linux/poll.h>
49188 +#include <linux/string.h>
49189 +#include <linux/file.h>
49190 +#include <linux/types.h>
49191 +#include <linux/vmalloc.h>
49192 +#include <linux/grinternal.h>
49193 +
49194 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
49195 +                                  size_t count, loff_t *ppos);
49196 +extern int gr_acl_is_enabled(void);
49197 +
49198 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
49199 +static int gr_learn_attached;
49200 +
49201 +/* use a 512k buffer */
49202 +#define LEARN_BUFFER_SIZE (512 * 1024)
49203 +
49204 +static DEFINE_SPINLOCK(gr_learn_lock);
49205 +static DEFINE_MUTEX(gr_learn_user_mutex);
49206 +
49207 +/* we need to maintain two buffers, so that the kernel context of grlearn
49208 +   uses a semaphore around the userspace copying, and the other kernel contexts
49209 +   use a spinlock when copying into the buffer, since they cannot sleep
49210 +*/
49211 +static char *learn_buffer;
49212 +static char *learn_buffer_user;
49213 +static int learn_buffer_len;
49214 +static int learn_buffer_user_len;
49215 +
49216 +static ssize_t
49217 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
49218 +{
49219 +       DECLARE_WAITQUEUE(wait, current);
49220 +       ssize_t retval = 0;
49221 +
49222 +       add_wait_queue(&learn_wait, &wait);
49223 +       set_current_state(TASK_INTERRUPTIBLE);
49224 +       do {
49225 +               mutex_lock(&gr_learn_user_mutex);
49226 +               spin_lock(&gr_learn_lock);
49227 +               if (learn_buffer_len)
49228 +                       break;
49229 +               spin_unlock(&gr_learn_lock);
49230 +               mutex_unlock(&gr_learn_user_mutex);
49231 +               if (file->f_flags & O_NONBLOCK) {
49232 +                       retval = -EAGAIN;
49233 +                       goto out;
49234 +               }
49235 +               if (signal_pending(current)) {
49236 +                       retval = -ERESTARTSYS;
49237 +                       goto out;
49238 +               }
49239 +
49240 +               schedule();
49241 +       } while (1);
49242 +
49243 +       memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
49244 +       learn_buffer_user_len = learn_buffer_len;
49245 +       retval = learn_buffer_len;
49246 +       learn_buffer_len = 0;
49247 +
49248 +       spin_unlock(&gr_learn_lock);
49249 +
49250 +       if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
49251 +               retval = -EFAULT;
49252 +
49253 +       mutex_unlock(&gr_learn_user_mutex);
49254 +out:
49255 +       set_current_state(TASK_RUNNING);
49256 +       remove_wait_queue(&learn_wait, &wait);
49257 +       return retval;
49258 +}
49259 +
49260 +static unsigned int
49261 +poll_learn(struct file * file, poll_table * wait)
49262 +{
49263 +       poll_wait(file, &learn_wait, wait);
49264 +
49265 +       if (learn_buffer_len)
49266 +               return (POLLIN | POLLRDNORM);
49267 +
49268 +       return 0;
49269 +}
49270 +
49271 +void
49272 +gr_clear_learn_entries(void)
49273 +{
49274 +       char *tmp;
49275 +
49276 +       mutex_lock(&gr_learn_user_mutex);
49277 +       spin_lock(&gr_learn_lock);
49278 +       tmp = learn_buffer;
49279 +       learn_buffer = NULL;
49280 +       spin_unlock(&gr_learn_lock);
49281 +       if (tmp)
49282 +               vfree(tmp);
49283 +       if (learn_buffer_user != NULL) {
49284 +               vfree(learn_buffer_user);
49285 +               learn_buffer_user = NULL;
49286 +       }
49287 +       learn_buffer_len = 0;
49288 +       mutex_unlock(&gr_learn_user_mutex);
49289 +
49290 +       return;
49291 +}
49292 +
49293 +void
49294 +gr_add_learn_entry(const char *fmt, ...)
49295 +{
49296 +       va_list args;
49297 +       unsigned int len;
49298 +
49299 +       if (!gr_learn_attached)
49300 +               return;
49301 +
49302 +       spin_lock(&gr_learn_lock);
49303 +
49304 +       /* leave a gap at the end so we know when it's "full" but don't have to
49305 +          compute the exact length of the string we're trying to append
49306 +       */
49307 +       if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
49308 +               spin_unlock(&gr_learn_lock);
49309 +               wake_up_interruptible(&learn_wait);
49310 +               return;
49311 +       }
49312 +       if (learn_buffer == NULL) {
49313 +               spin_unlock(&gr_learn_lock);
49314 +               return;
49315 +       }
49316 +
49317 +       va_start(args, fmt);
49318 +       len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
49319 +       va_end(args);
49320 +
49321 +       learn_buffer_len += len + 1;
49322 +
49323 +       spin_unlock(&gr_learn_lock);
49324 +       wake_up_interruptible(&learn_wait);
49325 +
49326 +       return;
49327 +}
49328 +
49329 +static int
49330 +open_learn(struct inode *inode, struct file *file)
49331 +{
49332 +       if (file->f_mode & FMODE_READ && gr_learn_attached)
49333 +               return -EBUSY;
49334 +       if (file->f_mode & FMODE_READ) {
49335 +               int retval = 0;
49336 +               mutex_lock(&gr_learn_user_mutex);
49337 +               if (learn_buffer == NULL)
49338 +                       learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49339 +               if (learn_buffer_user == NULL)
49340 +                       learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49341 +               if (learn_buffer == NULL) {
49342 +                       retval = -ENOMEM;
49343 +                       goto out_error;
49344 +               }
49345 +               if (learn_buffer_user == NULL) {
49346 +                       retval = -ENOMEM;
49347 +                       goto out_error;
49348 +               }
49349 +               learn_buffer_len = 0;
49350 +               learn_buffer_user_len = 0;
49351 +               gr_learn_attached = 1;
49352 +out_error:
49353 +               mutex_unlock(&gr_learn_user_mutex);
49354 +               return retval;
49355 +       }
49356 +       return 0;
49357 +}
49358 +
49359 +static int
49360 +close_learn(struct inode *inode, struct file *file)
49361 +{
49362 +       if (file->f_mode & FMODE_READ) {
49363 +               char *tmp = NULL;
49364 +               mutex_lock(&gr_learn_user_mutex);
49365 +               spin_lock(&gr_learn_lock);
49366 +               tmp = learn_buffer;
49367 +               learn_buffer = NULL;
49368 +               spin_unlock(&gr_learn_lock);
49369 +               if (tmp)
49370 +                       vfree(tmp);
49371 +               if (learn_buffer_user != NULL) {
49372 +                       vfree(learn_buffer_user);
49373 +                       learn_buffer_user = NULL;
49374 +               }
49375 +               learn_buffer_len = 0;
49376 +               learn_buffer_user_len = 0;
49377 +               gr_learn_attached = 0;
49378 +               mutex_unlock(&gr_learn_user_mutex);
49379 +       }
49380 +
49381 +       return 0;
49382 +}
49383 +               
49384 +const struct file_operations grsec_fops = {
49385 +       .read           = read_learn,
49386 +       .write          = write_grsec_handler,
49387 +       .open           = open_learn,
49388 +       .release        = close_learn,
49389 +       .poll           = poll_learn,
49390 +};
49391 diff -urNp linux-3.0.4/grsecurity/gracl_res.c linux-3.0.4/grsecurity/gracl_res.c
49392 --- linux-3.0.4/grsecurity/gracl_res.c  1969-12-31 19:00:00.000000000 -0500
49393 +++ linux-3.0.4/grsecurity/gracl_res.c  2011-08-23 21:48:14.000000000 -0400
49394 @@ -0,0 +1,68 @@
49395 +#include <linux/kernel.h>
49396 +#include <linux/sched.h>
49397 +#include <linux/gracl.h>
49398 +#include <linux/grinternal.h>
49399 +
49400 +static const char *restab_log[] = {
49401 +       [RLIMIT_CPU] = "RLIMIT_CPU",
49402 +       [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49403 +       [RLIMIT_DATA] = "RLIMIT_DATA",
49404 +       [RLIMIT_STACK] = "RLIMIT_STACK",
49405 +       [RLIMIT_CORE] = "RLIMIT_CORE",
49406 +       [RLIMIT_RSS] = "RLIMIT_RSS",
49407 +       [RLIMIT_NPROC] = "RLIMIT_NPROC",
49408 +       [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49409 +       [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49410 +       [RLIMIT_AS] = "RLIMIT_AS",
49411 +       [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49412 +       [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49413 +       [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49414 +       [RLIMIT_NICE] = "RLIMIT_NICE",
49415 +       [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49416 +       [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49417 +       [GR_CRASH_RES] = "RLIMIT_CRASH"
49418 +};
49419 +
49420 +void
49421 +gr_log_resource(const struct task_struct *task,
49422 +               const int res, const unsigned long wanted, const int gt)
49423 +{
49424 +       const struct cred *cred;
49425 +       unsigned long rlim;
49426 +
49427 +       if (!gr_acl_is_enabled() && !grsec_resource_logging)
49428 +               return;
49429 +
49430 +       // not yet supported resource
49431 +       if (unlikely(!restab_log[res]))
49432 +               return;
49433 +
49434 +       if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49435 +               rlim = task_rlimit_max(task, res);
49436 +       else
49437 +               rlim = task_rlimit(task, res);
49438 +
49439 +       if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49440 +               return;
49441 +
49442 +       rcu_read_lock();
49443 +       cred = __task_cred(task);
49444 +
49445 +       if (res == RLIMIT_NPROC && 
49446 +           (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) || 
49447 +            cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49448 +               goto out_rcu_unlock;
49449 +       else if (res == RLIMIT_MEMLOCK &&
49450 +                cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49451 +               goto out_rcu_unlock;
49452 +       else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49453 +               goto out_rcu_unlock;
49454 +       rcu_read_unlock();
49455 +
49456 +       gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49457 +
49458 +       return;
49459 +out_rcu_unlock:
49460 +       rcu_read_unlock();
49461 +       return;
49462 +}
49463 diff -urNp linux-3.0.4/grsecurity/gracl_segv.c linux-3.0.4/grsecurity/gracl_segv.c
49464 --- linux-3.0.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49465 +++ linux-3.0.4/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
49466 @@ -0,0 +1,299 @@
49467 +#include <linux/kernel.h>
49468 +#include <linux/mm.h>
49469 +#include <asm/uaccess.h>
49470 +#include <asm/errno.h>
49471 +#include <asm/mman.h>
49472 +#include <net/sock.h>
49473 +#include <linux/file.h>
49474 +#include <linux/fs.h>
49475 +#include <linux/net.h>
49476 +#include <linux/in.h>
49477 +#include <linux/slab.h>
49478 +#include <linux/types.h>
49479 +#include <linux/sched.h>
49480 +#include <linux/timer.h>
49481 +#include <linux/gracl.h>
49482 +#include <linux/grsecurity.h>
49483 +#include <linux/grinternal.h>
49484 +
49485 +static struct crash_uid *uid_set;
49486 +static unsigned short uid_used;
49487 +static DEFINE_SPINLOCK(gr_uid_lock);
49488 +extern rwlock_t gr_inode_lock;
49489 +extern struct acl_subject_label *
49490 +       lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49491 +                             struct acl_role_label *role);
49492 +
49493 +#ifdef CONFIG_BTRFS_FS
49494 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
49495 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
49496 +#endif
49497 +
49498 +static inline dev_t __get_dev(const struct dentry *dentry)
49499 +{
49500 +#ifdef CONFIG_BTRFS_FS
49501 +       if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
49502 +               return get_btrfs_dev_from_inode(dentry->d_inode);
49503 +       else
49504 +#endif
49505 +               return dentry->d_inode->i_sb->s_dev;
49506 +}
49507 +
49508 +int
49509 +gr_init_uidset(void)
49510 +{
49511 +       uid_set =
49512 +           kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49513 +       uid_used = 0;
49514 +
49515 +       return uid_set ? 1 : 0;
49516 +}
49517 +
49518 +void
49519 +gr_free_uidset(void)
49520 +{
49521 +       if (uid_set)
49522 +               kfree(uid_set);
49523 +
49524 +       return;
49525 +}
49526 +
49527 +int
49528 +gr_find_uid(const uid_t uid)
49529 +{
49530 +       struct crash_uid *tmp = uid_set;
49531 +       uid_t buid;
49532 +       int low = 0, high = uid_used - 1, mid;
49533 +
49534 +       while (high >= low) {
49535 +               mid = (low + high) >> 1;
49536 +               buid = tmp[mid].uid;
49537 +               if (buid == uid)
49538 +                       return mid;
49539 +               if (buid > uid)
49540 +                       high = mid - 1;
49541 +               if (buid < uid)
49542 +                       low = mid + 1;
49543 +       }
49544 +
49545 +       return -1;
49546 +}
49547 +
49548 +static __inline__ void
49549 +gr_insertsort(void)
49550 +{
49551 +       unsigned short i, j;
49552 +       struct crash_uid index;
49553 +
49554 +       for (i = 1; i < uid_used; i++) {
49555 +               index = uid_set[i];
49556 +               j = i;
49557 +               while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49558 +                       uid_set[j] = uid_set[j - 1];
49559 +                       j--;
49560 +               }
49561 +               uid_set[j] = index;
49562 +       }
49563 +
49564 +       return;
49565 +}
49566 +
49567 +static __inline__ void
49568 +gr_insert_uid(const uid_t uid, const unsigned long expires)
49569 +{
49570 +       int loc;
49571 +
49572 +       if (uid_used == GR_UIDTABLE_MAX)
49573 +               return;
49574 +
49575 +       loc = gr_find_uid(uid);
49576 +
49577 +       if (loc >= 0) {
49578 +               uid_set[loc].expires = expires;
49579 +               return;
49580 +       }
49581 +
49582 +       uid_set[uid_used].uid = uid;
49583 +       uid_set[uid_used].expires = expires;
49584 +       uid_used++;
49585 +
49586 +       gr_insertsort();
49587 +
49588 +       return;
49589 +}
49590 +
49591 +void
49592 +gr_remove_uid(const unsigned short loc)
49593 +{
49594 +       unsigned short i;
49595 +
49596 +       for (i = loc + 1; i < uid_used; i++)
49597 +               uid_set[i - 1] = uid_set[i];
49598 +
49599 +       uid_used--;
49600 +
49601 +       return;
49602 +}
49603 +
49604 +int
49605 +gr_check_crash_uid(const uid_t uid)
49606 +{
49607 +       int loc;
49608 +       int ret = 0;
49609 +
49610 +       if (unlikely(!gr_acl_is_enabled()))
49611 +               return 0;
49612 +
49613 +       spin_lock(&gr_uid_lock);
49614 +       loc = gr_find_uid(uid);
49615 +
49616 +       if (loc < 0)
49617 +               goto out_unlock;
49618 +
49619 +       if (time_before_eq(uid_set[loc].expires, get_seconds()))
49620 +               gr_remove_uid(loc);
49621 +       else
49622 +               ret = 1;
49623 +
49624 +out_unlock:
49625 +       spin_unlock(&gr_uid_lock);
49626 +       return ret;
49627 +}
49628 +
49629 +static __inline__ int
49630 +proc_is_setxid(const struct cred *cred)
49631 +{
49632 +       if (cred->uid != cred->euid || cred->uid != cred->suid ||
49633 +           cred->uid != cred->fsuid)
49634 +               return 1;
49635 +       if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49636 +           cred->gid != cred->fsgid)
49637 +               return 1;
49638 +
49639 +       return 0;
49640 +}
49641 +
49642 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
49643 +
49644 +void
49645 +gr_handle_crash(struct task_struct *task, const int sig)
49646 +{
49647 +       struct acl_subject_label *curr;
49648 +       struct acl_subject_label *curr2;
49649 +       struct task_struct *tsk, *tsk2;
49650 +       const struct cred *cred;
49651 +       const struct cred *cred2;
49652 +
49653 +       if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
49654 +               return;
49655 +
49656 +       if (unlikely(!gr_acl_is_enabled()))
49657 +               return;
49658 +
49659 +       curr = task->acl;
49660 +
49661 +       if (!(curr->resmask & (1 << GR_CRASH_RES)))
49662 +               return;
49663 +
49664 +       if (time_before_eq(curr->expires, get_seconds())) {
49665 +               curr->expires = 0;
49666 +               curr->crashes = 0;
49667 +       }
49668 +
49669 +       curr->crashes++;
49670 +
49671 +       if (!curr->expires)
49672 +               curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
49673 +
49674 +       if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49675 +           time_after(curr->expires, get_seconds())) {
49676 +               rcu_read_lock();
49677 +               cred = __task_cred(task);
49678 +               if (cred->uid && proc_is_setxid(cred)) {
49679 +                       gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49680 +                       spin_lock(&gr_uid_lock);
49681 +                       gr_insert_uid(cred->uid, curr->expires);
49682 +                       spin_unlock(&gr_uid_lock);
49683 +                       curr->expires = 0;
49684 +                       curr->crashes = 0;
49685 +                       read_lock(&tasklist_lock);
49686 +                       do_each_thread(tsk2, tsk) {
49687 +                               cred2 = __task_cred(tsk);
49688 +                               if (tsk != task && cred2->uid == cred->uid)
49689 +                                       gr_fake_force_sig(SIGKILL, tsk);
49690 +                       } while_each_thread(tsk2, tsk);
49691 +                       read_unlock(&tasklist_lock);
49692 +               } else {
49693 +                       gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49694 +                       read_lock(&tasklist_lock);
49695 +                       do_each_thread(tsk2, tsk) {
49696 +                               if (likely(tsk != task)) {
49697 +                                       curr2 = tsk->acl;
49698 +
49699 +                                       if (curr2->device == curr->device &&
49700 +                                           curr2->inode == curr->inode)
49701 +                                               gr_fake_force_sig(SIGKILL, tsk);
49702 +                               }
49703 +                       } while_each_thread(tsk2, tsk);
49704 +                       read_unlock(&tasklist_lock);
49705 +               }
49706 +               rcu_read_unlock();
49707 +       }
49708 +
49709 +       return;
49710 +}
49711 +
49712 +int
49713 +gr_check_crash_exec(const struct file *filp)
49714 +{
49715 +       struct acl_subject_label *curr;
49716 +
49717 +       if (unlikely(!gr_acl_is_enabled()))
49718 +               return 0;
49719 +
49720 +       read_lock(&gr_inode_lock);
49721 +       curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
49722 +                                    __get_dev(filp->f_path.dentry),
49723 +                                    current->role);
49724 +       read_unlock(&gr_inode_lock);
49725 +
49726 +       if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
49727 +           (!curr->crashes && !curr->expires))
49728 +               return 0;
49729 +
49730 +       if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49731 +           time_after(curr->expires, get_seconds()))
49732 +               return 1;
49733 +       else if (time_before_eq(curr->expires, get_seconds())) {
49734 +               curr->crashes = 0;
49735 +               curr->expires = 0;
49736 +       }
49737 +
49738 +       return 0;
49739 +}
49740 +
49741 +void
49742 +gr_handle_alertkill(struct task_struct *task)
49743 +{
49744 +       struct acl_subject_label *curracl;
49745 +       __u32 curr_ip;
49746 +       struct task_struct *p, *p2;
49747 +
49748 +       if (unlikely(!gr_acl_is_enabled()))
49749 +               return;
49750 +
49751 +       curracl = task->acl;
49752 +       curr_ip = task->signal->curr_ip;
49753 +
49754 +       if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
49755 +               read_lock(&tasklist_lock);
49756 +               do_each_thread(p2, p) {
49757 +                       if (p->signal->curr_ip == curr_ip)
49758 +                               gr_fake_force_sig(SIGKILL, p);
49759 +               } while_each_thread(p2, p);
49760 +               read_unlock(&tasklist_lock);
49761 +       } else if (curracl->mode & GR_KILLPROC)
49762 +               gr_fake_force_sig(SIGKILL, task);
49763 +
49764 +       return;
49765 +}
49766 diff -urNp linux-3.0.4/grsecurity/gracl_shm.c linux-3.0.4/grsecurity/gracl_shm.c
49767 --- linux-3.0.4/grsecurity/gracl_shm.c  1969-12-31 19:00:00.000000000 -0500
49768 +++ linux-3.0.4/grsecurity/gracl_shm.c  2011-08-23 21:48:14.000000000 -0400
49769 @@ -0,0 +1,40 @@
49770 +#include <linux/kernel.h>
49771 +#include <linux/mm.h>
49772 +#include <linux/sched.h>
49773 +#include <linux/file.h>
49774 +#include <linux/ipc.h>
49775 +#include <linux/gracl.h>
49776 +#include <linux/grsecurity.h>
49777 +#include <linux/grinternal.h>
49778 +
49779 +int
49780 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49781 +               const time_t shm_createtime, const uid_t cuid, const int shmid)
49782 +{
49783 +       struct task_struct *task;
49784 +
49785 +       if (!gr_acl_is_enabled())
49786 +               return 1;
49787 +
49788 +       rcu_read_lock();
49789 +       read_lock(&tasklist_lock);
49790 +
49791 +       task = find_task_by_vpid(shm_cprid);
49792 +
49793 +       if (unlikely(!task))
49794 +               task = find_task_by_vpid(shm_lapid);
49795 +
49796 +       if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
49797 +                             (task->pid == shm_lapid)) &&
49798 +                    (task->acl->mode & GR_PROTSHM) &&
49799 +                    (task->acl != current->acl))) {
49800 +               read_unlock(&tasklist_lock);
49801 +               rcu_read_unlock();
49802 +               gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
49803 +               return 0;
49804 +       }
49805 +       read_unlock(&tasklist_lock);
49806 +       rcu_read_unlock();
49807 +
49808 +       return 1;
49809 +}
49810 diff -urNp linux-3.0.4/grsecurity/grsec_chdir.c linux-3.0.4/grsecurity/grsec_chdir.c
49811 --- linux-3.0.4/grsecurity/grsec_chdir.c        1969-12-31 19:00:00.000000000 -0500
49812 +++ linux-3.0.4/grsecurity/grsec_chdir.c        2011-08-23 21:48:14.000000000 -0400
49813 @@ -0,0 +1,19 @@
49814 +#include <linux/kernel.h>
49815 +#include <linux/sched.h>
49816 +#include <linux/fs.h>
49817 +#include <linux/file.h>
49818 +#include <linux/grsecurity.h>
49819 +#include <linux/grinternal.h>
49820 +
49821 +void
49822 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
49823 +{
49824 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49825 +       if ((grsec_enable_chdir && grsec_enable_group &&
49826 +            in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
49827 +                                             !grsec_enable_group)) {
49828 +               gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
49829 +       }
49830 +#endif
49831 +       return;
49832 +}
49833 diff -urNp linux-3.0.4/grsecurity/grsec_chroot.c linux-3.0.4/grsecurity/grsec_chroot.c
49834 --- linux-3.0.4/grsecurity/grsec_chroot.c       1969-12-31 19:00:00.000000000 -0500
49835 +++ linux-3.0.4/grsecurity/grsec_chroot.c       2011-08-23 21:48:14.000000000 -0400
49836 @@ -0,0 +1,349 @@
49837 +#include <linux/kernel.h>
49838 +#include <linux/module.h>
49839 +#include <linux/sched.h>
49840 +#include <linux/file.h>
49841 +#include <linux/fs.h>
49842 +#include <linux/mount.h>
49843 +#include <linux/types.h>
49844 +#include <linux/pid_namespace.h>
49845 +#include <linux/grsecurity.h>
49846 +#include <linux/grinternal.h>
49847 +
49848 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
49849 +{
49850 +#ifdef CONFIG_GRKERNSEC
49851 +       if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
49852 +                            path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
49853 +               task->gr_is_chrooted = 1;
49854 +       else
49855 +               task->gr_is_chrooted = 0;
49856 +
49857 +       task->gr_chroot_dentry = path->dentry;
49858 +#endif
49859 +       return;
49860 +}
49861 +
49862 +void gr_clear_chroot_entries(struct task_struct *task)
49863 +{
49864 +#ifdef CONFIG_GRKERNSEC
49865 +       task->gr_is_chrooted = 0;
49866 +       task->gr_chroot_dentry = NULL;
49867 +#endif
49868 +       return;
49869 +}      
49870 +
49871 +int
49872 +gr_handle_chroot_unix(const pid_t pid)
49873 +{
49874 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49875 +       struct task_struct *p;
49876 +
49877 +       if (unlikely(!grsec_enable_chroot_unix))
49878 +               return 1;
49879 +
49880 +       if (likely(!proc_is_chrooted(current)))
49881 +               return 1;
49882 +
49883 +       rcu_read_lock();
49884 +       read_lock(&tasklist_lock);
49885 +       p = find_task_by_vpid_unrestricted(pid);
49886 +       if (unlikely(p && !have_same_root(current, p))) {
49887 +               read_unlock(&tasklist_lock);
49888 +               rcu_read_unlock();
49889 +               gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
49890 +               return 0;
49891 +       }
49892 +       read_unlock(&tasklist_lock);
49893 +       rcu_read_unlock();
49894 +#endif
49895 +       return 1;
49896 +}
49897 +
49898 +int
49899 +gr_handle_chroot_nice(void)
49900 +{
49901 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49902 +       if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
49903 +               gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
49904 +               return -EPERM;
49905 +       }
49906 +#endif
49907 +       return 0;
49908 +}
49909 +
49910 +int
49911 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
49912 +{
49913 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49914 +       if (grsec_enable_chroot_nice && (niceval < task_nice(p))
49915 +                       && proc_is_chrooted(current)) {
49916 +               gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
49917 +               return -EACCES;
49918 +       }
49919 +#endif
49920 +       return 0;
49921 +}
49922 +
49923 +int
49924 +gr_handle_chroot_rawio(const struct inode *inode)
49925 +{
49926 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49927 +       if (grsec_enable_chroot_caps && proc_is_chrooted(current) && 
49928 +           inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
49929 +               return 1;
49930 +#endif
49931 +       return 0;
49932 +}
49933 +
49934 +int
49935 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
49936 +{
49937 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49938 +       struct task_struct *p;
49939 +       int ret = 0;
49940 +       if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
49941 +               return ret;
49942 +
49943 +       read_lock(&tasklist_lock);
49944 +       do_each_pid_task(pid, type, p) {
49945 +               if (!have_same_root(current, p)) {
49946 +                       ret = 1;
49947 +                       goto out;
49948 +               }
49949 +       } while_each_pid_task(pid, type, p);
49950 +out:
49951 +       read_unlock(&tasklist_lock);
49952 +       return ret;
49953 +#endif
49954 +       return 0;
49955 +}
49956 +
49957 +int
49958 +gr_pid_is_chrooted(struct task_struct *p)
49959 +{
49960 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49961 +       if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
49962 +               return 0;
49963 +
49964 +       if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
49965 +           !have_same_root(current, p)) {
49966 +               return 1;
49967 +       }
49968 +#endif
49969 +       return 0;
49970 +}
49971 +
49972 +EXPORT_SYMBOL(gr_pid_is_chrooted);
49973 +
49974 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
49975 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
49976 +{
49977 +       struct path path, currentroot;
49978 +       int ret = 0;
49979 +
49980 +       path.dentry = (struct dentry *)u_dentry;
49981 +       path.mnt = (struct vfsmount *)u_mnt;
49982 +       get_fs_root(current->fs, &currentroot);
49983 +       if (path_is_under(&path, &currentroot))
49984 +               ret = 1;
49985 +       path_put(&currentroot);
49986 +
49987 +       return ret;
49988 +}
49989 +#endif
49990 +
49991 +int
49992 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
49993 +{
49994 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49995 +       if (!grsec_enable_chroot_fchdir)
49996 +               return 1;
49997 +
49998 +       if (!proc_is_chrooted(current))
49999 +               return 1;
50000 +       else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
50001 +               gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
50002 +               return 0;
50003 +       }
50004 +#endif
50005 +       return 1;
50006 +}
50007 +
50008 +int
50009 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50010 +               const time_t shm_createtime)
50011 +{
50012 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50013 +       struct task_struct *p;
50014 +       time_t starttime;
50015 +
50016 +       if (unlikely(!grsec_enable_chroot_shmat))
50017 +               return 1;
50018 +
50019 +       if (likely(!proc_is_chrooted(current)))
50020 +               return 1;
50021 +
50022 +       rcu_read_lock();
50023 +       read_lock(&tasklist_lock);
50024 +
50025 +       if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
50026 +               starttime = p->start_time.tv_sec;
50027 +               if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
50028 +                       if (have_same_root(current, p)) {
50029 +                               goto allow;
50030 +                       } else {
50031 +                               read_unlock(&tasklist_lock);
50032 +                               rcu_read_unlock();
50033 +                               gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50034 +                               return 0;
50035 +                       }
50036 +               }
50037 +               /* creator exited, pid reuse, fall through to next check */
50038 +       }
50039 +       if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
50040 +               if (unlikely(!have_same_root(current, p))) {
50041 +                       read_unlock(&tasklist_lock);
50042 +                       rcu_read_unlock();
50043 +                       gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50044 +                       return 0;
50045 +               }
50046 +       }
50047 +
50048 +allow:
50049 +       read_unlock(&tasklist_lock);
50050 +       rcu_read_unlock();
50051 +#endif
50052 +       return 1;
50053 +}
50054 +
50055 +void
50056 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
50057 +{
50058 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50059 +       if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
50060 +               gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
50061 +#endif
50062 +       return;
50063 +}
50064 +
50065 +int
50066 +gr_handle_chroot_mknod(const struct dentry *dentry,
50067 +                      const struct vfsmount *mnt, const int mode)
50068 +{
50069 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50070 +       if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) && 
50071 +           proc_is_chrooted(current)) {
50072 +               gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
50073 +               return -EPERM;
50074 +       }
50075 +#endif
50076 +       return 0;
50077 +}
50078 +
50079 +int
50080 +gr_handle_chroot_mount(const struct dentry *dentry,
50081 +                      const struct vfsmount *mnt, const char *dev_name)
50082 +{
50083 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50084 +       if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
50085 +               gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
50086 +               return -EPERM;
50087 +       }
50088 +#endif
50089 +       return 0;
50090 +}
50091 +
50092 +int
50093 +gr_handle_chroot_pivot(void)
50094 +{
50095 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50096 +       if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
50097 +               gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
50098 +               return -EPERM;
50099 +       }
50100 +#endif
50101 +       return 0;
50102 +}
50103 +
50104 +int
50105 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
50106 +{
50107 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50108 +       if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
50109 +           !gr_is_outside_chroot(dentry, mnt)) {
50110 +               gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
50111 +               return -EPERM;
50112 +       }
50113 +#endif
50114 +       return 0;
50115 +}
50116 +
50117 +int
50118 +gr_handle_chroot_caps(struct path *path)
50119 +{
50120 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50121 +       if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
50122 +               (init_task.fs->root.dentry != path->dentry) &&
50123 +               (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
50124 +
50125 +               kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50126 +               const struct cred *old = current_cred();
50127 +               struct cred *new = prepare_creds();
50128 +               if (new == NULL)
50129 +                       return 1;
50130 +
50131 +               new->cap_permitted = cap_drop(old->cap_permitted, 
50132 +                                             chroot_caps);
50133 +               new->cap_inheritable = cap_drop(old->cap_inheritable, 
50134 +                                               chroot_caps);
50135 +               new->cap_effective = cap_drop(old->cap_effective,
50136 +                                             chroot_caps);
50137 +
50138 +               commit_creds(new);
50139 +
50140 +               return 0;
50141 +       }
50142 +#endif
50143 +       return 0;
50144 +}
50145 +
50146 +int
50147 +gr_handle_chroot_sysctl(const int op)
50148 +{
50149 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50150 +       if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
50151 +           proc_is_chrooted(current))
50152 +               return -EACCES;
50153 +#endif
50154 +       return 0;
50155 +}
50156 +
50157 +void
50158 +gr_handle_chroot_chdir(struct path *path)
50159 +{
50160 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50161 +       if (grsec_enable_chroot_chdir)
50162 +               set_fs_pwd(current->fs, path);
50163 +#endif
50164 +       return;
50165 +}
50166 +
50167 +int
50168 +gr_handle_chroot_chmod(const struct dentry *dentry,
50169 +                      const struct vfsmount *mnt, const int mode)
50170 +{
50171 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50172 +       /* allow chmod +s on directories, but not files */
50173 +       if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
50174 +           ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
50175 +           proc_is_chrooted(current)) {
50176 +               gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
50177 +               return -EPERM;
50178 +       }
50179 +#endif
50180 +       return 0;
50181 +}
50182 +
50183 +#ifdef CONFIG_SECURITY
50184 +EXPORT_SYMBOL(gr_handle_chroot_caps);
50185 +#endif
50186 diff -urNp linux-3.0.4/grsecurity/grsec_disabled.c linux-3.0.4/grsecurity/grsec_disabled.c
50187 --- linux-3.0.4/grsecurity/grsec_disabled.c     1969-12-31 19:00:00.000000000 -0500
50188 +++ linux-3.0.4/grsecurity/grsec_disabled.c     2011-08-23 21:48:14.000000000 -0400
50189 @@ -0,0 +1,447 @@
50190 +#include <linux/kernel.h>
50191 +#include <linux/module.h>
50192 +#include <linux/sched.h>
50193 +#include <linux/file.h>
50194 +#include <linux/fs.h>
50195 +#include <linux/kdev_t.h>
50196 +#include <linux/net.h>
50197 +#include <linux/in.h>
50198 +#include <linux/ip.h>
50199 +#include <linux/skbuff.h>
50200 +#include <linux/sysctl.h>
50201 +
50202 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50203 +void
50204 +pax_set_initial_flags(struct linux_binprm *bprm)
50205 +{
50206 +       return;
50207 +}
50208 +#endif
50209 +
50210 +#ifdef CONFIG_SYSCTL
50211 +__u32
50212 +gr_handle_sysctl(const struct ctl_table * table, const int op)
50213 +{
50214 +       return 0;
50215 +}
50216 +#endif
50217 +
50218 +#ifdef CONFIG_TASKSTATS
50219 +int gr_is_taskstats_denied(int pid)
50220 +{
50221 +       return 0;
50222 +}
50223 +#endif
50224 +
50225 +int
50226 +gr_acl_is_enabled(void)
50227 +{
50228 +       return 0;
50229 +}
50230 +
50231 +int
50232 +gr_handle_rawio(const struct inode *inode)
50233 +{
50234 +       return 0;
50235 +}
50236 +
50237 +void
50238 +gr_acl_handle_psacct(struct task_struct *task, const long code)
50239 +{
50240 +       return;
50241 +}
50242 +
50243 +int
50244 +gr_handle_ptrace(struct task_struct *task, const long request)
50245 +{
50246 +       return 0;
50247 +}
50248 +
50249 +int
50250 +gr_handle_proc_ptrace(struct task_struct *task)
50251 +{
50252 +       return 0;
50253 +}
50254 +
50255 +void
50256 +gr_learn_resource(const struct task_struct *task,
50257 +                 const int res, const unsigned long wanted, const int gt)
50258 +{
50259 +       return;
50260 +}
50261 +
50262 +int
50263 +gr_set_acls(const int type)
50264 +{
50265 +       return 0;
50266 +}
50267 +
50268 +int
50269 +gr_check_hidden_task(const struct task_struct *tsk)
50270 +{
50271 +       return 0;
50272 +}
50273 +
50274 +int
50275 +gr_check_protected_task(const struct task_struct *task)
50276 +{
50277 +       return 0;
50278 +}
50279 +
50280 +int
50281 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50282 +{
50283 +       return 0;
50284 +}
50285 +
50286 +void
50287 +gr_copy_label(struct task_struct *tsk)
50288 +{
50289 +       return;
50290 +}
50291 +
50292 +void
50293 +gr_set_pax_flags(struct task_struct *task)
50294 +{
50295 +       return;
50296 +}
50297 +
50298 +int
50299 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50300 +                 const int unsafe_share)
50301 +{
50302 +       return 0;
50303 +}
50304 +
50305 +void
50306 +gr_handle_delete(const ino_t ino, const dev_t dev)
50307 +{
50308 +       return;
50309 +}
50310 +
50311 +void
50312 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50313 +{
50314 +       return;
50315 +}
50316 +
50317 +void
50318 +gr_handle_crash(struct task_struct *task, const int sig)
50319 +{
50320 +       return;
50321 +}
50322 +
50323 +int
50324 +gr_check_crash_exec(const struct file *filp)
50325 +{
50326 +       return 0;
50327 +}
50328 +
50329 +int
50330 +gr_check_crash_uid(const uid_t uid)
50331 +{
50332 +       return 0;
50333 +}
50334 +
50335 +void
50336 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50337 +                struct dentry *old_dentry,
50338 +                struct dentry *new_dentry,
50339 +                struct vfsmount *mnt, const __u8 replace)
50340 +{
50341 +       return;
50342 +}
50343 +
50344 +int
50345 +gr_search_socket(const int family, const int type, const int protocol)
50346 +{
50347 +       return 1;
50348 +}
50349 +
50350 +int
50351 +gr_search_connectbind(const int mode, const struct socket *sock,
50352 +                     const struct sockaddr_in *addr)
50353 +{
50354 +       return 0;
50355 +}
50356 +
50357 +int
50358 +gr_is_capable(const int cap)
50359 +{
50360 +       return 1;
50361 +}
50362 +
50363 +int
50364 +gr_is_capable_nolog(const int cap)
50365 +{
50366 +       return 1;
50367 +}
50368 +
50369 +void
50370 +gr_handle_alertkill(struct task_struct *task)
50371 +{
50372 +       return;
50373 +}
50374 +
50375 +__u32
50376 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50377 +{
50378 +       return 1;
50379 +}
50380 +
50381 +__u32
50382 +gr_acl_handle_hidden_file(const struct dentry * dentry,
50383 +                         const struct vfsmount * mnt)
50384 +{
50385 +       return 1;
50386 +}
50387 +
50388 +__u32
50389 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50390 +                  const int fmode)
50391 +{
50392 +       return 1;
50393 +}
50394 +
50395 +__u32
50396 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50397 +{
50398 +       return 1;
50399 +}
50400 +
50401 +__u32
50402 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50403 +{
50404 +       return 1;
50405 +}
50406 +
50407 +int
50408 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50409 +                  unsigned int *vm_flags)
50410 +{
50411 +       return 1;
50412 +}
50413 +
50414 +__u32
50415 +gr_acl_handle_truncate(const struct dentry * dentry,
50416 +                      const struct vfsmount * mnt)
50417 +{
50418 +       return 1;
50419 +}
50420 +
50421 +__u32
50422 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50423 +{
50424 +       return 1;
50425 +}
50426 +
50427 +__u32
50428 +gr_acl_handle_access(const struct dentry * dentry,
50429 +                    const struct vfsmount * mnt, const int fmode)
50430 +{
50431 +       return 1;
50432 +}
50433 +
50434 +__u32
50435 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50436 +                    mode_t mode)
50437 +{
50438 +       return 1;
50439 +}
50440 +
50441 +__u32
50442 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50443 +                   mode_t mode)
50444 +{
50445 +       return 1;
50446 +}
50447 +
50448 +__u32
50449 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50450 +{
50451 +       return 1;
50452 +}
50453 +
50454 +__u32
50455 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50456 +{
50457 +       return 1;
50458 +}
50459 +
50460 +void
50461 +grsecurity_init(void)
50462 +{
50463 +       return;
50464 +}
50465 +
50466 +__u32
50467 +gr_acl_handle_mknod(const struct dentry * new_dentry,
50468 +                   const struct dentry * parent_dentry,
50469 +                   const struct vfsmount * parent_mnt,
50470 +                   const int mode)
50471 +{
50472 +       return 1;
50473 +}
50474 +
50475 +__u32
50476 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
50477 +                   const struct dentry * parent_dentry,
50478 +                   const struct vfsmount * parent_mnt)
50479 +{
50480 +       return 1;
50481 +}
50482 +
50483 +__u32
50484 +gr_acl_handle_symlink(const struct dentry * new_dentry,
50485 +                     const struct dentry * parent_dentry,
50486 +                     const struct vfsmount * parent_mnt, const char *from)
50487 +{
50488 +       return 1;
50489 +}
50490 +
50491 +__u32
50492 +gr_acl_handle_link(const struct dentry * new_dentry,
50493 +                  const struct dentry * parent_dentry,
50494 +                  const struct vfsmount * parent_mnt,
50495 +                  const struct dentry * old_dentry,
50496 +                  const struct vfsmount * old_mnt, const char *to)
50497 +{
50498 +       return 1;
50499 +}
50500 +
50501 +int
50502 +gr_acl_handle_rename(const struct dentry *new_dentry,
50503 +                    const struct dentry *parent_dentry,
50504 +                    const struct vfsmount *parent_mnt,
50505 +                    const struct dentry *old_dentry,
50506 +                    const struct inode *old_parent_inode,
50507 +                    const struct vfsmount *old_mnt, const char *newname)
50508 +{
50509 +       return 0;
50510 +}
50511 +
50512 +int
50513 +gr_acl_handle_filldir(const struct file *file, const char *name,
50514 +                     const int namelen, const ino_t ino)
50515 +{
50516 +       return 1;
50517 +}
50518 +
50519 +int
50520 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50521 +               const time_t shm_createtime, const uid_t cuid, const int shmid)
50522 +{
50523 +       return 1;
50524 +}
50525 +
50526 +int
50527 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50528 +{
50529 +       return 0;
50530 +}
50531 +
50532 +int
50533 +gr_search_accept(const struct socket *sock)
50534 +{
50535 +       return 0;
50536 +}
50537 +
50538 +int
50539 +gr_search_listen(const struct socket *sock)
50540 +{
50541 +       return 0;
50542 +}
50543 +
50544 +int
50545 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50546 +{
50547 +       return 0;
50548 +}
50549 +
50550 +__u32
50551 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50552 +{
50553 +       return 1;
50554 +}
50555 +
50556 +__u32
50557 +gr_acl_handle_creat(const struct dentry * dentry,
50558 +                   const struct dentry * p_dentry,
50559 +                   const struct vfsmount * p_mnt, const int fmode,
50560 +                   const int imode)
50561 +{
50562 +       return 1;
50563 +}
50564 +
50565 +void
50566 +gr_acl_handle_exit(void)
50567 +{
50568 +       return;
50569 +}
50570 +
50571 +int
50572 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50573 +{
50574 +       return 1;
50575 +}
50576 +
50577 +void
50578 +gr_set_role_label(const uid_t uid, const gid_t gid)
50579 +{
50580 +       return;
50581 +}
50582 +
50583 +int
50584 +gr_acl_handle_procpidmem(const struct task_struct *task)
50585 +{
50586 +       return 0;
50587 +}
50588 +
50589 +int
50590 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50591 +{
50592 +       return 0;
50593 +}
50594 +
50595 +int
50596 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50597 +{
50598 +       return 0;
50599 +}
50600 +
50601 +void
50602 +gr_set_kernel_label(struct task_struct *task)
50603 +{
50604 +       return;
50605 +}
50606 +
50607 +int
50608 +gr_check_user_change(int real, int effective, int fs)
50609 +{
50610 +       return 0;
50611 +}
50612 +
50613 +int
50614 +gr_check_group_change(int real, int effective, int fs)
50615 +{
50616 +       return 0;
50617 +}
50618 +
50619 +int gr_acl_enable_at_secure(void)
50620 +{
50621 +       return 0;
50622 +}
50623 +
50624 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50625 +{
50626 +       return dentry->d_inode->i_sb->s_dev;
50627 +}
50628 +
50629 +EXPORT_SYMBOL(gr_is_capable);
50630 +EXPORT_SYMBOL(gr_is_capable_nolog);
50631 +EXPORT_SYMBOL(gr_learn_resource);
50632 +EXPORT_SYMBOL(gr_set_kernel_label);
50633 +#ifdef CONFIG_SECURITY
50634 +EXPORT_SYMBOL(gr_check_user_change);
50635 +EXPORT_SYMBOL(gr_check_group_change);
50636 +#endif
50637 diff -urNp linux-3.0.4/grsecurity/grsec_exec.c linux-3.0.4/grsecurity/grsec_exec.c
50638 --- linux-3.0.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
50639 +++ linux-3.0.4/grsecurity/grsec_exec.c 2011-08-25 17:25:59.000000000 -0400
50640 @@ -0,0 +1,72 @@
50641 +#include <linux/kernel.h>
50642 +#include <linux/sched.h>
50643 +#include <linux/file.h>
50644 +#include <linux/binfmts.h>
50645 +#include <linux/fs.h>
50646 +#include <linux/types.h>
50647 +#include <linux/grdefs.h>
50648 +#include <linux/grsecurity.h>
50649 +#include <linux/grinternal.h>
50650 +#include <linux/capability.h>
50651 +
50652 +#include <asm/uaccess.h>
50653 +
50654 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50655 +static char gr_exec_arg_buf[132];
50656 +static DEFINE_MUTEX(gr_exec_arg_mutex);
50657 +#endif
50658 +
50659 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
50660 +
50661 +void
50662 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
50663 +{
50664 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50665 +       char *grarg = gr_exec_arg_buf;
50666 +       unsigned int i, x, execlen = 0;
50667 +       char c;
50668 +
50669 +       if (!((grsec_enable_execlog && grsec_enable_group &&
50670 +              in_group_p(grsec_audit_gid))
50671 +             || (grsec_enable_execlog && !grsec_enable_group)))
50672 +               return;
50673 +
50674 +       mutex_lock(&gr_exec_arg_mutex);
50675 +       memset(grarg, 0, sizeof(gr_exec_arg_buf));
50676 +
50677 +       for (i = 0; i < bprm->argc && execlen < 128; i++) {
50678 +               const char __user *p;
50679 +               unsigned int len;
50680 +
50681 +               p = get_user_arg_ptr(argv, i);
50682 +               if (IS_ERR(p))
50683 +                       goto log;
50684 +
50685 +               len = strnlen_user(p, 128 - execlen);
50686 +               if (len > 128 - execlen)
50687 +                       len = 128 - execlen;
50688 +               else if (len > 0)
50689 +                       len--;
50690 +               if (copy_from_user(grarg + execlen, p, len))
50691 +                       goto log;
50692 +
50693 +               /* rewrite unprintable characters */
50694 +               for (x = 0; x < len; x++) {
50695 +                       c = *(grarg + execlen + x);
50696 +                       if (c < 32 || c > 126)
50697 +                               *(grarg + execlen + x) = ' ';
50698 +               }
50699 +
50700 +               execlen += len;
50701 +               *(grarg + execlen) = ' ';
50702 +               *(grarg + execlen + 1) = '\0';
50703 +               execlen++;
50704 +       }
50705 +
50706 +      log:
50707 +       gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50708 +                       bprm->file->f_path.mnt, grarg);
50709 +       mutex_unlock(&gr_exec_arg_mutex);
50710 +#endif
50711 +       return;
50712 +}
50713 diff -urNp linux-3.0.4/grsecurity/grsec_fifo.c linux-3.0.4/grsecurity/grsec_fifo.c
50714 --- linux-3.0.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
50715 +++ linux-3.0.4/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
50716 @@ -0,0 +1,24 @@
50717 +#include <linux/kernel.h>
50718 +#include <linux/sched.h>
50719 +#include <linux/fs.h>
50720 +#include <linux/file.h>
50721 +#include <linux/grinternal.h>
50722 +
50723 +int
50724 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
50725 +              const struct dentry *dir, const int flag, const int acc_mode)
50726 +{
50727 +#ifdef CONFIG_GRKERNSEC_FIFO
50728 +       const struct cred *cred = current_cred();
50729 +
50730 +       if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
50731 +           !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
50732 +           (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
50733 +           (cred->fsuid != dentry->d_inode->i_uid)) {
50734 +               if (!inode_permission(dentry->d_inode, acc_mode))
50735 +                       gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
50736 +               return -EACCES;
50737 +       }
50738 +#endif
50739 +       return 0;
50740 +}
50741 diff -urNp linux-3.0.4/grsecurity/grsec_fork.c linux-3.0.4/grsecurity/grsec_fork.c
50742 --- linux-3.0.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
50743 +++ linux-3.0.4/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
50744 @@ -0,0 +1,23 @@
50745 +#include <linux/kernel.h>
50746 +#include <linux/sched.h>
50747 +#include <linux/grsecurity.h>
50748 +#include <linux/grinternal.h>
50749 +#include <linux/errno.h>
50750 +
50751 +void
50752 +gr_log_forkfail(const int retval)
50753 +{
50754 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50755 +       if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
50756 +               switch (retval) {
50757 +                       case -EAGAIN:
50758 +                               gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
50759 +                               break;
50760 +                       case -ENOMEM:
50761 +                               gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
50762 +                               break;
50763 +               }
50764 +       }
50765 +#endif
50766 +       return;
50767 +}
50768 diff -urNp linux-3.0.4/grsecurity/grsec_init.c linux-3.0.4/grsecurity/grsec_init.c
50769 --- linux-3.0.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
50770 +++ linux-3.0.4/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
50771 @@ -0,0 +1,269 @@
50772 +#include <linux/kernel.h>
50773 +#include <linux/sched.h>
50774 +#include <linux/mm.h>
50775 +#include <linux/gracl.h>
50776 +#include <linux/slab.h>
50777 +#include <linux/vmalloc.h>
50778 +#include <linux/percpu.h>
50779 +#include <linux/module.h>
50780 +
50781 +int grsec_enable_brute;
50782 +int grsec_enable_link;
50783 +int grsec_enable_dmesg;
50784 +int grsec_enable_harden_ptrace;
50785 +int grsec_enable_fifo;
50786 +int grsec_enable_execlog;
50787 +int grsec_enable_signal;
50788 +int grsec_enable_forkfail;
50789 +int grsec_enable_audit_ptrace;
50790 +int grsec_enable_time;
50791 +int grsec_enable_audit_textrel;
50792 +int grsec_enable_group;
50793 +int grsec_audit_gid;
50794 +int grsec_enable_chdir;
50795 +int grsec_enable_mount;
50796 +int grsec_enable_rofs;
50797 +int grsec_enable_chroot_findtask;
50798 +int grsec_enable_chroot_mount;
50799 +int grsec_enable_chroot_shmat;
50800 +int grsec_enable_chroot_fchdir;
50801 +int grsec_enable_chroot_double;
50802 +int grsec_enable_chroot_pivot;
50803 +int grsec_enable_chroot_chdir;
50804 +int grsec_enable_chroot_chmod;
50805 +int grsec_enable_chroot_mknod;
50806 +int grsec_enable_chroot_nice;
50807 +int grsec_enable_chroot_execlog;
50808 +int grsec_enable_chroot_caps;
50809 +int grsec_enable_chroot_sysctl;
50810 +int grsec_enable_chroot_unix;
50811 +int grsec_enable_tpe;
50812 +int grsec_tpe_gid;
50813 +int grsec_enable_blackhole;
50814 +#ifdef CONFIG_IPV6_MODULE
50815 +EXPORT_SYMBOL(grsec_enable_blackhole);
50816 +#endif
50817 +int grsec_lastack_retries;
50818 +int grsec_enable_tpe_all;
50819 +int grsec_enable_tpe_invert;
50820 +int grsec_enable_socket_all;
50821 +int grsec_socket_all_gid;
50822 +int grsec_enable_socket_client;
50823 +int grsec_socket_client_gid;
50824 +int grsec_enable_socket_server;
50825 +int grsec_socket_server_gid;
50826 +int grsec_resource_logging;
50827 +int grsec_disable_privio;
50828 +int grsec_enable_log_rwxmaps;
50829 +int grsec_lock;
50830 +
50831 +DEFINE_SPINLOCK(grsec_alert_lock);
50832 +unsigned long grsec_alert_wtime = 0;
50833 +unsigned long grsec_alert_fyet = 0;
50834 +
50835 +DEFINE_SPINLOCK(grsec_audit_lock);
50836 +
50837 +DEFINE_RWLOCK(grsec_exec_file_lock);
50838 +
50839 +char *gr_shared_page[4];
50840 +
50841 +char *gr_alert_log_fmt;
50842 +char *gr_audit_log_fmt;
50843 +char *gr_alert_log_buf;
50844 +char *gr_audit_log_buf;
50845 +
50846 +extern struct gr_arg *gr_usermode;
50847 +extern unsigned char *gr_system_salt;
50848 +extern unsigned char *gr_system_sum;
50849 +
50850 +void __init
50851 +grsecurity_init(void)
50852 +{
50853 +       int j;
50854 +       /* create the per-cpu shared pages */
50855 +
50856 +#ifdef CONFIG_X86
50857 +       memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
50858 +#endif
50859 +
50860 +       for (j = 0; j < 4; j++) {
50861 +               gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
50862 +               if (gr_shared_page[j] == NULL) {
50863 +                       panic("Unable to allocate grsecurity shared page");
50864 +                       return;
50865 +               }
50866 +       }
50867 +
50868 +       /* allocate log buffers */
50869 +       gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
50870 +       if (!gr_alert_log_fmt) {
50871 +               panic("Unable to allocate grsecurity alert log format buffer");
50872 +               return;
50873 +       }
50874 +       gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
50875 +       if (!gr_audit_log_fmt) {
50876 +               panic("Unable to allocate grsecurity audit log format buffer");
50877 +               return;
50878 +       }
50879 +       gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50880 +       if (!gr_alert_log_buf) {
50881 +               panic("Unable to allocate grsecurity alert log buffer");
50882 +               return;
50883 +       }
50884 +       gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50885 +       if (!gr_audit_log_buf) {
50886 +               panic("Unable to allocate grsecurity audit log buffer");
50887 +               return;
50888 +       }
50889 +
50890 +       /* allocate memory for authentication structure */
50891 +       gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
50892 +       gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
50893 +       gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
50894 +
50895 +       if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
50896 +               panic("Unable to allocate grsecurity authentication structure");
50897 +               return;
50898 +       }
50899 +
50900 +
50901 +#ifdef CONFIG_GRKERNSEC_IO
50902 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
50903 +       grsec_disable_privio = 1;
50904 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50905 +       grsec_disable_privio = 1;
50906 +#else
50907 +       grsec_disable_privio = 0;
50908 +#endif
50909 +#endif
50910 +
50911 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
50912 +       /* for backward compatibility, tpe_invert always defaults to on if
50913 +          enabled in the kernel
50914 +       */
50915 +       grsec_enable_tpe_invert = 1;
50916 +#endif
50917 +
50918 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50919 +#ifndef CONFIG_GRKERNSEC_SYSCTL
50920 +       grsec_lock = 1;
50921 +#endif
50922 +
50923 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
50924 +       grsec_enable_audit_textrel = 1;
50925 +#endif
50926 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50927 +       grsec_enable_log_rwxmaps = 1;
50928 +#endif
50929 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
50930 +       grsec_enable_group = 1;
50931 +       grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
50932 +#endif
50933 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50934 +       grsec_enable_chdir = 1;
50935 +#endif
50936 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50937 +       grsec_enable_harden_ptrace = 1;
50938 +#endif
50939 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50940 +       grsec_enable_mount = 1;
50941 +#endif
50942 +#ifdef CONFIG_GRKERNSEC_LINK
50943 +       grsec_enable_link = 1;
50944 +#endif
50945 +#ifdef CONFIG_GRKERNSEC_BRUTE
50946 +       grsec_enable_brute = 1;
50947 +#endif
50948 +#ifdef CONFIG_GRKERNSEC_DMESG
50949 +       grsec_enable_dmesg = 1;
50950 +#endif
50951 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
50952 +       grsec_enable_blackhole = 1;
50953 +       grsec_lastack_retries = 4;
50954 +#endif
50955 +#ifdef CONFIG_GRKERNSEC_FIFO
50956 +       grsec_enable_fifo = 1;
50957 +#endif
50958 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50959 +       grsec_enable_execlog = 1;
50960 +#endif
50961 +#ifdef CONFIG_GRKERNSEC_SIGNAL
50962 +       grsec_enable_signal = 1;
50963 +#endif
50964 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50965 +       grsec_enable_forkfail = 1;
50966 +#endif
50967 +#ifdef CONFIG_GRKERNSEC_TIME
50968 +       grsec_enable_time = 1;
50969 +#endif
50970 +#ifdef CONFIG_GRKERNSEC_RESLOG
50971 +       grsec_resource_logging = 1;
50972 +#endif
50973 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50974 +       grsec_enable_chroot_findtask = 1;
50975 +#endif
50976 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50977 +       grsec_enable_chroot_unix = 1;
50978 +#endif
50979 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50980 +       grsec_enable_chroot_mount = 1;
50981 +#endif
50982 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50983 +       grsec_enable_chroot_fchdir = 1;
50984 +#endif
50985 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50986 +       grsec_enable_chroot_shmat = 1;
50987 +#endif
50988 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
50989 +       grsec_enable_audit_ptrace = 1;
50990 +#endif
50991 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50992 +       grsec_enable_chroot_double = 1;
50993 +#endif
50994 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50995 +       grsec_enable_chroot_pivot = 1;
50996 +#endif
50997 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50998 +       grsec_enable_chroot_chdir = 1;
50999 +#endif
51000 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51001 +       grsec_enable_chroot_chmod = 1;
51002 +#endif
51003 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51004 +       grsec_enable_chroot_mknod = 1;
51005 +#endif
51006 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51007 +       grsec_enable_chroot_nice = 1;
51008 +#endif
51009 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51010 +       grsec_enable_chroot_execlog = 1;
51011 +#endif
51012 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51013 +       grsec_enable_chroot_caps = 1;
51014 +#endif
51015 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51016 +       grsec_enable_chroot_sysctl = 1;
51017 +#endif
51018 +#ifdef CONFIG_GRKERNSEC_TPE
51019 +       grsec_enable_tpe = 1;
51020 +       grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
51021 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
51022 +       grsec_enable_tpe_all = 1;
51023 +#endif
51024 +#endif
51025 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51026 +       grsec_enable_socket_all = 1;
51027 +       grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
51028 +#endif
51029 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51030 +       grsec_enable_socket_client = 1;
51031 +       grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
51032 +#endif
51033 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51034 +       grsec_enable_socket_server = 1;
51035 +       grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
51036 +#endif
51037 +#endif
51038 +
51039 +       return;
51040 +}
51041 diff -urNp linux-3.0.4/grsecurity/grsec_link.c linux-3.0.4/grsecurity/grsec_link.c
51042 --- linux-3.0.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
51043 +++ linux-3.0.4/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
51044 @@ -0,0 +1,43 @@
51045 +#include <linux/kernel.h>
51046 +#include <linux/sched.h>
51047 +#include <linux/fs.h>
51048 +#include <linux/file.h>
51049 +#include <linux/grinternal.h>
51050 +
51051 +int
51052 +gr_handle_follow_link(const struct inode *parent,
51053 +                     const struct inode *inode,
51054 +                     const struct dentry *dentry, const struct vfsmount *mnt)
51055 +{
51056 +#ifdef CONFIG_GRKERNSEC_LINK
51057 +       const struct cred *cred = current_cred();
51058 +
51059 +       if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
51060 +           (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
51061 +           (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
51062 +               gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
51063 +               return -EACCES;
51064 +       }
51065 +#endif
51066 +       return 0;
51067 +}
51068 +
51069 +int
51070 +gr_handle_hardlink(const struct dentry *dentry,
51071 +                  const struct vfsmount *mnt,
51072 +                  struct inode *inode, const int mode, const char *to)
51073 +{
51074 +#ifdef CONFIG_GRKERNSEC_LINK
51075 +       const struct cred *cred = current_cred();
51076 +
51077 +       if (grsec_enable_link && cred->fsuid != inode->i_uid &&
51078 +           (!S_ISREG(mode) || (mode & S_ISUID) ||
51079 +            ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
51080 +            (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
51081 +           !capable(CAP_FOWNER) && cred->uid) {
51082 +               gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
51083 +               return -EPERM;
51084 +       }
51085 +#endif
51086 +       return 0;
51087 +}
51088 diff -urNp linux-3.0.4/grsecurity/grsec_log.c linux-3.0.4/grsecurity/grsec_log.c
51089 --- linux-3.0.4/grsecurity/grsec_log.c  1969-12-31 19:00:00.000000000 -0500
51090 +++ linux-3.0.4/grsecurity/grsec_log.c  2011-08-23 21:48:14.000000000 -0400
51091 @@ -0,0 +1,310 @@
51092 +#include <linux/kernel.h>
51093 +#include <linux/sched.h>
51094 +#include <linux/file.h>
51095 +#include <linux/tty.h>
51096 +#include <linux/fs.h>
51097 +#include <linux/grinternal.h>
51098 +
51099 +#ifdef CONFIG_TREE_PREEMPT_RCU
51100 +#define DISABLE_PREEMPT() preempt_disable()
51101 +#define ENABLE_PREEMPT() preempt_enable()
51102 +#else
51103 +#define DISABLE_PREEMPT()
51104 +#define ENABLE_PREEMPT()
51105 +#endif
51106 +
51107 +#define BEGIN_LOCKS(x) \
51108 +       DISABLE_PREEMPT(); \
51109 +       rcu_read_lock(); \
51110 +       read_lock(&tasklist_lock); \
51111 +       read_lock(&grsec_exec_file_lock); \
51112 +       if (x != GR_DO_AUDIT) \
51113 +               spin_lock(&grsec_alert_lock); \
51114 +       else \
51115 +               spin_lock(&grsec_audit_lock)
51116 +
51117 +#define END_LOCKS(x) \
51118 +       if (x != GR_DO_AUDIT) \
51119 +               spin_unlock(&grsec_alert_lock); \
51120 +       else \
51121 +               spin_unlock(&grsec_audit_lock); \
51122 +       read_unlock(&grsec_exec_file_lock); \
51123 +       read_unlock(&tasklist_lock); \
51124 +       rcu_read_unlock(); \
51125 +       ENABLE_PREEMPT(); \
51126 +       if (x == GR_DONT_AUDIT) \
51127 +               gr_handle_alertkill(current)
51128 +
51129 +enum {
51130 +       FLOODING,
51131 +       NO_FLOODING
51132 +};
51133 +
51134 +extern char *gr_alert_log_fmt;
51135 +extern char *gr_audit_log_fmt;
51136 +extern char *gr_alert_log_buf;
51137 +extern char *gr_audit_log_buf;
51138 +
51139 +static int gr_log_start(int audit)
51140 +{
51141 +       char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
51142 +       char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
51143 +       char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51144 +
51145 +       if (audit == GR_DO_AUDIT)
51146 +               goto set_fmt;
51147 +
51148 +       if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
51149 +               grsec_alert_wtime = jiffies;
51150 +               grsec_alert_fyet = 0;
51151 +       } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
51152 +               grsec_alert_fyet++;
51153 +       } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
51154 +               grsec_alert_wtime = jiffies;
51155 +               grsec_alert_fyet++;
51156 +               printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
51157 +               return FLOODING;
51158 +       } else return FLOODING;
51159 +
51160 +set_fmt:
51161 +       memset(buf, 0, PAGE_SIZE);
51162 +       if (current->signal->curr_ip && gr_acl_is_enabled()) {
51163 +               sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
51164 +               snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51165 +       } else if (current->signal->curr_ip) {
51166 +               sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
51167 +               snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
51168 +       } else if (gr_acl_is_enabled()) {
51169 +               sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
51170 +               snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51171 +       } else {
51172 +               sprintf(fmt, "%s%s", loglevel, "grsec: ");
51173 +               strcpy(buf, fmt);
51174 +       }
51175 +
51176 +       return NO_FLOODING;
51177 +}
51178 +
51179 +static void gr_log_middle(int audit, const char *msg, va_list ap)
51180 +       __attribute__ ((format (printf, 2, 0)));
51181 +
51182 +static void gr_log_middle(int audit, const char *msg, va_list ap)
51183 +{
51184 +       char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51185 +       unsigned int len = strlen(buf);
51186 +
51187 +       vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51188 +
51189 +       return;
51190 +}
51191 +
51192 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
51193 +       __attribute__ ((format (printf, 2, 3)));
51194 +
51195 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
51196 +{
51197 +       char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51198 +       unsigned int len = strlen(buf);
51199 +       va_list ap;
51200 +
51201 +       va_start(ap, msg);
51202 +       vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51203 +       va_end(ap);
51204 +
51205 +       return;
51206 +}
51207 +
51208 +static void gr_log_end(int audit)
51209 +{
51210 +       char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51211 +       unsigned int len = strlen(buf);
51212 +
51213 +       snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
51214 +       printk("%s\n", buf);
51215 +
51216 +       return;
51217 +}
51218 +
51219 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
51220 +{
51221 +       int logtype;
51222 +       char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
51223 +       char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51224 +       void *voidptr = NULL;
51225 +       int num1 = 0, num2 = 0;
51226 +       unsigned long ulong1 = 0, ulong2 = 0;
51227 +       struct dentry *dentry = NULL;
51228 +       struct vfsmount *mnt = NULL;
51229 +       struct file *file = NULL;
51230 +       struct task_struct *task = NULL;
51231 +       const struct cred *cred, *pcred;
51232 +       va_list ap;
51233 +
51234 +       BEGIN_LOCKS(audit);
51235 +       logtype = gr_log_start(audit);
51236 +       if (logtype == FLOODING) {
51237 +               END_LOCKS(audit);
51238 +               return;
51239 +       }
51240 +       va_start(ap, argtypes);
51241 +       switch (argtypes) {
51242 +       case GR_TTYSNIFF:
51243 +               task = va_arg(ap, struct task_struct *);
51244 +               gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51245 +               break;
51246 +       case GR_SYSCTL_HIDDEN:
51247 +               str1 = va_arg(ap, char *);
51248 +               gr_log_middle_varargs(audit, msg, result, str1);
51249 +               break;
51250 +       case GR_RBAC:
51251 +               dentry = va_arg(ap, struct dentry *);
51252 +               mnt = va_arg(ap, struct vfsmount *);
51253 +               gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51254 +               break;
51255 +       case GR_RBAC_STR:
51256 +               dentry = va_arg(ap, struct dentry *);
51257 +               mnt = va_arg(ap, struct vfsmount *);
51258 +               str1 = va_arg(ap, char *);
51259 +               gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51260 +               break;
51261 +       case GR_STR_RBAC:
51262 +               str1 = va_arg(ap, char *);
51263 +               dentry = va_arg(ap, struct dentry *);
51264 +               mnt = va_arg(ap, struct vfsmount *);
51265 +               gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51266 +               break;
51267 +       case GR_RBAC_MODE2:
51268 +               dentry = va_arg(ap, struct dentry *);
51269 +               mnt = va_arg(ap, struct vfsmount *);
51270 +               str1 = va_arg(ap, char *);
51271 +               str2 = va_arg(ap, char *);
51272 +               gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51273 +               break;
51274 +       case GR_RBAC_MODE3:
51275 +               dentry = va_arg(ap, struct dentry *);
51276 +               mnt = va_arg(ap, struct vfsmount *);
51277 +               str1 = va_arg(ap, char *);
51278 +               str2 = va_arg(ap, char *);
51279 +               str3 = va_arg(ap, char *);
51280 +               gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51281 +               break;
51282 +       case GR_FILENAME:
51283 +               dentry = va_arg(ap, struct dentry *);
51284 +               mnt = va_arg(ap, struct vfsmount *);
51285 +               gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51286 +               break;
51287 +       case GR_STR_FILENAME:
51288 +               str1 = va_arg(ap, char *);
51289 +               dentry = va_arg(ap, struct dentry *);
51290 +               mnt = va_arg(ap, struct vfsmount *);
51291 +               gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51292 +               break;
51293 +       case GR_FILENAME_STR:
51294 +               dentry = va_arg(ap, struct dentry *);
51295 +               mnt = va_arg(ap, struct vfsmount *);
51296 +               str1 = va_arg(ap, char *);
51297 +               gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51298 +               break;
51299 +       case GR_FILENAME_TWO_INT:
51300 +               dentry = va_arg(ap, struct dentry *);
51301 +               mnt = va_arg(ap, struct vfsmount *);
51302 +               num1 = va_arg(ap, int);
51303 +               num2 = va_arg(ap, int);
51304 +               gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51305 +               break;
51306 +       case GR_FILENAME_TWO_INT_STR:
51307 +               dentry = va_arg(ap, struct dentry *);
51308 +               mnt = va_arg(ap, struct vfsmount *);
51309 +               num1 = va_arg(ap, int);
51310 +               num2 = va_arg(ap, int);
51311 +               str1 = va_arg(ap, char *);
51312 +               gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51313 +               break;
51314 +       case GR_TEXTREL:
51315 +               file = va_arg(ap, struct file *);
51316 +               ulong1 = va_arg(ap, unsigned long);
51317 +               ulong2 = va_arg(ap, unsigned long);
51318 +               gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51319 +               break;
51320 +       case GR_PTRACE:
51321 +               task = va_arg(ap, struct task_struct *);
51322 +               gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51323 +               break;
51324 +       case GR_RESOURCE:
51325 +               task = va_arg(ap, struct task_struct *);
51326 +               cred = __task_cred(task);
51327 +               pcred = __task_cred(task->real_parent);
51328 +               ulong1 = va_arg(ap, unsigned long);
51329 +               str1 = va_arg(ap, char *);
51330 +               ulong2 = va_arg(ap, unsigned long);
51331 +               gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51332 +               break;
51333 +       case GR_CAP:
51334 +               task = va_arg(ap, struct task_struct *);
51335 +               cred = __task_cred(task);
51336 +               pcred = __task_cred(task->real_parent);
51337 +               str1 = va_arg(ap, char *);
51338 +               gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51339 +               break;
51340 +       case GR_SIG:
51341 +               str1 = va_arg(ap, char *);
51342 +               voidptr = va_arg(ap, void *);
51343 +               gr_log_middle_varargs(audit, msg, str1, voidptr);
51344 +               break;
51345 +       case GR_SIG2:
51346 +               task = va_arg(ap, struct task_struct *);
51347 +               cred = __task_cred(task);
51348 +               pcred = __task_cred(task->real_parent);
51349 +               num1 = va_arg(ap, int);
51350 +               gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51351 +               break;
51352 +       case GR_CRASH1:
51353 +               task = va_arg(ap, struct task_struct *);
51354 +               cred = __task_cred(task);
51355 +               pcred = __task_cred(task->real_parent);
51356 +               ulong1 = va_arg(ap, unsigned long);
51357 +               gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51358 +               break;
51359 +       case GR_CRASH2:
51360 +               task = va_arg(ap, struct task_struct *);
51361 +               cred = __task_cred(task);
51362 +               pcred = __task_cred(task->real_parent);
51363 +               ulong1 = va_arg(ap, unsigned long);
51364 +               gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51365 +               break;
51366 +       case GR_RWXMAP:
51367 +               file = va_arg(ap, struct file *);
51368 +               gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51369 +               break;
51370 +       case GR_PSACCT:
51371 +               {
51372 +                       unsigned int wday, cday;
51373 +                       __u8 whr, chr;
51374 +                       __u8 wmin, cmin;
51375 +                       __u8 wsec, csec;
51376 +                       char cur_tty[64] = { 0 };
51377 +                       char parent_tty[64] = { 0 };
51378 +
51379 +                       task = va_arg(ap, struct task_struct *);
51380 +                       wday = va_arg(ap, unsigned int);
51381 +                       cday = va_arg(ap, unsigned int);
51382 +                       whr = va_arg(ap, int);
51383 +                       chr = va_arg(ap, int);
51384 +                       wmin = va_arg(ap, int);
51385 +                       cmin = va_arg(ap, int);
51386 +                       wsec = va_arg(ap, int);
51387 +                       csec = va_arg(ap, int);
51388 +                       ulong1 = va_arg(ap, unsigned long);
51389 +                       cred = __task_cred(task);
51390 +                       pcred = __task_cred(task->real_parent);
51391 +
51392 +                       gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51393 +               }
51394 +               break;
51395 +       default:
51396 +               gr_log_middle(audit, msg, ap);
51397 +       }
51398 +       va_end(ap);
51399 +       gr_log_end(audit);
51400 +       END_LOCKS(audit);
51401 +}
51402 diff -urNp linux-3.0.4/grsecurity/grsec_mem.c linux-3.0.4/grsecurity/grsec_mem.c
51403 --- linux-3.0.4/grsecurity/grsec_mem.c  1969-12-31 19:00:00.000000000 -0500
51404 +++ linux-3.0.4/grsecurity/grsec_mem.c  2011-08-23 21:48:14.000000000 -0400
51405 @@ -0,0 +1,33 @@
51406 +#include <linux/kernel.h>
51407 +#include <linux/sched.h>
51408 +#include <linux/mm.h>
51409 +#include <linux/mman.h>
51410 +#include <linux/grinternal.h>
51411 +
51412 +void
51413 +gr_handle_ioperm(void)
51414 +{
51415 +       gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51416 +       return;
51417 +}
51418 +
51419 +void
51420 +gr_handle_iopl(void)
51421 +{
51422 +       gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51423 +       return;
51424 +}
51425 +
51426 +void
51427 +gr_handle_mem_readwrite(u64 from, u64 to)
51428 +{
51429 +       gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51430 +       return;
51431 +}
51432 +
51433 +void
51434 +gr_handle_vm86(void)
51435 +{
51436 +       gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51437 +       return;
51438 +}
51439 diff -urNp linux-3.0.4/grsecurity/grsec_mount.c linux-3.0.4/grsecurity/grsec_mount.c
51440 --- linux-3.0.4/grsecurity/grsec_mount.c        1969-12-31 19:00:00.000000000 -0500
51441 +++ linux-3.0.4/grsecurity/grsec_mount.c        2011-08-23 21:48:14.000000000 -0400
51442 @@ -0,0 +1,62 @@
51443 +#include <linux/kernel.h>
51444 +#include <linux/sched.h>
51445 +#include <linux/mount.h>
51446 +#include <linux/grsecurity.h>
51447 +#include <linux/grinternal.h>
51448 +
51449 +void
51450 +gr_log_remount(const char *devname, const int retval)
51451 +{
51452 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51453 +       if (grsec_enable_mount && (retval >= 0))
51454 +               gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51455 +#endif
51456 +       return;
51457 +}
51458 +
51459 +void
51460 +gr_log_unmount(const char *devname, const int retval)
51461 +{
51462 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51463 +       if (grsec_enable_mount && (retval >= 0))
51464 +               gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51465 +#endif
51466 +       return;
51467 +}
51468 +
51469 +void
51470 +gr_log_mount(const char *from, const char *to, const int retval)
51471 +{
51472 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51473 +       if (grsec_enable_mount && (retval >= 0))
51474 +               gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
51475 +#endif
51476 +       return;
51477 +}
51478 +
51479 +int
51480 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51481 +{
51482 +#ifdef CONFIG_GRKERNSEC_ROFS
51483 +       if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51484 +               gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51485 +               return -EPERM;
51486 +       } else
51487 +               return 0;
51488 +#endif
51489 +       return 0;
51490 +}
51491 +
51492 +int
51493 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51494 +{
51495 +#ifdef CONFIG_GRKERNSEC_ROFS
51496 +       if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51497 +           dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51498 +               gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51499 +               return -EPERM;
51500 +       } else
51501 +               return 0;
51502 +#endif
51503 +       return 0;
51504 +}
51505 diff -urNp linux-3.0.4/grsecurity/grsec_pax.c linux-3.0.4/grsecurity/grsec_pax.c
51506 --- linux-3.0.4/grsecurity/grsec_pax.c  1969-12-31 19:00:00.000000000 -0500
51507 +++ linux-3.0.4/grsecurity/grsec_pax.c  2011-08-23 21:48:14.000000000 -0400
51508 @@ -0,0 +1,36 @@
51509 +#include <linux/kernel.h>
51510 +#include <linux/sched.h>
51511 +#include <linux/mm.h>
51512 +#include <linux/file.h>
51513 +#include <linux/grinternal.h>
51514 +#include <linux/grsecurity.h>
51515 +
51516 +void
51517 +gr_log_textrel(struct vm_area_struct * vma)
51518 +{
51519 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51520 +       if (grsec_enable_audit_textrel)
51521 +               gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51522 +#endif
51523 +       return;
51524 +}
51525 +
51526 +void
51527 +gr_log_rwxmmap(struct file *file)
51528 +{
51529 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51530 +       if (grsec_enable_log_rwxmaps)
51531 +               gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
51532 +#endif
51533 +       return;
51534 +}
51535 +
51536 +void
51537 +gr_log_rwxmprotect(struct file *file)
51538 +{
51539 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51540 +       if (grsec_enable_log_rwxmaps)
51541 +               gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
51542 +#endif
51543 +       return;
51544 +}
51545 diff -urNp linux-3.0.4/grsecurity/grsec_ptrace.c linux-3.0.4/grsecurity/grsec_ptrace.c
51546 --- linux-3.0.4/grsecurity/grsec_ptrace.c       1969-12-31 19:00:00.000000000 -0500
51547 +++ linux-3.0.4/grsecurity/grsec_ptrace.c       2011-08-23 21:48:14.000000000 -0400
51548 @@ -0,0 +1,14 @@
51549 +#include <linux/kernel.h>
51550 +#include <linux/sched.h>
51551 +#include <linux/grinternal.h>
51552 +#include <linux/grsecurity.h>
51553 +
51554 +void
51555 +gr_audit_ptrace(struct task_struct *task)
51556 +{
51557 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51558 +       if (grsec_enable_audit_ptrace)
51559 +               gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
51560 +#endif
51561 +       return;
51562 +}
51563 diff -urNp linux-3.0.4/grsecurity/grsec_sig.c linux-3.0.4/grsecurity/grsec_sig.c
51564 --- linux-3.0.4/grsecurity/grsec_sig.c  1969-12-31 19:00:00.000000000 -0500
51565 +++ linux-3.0.4/grsecurity/grsec_sig.c  2011-08-23 21:48:14.000000000 -0400
51566 @@ -0,0 +1,206 @@
51567 +#include <linux/kernel.h>
51568 +#include <linux/sched.h>
51569 +#include <linux/delay.h>
51570 +#include <linux/grsecurity.h>
51571 +#include <linux/grinternal.h>
51572 +#include <linux/hardirq.h>
51573 +
51574 +char *signames[] = {
51575 +       [SIGSEGV] = "Segmentation fault",
51576 +       [SIGILL] = "Illegal instruction",
51577 +       [SIGABRT] = "Abort",
51578 +       [SIGBUS] = "Invalid alignment/Bus error"
51579 +};
51580 +
51581 +void
51582 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
51583 +{
51584 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51585 +       if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
51586 +                                   (sig == SIGABRT) || (sig == SIGBUS))) {
51587 +               if (t->pid == current->pid) {
51588 +                       gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
51589 +               } else {
51590 +                       gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
51591 +               }
51592 +       }
51593 +#endif
51594 +       return;
51595 +}
51596 +
51597 +int
51598 +gr_handle_signal(const struct task_struct *p, const int sig)
51599 +{
51600 +#ifdef CONFIG_GRKERNSEC
51601 +       if (current->pid > 1 && gr_check_protected_task(p)) {
51602 +               gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
51603 +               return -EPERM;
51604 +       } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
51605 +               return -EPERM;
51606 +       }
51607 +#endif
51608 +       return 0;
51609 +}
51610 +
51611 +#ifdef CONFIG_GRKERNSEC
51612 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
51613 +
51614 +int gr_fake_force_sig(int sig, struct task_struct *t)
51615 +{
51616 +       unsigned long int flags;
51617 +       int ret, blocked, ignored;
51618 +       struct k_sigaction *action;
51619 +
51620 +       spin_lock_irqsave(&t->sighand->siglock, flags);
51621 +       action = &t->sighand->action[sig-1];
51622 +       ignored = action->sa.sa_handler == SIG_IGN;
51623 +       blocked = sigismember(&t->blocked, sig);
51624 +       if (blocked || ignored) {
51625 +               action->sa.sa_handler = SIG_DFL;
51626 +               if (blocked) {
51627 +                       sigdelset(&t->blocked, sig);
51628 +                       recalc_sigpending_and_wake(t);
51629 +               }
51630 +       }
51631 +       if (action->sa.sa_handler == SIG_DFL)
51632 +               t->signal->flags &= ~SIGNAL_UNKILLABLE;
51633 +       ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
51634 +
51635 +       spin_unlock_irqrestore(&t->sighand->siglock, flags);
51636 +
51637 +       return ret;
51638 +}
51639 +#endif
51640 +
51641 +#ifdef CONFIG_GRKERNSEC_BRUTE
51642 +#define GR_USER_BAN_TIME (15 * 60)
51643 +
51644 +static int __get_dumpable(unsigned long mm_flags)
51645 +{
51646 +       int ret;
51647 +
51648 +       ret = mm_flags & MMF_DUMPABLE_MASK;
51649 +       return (ret >= 2) ? 2 : ret;
51650 +}
51651 +#endif
51652 +
51653 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
51654 +{
51655 +#ifdef CONFIG_GRKERNSEC_BRUTE
51656 +       uid_t uid = 0;
51657 +
51658 +       if (!grsec_enable_brute)
51659 +               return;
51660 +
51661 +       rcu_read_lock();
51662 +       read_lock(&tasklist_lock);
51663 +       read_lock(&grsec_exec_file_lock);
51664 +       if (p->real_parent && p->real_parent->exec_file == p->exec_file)
51665 +               p->real_parent->brute = 1;
51666 +       else {
51667 +               const struct cred *cred = __task_cred(p), *cred2;
51668 +               struct task_struct *tsk, *tsk2;
51669 +
51670 +               if (!__get_dumpable(mm_flags) && cred->uid) {
51671 +                       struct user_struct *user;
51672 +
51673 +                       uid = cred->uid;
51674 +
51675 +                       /* this is put upon execution past expiration */
51676 +                       user = find_user(uid);
51677 +                       if (user == NULL)
51678 +                               goto unlock;
51679 +                       user->banned = 1;
51680 +                       user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
51681 +                       if (user->ban_expires == ~0UL)
51682 +                               user->ban_expires--;
51683 +
51684 +                       do_each_thread(tsk2, tsk) {
51685 +                               cred2 = __task_cred(tsk);
51686 +                               if (tsk != p && cred2->uid == uid)
51687 +                                       gr_fake_force_sig(SIGKILL, tsk);
51688 +                       } while_each_thread(tsk2, tsk);
51689 +               }
51690 +       }
51691 +unlock:
51692 +       read_unlock(&grsec_exec_file_lock);
51693 +       read_unlock(&tasklist_lock);
51694 +       rcu_read_unlock();
51695 +
51696 +       if (uid)
51697 +               printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
51698 +
51699 +#endif
51700 +       return;
51701 +}
51702 +
51703 +void gr_handle_brute_check(void)
51704 +{
51705 +#ifdef CONFIG_GRKERNSEC_BRUTE
51706 +       if (current->brute)
51707 +               msleep(30 * 1000);
51708 +#endif
51709 +       return;
51710 +}
51711 +
51712 +void gr_handle_kernel_exploit(void)
51713 +{
51714 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
51715 +       const struct cred *cred;
51716 +       struct task_struct *tsk, *tsk2;
51717 +       struct user_struct *user;
51718 +       uid_t uid;
51719 +
51720 +       if (in_irq() || in_serving_softirq() || in_nmi())
51721 +               panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
51722 +
51723 +       uid = current_uid();
51724 +
51725 +       if (uid == 0)
51726 +               panic("grsec: halting the system due to suspicious kernel crash caused by root");
51727 +       else {
51728 +               /* kill all the processes of this user, hold a reference
51729 +                  to their creds struct, and prevent them from creating
51730 +                  another process until system reset
51731 +               */
51732 +               printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
51733 +               /* we intentionally leak this ref */
51734 +               user = get_uid(current->cred->user);
51735 +               if (user) {
51736 +                       user->banned = 1;
51737 +                       user->ban_expires = ~0UL;
51738 +               }
51739 +
51740 +               read_lock(&tasklist_lock);
51741 +               do_each_thread(tsk2, tsk) {
51742 +                       cred = __task_cred(tsk);
51743 +                       if (cred->uid == uid)
51744 +                               gr_fake_force_sig(SIGKILL, tsk);
51745 +               } while_each_thread(tsk2, tsk);
51746 +               read_unlock(&tasklist_lock); 
51747 +       }
51748 +#endif
51749 +}
51750 +
51751 +int __gr_process_user_ban(struct user_struct *user)
51752 +{
51753 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51754 +       if (unlikely(user->banned)) {
51755 +               if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
51756 +                       user->banned = 0;
51757 +                       user->ban_expires = 0;
51758 +                       free_uid(user);
51759 +               } else
51760 +                       return -EPERM;
51761 +       }
51762 +#endif
51763 +       return 0;
51764 +}
51765 +
51766 +int gr_process_user_ban(void)
51767 +{
51768 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51769 +       return __gr_process_user_ban(current->cred->user);
51770 +#endif
51771 +       return 0;
51772 +}
51773 diff -urNp linux-3.0.4/grsecurity/grsec_sock.c linux-3.0.4/grsecurity/grsec_sock.c
51774 --- linux-3.0.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
51775 +++ linux-3.0.4/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
51776 @@ -0,0 +1,244 @@
51777 +#include <linux/kernel.h>
51778 +#include <linux/module.h>
51779 +#include <linux/sched.h>
51780 +#include <linux/file.h>
51781 +#include <linux/net.h>
51782 +#include <linux/in.h>
51783 +#include <linux/ip.h>
51784 +#include <net/sock.h>
51785 +#include <net/inet_sock.h>
51786 +#include <linux/grsecurity.h>
51787 +#include <linux/grinternal.h>
51788 +#include <linux/gracl.h>
51789 +
51790 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
51791 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
51792 +
51793 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
51794 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
51795 +
51796 +#ifdef CONFIG_UNIX_MODULE
51797 +EXPORT_SYMBOL(gr_acl_handle_unix);
51798 +EXPORT_SYMBOL(gr_acl_handle_mknod);
51799 +EXPORT_SYMBOL(gr_handle_chroot_unix);
51800 +EXPORT_SYMBOL(gr_handle_create);
51801 +#endif
51802 +
51803 +#ifdef CONFIG_GRKERNSEC
51804 +#define gr_conn_table_size 32749
51805 +struct conn_table_entry {
51806 +       struct conn_table_entry *next;
51807 +       struct signal_struct *sig;
51808 +};
51809 +
51810 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
51811 +DEFINE_SPINLOCK(gr_conn_table_lock);
51812 +
51813 +extern const char * gr_socktype_to_name(unsigned char type);
51814 +extern const char * gr_proto_to_name(unsigned char proto);
51815 +extern const char * gr_sockfamily_to_name(unsigned char family);
51816 +
51817 +static __inline__ int 
51818 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
51819 +{
51820 +       return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
51821 +}
51822 +
51823 +static __inline__ int
51824 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr, 
51825 +          __u16 sport, __u16 dport)
51826 +{
51827 +       if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
51828 +                    sig->gr_sport == sport && sig->gr_dport == dport))
51829 +               return 1;
51830 +       else
51831 +               return 0;
51832 +}
51833 +
51834 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
51835 +{
51836 +       struct conn_table_entry **match;
51837 +       unsigned int index;
51838 +
51839 +       index = conn_hash(sig->gr_saddr, sig->gr_daddr, 
51840 +                         sig->gr_sport, sig->gr_dport, 
51841 +                         gr_conn_table_size);
51842 +
51843 +       newent->sig = sig;
51844 +       
51845 +       match = &gr_conn_table[index];
51846 +       newent->next = *match;
51847 +       *match = newent;
51848 +
51849 +       return;
51850 +}
51851 +
51852 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
51853 +{
51854 +       struct conn_table_entry *match, *last = NULL;
51855 +       unsigned int index;
51856 +
51857 +       index = conn_hash(sig->gr_saddr, sig->gr_daddr, 
51858 +                         sig->gr_sport, sig->gr_dport, 
51859 +                         gr_conn_table_size);
51860 +
51861 +       match = gr_conn_table[index];
51862 +       while (match && !conn_match(match->sig, 
51863 +               sig->gr_saddr, sig->gr_daddr, sig->gr_sport, 
51864 +               sig->gr_dport)) {
51865 +               last = match;
51866 +               match = match->next;
51867 +       }
51868 +
51869 +       if (match) {
51870 +               if (last)
51871 +                       last->next = match->next;
51872 +               else
51873 +                       gr_conn_table[index] = NULL;
51874 +               kfree(match);
51875 +       }
51876 +
51877 +       return;
51878 +}
51879 +
51880 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
51881 +                                            __u16 sport, __u16 dport)
51882 +{
51883 +       struct conn_table_entry *match;
51884 +       unsigned int index;
51885 +
51886 +       index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
51887 +
51888 +       match = gr_conn_table[index];
51889 +       while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
51890 +               match = match->next;
51891 +
51892 +       if (match)
51893 +               return match->sig;
51894 +       else
51895 +               return NULL;
51896 +}
51897 +
51898 +#endif
51899 +
51900 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
51901 +{
51902 +#ifdef CONFIG_GRKERNSEC
51903 +       struct signal_struct *sig = task->signal;
51904 +       struct conn_table_entry *newent;
51905 +
51906 +       newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
51907 +       if (newent == NULL)
51908 +               return;
51909 +       /* no bh lock needed since we are called with bh disabled */
51910 +       spin_lock(&gr_conn_table_lock);
51911 +       gr_del_task_from_ip_table_nolock(sig);
51912 +       sig->gr_saddr = inet->inet_rcv_saddr;
51913 +       sig->gr_daddr = inet->inet_daddr;
51914 +       sig->gr_sport = inet->inet_sport;
51915 +       sig->gr_dport = inet->inet_dport;
51916 +       gr_add_to_task_ip_table_nolock(sig, newent);
51917 +       spin_unlock(&gr_conn_table_lock);
51918 +#endif
51919 +       return;
51920 +}
51921 +
51922 +void gr_del_task_from_ip_table(struct task_struct *task)
51923 +{
51924 +#ifdef CONFIG_GRKERNSEC
51925 +       spin_lock_bh(&gr_conn_table_lock);
51926 +       gr_del_task_from_ip_table_nolock(task->signal);
51927 +       spin_unlock_bh(&gr_conn_table_lock);
51928 +#endif
51929 +       return;
51930 +}
51931 +
51932 +void
51933 +gr_attach_curr_ip(const struct sock *sk)
51934 +{
51935 +#ifdef CONFIG_GRKERNSEC
51936 +       struct signal_struct *p, *set;
51937 +       const struct inet_sock *inet = inet_sk(sk);     
51938 +
51939 +       if (unlikely(sk->sk_protocol != IPPROTO_TCP))
51940 +               return;
51941 +
51942 +       set = current->signal;
51943 +
51944 +       spin_lock_bh(&gr_conn_table_lock);
51945 +       p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
51946 +                                   inet->inet_dport, inet->inet_sport);
51947 +       if (unlikely(p != NULL)) {
51948 +               set->curr_ip = p->curr_ip;
51949 +               set->used_accept = 1;
51950 +               gr_del_task_from_ip_table_nolock(p);
51951 +               spin_unlock_bh(&gr_conn_table_lock);
51952 +               return;
51953 +       }
51954 +       spin_unlock_bh(&gr_conn_table_lock);
51955 +
51956 +       set->curr_ip = inet->inet_daddr;
51957 +       set->used_accept = 1;
51958 +#endif
51959 +       return;
51960 +}
51961 +
51962 +int
51963 +gr_handle_sock_all(const int family, const int type, const int protocol)
51964 +{
51965 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51966 +       if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
51967 +           (family != AF_UNIX)) {
51968 +               if (family == AF_INET)
51969 +                       gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
51970 +               else
51971 +                       gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
51972 +               return -EACCES;
51973 +       }
51974 +#endif
51975 +       return 0;
51976 +}
51977 +
51978 +int
51979 +gr_handle_sock_server(const struct sockaddr *sck)
51980 +{
51981 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51982 +       if (grsec_enable_socket_server &&
51983 +           in_group_p(grsec_socket_server_gid) &&
51984 +           sck && (sck->sa_family != AF_UNIX) &&
51985 +           (sck->sa_family != AF_LOCAL)) {
51986 +               gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51987 +               return -EACCES;
51988 +       }
51989 +#endif
51990 +       return 0;
51991 +}
51992 +
51993 +int
51994 +gr_handle_sock_server_other(const struct sock *sck)
51995 +{
51996 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51997 +       if (grsec_enable_socket_server &&
51998 +           in_group_p(grsec_socket_server_gid) &&
51999 +           sck && (sck->sk_family != AF_UNIX) &&
52000 +           (sck->sk_family != AF_LOCAL)) {
52001 +               gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52002 +               return -EACCES;
52003 +       }
52004 +#endif
52005 +       return 0;
52006 +}
52007 +
52008 +int
52009 +gr_handle_sock_client(const struct sockaddr *sck)
52010 +{
52011 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52012 +       if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
52013 +           sck && (sck->sa_family != AF_UNIX) &&
52014 +           (sck->sa_family != AF_LOCAL)) {
52015 +               gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
52016 +               return -EACCES;
52017 +       }
52018 +#endif
52019 +       return 0;
52020 +}
52021 diff -urNp linux-3.0.4/grsecurity/grsec_sysctl.c linux-3.0.4/grsecurity/grsec_sysctl.c
52022 --- linux-3.0.4/grsecurity/grsec_sysctl.c       1969-12-31 19:00:00.000000000 -0500
52023 +++ linux-3.0.4/grsecurity/grsec_sysctl.c       2011-08-25 17:26:15.000000000 -0400
52024 @@ -0,0 +1,433 @@
52025 +#include <linux/kernel.h>
52026 +#include <linux/sched.h>
52027 +#include <linux/sysctl.h>
52028 +#include <linux/grsecurity.h>
52029 +#include <linux/grinternal.h>
52030 +
52031 +int
52032 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
52033 +{
52034 +#ifdef CONFIG_GRKERNSEC_SYSCTL
52035 +       if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
52036 +               gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
52037 +               return -EACCES;
52038 +       }
52039 +#endif
52040 +       return 0;
52041 +}
52042 +
52043 +#ifdef CONFIG_GRKERNSEC_ROFS
52044 +static int __maybe_unused one = 1;
52045 +#endif
52046 +
52047 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
52048 +struct ctl_table grsecurity_table[] = {
52049 +#ifdef CONFIG_GRKERNSEC_SYSCTL
52050 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
52051 +#ifdef CONFIG_GRKERNSEC_IO
52052 +       {
52053 +               .procname       = "disable_priv_io",
52054 +               .data           = &grsec_disable_privio,
52055 +               .maxlen         = sizeof(int),
52056 +               .mode           = 0600,
52057 +               .proc_handler   = &proc_dointvec,
52058 +       },
52059 +#endif
52060 +#endif
52061 +#ifdef CONFIG_GRKERNSEC_LINK
52062 +       {
52063 +               .procname       = "linking_restrictions",
52064 +               .data           = &grsec_enable_link,
52065 +               .maxlen         = sizeof(int),
52066 +               .mode           = 0600,
52067 +               .proc_handler   = &proc_dointvec,
52068 +       },
52069 +#endif
52070 +#ifdef CONFIG_GRKERNSEC_BRUTE
52071 +       {
52072 +               .procname       = "deter_bruteforce",
52073 +               .data           = &grsec_enable_brute,
52074 +               .maxlen         = sizeof(int),
52075 +               .mode           = 0600,
52076 +               .proc_handler   = &proc_dointvec,
52077 +       },
52078 +#endif
52079 +#ifdef CONFIG_GRKERNSEC_FIFO
52080 +       {
52081 +               .procname       = "fifo_restrictions",
52082 +               .data           = &grsec_enable_fifo,
52083 +               .maxlen         = sizeof(int),
52084 +               .mode           = 0600,
52085 +               .proc_handler   = &proc_dointvec,
52086 +       },
52087 +#endif
52088 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52089 +       {
52090 +               .procname       = "ip_blackhole",
52091 +               .data           = &grsec_enable_blackhole,
52092 +               .maxlen         = sizeof(int),
52093 +               .mode           = 0600,
52094 +               .proc_handler   = &proc_dointvec,
52095 +       },
52096 +       {
52097 +               .procname       = "lastack_retries",
52098 +               .data           = &grsec_lastack_retries,
52099 +               .maxlen         = sizeof(int),
52100 +               .mode           = 0600,
52101 +               .proc_handler   = &proc_dointvec,
52102 +       },
52103 +#endif
52104 +#ifdef CONFIG_GRKERNSEC_EXECLOG
52105 +       {
52106 +               .procname       = "exec_logging",
52107 +               .data           = &grsec_enable_execlog,
52108 +               .maxlen         = sizeof(int),
52109 +               .mode           = 0600,
52110 +               .proc_handler   = &proc_dointvec,
52111 +       },
52112 +#endif
52113 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52114 +       {
52115 +               .procname       = "rwxmap_logging",
52116 +               .data           = &grsec_enable_log_rwxmaps,
52117 +               .maxlen         = sizeof(int),
52118 +               .mode           = 0600,
52119 +               .proc_handler   = &proc_dointvec,
52120 +       },
52121 +#endif
52122 +#ifdef CONFIG_GRKERNSEC_SIGNAL
52123 +       {
52124 +               .procname       = "signal_logging",
52125 +               .data           = &grsec_enable_signal,
52126 +               .maxlen         = sizeof(int),
52127 +               .mode           = 0600,
52128 +               .proc_handler   = &proc_dointvec,
52129 +       },
52130 +#endif
52131 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
52132 +       {
52133 +               .procname       = "forkfail_logging",
52134 +               .data           = &grsec_enable_forkfail,
52135 +               .maxlen         = sizeof(int),
52136 +               .mode           = 0600,
52137 +               .proc_handler   = &proc_dointvec,
52138 +       },
52139 +#endif
52140 +#ifdef CONFIG_GRKERNSEC_TIME
52141 +       {
52142 +               .procname       = "timechange_logging",
52143 +               .data           = &grsec_enable_time,
52144 +               .maxlen         = sizeof(int),
52145 +               .mode           = 0600,
52146 +               .proc_handler   = &proc_dointvec,
52147 +       },
52148 +#endif
52149 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52150 +       {
52151 +               .procname       = "chroot_deny_shmat",
52152 +               .data           = &grsec_enable_chroot_shmat,
52153 +               .maxlen         = sizeof(int),
52154 +               .mode           = 0600,
52155 +               .proc_handler   = &proc_dointvec,
52156 +       },
52157 +#endif
52158 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52159 +       {
52160 +               .procname       = "chroot_deny_unix",
52161 +               .data           = &grsec_enable_chroot_unix,
52162 +               .maxlen         = sizeof(int),
52163 +               .mode           = 0600,
52164 +               .proc_handler   = &proc_dointvec,
52165 +       },
52166 +#endif
52167 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52168 +       {
52169 +               .procname       = "chroot_deny_mount",
52170 +               .data           = &grsec_enable_chroot_mount,
52171 +               .maxlen         = sizeof(int),
52172 +               .mode           = 0600,
52173 +               .proc_handler   = &proc_dointvec,
52174 +       },
52175 +#endif
52176 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52177 +       {
52178 +               .procname       = "chroot_deny_fchdir",
52179 +               .data           = &grsec_enable_chroot_fchdir,
52180 +               .maxlen         = sizeof(int),
52181 +               .mode           = 0600,
52182 +               .proc_handler   = &proc_dointvec,
52183 +       },
52184 +#endif
52185 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52186 +       {
52187 +               .procname       = "chroot_deny_chroot",
52188 +               .data           = &grsec_enable_chroot_double,
52189 +               .maxlen         = sizeof(int),
52190 +               .mode           = 0600,
52191 +               .proc_handler   = &proc_dointvec,
52192 +       },
52193 +#endif
52194 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52195 +       {
52196 +               .procname       = "chroot_deny_pivot",
52197 +               .data           = &grsec_enable_chroot_pivot,
52198 +               .maxlen         = sizeof(int),
52199 +               .mode           = 0600,
52200 +               .proc_handler   = &proc_dointvec,
52201 +       },
52202 +#endif
52203 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52204 +       {
52205 +               .procname       = "chroot_enforce_chdir",
52206 +               .data           = &grsec_enable_chroot_chdir,
52207 +               .maxlen         = sizeof(int),
52208 +               .mode           = 0600,
52209 +               .proc_handler   = &proc_dointvec,
52210 +       },
52211 +#endif
52212 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52213 +       {
52214 +               .procname       = "chroot_deny_chmod",
52215 +               .data           = &grsec_enable_chroot_chmod,
52216 +               .maxlen         = sizeof(int),
52217 +               .mode           = 0600,
52218 +               .proc_handler   = &proc_dointvec,
52219 +       },
52220 +#endif
52221 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52222 +       {
52223 +               .procname       = "chroot_deny_mknod",
52224 +               .data           = &grsec_enable_chroot_mknod,
52225 +               .maxlen         = sizeof(int),
52226 +               .mode           = 0600,
52227 +               .proc_handler   = &proc_dointvec,
52228 +       },
52229 +#endif
52230 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52231 +       {
52232 +               .procname       = "chroot_restrict_nice",
52233 +               .data           = &grsec_enable_chroot_nice,
52234 +               .maxlen         = sizeof(int),
52235 +               .mode           = 0600,
52236 +               .proc_handler   = &proc_dointvec,
52237 +       },
52238 +#endif
52239 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52240 +       {
52241 +               .procname       = "chroot_execlog",
52242 +               .data           = &grsec_enable_chroot_execlog,
52243 +               .maxlen         = sizeof(int),
52244 +               .mode           = 0600,
52245 +               .proc_handler   = &proc_dointvec,
52246 +       },
52247 +#endif
52248 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52249 +       {
52250 +               .procname       = "chroot_caps",
52251 +               .data           = &grsec_enable_chroot_caps,
52252 +               .maxlen         = sizeof(int),
52253 +               .mode           = 0600,
52254 +               .proc_handler   = &proc_dointvec,
52255 +       },
52256 +#endif
52257 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52258 +       {
52259 +               .procname       = "chroot_deny_sysctl",
52260 +               .data           = &grsec_enable_chroot_sysctl,
52261 +               .maxlen         = sizeof(int),
52262 +               .mode           = 0600,
52263 +               .proc_handler   = &proc_dointvec,
52264 +       },
52265 +#endif
52266 +#ifdef CONFIG_GRKERNSEC_TPE
52267 +       {
52268 +               .procname       = "tpe",
52269 +               .data           = &grsec_enable_tpe,
52270 +               .maxlen         = sizeof(int),
52271 +               .mode           = 0600,
52272 +               .proc_handler   = &proc_dointvec,
52273 +       },
52274 +       {
52275 +               .procname       = "tpe_gid",
52276 +               .data           = &grsec_tpe_gid,
52277 +               .maxlen         = sizeof(int),
52278 +               .mode           = 0600,
52279 +               .proc_handler   = &proc_dointvec,
52280 +       },
52281 +#endif
52282 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52283 +       {
52284 +               .procname       = "tpe_invert",
52285 +               .data           = &grsec_enable_tpe_invert,
52286 +               .maxlen         = sizeof(int),
52287 +               .mode           = 0600,
52288 +               .proc_handler   = &proc_dointvec,
52289 +       },
52290 +#endif
52291 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52292 +       {
52293 +               .procname       = "tpe_restrict_all",
52294 +               .data           = &grsec_enable_tpe_all,
52295 +               .maxlen         = sizeof(int),
52296 +               .mode           = 0600,
52297 +               .proc_handler   = &proc_dointvec,
52298 +       },
52299 +#endif
52300 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52301 +       {
52302 +               .procname       = "socket_all",
52303 +               .data           = &grsec_enable_socket_all,
52304 +               .maxlen         = sizeof(int),
52305 +               .mode           = 0600,
52306 +               .proc_handler   = &proc_dointvec,
52307 +       },
52308 +       {
52309 +               .procname       = "socket_all_gid",
52310 +               .data           = &grsec_socket_all_gid,
52311 +               .maxlen         = sizeof(int),
52312 +               .mode           = 0600,
52313 +               .proc_handler   = &proc_dointvec,
52314 +       },
52315 +#endif
52316 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52317 +       {
52318 +               .procname       = "socket_client",
52319 +               .data           = &grsec_enable_socket_client,
52320 +               .maxlen         = sizeof(int),
52321 +               .mode           = 0600,
52322 +               .proc_handler   = &proc_dointvec,
52323 +       },
52324 +       {
52325 +               .procname       = "socket_client_gid",
52326 +               .data           = &grsec_socket_client_gid,
52327 +               .maxlen         = sizeof(int),
52328 +               .mode           = 0600,
52329 +               .proc_handler   = &proc_dointvec,
52330 +       },
52331 +#endif
52332 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52333 +       {
52334 +               .procname       = "socket_server",
52335 +               .data           = &grsec_enable_socket_server,
52336 +               .maxlen         = sizeof(int),
52337 +               .mode           = 0600,
52338 +               .proc_handler   = &proc_dointvec,
52339 +       },
52340 +       {
52341 +               .procname       = "socket_server_gid",
52342 +               .data           = &grsec_socket_server_gid,
52343 +               .maxlen         = sizeof(int),
52344 +               .mode           = 0600,
52345 +               .proc_handler   = &proc_dointvec,
52346 +       },
52347 +#endif
52348 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52349 +       {
52350 +               .procname       = "audit_group",
52351 +               .data           = &grsec_enable_group,
52352 +               .maxlen         = sizeof(int),
52353 +               .mode           = 0600,
52354 +               .proc_handler   = &proc_dointvec,
52355 +       },
52356 +       {
52357 +               .procname       = "audit_gid",
52358 +               .data           = &grsec_audit_gid,
52359 +               .maxlen         = sizeof(int),
52360 +               .mode           = 0600,
52361 +               .proc_handler   = &proc_dointvec,
52362 +       },
52363 +#endif
52364 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52365 +       {
52366 +               .procname       = "audit_chdir",
52367 +               .data           = &grsec_enable_chdir,
52368 +               .maxlen         = sizeof(int),
52369 +               .mode           = 0600,
52370 +               .proc_handler   = &proc_dointvec,
52371 +       },
52372 +#endif
52373 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52374 +       {
52375 +               .procname       = "audit_mount",
52376 +               .data           = &grsec_enable_mount,
52377 +               .maxlen         = sizeof(int),
52378 +               .mode           = 0600,
52379 +               .proc_handler   = &proc_dointvec,
52380 +       },
52381 +#endif
52382 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52383 +       {
52384 +               .procname       = "audit_textrel",
52385 +               .data           = &grsec_enable_audit_textrel,
52386 +               .maxlen         = sizeof(int),
52387 +               .mode           = 0600,
52388 +               .proc_handler   = &proc_dointvec,
52389 +       },
52390 +#endif
52391 +#ifdef CONFIG_GRKERNSEC_DMESG
52392 +       {
52393 +               .procname       = "dmesg",
52394 +               .data           = &grsec_enable_dmesg,
52395 +               .maxlen         = sizeof(int),
52396 +               .mode           = 0600,
52397 +               .proc_handler   = &proc_dointvec,
52398 +       },
52399 +#endif
52400 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52401 +       {
52402 +               .procname       = "chroot_findtask",
52403 +               .data           = &grsec_enable_chroot_findtask,
52404 +               .maxlen         = sizeof(int),
52405 +               .mode           = 0600,
52406 +               .proc_handler   = &proc_dointvec,
52407 +       },
52408 +#endif
52409 +#ifdef CONFIG_GRKERNSEC_RESLOG
52410 +       {
52411 +               .procname       = "resource_logging",
52412 +               .data           = &grsec_resource_logging,
52413 +               .maxlen         = sizeof(int),
52414 +               .mode           = 0600,
52415 +               .proc_handler   = &proc_dointvec,
52416 +       },
52417 +#endif
52418 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52419 +       {
52420 +               .procname       = "audit_ptrace",
52421 +               .data           = &grsec_enable_audit_ptrace,
52422 +               .maxlen         = sizeof(int),
52423 +               .mode           = 0600,
52424 +               .proc_handler   = &proc_dointvec,
52425 +       },
52426 +#endif
52427 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52428 +       {
52429 +               .procname       = "harden_ptrace",
52430 +               .data           = &grsec_enable_harden_ptrace,
52431 +               .maxlen         = sizeof(int),
52432 +               .mode           = 0600,
52433 +               .proc_handler   = &proc_dointvec,
52434 +       },
52435 +#endif
52436 +       {
52437 +               .procname       = "grsec_lock",
52438 +               .data           = &grsec_lock,
52439 +               .maxlen         = sizeof(int),
52440 +               .mode           = 0600,
52441 +               .proc_handler   = &proc_dointvec,
52442 +       },
52443 +#endif
52444 +#ifdef CONFIG_GRKERNSEC_ROFS
52445 +       {
52446 +               .procname       = "romount_protect",
52447 +               .data           = &grsec_enable_rofs,
52448 +               .maxlen         = sizeof(int),
52449 +               .mode           = 0600,
52450 +               .proc_handler   = &proc_dointvec_minmax,
52451 +               .extra1         = &one,
52452 +               .extra2         = &one,
52453 +       },
52454 +#endif
52455 +       { }
52456 +};
52457 +#endif
52458 diff -urNp linux-3.0.4/grsecurity/grsec_time.c linux-3.0.4/grsecurity/grsec_time.c
52459 --- linux-3.0.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
52460 +++ linux-3.0.4/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
52461 @@ -0,0 +1,16 @@
52462 +#include <linux/kernel.h>
52463 +#include <linux/sched.h>
52464 +#include <linux/grinternal.h>
52465 +#include <linux/module.h>
52466 +
52467 +void
52468 +gr_log_timechange(void)
52469 +{
52470 +#ifdef CONFIG_GRKERNSEC_TIME
52471 +       if (grsec_enable_time)
52472 +               gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
52473 +#endif
52474 +       return;
52475 +}
52476 +
52477 +EXPORT_SYMBOL(gr_log_timechange);
52478 diff -urNp linux-3.0.4/grsecurity/grsec_tpe.c linux-3.0.4/grsecurity/grsec_tpe.c
52479 --- linux-3.0.4/grsecurity/grsec_tpe.c  1969-12-31 19:00:00.000000000 -0500
52480 +++ linux-3.0.4/grsecurity/grsec_tpe.c  2011-08-23 21:48:14.000000000 -0400
52481 @@ -0,0 +1,39 @@
52482 +#include <linux/kernel.h>
52483 +#include <linux/sched.h>
52484 +#include <linux/file.h>
52485 +#include <linux/fs.h>
52486 +#include <linux/grinternal.h>
52487 +
52488 +extern int gr_acl_tpe_check(void);
52489 +
52490 +int
52491 +gr_tpe_allow(const struct file *file)
52492 +{
52493 +#ifdef CONFIG_GRKERNSEC
52494 +       struct inode *inode = file->f_path.dentry->d_parent->d_inode;
52495 +       const struct cred *cred = current_cred();
52496 +
52497 +       if (cred->uid && ((grsec_enable_tpe &&
52498 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52499 +           ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
52500 +            (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
52501 +#else
52502 +           in_group_p(grsec_tpe_gid)
52503 +#endif
52504 +           ) || gr_acl_tpe_check()) &&
52505 +           (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
52506 +                                               (inode->i_mode & S_IWOTH))))) {
52507 +               gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52508 +               return 0;
52509 +       }
52510 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52511 +       if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
52512 +           ((inode->i_uid && (inode->i_uid != cred->uid)) ||
52513 +            (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
52514 +               gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52515 +               return 0;
52516 +       }
52517 +#endif
52518 +#endif
52519 +       return 1;
52520 +}
52521 diff -urNp linux-3.0.4/grsecurity/grsum.c linux-3.0.4/grsecurity/grsum.c
52522 --- linux-3.0.4/grsecurity/grsum.c      1969-12-31 19:00:00.000000000 -0500
52523 +++ linux-3.0.4/grsecurity/grsum.c      2011-08-23 21:48:14.000000000 -0400
52524 @@ -0,0 +1,61 @@
52525 +#include <linux/err.h>
52526 +#include <linux/kernel.h>
52527 +#include <linux/sched.h>
52528 +#include <linux/mm.h>
52529 +#include <linux/scatterlist.h>
52530 +#include <linux/crypto.h>
52531 +#include <linux/gracl.h>
52532 +
52533 +
52534 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
52535 +#error "crypto and sha256 must be built into the kernel"
52536 +#endif
52537 +
52538 +int
52539 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
52540 +{
52541 +       char *p;
52542 +       struct crypto_hash *tfm;
52543 +       struct hash_desc desc;
52544 +       struct scatterlist sg;
52545 +       unsigned char temp_sum[GR_SHA_LEN];
52546 +       volatile int retval = 0;
52547 +       volatile int dummy = 0;
52548 +       unsigned int i;
52549 +
52550 +       sg_init_table(&sg, 1);
52551 +
52552 +       tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
52553 +       if (IS_ERR(tfm)) {
52554 +               /* should never happen, since sha256 should be built in */
52555 +               return 1;
52556 +       }
52557 +
52558 +       desc.tfm = tfm;
52559 +       desc.flags = 0;
52560 +
52561 +       crypto_hash_init(&desc);
52562 +
52563 +       p = salt;
52564 +       sg_set_buf(&sg, p, GR_SALT_LEN);
52565 +       crypto_hash_update(&desc, &sg, sg.length);
52566 +
52567 +       p = entry->pw;
52568 +       sg_set_buf(&sg, p, strlen(p));
52569 +       
52570 +       crypto_hash_update(&desc, &sg, sg.length);
52571 +
52572 +       crypto_hash_final(&desc, temp_sum);
52573 +
52574 +       memset(entry->pw, 0, GR_PW_LEN);
52575 +
52576 +       for (i = 0; i < GR_SHA_LEN; i++)
52577 +               if (sum[i] != temp_sum[i])
52578 +                       retval = 1;
52579 +               else
52580 +                       dummy = 1;      // waste a cycle
52581 +
52582 +       crypto_free_hash(tfm);
52583 +
52584 +       return retval;
52585 +}
52586 diff -urNp linux-3.0.4/grsecurity/Kconfig linux-3.0.4/grsecurity/Kconfig
52587 --- linux-3.0.4/grsecurity/Kconfig      1969-12-31 19:00:00.000000000 -0500
52588 +++ linux-3.0.4/grsecurity/Kconfig      2011-08-25 17:25:34.000000000 -0400
52589 @@ -0,0 +1,1038 @@
52590 +#
52591 +# grsecurity configuration
52592 +#
52593 +
52594 +menu "Grsecurity"
52595 +
52596 +config GRKERNSEC
52597 +       bool "Grsecurity"
52598 +       select CRYPTO
52599 +       select CRYPTO_SHA256
52600 +       help
52601 +         If you say Y here, you will be able to configure many features
52602 +         that will enhance the security of your system.  It is highly
52603 +         recommended that you say Y here and read through the help
52604 +         for each option so that you fully understand the features and
52605 +         can evaluate their usefulness for your machine.
52606 +
52607 +choice
52608 +       prompt "Security Level"
52609 +       depends on GRKERNSEC
52610 +       default GRKERNSEC_CUSTOM
52611 +
52612 +config GRKERNSEC_LOW
52613 +       bool "Low"
52614 +       select GRKERNSEC_LINK
52615 +       select GRKERNSEC_FIFO
52616 +       select GRKERNSEC_RANDNET
52617 +       select GRKERNSEC_DMESG
52618 +       select GRKERNSEC_CHROOT
52619 +       select GRKERNSEC_CHROOT_CHDIR
52620 +
52621 +       help
52622 +         If you choose this option, several of the grsecurity options will
52623 +         be enabled that will give you greater protection against a number
52624 +         of attacks, while ensuring that none of your software will have any
52625 +         conflicts with the additional security measures.  If you run a lot
52626 +         of unusual software, or you are having problems with the higher
52627 +         security levels, you should say Y here.  With this option, the
52628 +         following features are enabled:
52629 +
52630 +         - Linking restrictions
52631 +         - FIFO restrictions
52632 +         - Restricted dmesg
52633 +         - Enforced chdir("/") on chroot
52634 +         - Runtime module disabling
52635 +
52636 +config GRKERNSEC_MEDIUM
52637 +       bool "Medium"
52638 +       select PAX
52639 +       select PAX_EI_PAX
52640 +       select PAX_PT_PAX_FLAGS
52641 +       select PAX_HAVE_ACL_FLAGS
52642 +       select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52643 +       select GRKERNSEC_CHROOT
52644 +       select GRKERNSEC_CHROOT_SYSCTL
52645 +       select GRKERNSEC_LINK
52646 +       select GRKERNSEC_FIFO
52647 +       select GRKERNSEC_DMESG
52648 +       select GRKERNSEC_RANDNET
52649 +       select GRKERNSEC_FORKFAIL
52650 +       select GRKERNSEC_TIME
52651 +       select GRKERNSEC_SIGNAL
52652 +       select GRKERNSEC_CHROOT
52653 +       select GRKERNSEC_CHROOT_UNIX
52654 +       select GRKERNSEC_CHROOT_MOUNT
52655 +       select GRKERNSEC_CHROOT_PIVOT
52656 +       select GRKERNSEC_CHROOT_DOUBLE
52657 +       select GRKERNSEC_CHROOT_CHDIR
52658 +       select GRKERNSEC_CHROOT_MKNOD
52659 +       select GRKERNSEC_PROC
52660 +       select GRKERNSEC_PROC_USERGROUP
52661 +       select PAX_RANDUSTACK
52662 +       select PAX_ASLR
52663 +       select PAX_RANDMMAP
52664 +       select PAX_REFCOUNT if (X86 || SPARC64)
52665 +       select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
52666 +
52667 +       help
52668 +         If you say Y here, several features in addition to those included
52669 +         in the low additional security level will be enabled.  These
52670 +         features provide even more security to your system, though in rare
52671 +         cases they may be incompatible with very old or poorly written
52672 +         software.  If you enable this option, make sure that your auth
52673 +         service (identd) is running as gid 1001.  With this option, 
52674 +         the following features (in addition to those provided in the 
52675 +         low additional security level) will be enabled:
52676 +
52677 +         - Failed fork logging
52678 +         - Time change logging
52679 +         - Signal logging
52680 +         - Deny mounts in chroot
52681 +         - Deny double chrooting
52682 +         - Deny sysctl writes in chroot
52683 +         - Deny mknod in chroot
52684 +         - Deny access to abstract AF_UNIX sockets out of chroot
52685 +         - Deny pivot_root in chroot
52686 +         - Denied writes of /dev/kmem, /dev/mem, and /dev/port
52687 +         - /proc restrictions with special GID set to 10 (usually wheel)
52688 +         - Address Space Layout Randomization (ASLR)
52689 +         - Prevent exploitation of most refcount overflows
52690 +         - Bounds checking of copying between the kernel and userland
52691 +
52692 +config GRKERNSEC_HIGH
52693 +       bool "High"
52694 +       select GRKERNSEC_LINK
52695 +       select GRKERNSEC_FIFO
52696 +       select GRKERNSEC_DMESG
52697 +       select GRKERNSEC_FORKFAIL
52698 +       select GRKERNSEC_TIME
52699 +       select GRKERNSEC_SIGNAL
52700 +       select GRKERNSEC_CHROOT
52701 +       select GRKERNSEC_CHROOT_SHMAT
52702 +       select GRKERNSEC_CHROOT_UNIX
52703 +       select GRKERNSEC_CHROOT_MOUNT
52704 +       select GRKERNSEC_CHROOT_FCHDIR
52705 +       select GRKERNSEC_CHROOT_PIVOT
52706 +       select GRKERNSEC_CHROOT_DOUBLE
52707 +       select GRKERNSEC_CHROOT_CHDIR
52708 +       select GRKERNSEC_CHROOT_MKNOD
52709 +       select GRKERNSEC_CHROOT_CAPS
52710 +       select GRKERNSEC_CHROOT_SYSCTL
52711 +       select GRKERNSEC_CHROOT_FINDTASK
52712 +       select GRKERNSEC_SYSFS_RESTRICT
52713 +       select GRKERNSEC_PROC
52714 +       select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52715 +       select GRKERNSEC_HIDESYM
52716 +       select GRKERNSEC_BRUTE
52717 +       select GRKERNSEC_PROC_USERGROUP
52718 +       select GRKERNSEC_KMEM
52719 +       select GRKERNSEC_RESLOG
52720 +       select GRKERNSEC_RANDNET
52721 +       select GRKERNSEC_PROC_ADD
52722 +       select GRKERNSEC_CHROOT_CHMOD
52723 +       select GRKERNSEC_CHROOT_NICE
52724 +       select GRKERNSEC_AUDIT_MOUNT
52725 +       select GRKERNSEC_MODHARDEN if (MODULES)
52726 +       select GRKERNSEC_HARDEN_PTRACE
52727 +       select GRKERNSEC_VM86 if (X86_32)
52728 +       select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
52729 +       select PAX
52730 +       select PAX_RANDUSTACK
52731 +       select PAX_ASLR
52732 +       select PAX_RANDMMAP
52733 +       select PAX_NOEXEC
52734 +       select PAX_MPROTECT
52735 +       select PAX_EI_PAX
52736 +       select PAX_PT_PAX_FLAGS
52737 +       select PAX_HAVE_ACL_FLAGS
52738 +       select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
52739 +       select PAX_MEMORY_UDEREF if (X86 && !XEN)
52740 +       select PAX_RANDKSTACK if (X86_TSC && X86)
52741 +       select PAX_SEGMEXEC if (X86_32)
52742 +       select PAX_PAGEEXEC
52743 +       select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
52744 +       select PAX_EMUTRAMP if (PARISC)
52745 +       select PAX_EMUSIGRT if (PARISC)
52746 +       select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
52747 +       select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
52748 +       select PAX_REFCOUNT if (X86 || SPARC64)
52749 +       select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
52750 +       help
52751 +         If you say Y here, many of the features of grsecurity will be
52752 +         enabled, which will protect you against many kinds of attacks
52753 +         against your system.  The heightened security comes at a cost
52754 +         of an increased chance of incompatibilities with rare software
52755 +         on your machine.  Since this security level enables PaX, you should
52756 +         view <http://pax.grsecurity.net> and read about the PaX
52757 +         project.  While you are there, download chpax and run it on
52758 +         binaries that cause problems with PaX.  Also remember that
52759 +         since the /proc restrictions are enabled, you must run your
52760 +         identd as gid 1001.  This security level enables the following 
52761 +         features in addition to those listed in the low and medium 
52762 +         security levels:
52763 +
52764 +         - Additional /proc restrictions
52765 +         - Chmod restrictions in chroot
52766 +         - No signals, ptrace, or viewing of processes outside of chroot
52767 +         - Capability restrictions in chroot
52768 +         - Deny fchdir out of chroot
52769 +         - Priority restrictions in chroot
52770 +         - Segmentation-based implementation of PaX
52771 +         - Mprotect restrictions
52772 +         - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
52773 +         - Kernel stack randomization
52774 +         - Mount/unmount/remount logging
52775 +         - Kernel symbol hiding
52776 +         - Prevention of memory exhaustion-based exploits
52777 +         - Hardening of module auto-loading
52778 +         - Ptrace restrictions
52779 +         - Restricted vm86 mode
52780 +         - Restricted sysfs/debugfs
52781 +         - Active kernel exploit response
52782 +
52783 +config GRKERNSEC_CUSTOM
52784 +       bool "Custom"
52785 +       help
52786 +         If you say Y here, you will be able to configure every grsecurity
52787 +         option, which allows you to enable many more features that aren't
52788 +         covered in the basic security levels.  These additional features
52789 +         include TPE, socket restrictions, and the sysctl system for
52790 +         grsecurity.  It is advised that you read through the help for
52791 +         each option to determine its usefulness in your situation.
52792 +
52793 +endchoice
52794 +
52795 +menu "Address Space Protection"
52796 +depends on GRKERNSEC
52797 +
52798 +config GRKERNSEC_KMEM
52799 +       bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
52800 +       select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52801 +       help
52802 +         If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52803 +         be written to via mmap or otherwise to modify the running kernel.
52804 +         /dev/port will also not be allowed to be opened. If you have module
52805 +         support disabled, enabling this will close up four ways that are
52806 +         currently used to insert malicious code into the running kernel.
52807 +         Even with all these features enabled, we still highly recommend that
52808 +         you use the RBAC system, as it is still possible for an attacker to
52809 +         modify the running kernel through privileged I/O granted by ioperm/iopl.
52810 +         If you are not using XFree86, you may be able to stop this additional
52811 +         case by enabling the 'Disable privileged I/O' option. Though nothing
52812 +         legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52813 +         but only to video memory, which is the only writing we allow in this
52814 +         case.  If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
52815 +         mapping will not be allowed to be mprotected with PROT_WRITE later.
52816 +         It is highly recommended that you say Y here if you meet all the
52817 +         conditions above.
52818 +
52819 +config GRKERNSEC_VM86
52820 +       bool "Restrict VM86 mode"
52821 +       depends on X86_32
52822 +
52823 +       help
52824 +         If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52825 +         make use of a special execution mode on 32bit x86 processors called
52826 +         Virtual 8086 (VM86) mode.  XFree86 may need vm86 mode for certain
52827 +         video cards and will still work with this option enabled.  The purpose
52828 +         of the option is to prevent exploitation of emulation errors in
52829 +         virtualization of vm86 mode like the one discovered in VMWare in 2009.
52830 +         Nearly all users should be able to enable this option.
52831 +
52832 +config GRKERNSEC_IO
52833 +       bool "Disable privileged I/O"
52834 +       depends on X86
52835 +       select RTC_CLASS
52836 +       select RTC_INTF_DEV
52837 +       select RTC_DRV_CMOS
52838 +
52839 +       help
52840 +         If you say Y here, all ioperm and iopl calls will return an error.
52841 +         Ioperm and iopl can be used to modify the running kernel.
52842 +         Unfortunately, some programs need this access to operate properly,
52843 +         the most notable of which are XFree86 and hwclock.  hwclock can be
52844 +         remedied by having RTC support in the kernel, so real-time 
52845 +         clock support is enabled if this option is enabled, to ensure 
52846 +         that hwclock operates correctly.  XFree86 still will not 
52847 +         operate correctly with this option enabled, so DO NOT CHOOSE Y 
52848 +         IF YOU USE XFree86.  If you use XFree86 and you still want to 
52849 +         protect your kernel against modification, use the RBAC system.
52850 +
52851 +config GRKERNSEC_PROC_MEMMAP
52852 +       bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
52853 +       default y if (PAX_NOEXEC || PAX_ASLR)
52854 +       depends on PAX_NOEXEC || PAX_ASLR
52855 +       help
52856 +         If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52857 +         give no information about the addresses of the task's mappings if
52858 +         PaX features that rely on random addresses are enabled on the task.
52859 +         If you use PaX it is greatly recommended that you say Y here as it
52860 +         closes up a hole that makes the full ASLR useless for suid
52861 +         binaries.
52862 +
52863 +config GRKERNSEC_BRUTE
52864 +       bool "Deter exploit bruteforcing"
52865 +       help
52866 +         If you say Y here, attempts to bruteforce exploits against forking
52867 +         daemons such as apache or sshd, as well as against suid/sgid binaries
52868 +         will be deterred.  When a child of a forking daemon is killed by PaX
52869 +         or crashes due to an illegal instruction or other suspicious signal,
52870 +         the parent process will be delayed 30 seconds upon every subsequent
52871 +         fork until the administrator is able to assess the situation and
52872 +         restart the daemon.
52873 +         In the suid/sgid case, the attempt is logged, the user has all their
52874 +         processes terminated, and they are prevented from executing any further
52875 +         processes for 15 minutes.
52876 +         It is recommended that you also enable signal logging in the auditing
52877 +         section so that logs are generated when a process triggers a suspicious
52878 +         signal.
52879 +         If the sysctl option is enabled, a sysctl option with name
52880 +         "deter_bruteforce" is created.
52881 +
52882 +
52883 +config GRKERNSEC_MODHARDEN
52884 +       bool "Harden module auto-loading"
52885 +       depends on MODULES
52886 +       help
52887 +         If you say Y here, module auto-loading in response to use of some
52888 +         feature implemented by an unloaded module will be restricted to
52889 +         root users.  Enabling this option helps defend against attacks 
52890 +         by unprivileged users who abuse the auto-loading behavior to 
52891 +         cause a vulnerable module to load that is then exploited.
52892 +
52893 +         If this option prevents a legitimate use of auto-loading for a 
52894 +         non-root user, the administrator can execute modprobe manually 
52895 +         with the exact name of the module mentioned in the alert log.
52896 +         Alternatively, the administrator can add the module to the list
52897 +         of modules loaded at boot by modifying init scripts.
52898 +
52899 +         Modification of init scripts will most likely be needed on 
52900 +         Ubuntu servers with encrypted home directory support enabled,
52901 +         as the first non-root user logging in will cause the ecb(aes),
52902 +         ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52903 +
52904 +config GRKERNSEC_HIDESYM
52905 +       bool "Hide kernel symbols"
52906 +       help
52907 +         If you say Y here, getting information on loaded modules, and
52908 +         displaying all kernel symbols through a syscall will be restricted
52909 +         to users with CAP_SYS_MODULE.  For software compatibility reasons,
52910 +         /proc/kallsyms will be restricted to the root user.  The RBAC
52911 +         system can hide that entry even from root.
52912 +
52913 +         This option also prevents leaking of kernel addresses through
52914 +         several /proc entries.
52915 +
52916 +         Note that this option is only effective provided the following
52917 +         conditions are met:
52918 +         1) The kernel using grsecurity is not precompiled by some distribution
52919 +         2) You have also enabled GRKERNSEC_DMESG
52920 +         3) You are using the RBAC system and hiding other files such as your
52921 +            kernel image and System.map.  Alternatively, enabling this option
52922 +            causes the permissions on /boot, /lib/modules, and the kernel
52923 +            source directory to change at compile time to prevent 
52924 +            reading by non-root users.
52925 +         If the above conditions are met, this option will aid in providing a
52926 +         useful protection against local kernel exploitation of overflows
52927 +         and arbitrary read/write vulnerabilities.
52928 +
52929 +config GRKERNSEC_KERN_LOCKOUT
52930 +       bool "Active kernel exploit response"
52931 +       depends on X86 || ARM || PPC || SPARC
52932 +       help
52933 +         If you say Y here, when a PaX alert is triggered due to suspicious
52934 +         activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52935 +         or an OOPs occurs due to bad memory accesses, instead of just
52936 +         terminating the offending process (and potentially allowing
52937 +         a subsequent exploit from the same user), we will take one of two
52938 +         actions:
52939 +          If the user was root, we will panic the system
52940 +          If the user was non-root, we will log the attempt, terminate
52941 +          all processes owned by the user, then prevent them from creating
52942 +          any new processes until the system is restarted
52943 +         This deters repeated kernel exploitation/bruteforcing attempts
52944 +         and is useful for later forensics.
52945 +
52946 +endmenu
52947 +menu "Role Based Access Control Options"
52948 +depends on GRKERNSEC
52949 +
52950 +config GRKERNSEC_RBAC_DEBUG
52951 +       bool
52952 +
52953 +config GRKERNSEC_NO_RBAC
52954 +       bool "Disable RBAC system"
52955 +       help
52956 +         If you say Y here, the /dev/grsec device will be removed from the kernel,
52957 +         preventing the RBAC system from being enabled.  You should only say Y
52958 +         here if you have no intention of using the RBAC system, so as to prevent
52959 +         an attacker with root access from misusing the RBAC system to hide files
52960 +         and processes when loadable module support and /dev/[k]mem have been
52961 +         locked down.
52962 +
52963 +config GRKERNSEC_ACL_HIDEKERN
52964 +       bool "Hide kernel processes"
52965 +       help
52966 +         If you say Y here, all kernel threads will be hidden to all
52967 +         processes but those whose subject has the "view hidden processes"
52968 +         flag.
52969 +
52970 +config GRKERNSEC_ACL_MAXTRIES
52971 +       int "Maximum tries before password lockout"
52972 +       default 3
52973 +       help
52974 +         This option enforces the maximum number of times a user can attempt
52975 +         to authorize themselves with the grsecurity RBAC system before being
52976 +         denied the ability to attempt authorization again for a specified time.
52977 +         The lower the number, the harder it will be to brute-force a password.
52978 +
52979 +config GRKERNSEC_ACL_TIMEOUT
52980 +       int "Time to wait after max password tries, in seconds"
52981 +       default 30
52982 +       help
52983 +         This option specifies the time the user must wait after attempting to
52984 +         authorize to the RBAC system with the maximum number of invalid
52985 +         passwords.  The higher the number, the harder it will be to brute-force
52986 +         a password.
52987 +
52988 +endmenu
52989 +menu "Filesystem Protections"
52990 +depends on GRKERNSEC
52991 +
52992 +config GRKERNSEC_PROC
52993 +       bool "Proc restrictions"
52994 +       help
52995 +         If you say Y here, the permissions of the /proc filesystem
52996 +         will be altered to enhance system security and privacy.  You MUST
52997 +         choose either a user only restriction or a user and group restriction.
52998 +         Depending upon the option you choose, you can either restrict users to
52999 +         see only the processes they themselves run, or choose a group that can
53000 +         view all processes and files normally restricted to root if you choose
53001 +         the "restrict to user only" option.  NOTE: If you're running identd as
53002 +         a non-root user, you will have to run it as the group you specify here.
53003 +
53004 +config GRKERNSEC_PROC_USER
53005 +       bool "Restrict /proc to user only"
53006 +       depends on GRKERNSEC_PROC
53007 +       help
53008 +         If you say Y here, non-root users will only be able to view their own
53009 +         processes, and will be restricted from viewing network-related information
53010 +         and kernel symbol and module information.
53011 +
53012 +config GRKERNSEC_PROC_USERGROUP
53013 +       bool "Allow special group"
53014 +       depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
53015 +       help
53016 +         If you say Y here, you will be able to select a group that will be
53017 +          able to view all processes and network-related information.  If you've
53018 +          enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
53019 +          remain hidden.  This option is useful if you want to run identd as
53020 +          a non-root user.
53021 +
53022 +config GRKERNSEC_PROC_GID
53023 +       int "GID for special group"
53024 +       depends on GRKERNSEC_PROC_USERGROUP
53025 +       default 1001
53026 +
53027 +config GRKERNSEC_PROC_ADD
53028 +       bool "Additional restrictions"
53029 +       depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
53030 +       help
53031 +         If you say Y here, additional restrictions will be placed on
53032 +         /proc that keep normal users from viewing device information and 
53033 +         slabinfo information that could be useful for exploits.
53034 +
53035 +config GRKERNSEC_LINK
53036 +       bool "Linking restrictions"
53037 +       help
53038 +         If you say Y here, /tmp race exploits will be prevented, since users
53039 +         will no longer be able to follow symlinks owned by other users in
53040 +         world-writable +t directories (e.g. /tmp), unless the owner of the
53041 +         symlink is the owner of the directory.  Users will also not be
53042 +         able to hardlink to files they do not own.  If the sysctl option is
53043 +         enabled, a sysctl option with name "linking_restrictions" is created.
53044 +
53045 +config GRKERNSEC_FIFO
53046 +       bool "FIFO restrictions"
53047 +       help
53048 +         If you say Y here, users will not be able to write to FIFOs they don't
53049 +         own in world-writable +t directories (e.g. /tmp), unless the owner of
53050 +         the FIFO is the same as the owner of the directory it's held in.  If the sysctl
53051 +         option is enabled, a sysctl option with name "fifo_restrictions" is
53052 +         created.
53053 +
53054 +config GRKERNSEC_SYSFS_RESTRICT
53055 +       bool "Sysfs/debugfs restriction"
53056 +       depends on SYSFS
53057 +       help
53058 +         If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
53059 +         any filesystem normally mounted under it (e.g. debugfs) will only
53060 +         be accessible by root.  These filesystems generally provide access
53061 +         to hardware and debug information that isn't appropriate for unprivileged
53062 +         users of the system.  Sysfs and debugfs have also become a large source
53063 +         of new vulnerabilities, ranging from infoleaks to local compromise.
53064 +         There has been very little oversight with an eye toward security involved
53065 +         in adding new exporters of information to these filesystems, so their
53066 +         use is discouraged.
53067 +         This option is equivalent to a chmod 0700 of the mount paths.
53068 +
53069 +config GRKERNSEC_ROFS
53070 +       bool "Runtime read-only mount protection"
53071 +       help
53072 +         If you say Y here, a sysctl option with name "romount_protect" will
53073 +         be created.  By setting this option to 1 at runtime, filesystems
53074 +         will be protected in the following ways:
53075 +         * No new writable mounts will be allowed
53076 +         * Existing read-only mounts won't be able to be remounted read/write
53077 +         * Write operations will be denied on all block devices
53078 +         This option acts independently of grsec_lock: once it is set to 1,
53079 +         it cannot be turned off.  Therefore, please be mindful of the resulting
53080 +         behavior if this option is enabled in an init script on a read-only
53081 +         filesystem.  This feature is mainly intended for secure embedded systems.
53082 +
53083 +config GRKERNSEC_CHROOT
53084 +       bool "Chroot jail restrictions"
53085 +       help
53086 +         If you say Y here, you will be able to choose several options that will
53087 +         make breaking out of a chrooted jail much more difficult.  If you
53088 +         encounter no software incompatibilities with the following options, it
53089 +         is recommended that you enable each one.
53090 +
53091 +config GRKERNSEC_CHROOT_MOUNT
53092 +       bool "Deny mounts"
53093 +       depends on GRKERNSEC_CHROOT
53094 +       help
53095 +         If you say Y here, processes inside a chroot will not be able to
53096 +         mount or remount filesystems.  If the sysctl option is enabled, a
53097 +         sysctl option with name "chroot_deny_mount" is created.
53098 +
53099 +config GRKERNSEC_CHROOT_DOUBLE
53100 +       bool "Deny double-chroots"
53101 +       depends on GRKERNSEC_CHROOT
53102 +       help
53103 +         If you say Y here, processes inside a chroot will not be able to chroot
53104 +         again outside the chroot.  This is a widely used method of breaking
53105 +         out of a chroot jail and should not be allowed.  If the sysctl 
53106 +         option is enabled, a sysctl option with name 
53107 +         "chroot_deny_chroot" is created.
53108 +
53109 +config GRKERNSEC_CHROOT_PIVOT
53110 +       bool "Deny pivot_root in chroot"
53111 +       depends on GRKERNSEC_CHROOT
53112 +       help
53113 +         If you say Y here, processes inside a chroot will not be able to use
53114 +         a function called pivot_root() that was introduced in Linux 2.3.41.  It
53115 +         works similarly to chroot in that it changes the root filesystem.  This
53116 +         function could be misused in a chrooted process to attempt to break out
53117 +         of the chroot, and therefore should not be allowed.  If the sysctl
53118 +         option is enabled, a sysctl option with name "chroot_deny_pivot" is
53119 +         created.
53120 +
53121 +config GRKERNSEC_CHROOT_CHDIR
53122 +       bool "Enforce chdir(\"/\") on all chroots"
53123 +       depends on GRKERNSEC_CHROOT
53124 +       help
53125 +         If you say Y here, the current working directory of all newly-chrooted
53126 +         applications will be set to the root directory of the chroot.
53127 +         The man page on chroot(2) states:
53128 +         Note that this call does not change  the  current  working
53129 +         directory,  so  that `.' can be outside the tree rooted at
53130 +         `/'.  In particular, the  super-user  can  escape  from  a
53131 +         `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53132 +
53133 +         It is recommended that you say Y here, since it's not known to break
53134 +         any software.  If the sysctl option is enabled, a sysctl option with
53135 +         name "chroot_enforce_chdir" is created.
53136 +
53137 +config GRKERNSEC_CHROOT_CHMOD
53138 +       bool "Deny (f)chmod +s"
53139 +       depends on GRKERNSEC_CHROOT
53140 +       help
53141 +         If you say Y here, processes inside a chroot will not be able to chmod
53142 +         or fchmod files to make them have suid or sgid bits.  This protects
53143 +         against another published method of breaking a chroot.  If the sysctl
53144 +         option is enabled, a sysctl option with name "chroot_deny_chmod" is
53145 +         created.
53146 +
53147 +config GRKERNSEC_CHROOT_FCHDIR
53148 +       bool "Deny fchdir out of chroot"
53149 +       depends on GRKERNSEC_CHROOT
53150 +       help
53151 +         If you say Y here, a well-known method of breaking chroots by fchdir'ing
53152 +         to a file descriptor of the chrooting process that points to a directory
53153 +         outside the filesystem will be stopped.  If the sysctl option
53154 +         is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53155 +
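         For context, the escape this option closes can be sketched in a few lines
         of userspace C.  This is a hypothetical illustration only (every call shown
         is standard POSIX, and the sequence requires root/CAP_SYS_CHROOT):

             /* Hypothetical sketch of the classic fchdir() escape that
              * GRKERNSEC_CHROOT_FCHDIR defeats: keep a descriptor to a directory
              * outside the jail, chroot, then walk back out through it. */
             #include <fcntl.h>
             #include <sys/stat.h>
             #include <unistd.h>

             static int fchdir_escape_demo(void)
             {
                     int fd = open("/", O_RDONLY);   /* directory outside the new root */

                     if (fd < 0)
                             return -1;
                     mkdir("jail", 0700);
                     if (chroot("jail") < 0)         /* now "inside" the jail */
                             return -1;
                     fchdir(fd);                     /* cwd is outside the jail again */
                     chroot(".");                    /* re-root at the real / */
                     return chdir("/");
             }

         With chroot_deny_fchdir enabled, the fchdir() step above is refused for a
         chrooted process, so the escape never gets past that point.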
53156 +config GRKERNSEC_CHROOT_MKNOD
53157 +       bool "Deny mknod"
53158 +       depends on GRKERNSEC_CHROOT
53159 +       help
53160 +         If you say Y here, processes inside a chroot will not be allowed to
53161 +         mknod.  The problem with using mknod inside a chroot is that it
53162 +         would allow an attacker to create a device entry that is the same
53163 +         as one on the physical root of your system, which could be anything
53164 +         from the console device to a device for your hard drive (which
53165 +         they could then use to wipe the drive or steal data).  It is recommended
53166 +         that you say Y here, unless you run into software incompatibilities.
53167 +         If the sysctl option is enabled, a sysctl option with name
53168 +         "chroot_deny_mknod" is created.
53169 +
53170 +config GRKERNSEC_CHROOT_SHMAT
53171 +       bool "Deny shmat() out of chroot"
53172 +       depends on GRKERNSEC_CHROOT
53173 +       help
53174 +         If you say Y here, processes inside a chroot will not be able to attach
53175 +         to shared memory segments that were created outside of the chroot jail.
53176 +         It is recommended that you say Y here.  If the sysctl option is enabled,
53177 +         a sysctl option with name "chroot_deny_shmat" is created.
53178 +
53179 +config GRKERNSEC_CHROOT_UNIX
53180 +       bool "Deny access to abstract AF_UNIX sockets out of chroot"
53181 +       depends on GRKERNSEC_CHROOT
53182 +       help
53183 +         If you say Y here, processes inside a chroot will not be able to
53184 +         connect to abstract (meaning not belonging to a filesystem) Unix
53185 +         domain sockets that were bound outside of a chroot.  It is recommended
53186 +         that you say Y here.  If the sysctl option is enabled, a sysctl option
53187 +         with name "chroot_deny_unix" is created.
53188 +
53189 +config GRKERNSEC_CHROOT_FINDTASK
53190 +       bool "Protect outside processes"
53191 +       depends on GRKERNSEC_CHROOT
53192 +       help
53193 +         If you say Y here, processes inside a chroot will not be able to
53194 +         kill, send signals with fcntl, ptrace, capget, getpgid, setpgid, 
53195 +         getsid, or view any process outside of the chroot.  If the sysctl
53196 +         option is enabled, a sysctl option with name "chroot_findtask" is
53197 +         created.
53198 +
53199 +config GRKERNSEC_CHROOT_NICE
53200 +       bool "Restrict priority changes"
53201 +       depends on GRKERNSEC_CHROOT
53202 +       help
53203 +         If you say Y here, processes inside a chroot will not be able to raise
53204 +         the priority of processes in the chroot, or alter the priority of
53205 +         processes outside the chroot.  This provides more security than simply
53206 +         removing CAP_SYS_NICE from the process' capability set.  If the
53207 +         sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53208 +         is created.
53209 +
53210 +config GRKERNSEC_CHROOT_SYSCTL
53211 +       bool "Deny sysctl writes"
53212 +       depends on GRKERNSEC_CHROOT
53213 +       help
53214 +         If you say Y here, an attacker in a chroot will not be able to
53215 +         write to sysctl entries, either by sysctl(2) or through a /proc
53216 +         interface.  It is strongly recommended that you say Y here. If the
53217 +         sysctl option is enabled, a sysctl option with name
53218 +         "chroot_deny_sysctl" is created.
53219 +
53220 +config GRKERNSEC_CHROOT_CAPS
53221 +       bool "Capability restrictions"
53222 +       depends on GRKERNSEC_CHROOT
53223 +       help
53224 +         If you say Y here, the capabilities on all root processes within a
53225 +         chroot jail will be lowered to stop module insertion, raw I/O,
53226 +         system and net admin tasks, rebooting the system, modifying immutable
53227 +         files, modifying IPC owned by another, and changing the system time.
53228 +         This is left as an option because it can break some apps.  Disable this
53229 +         if your chrooted apps are having problems performing those kinds of
53230 +         tasks.  If the sysctl option is enabled, a sysctl option with
53231 +         name "chroot_caps" is created.
53232 +
53233 +endmenu
53234 +menu "Kernel Auditing"
53235 +depends on GRKERNSEC
53236 +
53237 +config GRKERNSEC_AUDIT_GROUP
53238 +       bool "Single group for auditing"
53239 +       help
53240 +         If you say Y here, the exec, chdir, and (un)mount logging features
53241 +         will only operate on a group you specify.  This option is recommended
53242 +         if you only want to watch certain users instead of having a large
53243 +         amount of logs from the entire system.  If the sysctl option is enabled,
53244 +         a sysctl option with name "audit_group" is created.
53245 +
53246 +config GRKERNSEC_AUDIT_GID
53247 +       int "GID for auditing"
53248 +       depends on GRKERNSEC_AUDIT_GROUP
53249 +       default 1007
53250 +
53251 +config GRKERNSEC_EXECLOG
53252 +       bool "Exec logging"
53253 +       help
53254 +         If you say Y here, all execve() calls will be logged (since the
53255 +         other exec*() calls are frontends to execve(), all execution
53256 +         will be logged).  Useful for shell-servers that like to keep track
53257 +         of their users.  If the sysctl option is enabled, a sysctl option with
53258 +         name "exec_logging" is created.
53259 +         WARNING: When enabled, this option will produce a LOT of logs, especially
53260 +         on an active system.
53261 +
53262 +config GRKERNSEC_RESLOG
53263 +       bool "Resource logging"
53264 +       help
53265 +         If you say Y here, all attempts to overstep resource limits will
53266 +         be logged with the resource name, the requested size, and the current
53267 +         limit.  It is highly recommended that you say Y here.  If the sysctl
53268 +         option is enabled, a sysctl option with name "resource_logging" is
53269 +         created.  If the RBAC system is enabled, the sysctl value is ignored.
53270 +
53271 +config GRKERNSEC_CHROOT_EXECLOG
53272 +       bool "Log execs within chroot"
53273 +       help
53274 +         If you say Y here, all executions inside a chroot jail will be logged
53275 +         to syslog.  This can cause a large amount of logs if certain
53276 +         applications (e.g. djb's daemontools) are installed on the system, and
53277 +         is therefore left as an option.  If the sysctl option is enabled, a
53278 +         sysctl option with name "chroot_execlog" is created.
53279 +
53280 +config GRKERNSEC_AUDIT_PTRACE
53281 +       bool "Ptrace logging"
53282 +       help
53283 +         If you say Y here, all attempts to attach to a process via ptrace
53284 +         will be logged.  If the sysctl option is enabled, a sysctl option
53285 +         with name "audit_ptrace" is created.
53286 +
53287 +config GRKERNSEC_AUDIT_CHDIR
53288 +       bool "Chdir logging"
53289 +       help
53290 +         If you say Y here, all chdir() calls will be logged.  If the sysctl
53291 +         option is enabled, a sysctl option with name "audit_chdir" is created.
53292 +
53293 +config GRKERNSEC_AUDIT_MOUNT
53294 +       bool "(Un)Mount logging"
53295 +       help
53296 +         If you say Y here, all mounts and unmounts will be logged.  If the
53297 +         sysctl option is enabled, a sysctl option with name "audit_mount" is
53298 +         created.
53299 +
53300 +config GRKERNSEC_SIGNAL
53301 +       bool "Signal logging"
53302 +       help
53303 +         If you say Y here, certain important signals will be logged, such as
53304 +         SIGSEGV, informing you when an error has occurred in a program,
53305 +         which in some cases could indicate a possible exploit attempt.
53306 +         If the sysctl option is enabled, a sysctl option with name
53307 +         "signal_logging" is created.
53308 +
53309 +config GRKERNSEC_FORKFAIL
53310 +       bool "Fork failure logging"
53311 +       help
53312 +         If you say Y here, all failed fork() attempts will be logged.
53313 +         This could suggest a fork bomb, or someone attempting to overstep
53314 +         their process limit.  If the sysctl option is enabled, a sysctl option
53315 +         with name "forkfail_logging" is created.
53316 +
53317 +config GRKERNSEC_TIME
53318 +       bool "Time change logging"
53319 +       help
53320 +         If you say Y here, any changes of the system clock will be logged.
53321 +         If the sysctl option is enabled, a sysctl option with name
53322 +         "timechange_logging" is created.
53323 +
53324 +config GRKERNSEC_PROC_IPADDR
53325 +       bool "/proc/<pid>/ipaddr support"
53326 +       help
53327 +         If you say Y here, a new entry will be added to each /proc/<pid>
53328 +         directory that contains the IP address of the person using the task.
53329 +         The IP is carried across local TCP and AF_UNIX stream sockets.
53330 +         This information can be useful for IDS/IPSes to perform remote response
53331 +         to a local attack.  The entry is readable by only the owner of the
53332 +         process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53333 +         the RBAC system), and thus does not create privacy concerns.
53334 +
53335 +config GRKERNSEC_RWXMAP_LOG
53336 +       bool 'Denied RWX mmap/mprotect logging'
53337 +       depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53338 +       help
53339 +         If you say Y here, calls to mmap() and mprotect() with explicit
53340 +         usage of PROT_WRITE and PROT_EXEC together will be logged when
53341 +         denied by the PAX_MPROTECT feature.  If the sysctl option is
53342 +         enabled, a sysctl option with name "rwxmap_logging" is created.
53343 +
53344 +config GRKERNSEC_AUDIT_TEXTREL
53345 +       bool 'ELF text relocations logging (READ HELP)'
53346 +       depends on PAX_MPROTECT
53347 +       help
53348 +         If you say Y here, text relocations will be logged with the filename
53349 +         of the offending library or binary.  The purpose of the feature is
53350 +         to help Linux distribution developers get rid of libraries and
53351 +         binaries that need text relocations which hinder the future progress
53352 +         of PaX.  Only Linux distribution developers should say Y here, and
53353 +         never on a production machine, as this option creates an information
53354 +         leak that could aid an attacker in defeating the randomization of
53355 +         a single memory region.  If the sysctl option is enabled, a sysctl
53356 +         option with name "audit_textrel" is created.
53357 +
53358 +endmenu
53359 +
53360 +menu "Executable Protections"
53361 +depends on GRKERNSEC
53362 +
53363 +config GRKERNSEC_DMESG
53364 +       bool "Dmesg(8) restriction"
53365 +       help
53366 +         If you say Y here, non-root users will not be able to use dmesg(8)
53367 +         to view up to the last 4kb of messages in the kernel's log buffer.
53368 +         The kernel's log buffer often contains kernel addresses and other
53369 +         identifying information useful to an attacker in fingerprinting a
53370 +         system for a targeted exploit.
53371 +         If the sysctl option is enabled, a sysctl option with name "dmesg" is
53372 +         created.
53373 +
53374 +config GRKERNSEC_HARDEN_PTRACE
53375 +       bool "Deter ptrace-based process snooping"
53376 +       help
53377 +         If you say Y here, TTY sniffers and other malicious monitoring
53378 +         programs implemented through ptrace will be defeated.  If you
53379 +         have been using the RBAC system, this option has already been
53380 +         enabled for several years for all users, with the ability to make
53381 +         fine-grained exceptions.
53382 +
53383 +         This option only affects the ability of non-root users to ptrace
53384 +         processes that are not a descendant of the ptracing process.
53385 +         This means that strace ./binary and gdb ./binary will still work,
53386 +         but attaching to arbitrary processes will not.  If the sysctl
53387 +         option is enabled, a sysctl option with name "harden_ptrace" is
53388 +         created.
53389 +
53390 +config GRKERNSEC_TPE
53391 +       bool "Trusted Path Execution (TPE)"
53392 +       help
53393 +         If you say Y here, you will be able to choose a gid to add to the
53394 +         supplementary groups of users you want to mark as "untrusted."
53395 +         These users will not be able to execute any files that are not in
53396 +         root-owned directories writable only by root.  If the sysctl option
53397 +         is enabled, a sysctl option with name "tpe" is created.
53398 +
53399 +config GRKERNSEC_TPE_ALL
53400 +       bool "Partially restrict all non-root users"
53401 +       depends on GRKERNSEC_TPE
53402 +       help
53403 +         If you say Y here, all non-root users will be covered under
53404 +         a weaker TPE restriction.  This is separate from, and in addition to,
53405 +         the main TPE options that you have selected elsewhere.  Thus, if a
53406 +         "trusted" GID is chosen, this restriction applies even to that GID.
53407 +         Under this restriction, all non-root users will only be allowed to
53408 +         execute files in directories they own that are not group or
53409 +         world-writable, or in directories owned by root and writable only by
53410 +         root.  If the sysctl option is enabled, a sysctl option with name
53411 +         "tpe_restrict_all" is created.
53412 +
53413 +config GRKERNSEC_TPE_INVERT
53414 +       bool "Invert GID option"
53415 +       depends on GRKERNSEC_TPE
53416 +       help
53417 +         If you say Y here, the group you specify in the TPE configuration will
53418 +         decide what group TPE restrictions will be *disabled* for.  This
53419 +         option is useful if you want TPE restrictions to be applied to most
53420 +         users on the system.  If the sysctl option is enabled, a sysctl option
53421 +         with name "tpe_invert" is created.  Unlike other sysctl options, this
53422 +         entry will default to on for backward-compatibility.
53423 +
53424 +config GRKERNSEC_TPE_GID
53425 +       int "GID for untrusted users"
53426 +       depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53427 +       default 1005
53428 +       help
53429 +         Setting this GID determines what group TPE restrictions will be
53430 +         *enabled* for.  If the sysctl option is enabled, a sysctl option
53431 +         with name "tpe_gid" is created.
53432 +
53433 +config GRKERNSEC_TPE_GID
53434 +       int "GID for trusted users"
53435 +       depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53436 +       default 1005
53437 +       help
53438 +         Setting this GID determines what group TPE restrictions will be
53439 +         *disabled* for.  If the sysctl option is enabled, a sysctl option
53440 +         with name "tpe_gid" is created.
53441 +
53442 +endmenu
53443 +menu "Network Protections"
53444 +depends on GRKERNSEC
53445 +
53446 +config GRKERNSEC_RANDNET
53447 +       bool "Larger entropy pools"
53448 +       help
53449 +         If you say Y here, the entropy pools used for many features of Linux
53450 +         and grsecurity will be doubled in size.  Since several grsecurity
53451 +         features use additional randomness, it is recommended that you say Y
53452 +         here.  Saying Y here has a similar effect to modifying
53453 +         /proc/sys/kernel/random/poolsize.
53454 +
53455 +config GRKERNSEC_BLACKHOLE
53456 +       bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53457 +       depends on NET
53458 +       help
53459 +         If you say Y here, neither TCP resets nor ICMP
53460 +         destination-unreachable packets will be sent in response to packets
53461 +         sent to ports for which no associated listening process exists.
53462 +         This feature supports both IPv4 and IPv6 and exempts the
53463 +         loopback interface from blackholing.  Enabling this feature 
53464 +         makes a host more resilient to DoS attacks and reduces network
53465 +         visibility against scanners.
53466 +
53467 +         The blackhole feature as-implemented is equivalent to the FreeBSD
53468 +         blackhole feature, as it prevents RST responses to all packets, not
53469 +         just SYNs.  Under most application behavior this causes no
53470 +         problems, but applications (like haproxy) may not close certain
53471 +         connections in a way that cleanly terminates them on the remote
53472 +         end, leaving the remote host in LAST_ACK state.  Because of this
53473 +         side-effect and to prevent intentional LAST_ACK DoSes, this
53474 +         feature also adds automatic mitigation against such attacks.
53475 +         The mitigation drastically reduces the amount of time a socket
53476 +         can spend in LAST_ACK state.  If you're using haproxy and not
53477 +         all servers it connects to have this option enabled, consider
53478 +         disabling this feature on the haproxy host.
53479 +
53480 +         If the sysctl option is enabled, two sysctl options with names
53481 +         "ip_blackhole" and "lastack_retries" will be created.
53482 +         While "ip_blackhole" takes the standard zero/non-zero on/off
53483 +         toggle, "lastack_retries" uses the same kinds of values as
53484 +         "tcp_retries1" and "tcp_retries2".  The default value of 4
53485 +         prevents a socket from lasting more than 45 seconds in LAST_ACK
53486 +         state.
53487 +
53488 +config GRKERNSEC_SOCKET
53489 +       bool "Socket restrictions"
53490 +       depends on NET
53491 +       help
53492 +         If you say Y here, you will be able to choose from several options.
53493 +         If you assign a GID on your system and add it to the supplementary
53494 +         groups of users you want to restrict socket access to, this patch
53495 +         will perform up to three things, based on the option(s) you choose.
53496 +
53497 +config GRKERNSEC_SOCKET_ALL
53498 +       bool "Deny any sockets to group"
53499 +       depends on GRKERNSEC_SOCKET
53500 +       help
53501 +         If you say Y here, you will be able to choose a GID whose users will
53502 +         be unable to connect to other hosts from your machine or run server
53503 +         applications from your machine.  If the sysctl option is enabled, a
53504 +         sysctl option with name "socket_all" is created.
53505 +
53506 +config GRKERNSEC_SOCKET_ALL_GID
53507 +       int "GID to deny all sockets for"
53508 +       depends on GRKERNSEC_SOCKET_ALL
53509 +       default 1004
53510 +       help
53511 +         Here you can choose the GID to disable socket access for. Remember to
53512 +         add the users you want socket access disabled for to the GID
53513 +         specified here.  If the sysctl option is enabled, a sysctl option
53514 +         with name "socket_all_gid" is created.
53515 +
53516 +config GRKERNSEC_SOCKET_CLIENT
53517 +       bool "Deny client sockets to group"
53518 +       depends on GRKERNSEC_SOCKET
53519 +       help
53520 +         If you say Y here, you will be able to choose a GID whose users will
53521 +         be unable to connect to other hosts from your machine, but will be
53522 +         able to run servers.  If this option is enabled, all users in the group
53523 +         you specify will have to use passive mode when initiating ftp transfers
53524 +         from the shell on your machine.  If the sysctl option is enabled, a
53525 +         sysctl option with name "socket_client" is created.
53526 +
53527 +config GRKERNSEC_SOCKET_CLIENT_GID
53528 +       int "GID to deny client sockets for"
53529 +       depends on GRKERNSEC_SOCKET_CLIENT
53530 +       default 1003
53531 +       help
53532 +         Here you can choose the GID to disable client socket access for.
53533 +         Remember to add the users you want client socket access disabled for to
53534 +         the GID specified here.  If the sysctl option is enabled, a sysctl
53535 +         option with name "socket_client_gid" is created.
53536 +
53537 +config GRKERNSEC_SOCKET_SERVER
53538 +       bool "Deny server sockets to group"
53539 +       depends on GRKERNSEC_SOCKET
53540 +       help
53541 +         If you say Y here, you will be able to choose a GID whose users will
53542 +         be unable to run server applications from your machine.  If the sysctl
53543 +         option is enabled, a sysctl option with name "socket_server" is created.
53544 +
53545 +config GRKERNSEC_SOCKET_SERVER_GID
53546 +       int "GID to deny server sockets for"
53547 +       depends on GRKERNSEC_SOCKET_SERVER
53548 +       default 1002
53549 +       help
53550 +         Here you can choose the GID to disable server socket access for.
53551 +         Remember to add the users you want server socket access disabled for to
53552 +         the GID specified here.  If the sysctl option is enabled, a sysctl
53553 +         option with name "socket_server_gid" is created.
53554 +
53555 +endmenu
53556 +menu "Sysctl support"
53557 +depends on GRKERNSEC && SYSCTL
53558 +
53559 +config GRKERNSEC_SYSCTL
53560 +       bool "Sysctl support"
53561 +       help
53562 +         If you say Y here, you will be able to change the options that
53563 +         grsecurity runs with at bootup, without having to recompile your
53564 +         kernel.  You can echo values to files in /proc/sys/kernel/grsecurity
53565 +         to enable (1) or disable (0) various features.  All the sysctl entries
53566 +         are mutable until the "grsec_lock" entry is set to a non-zero value.
53567 +         All features enabled in the kernel configuration are disabled at boot
53568 +         if you do not say Y to the "Turn on features by default" option.
53569 +         All options should be set at startup, and the grsec_lock entry should
53570 +         be set to a non-zero value after all the options are set.
53571 +         *THIS IS EXTREMELY IMPORTANT*
53572 +
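         To make the recommended sequence concrete, here is a minimal userspace
         sketch (hypothetical; the entry names are taken from the help texts in
         this file) of what an init script or small helper would do: set the
         desired entries first, then set grsec_lock last.

             /* Illustrative only: toggle grsecurity sysctl entries under
              * /proc/sys/kernel/grsecurity and lock them afterwards. */
             #include <stdio.h>

             static int grsec_set(const char *entry, const char *value)
             {
                     char path[128];
                     FILE *f;

                     snprintf(path, sizeof(path),
                              "/proc/sys/kernel/grsecurity/%s", entry);
                     f = fopen(path, "w");
                     if (!f)
                             return -1;
                     fputs(value, f);
                     return fclose(f);
             }

             int main(void)
             {
                     grsec_set("linking_restrictions", "1");  /* example entry from above */
                     /* ... any other entries ... */
                     grsec_set("grsec_lock", "1");            /* must come last */
                     return 0;
             }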
53573 +config GRKERNSEC_SYSCTL_DISTRO
53574 +       bool "Extra sysctl support for distro makers (READ HELP)"
53575 +       depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53576 +       help
53577 +         If you say Y here, additional sysctl options will be created
53578 +         for features that affect processes running as root.  Therefore,
53579 +         it is critical when using this option that the grsec_lock entry be
53580 +         enabled after boot.  Only distros that ship prebuilt kernel packages
53581 +         with this option enabled and that can ensure grsec_lock is enabled
53582 +         after boot should use this option.
53583 +         *Failure to set grsec_lock after boot makes all grsec features
53584 +         this option covers useless*
53585 +
53586 +         Currently this option creates the following sysctl entries:
53587 +         "Disable Privileged I/O": "disable_priv_io"   
53588 +
53589 +config GRKERNSEC_SYSCTL_ON
53590 +       bool "Turn on features by default"
53591 +       depends on GRKERNSEC_SYSCTL
53592 +       help
53593 +         If you say Y here, all features enabled in the kernel configuration
53594 +         will be enabled at boot time rather than disabled.  It is
53595 +         recommended you say Y here unless
53596 +         there is some reason you would want all sysctl-tunable features to
53597 +         be disabled by default.  As mentioned elsewhere, it is important
53598 +         to enable the grsec_lock entry once you have finished modifying
53599 +         the sysctl entries.
53600 +
53601 +endmenu
53602 +menu "Logging Options"
53603 +depends on GRKERNSEC
53604 +
53605 +config GRKERNSEC_FLOODTIME
53606 +       int "Seconds in between log messages (minimum)"
53607 +       default 10
53608 +       help
53609 +         This option allows you to enforce a minimum number of seconds between
53610 +         grsecurity log messages.  The default should be suitable for most
53611 +         people; however, if you choose to change it, choose a value small enough
53612 +         to allow informative logs to be produced, but large enough to
53613 +         prevent flooding.
53614 +
53615 +config GRKERNSEC_FLOODBURST
53616 +       int "Number of messages in a burst (maximum)"
53617 +       default 4
53618 +       help
53619 +         This option allows you to choose the maximum number of messages allowed
53620 +         within the flood time interval you chose in a separate option.  The
53621 +         default should be suitable for most people; however, if you find that
53622 +         many of your logs are being interpreted as flooding, you may want to
53623 +         raise this value.
53624 +
53625 +endmenu
53626 +
53627 +endmenu
53628 diff -urNp linux-3.0.4/grsecurity/Makefile linux-3.0.4/grsecurity/Makefile
53629 --- linux-3.0.4/grsecurity/Makefile     1969-12-31 19:00:00.000000000 -0500
53630 +++ linux-3.0.4/grsecurity/Makefile     2011-08-23 21:48:14.000000000 -0400
53631 @@ -0,0 +1,34 @@
53632 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53633 +# during 2001-2009 it was completely redesigned by Brad Spengler
53634 +# into an RBAC system
53635 +#
53636 +# All code in this directory and various hooks inserted throughout the kernel
53637 +# are copyright Brad Spengler - Open Source Security, Inc., and released 
53638 +# under the GPL v2 or higher
53639 +
53640 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53641 +       grsec_mount.o grsec_sig.o grsec_sysctl.o \
53642 +       grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53643 +
53644 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53645 +       gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53646 +       gracl_learn.o grsec_log.o
53647 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53648 +
53649 +ifdef CONFIG_NET
53650 +obj-y += grsec_sock.o
53651 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53652 +endif
53653 +
53654 +ifndef CONFIG_GRKERNSEC
53655 +obj-y += grsec_disabled.o
53656 +endif
53657 +
53658 +ifdef CONFIG_GRKERNSEC_HIDESYM
53659 +extra-y := grsec_hidesym.o
53660 +$(obj)/grsec_hidesym.o:
53661 +       @-chmod -f 500 /boot
53662 +       @-chmod -f 500 /lib/modules
53663 +       @-chmod -f 700 .
53664 +       @echo '  grsec: protected kernel image paths'
53665 +endif
53666 diff -urNp linux-3.0.4/include/acpi/acpi_bus.h linux-3.0.4/include/acpi/acpi_bus.h
53667 --- linux-3.0.4/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
53668 +++ linux-3.0.4/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
53669 @@ -107,7 +107,7 @@ struct acpi_device_ops {
53670         acpi_op_bind bind;
53671         acpi_op_unbind unbind;
53672         acpi_op_notify notify;
53673 -};
53674 +} __no_const;
53675  
53676  #define ACPI_DRIVER_ALL_NOTIFY_EVENTS  0x1     /* system AND device events */
53677  
53678 diff -urNp linux-3.0.4/include/asm-generic/atomic-long.h linux-3.0.4/include/asm-generic/atomic-long.h
53679 --- linux-3.0.4/include/asm-generic/atomic-long.h       2011-07-21 22:17:23.000000000 -0400
53680 +++ linux-3.0.4/include/asm-generic/atomic-long.h       2011-08-23 21:47:56.000000000 -0400
53681 @@ -22,6 +22,12 @@
53682  
53683  typedef atomic64_t atomic_long_t;
53684  
53685 +#ifdef CONFIG_PAX_REFCOUNT
53686 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
53687 +#else
53688 +typedef atomic64_t atomic_long_unchecked_t;
53689 +#endif
53690 +
53691  #define ATOMIC_LONG_INIT(i)    ATOMIC64_INIT(i)
53692  
53693  static inline long atomic_long_read(atomic_long_t *l)
53694 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
53695         return (long)atomic64_read(v);
53696  }
53697  
53698 +#ifdef CONFIG_PAX_REFCOUNT
53699 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53700 +{
53701 +       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53702 +
53703 +       return (long)atomic64_read_unchecked(v);
53704 +}
53705 +#endif
53706 +
53707  static inline void atomic_long_set(atomic_long_t *l, long i)
53708  {
53709         atomic64_t *v = (atomic64_t *)l;
53710 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
53711         atomic64_set(v, i);
53712  }
53713  
53714 +#ifdef CONFIG_PAX_REFCOUNT
53715 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53716 +{
53717 +       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53718 +
53719 +       atomic64_set_unchecked(v, i);
53720 +}
53721 +#endif
53722 +
53723  static inline void atomic_long_inc(atomic_long_t *l)
53724  {
53725         atomic64_t *v = (atomic64_t *)l;
53726 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
53727         atomic64_inc(v);
53728  }
53729  
53730 +#ifdef CONFIG_PAX_REFCOUNT
53731 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53732 +{
53733 +       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53734 +
53735 +       atomic64_inc_unchecked(v);
53736 +}
53737 +#endif
53738 +
53739  static inline void atomic_long_dec(atomic_long_t *l)
53740  {
53741         atomic64_t *v = (atomic64_t *)l;
53742 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
53743         atomic64_dec(v);
53744  }
53745  
53746 +#ifdef CONFIG_PAX_REFCOUNT
53747 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53748 +{
53749 +       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53750 +
53751 +       atomic64_dec_unchecked(v);
53752 +}
53753 +#endif
53754 +
53755  static inline void atomic_long_add(long i, atomic_long_t *l)
53756  {
53757         atomic64_t *v = (atomic64_t *)l;
53758 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long 
53759         atomic64_add(i, v);
53760  }
53761  
53762 +#ifdef CONFIG_PAX_REFCOUNT
53763 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53764 +{
53765 +       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53766 +
53767 +       atomic64_add_unchecked(i, v);
53768 +}
53769 +#endif
53770 +
53771  static inline void atomic_long_sub(long i, atomic_long_t *l)
53772  {
53773         atomic64_t *v = (atomic64_t *)l;
53774 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long 
53775         atomic64_sub(i, v);
53776  }
53777  
53778 +#ifdef CONFIG_PAX_REFCOUNT
53779 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
53780 +{
53781 +       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53782 +
53783 +       atomic64_sub_unchecked(i, v);
53784 +}
53785 +#endif
53786 +
53787  static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
53788  {
53789         atomic64_t *v = (atomic64_t *)l;
53790 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
53791         return (long)atomic64_inc_return(v);
53792  }
53793  
53794 +#ifdef CONFIG_PAX_REFCOUNT
53795 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53796 +{
53797 +       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53798 +
53799 +       return (long)atomic64_inc_return_unchecked(v);
53800 +}
53801 +#endif
53802 +
53803  static inline long atomic_long_dec_return(atomic_long_t *l)
53804  {
53805         atomic64_t *v = (atomic64_t *)l;
53806 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
53807  
53808  typedef atomic_t atomic_long_t;
53809  
53810 +#ifdef CONFIG_PAX_REFCOUNT
53811 +typedef atomic_unchecked_t atomic_long_unchecked_t;
53812 +#else
53813 +typedef atomic_t atomic_long_unchecked_t;
53814 +#endif
53815 +
53816  #define ATOMIC_LONG_INIT(i)    ATOMIC_INIT(i)
53817  static inline long atomic_long_read(atomic_long_t *l)
53818  {
53819 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
53820         return (long)atomic_read(v);
53821  }
53822  
53823 +#ifdef CONFIG_PAX_REFCOUNT
53824 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53825 +{
53826 +       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53827 +
53828 +       return (long)atomic_read_unchecked(v);
53829 +}
53830 +#endif
53831 +
53832  static inline void atomic_long_set(atomic_long_t *l, long i)
53833  {
53834         atomic_t *v = (atomic_t *)l;
53835 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
53836         atomic_set(v, i);
53837  }
53838  
53839 +#ifdef CONFIG_PAX_REFCOUNT
53840 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53841 +{
53842 +       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53843 +
53844 +       atomic_set_unchecked(v, i);
53845 +}
53846 +#endif
53847 +
53848  static inline void atomic_long_inc(atomic_long_t *l)
53849  {
53850         atomic_t *v = (atomic_t *)l;
53851 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
53852         atomic_inc(v);
53853  }
53854  
53855 +#ifdef CONFIG_PAX_REFCOUNT
53856 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53857 +{
53858 +       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53859 +
53860 +       atomic_inc_unchecked(v);
53861 +}
53862 +#endif
53863 +
53864  static inline void atomic_long_dec(atomic_long_t *l)
53865  {
53866         atomic_t *v = (atomic_t *)l;
53867 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
53868         atomic_dec(v);
53869  }
53870  
53871 +#ifdef CONFIG_PAX_REFCOUNT
53872 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53873 +{
53874 +       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53875 +
53876 +       atomic_dec_unchecked(v);
53877 +}
53878 +#endif
53879 +
53880  static inline void atomic_long_add(long i, atomic_long_t *l)
53881  {
53882         atomic_t *v = (atomic_t *)l;
53883 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long 
53884         atomic_add(i, v);
53885  }
53886  
53887 +#ifdef CONFIG_PAX_REFCOUNT
53888 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53889 +{
53890 +       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53891 +
53892 +       atomic_add_unchecked(i, v);
53893 +}
53894 +#endif
53895 +
53896  static inline void atomic_long_sub(long i, atomic_long_t *l)
53897  {
53898         atomic_t *v = (atomic_t *)l;
53899 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long 
53900         atomic_sub(i, v);
53901  }
53902  
53903 +#ifdef CONFIG_PAX_REFCOUNT
53904 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
53905 +{
53906 +       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53907 +
53908 +       atomic_sub_unchecked(i, v);
53909 +}
53910 +#endif
53911 +
53912  static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
53913  {
53914         atomic_t *v = (atomic_t *)l;
53915 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
53916         return (long)atomic_inc_return(v);
53917  }
53918  
53919 +#ifdef CONFIG_PAX_REFCOUNT
53920 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53921 +{
53922 +       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53923 +
53924 +       return (long)atomic_inc_return_unchecked(v);
53925 +}
53926 +#endif
53927 +
53928  static inline long atomic_long_dec_return(atomic_long_t *l)
53929  {
53930         atomic_t *v = (atomic_t *)l;
53931 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
53932  
53933  #endif  /*  BITS_PER_LONG == 64  */
53934  
53935 +#ifdef CONFIG_PAX_REFCOUNT
53936 +static inline void pax_refcount_needs_these_functions(void)
53937 +{
53938 +       atomic_read_unchecked((atomic_unchecked_t *)NULL);
53939 +       atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
53940 +       atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
53941 +       atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
53942 +       atomic_inc_unchecked((atomic_unchecked_t *)NULL);
53943 +       (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
53944 +       atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
53945 +       atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
53946 +       atomic_dec_unchecked((atomic_unchecked_t *)NULL);
53947 +       atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
53948 +       (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
53949 +
53950 +       atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
53951 +       atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
53952 +       atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
53953 +       atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
53954 +       atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
53955 +       atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
53956 +       atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
53957 +}
53958 +#else
53959 +#define atomic_read_unchecked(v) atomic_read(v)
53960 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
53961 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
53962 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
53963 +#define atomic_inc_unchecked(v) atomic_inc(v)
53964 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
53965 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
53966 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
53967 +#define atomic_dec_unchecked(v) atomic_dec(v)
53968 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
53969 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
53970 +
53971 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
53972 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
53973 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
53974 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
53975 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
53976 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
53977 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
53978 +#endif
53979 +
53980  #endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
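A short usage sketch of the *_unchecked variants declared above (the counter itself is hypothetical): values that may legitimately wrap, such as statistics, use the unchecked type so they are exempt from reference-count overflow detection, while genuine reference counts stay on the checked atomic_long_t.

    /* Hypothetical statistics counter: wrapping is harmless here, so the
     * unchecked type and helpers from this header are appropriate. */
    static atomic_long_unchecked_t example_rx_packets = ATOMIC_LONG_INIT(0);

    static inline void example_count_rx(void)
    {
            atomic_long_inc_unchecked(&example_rx_packets);
    }

    static inline long example_rx_total(void)
    {
            return atomic_long_read_unchecked(&example_rx_packets);
    }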
53981 diff -urNp linux-3.0.4/include/asm-generic/cache.h linux-3.0.4/include/asm-generic/cache.h
53982 --- linux-3.0.4/include/asm-generic/cache.h     2011-07-21 22:17:23.000000000 -0400
53983 +++ linux-3.0.4/include/asm-generic/cache.h     2011-08-23 21:47:56.000000000 -0400
53984 @@ -6,7 +6,7 @@
53985   * cache lines need to provide their own cache.h.
53986   */
53987  
53988 -#define L1_CACHE_SHIFT         5
53989 -#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
53990 +#define L1_CACHE_SHIFT         5UL
53991 +#define L1_CACHE_BYTES         (1UL << L1_CACHE_SHIFT)
53992  
53993  #endif /* __ASM_GENERIC_CACHE_H */
53994 diff -urNp linux-3.0.4/include/asm-generic/int-l64.h linux-3.0.4/include/asm-generic/int-l64.h
53995 --- linux-3.0.4/include/asm-generic/int-l64.h   2011-07-21 22:17:23.000000000 -0400
53996 +++ linux-3.0.4/include/asm-generic/int-l64.h   2011-08-23 21:47:56.000000000 -0400
53997 @@ -46,6 +46,8 @@ typedef unsigned int u32;
53998  typedef signed long s64;
53999  typedef unsigned long u64;
54000  
54001 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
54002 +
54003  #define S8_C(x)  x
54004  #define U8_C(x)  x ## U
54005  #define S16_C(x) x
54006 diff -urNp linux-3.0.4/include/asm-generic/int-ll64.h linux-3.0.4/include/asm-generic/int-ll64.h
54007 --- linux-3.0.4/include/asm-generic/int-ll64.h  2011-07-21 22:17:23.000000000 -0400
54008 +++ linux-3.0.4/include/asm-generic/int-ll64.h  2011-08-23 21:47:56.000000000 -0400
54009 @@ -51,6 +51,8 @@ typedef unsigned int u32;
54010  typedef signed long long s64;
54011  typedef unsigned long long u64;
54012  
54013 +typedef unsigned long long intoverflow_t;
54014 +
54015  #define S8_C(x)  x
54016  #define U8_C(x)  x ## U
54017  #define S16_C(x) x
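intoverflow_t gives size arithmetic extra headroom where the platform allows it (a 128-bit mode(TI) type in the l64 variant, unsigned long long in the ll64 variant), so a computed size can be range-checked before use.  A hypothetical helper illustrating the idea:

    /* Hypothetical helper: do the multiplication in intoverflow_t and
     * reject results that no longer fit in an unsigned long.  Where
     * intoverflow_t is no wider than unsigned long, the check simply
     * never triggers.  (ULONG_MAX comes from <linux/kernel.h> in
     * kernel context.) */
    static inline unsigned long example_array_bytes(unsigned long nmemb,
                                                    unsigned long size)
    {
            intoverflow_t total = (intoverflow_t)nmemb * size;

            return total > ULONG_MAX ? 0 : (unsigned long)total;
    }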
54018 diff -urNp linux-3.0.4/include/asm-generic/kmap_types.h linux-3.0.4/include/asm-generic/kmap_types.h
54019 --- linux-3.0.4/include/asm-generic/kmap_types.h        2011-07-21 22:17:23.000000000 -0400
54020 +++ linux-3.0.4/include/asm-generic/kmap_types.h        2011-08-23 21:47:56.000000000 -0400
54021 @@ -29,10 +29,11 @@ KMAP_D(16)  KM_IRQ_PTE,
54022  KMAP_D(17)     KM_NMI,
54023  KMAP_D(18)     KM_NMI_PTE,
54024  KMAP_D(19)     KM_KDB,
54025 +KMAP_D(20)     KM_CLEARPAGE,
54026  /*
54027   * Remember to update debug_kmap_atomic() when adding new kmap types!
54028   */
54029 -KMAP_D(20)     KM_TYPE_NR
54030 +KMAP_D(21)     KM_TYPE_NR
54031  };
54032  
54033  #undef KMAP_D
54034 diff -urNp linux-3.0.4/include/asm-generic/pgtable.h linux-3.0.4/include/asm-generic/pgtable.h
54035 --- linux-3.0.4/include/asm-generic/pgtable.h   2011-07-21 22:17:23.000000000 -0400
54036 +++ linux-3.0.4/include/asm-generic/pgtable.h   2011-08-23 21:47:56.000000000 -0400
54037 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
54038  #endif /* __HAVE_ARCH_PMD_WRITE */
54039  #endif
54040  
54041 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54042 +static inline unsigned long pax_open_kernel(void) { return 0; }
54043 +#endif
54044 +
54045 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54046 +static inline unsigned long pax_close_kernel(void) { return 0; }
54047 +#endif
54048 +
54049  #endif /* !__ASSEMBLY__ */
54050  
54051  #endif /* _ASM_GENERIC_PGTABLE_H */
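These stubs give common code a portable way to bracket writes to data an architecture may keep read-only; architectures that define __HAVE_ARCH_PAX_OPEN_KERNEL supply real implementations.  A hypothetical caller, using the __read_only marker added to linux/cache.h later in this patch:

    /* Hypothetical example of the bracketing pattern: temporarily open
     * the kernel for writing, update an otherwise read-only variable,
     * then close it again.  With the generic stubs above, both calls
     * compile away to nothing. */
    static int example_setting __read_only = 0;

    static void example_update_setting(int val)
    {
            pax_open_kernel();
            example_setting = val;
            pax_close_kernel();
    }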
54052 diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopmd.h linux-3.0.4/include/asm-generic/pgtable-nopmd.h
54053 --- linux-3.0.4/include/asm-generic/pgtable-nopmd.h     2011-07-21 22:17:23.000000000 -0400
54054 +++ linux-3.0.4/include/asm-generic/pgtable-nopmd.h     2011-08-23 21:47:56.000000000 -0400
54055 @@ -1,14 +1,19 @@
54056  #ifndef _PGTABLE_NOPMD_H
54057  #define _PGTABLE_NOPMD_H
54058  
54059 -#ifndef __ASSEMBLY__
54060 -
54061  #include <asm-generic/pgtable-nopud.h>
54062  
54063 -struct mm_struct;
54064 -
54065  #define __PAGETABLE_PMD_FOLDED
54066  
54067 +#define PMD_SHIFT      PUD_SHIFT
54068 +#define PTRS_PER_PMD   1
54069 +#define PMD_SIZE       (_AC(1,UL) << PMD_SHIFT)
54070 +#define PMD_MASK       (~(PMD_SIZE-1))
54071 +
54072 +#ifndef __ASSEMBLY__
54073 +
54074 +struct mm_struct;
54075 +
54076  /*
54077   * Having the pmd type consist of a pud gets the size right, and allows
54078   * us to conceptually access the pud entry that this pmd is folded into
54079 @@ -16,11 +21,6 @@ struct mm_struct;
54080   */
54081  typedef struct { pud_t pud; } pmd_t;
54082  
54083 -#define PMD_SHIFT      PUD_SHIFT
54084 -#define PTRS_PER_PMD   1
54085 -#define PMD_SIZE       (1UL << PMD_SHIFT)
54086 -#define PMD_MASK       (~(PMD_SIZE-1))
54087 -
54088  /*
54089   * The "pud_xxx()" functions here are trivial for a folded two-level
54090   * setup: the pmd is never bad, and a pmd always exists (as it's folded
54091 diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopud.h linux-3.0.4/include/asm-generic/pgtable-nopud.h
54092 --- linux-3.0.4/include/asm-generic/pgtable-nopud.h     2011-07-21 22:17:23.000000000 -0400
54093 +++ linux-3.0.4/include/asm-generic/pgtable-nopud.h     2011-08-23 21:47:56.000000000 -0400
54094 @@ -1,10 +1,15 @@
54095  #ifndef _PGTABLE_NOPUD_H
54096  #define _PGTABLE_NOPUD_H
54097  
54098 -#ifndef __ASSEMBLY__
54099 -
54100  #define __PAGETABLE_PUD_FOLDED
54101  
54102 +#define PUD_SHIFT      PGDIR_SHIFT
54103 +#define PTRS_PER_PUD   1
54104 +#define PUD_SIZE       (_AC(1,UL) << PUD_SHIFT)
54105 +#define PUD_MASK       (~(PUD_SIZE-1))
54106 +
54107 +#ifndef __ASSEMBLY__
54108 +
54109  /*
54110   * Having the pud type consist of a pgd gets the size right, and allows
54111   * us to conceptually access the pgd entry that this pud is folded into
54112 @@ -12,11 +17,6 @@
54113   */
54114  typedef struct { pgd_t pgd; } pud_t;
54115  
54116 -#define PUD_SHIFT      PGDIR_SHIFT
54117 -#define PTRS_PER_PUD   1
54118 -#define PUD_SIZE       (1UL << PUD_SHIFT)
54119 -#define PUD_MASK       (~(PUD_SIZE-1))
54120 -
54121  /*
54122   * The "pgd_xxx()" functions here are trivial for a folded two-level
54123   * setup: the pud is never bad, and a pud always exists (as it's folded
54124 diff -urNp linux-3.0.4/include/asm-generic/vmlinux.lds.h linux-3.0.4/include/asm-generic/vmlinux.lds.h
54125 --- linux-3.0.4/include/asm-generic/vmlinux.lds.h       2011-07-21 22:17:23.000000000 -0400
54126 +++ linux-3.0.4/include/asm-generic/vmlinux.lds.h       2011-08-23 21:47:56.000000000 -0400
54127 @@ -217,6 +217,7 @@
54128         .rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {           \
54129                 VMLINUX_SYMBOL(__start_rodata) = .;                     \
54130                 *(.rodata) *(.rodata.*)                                 \
54131 +               *(.data..read_only)                                     \
54132                 *(__vermagic)           /* Kernel version magic */      \
54133                 . = ALIGN(8);                                           \
54134                 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;         \
54135 @@ -723,17 +724,18 @@
54136   * section in the linker script will go there too.  @phdr should have
54137   * a leading colon.
54138   *
54139 - * Note that this macros defines __per_cpu_load as an absolute symbol.
54140 + * Note that this macros defines per_cpu_load as an absolute symbol.
54141   * If there is no need to put the percpu section at a predetermined
54142   * address, use PERCPU_SECTION.
54143   */
54144  #define PERCPU_VADDR(cacheline, vaddr, phdr)                           \
54145 -       VMLINUX_SYMBOL(__per_cpu_load) = .;                             \
54146 -       .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)         \
54147 +       per_cpu_load = .;                                               \
54148 +       .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load)           \
54149                                 - LOAD_OFFSET) {                        \
54150 +               VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load;      \
54151                 PERCPU_INPUT(cacheline)                                 \
54152         } phdr                                                          \
54153 -       . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
54154 +       . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
54155  
54156  /**
54157   * PERCPU_SECTION - define output section for percpu area, simple version
54158 diff -urNp linux-3.0.4/include/drm/drm_crtc_helper.h linux-3.0.4/include/drm/drm_crtc_helper.h
54159 --- linux-3.0.4/include/drm/drm_crtc_helper.h   2011-07-21 22:17:23.000000000 -0400
54160 +++ linux-3.0.4/include/drm/drm_crtc_helper.h   2011-08-23 21:47:56.000000000 -0400
54161 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
54162  
54163         /* disable crtc when not in use - more explicit than dpms off */
54164         void (*disable)(struct drm_crtc *crtc);
54165 -};
54166 +} __no_const;
54167  
54168  struct drm_encoder_helper_funcs {
54169         void (*dpms)(struct drm_encoder *encoder, int mode);
54170 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
54171                                             struct drm_connector *connector);
54172         /* disable encoder when not in use - more explicit than dpms off */
54173         void (*disable)(struct drm_encoder *encoder);
54174 -};
54175 +} __no_const;
54176  
54177  struct drm_connector_helper_funcs {
54178         int (*get_modes)(struct drm_connector *connector);
54179 diff -urNp linux-3.0.4/include/drm/drmP.h linux-3.0.4/include/drm/drmP.h
54180 --- linux-3.0.4/include/drm/drmP.h      2011-07-21 22:17:23.000000000 -0400
54181 +++ linux-3.0.4/include/drm/drmP.h      2011-08-23 21:47:56.000000000 -0400
54182 @@ -73,6 +73,7 @@
54183  #include <linux/workqueue.h>
54184  #include <linux/poll.h>
54185  #include <asm/pgalloc.h>
54186 +#include <asm/local.h>
54187  #include "drm.h"
54188  
54189  #include <linux/idr.h>
54190 @@ -1033,7 +1034,7 @@ struct drm_device {
54191  
54192         /** \name Usage Counters */
54193         /*@{ */
54194 -       int open_count;                 /**< Outstanding files open */
54195 +       local_t open_count;             /**< Outstanding files open */
54196         atomic_t ioctl_count;           /**< Outstanding IOCTLs pending */
54197         atomic_t vma_count;             /**< Outstanding vma areas open */
54198         int buf_use;                    /**< Buffers in use -- cannot alloc */
54199 @@ -1044,7 +1045,7 @@ struct drm_device {
54200         /*@{ */
54201         unsigned long counters;
54202         enum drm_stat_type types[15];
54203 -       atomic_t counts[15];
54204 +       atomic_unchecked_t counts[15];
54205         /*@} */
54206  
54207         struct list_head filelist;
54208 diff -urNp linux-3.0.4/include/drm/ttm/ttm_memory.h linux-3.0.4/include/drm/ttm/ttm_memory.h
54209 --- linux-3.0.4/include/drm/ttm/ttm_memory.h    2011-07-21 22:17:23.000000000 -0400
54210 +++ linux-3.0.4/include/drm/ttm/ttm_memory.h    2011-08-23 21:47:56.000000000 -0400
54211 @@ -47,7 +47,7 @@
54212  
54213  struct ttm_mem_shrink {
54214         int (*do_shrink) (struct ttm_mem_shrink *);
54215 -};
54216 +} __no_const;
54217  
54218  /**
54219   * struct ttm_mem_global - Global memory accounting structure.
54220 diff -urNp linux-3.0.4/include/linux/a.out.h linux-3.0.4/include/linux/a.out.h
54221 --- linux-3.0.4/include/linux/a.out.h   2011-07-21 22:17:23.000000000 -0400
54222 +++ linux-3.0.4/include/linux/a.out.h   2011-08-23 21:47:56.000000000 -0400
54223 @@ -39,6 +39,14 @@ enum machine_type {
54224    M_MIPS2 = 152                /* MIPS R6000/R4000 binary */
54225  };
54226  
54227 +/* Constants for the N_FLAGS field */
54228 +#define F_PAX_PAGEEXEC 1       /* Paging based non-executable pages */
54229 +#define F_PAX_EMUTRAMP 2       /* Emulate trampolines */
54230 +#define F_PAX_MPROTECT 4       /* Restrict mprotect() */
54231 +#define F_PAX_RANDMMAP 8       /* Randomize mmap() base */
54232 +/*#define F_PAX_RANDEXEC       16*/    /* Randomize ET_EXEC base */
54233 +#define F_PAX_SEGMEXEC 32      /* Segmentation based non-executable pages */
54234 +
54235  #if !defined (N_MAGIC)
54236  #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54237  #endif
54238 diff -urNp linux-3.0.4/include/linux/atmdev.h linux-3.0.4/include/linux/atmdev.h
54239 --- linux-3.0.4/include/linux/atmdev.h  2011-07-21 22:17:23.000000000 -0400
54240 +++ linux-3.0.4/include/linux/atmdev.h  2011-08-23 21:47:56.000000000 -0400
54241 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54242  #endif
54243  
54244  struct k_atm_aal_stats {
54245 -#define __HANDLE_ITEM(i) atomic_t i
54246 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54247         __AAL_STAT_ITEMS
54248  #undef __HANDLE_ITEM
54249  };
54250 diff -urNp linux-3.0.4/include/linux/binfmts.h linux-3.0.4/include/linux/binfmts.h
54251 --- linux-3.0.4/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
54252 +++ linux-3.0.4/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
54253 @@ -88,6 +88,7 @@ struct linux_binfmt {
54254         int (*load_binary)(struct linux_binprm *, struct  pt_regs * regs);
54255         int (*load_shlib)(struct file *);
54256         int (*core_dump)(struct coredump_params *cprm);
54257 +       void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54258         unsigned long min_coredump;     /* minimal dump size */
54259  };
54260  
54261 diff -urNp linux-3.0.4/include/linux/blkdev.h linux-3.0.4/include/linux/blkdev.h
54262 --- linux-3.0.4/include/linux/blkdev.h  2011-07-21 22:17:23.000000000 -0400
54263 +++ linux-3.0.4/include/linux/blkdev.h  2011-08-26 19:49:56.000000000 -0400
54264 @@ -1308,7 +1308,7 @@ struct block_device_operations {
54265         /* this callback is with swap_lock and sometimes page table lock held */
54266         void (*swap_slot_free_notify) (struct block_device *, unsigned long);
54267         struct module *owner;
54268 -};
54269 +} __do_const;
54270  
54271  extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54272                                  unsigned long);
54273 diff -urNp linux-3.0.4/include/linux/blktrace_api.h linux-3.0.4/include/linux/blktrace_api.h
54274 --- linux-3.0.4/include/linux/blktrace_api.h    2011-07-21 22:17:23.000000000 -0400
54275 +++ linux-3.0.4/include/linux/blktrace_api.h    2011-08-23 21:47:56.000000000 -0400
54276 @@ -161,7 +161,7 @@ struct blk_trace {
54277         struct dentry *dir;
54278         struct dentry *dropped_file;
54279         struct dentry *msg_file;
54280 -       atomic_t dropped;
54281 +       atomic_unchecked_t dropped;
54282  };
54283  
54284  extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54285 diff -urNp linux-3.0.4/include/linux/byteorder/little_endian.h linux-3.0.4/include/linux/byteorder/little_endian.h
54286 --- linux-3.0.4/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
54287 +++ linux-3.0.4/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
54288 @@ -42,51 +42,51 @@
54289  
54290  static inline __le64 __cpu_to_le64p(const __u64 *p)
54291  {
54292 -       return (__force __le64)*p;
54293 +       return (__force const __le64)*p;
54294  }
54295  static inline __u64 __le64_to_cpup(const __le64 *p)
54296  {
54297 -       return (__force __u64)*p;
54298 +       return (__force const __u64)*p;
54299  }
54300  static inline __le32 __cpu_to_le32p(const __u32 *p)
54301  {
54302 -       return (__force __le32)*p;
54303 +       return (__force const __le32)*p;
54304  }
54305  static inline __u32 __le32_to_cpup(const __le32 *p)
54306  {
54307 -       return (__force __u32)*p;
54308 +       return (__force const __u32)*p;
54309  }
54310  static inline __le16 __cpu_to_le16p(const __u16 *p)
54311  {
54312 -       return (__force __le16)*p;
54313 +       return (__force const __le16)*p;
54314  }
54315  static inline __u16 __le16_to_cpup(const __le16 *p)
54316  {
54317 -       return (__force __u16)*p;
54318 +       return (__force const __u16)*p;
54319  }
54320  static inline __be64 __cpu_to_be64p(const __u64 *p)
54321  {
54322 -       return (__force __be64)__swab64p(p);
54323 +       return (__force const __be64)__swab64p(p);
54324  }
54325  static inline __u64 __be64_to_cpup(const __be64 *p)
54326  {
54327 -       return __swab64p((__u64 *)p);
54328 +       return __swab64p((const __u64 *)p);
54329  }
54330  static inline __be32 __cpu_to_be32p(const __u32 *p)
54331  {
54332 -       return (__force __be32)__swab32p(p);
54333 +       return (__force const __be32)__swab32p(p);
54334  }
54335  static inline __u32 __be32_to_cpup(const __be32 *p)
54336  {
54337 -       return __swab32p((__u32 *)p);
54338 +       return __swab32p((const __u32 *)p);
54339  }
54340  static inline __be16 __cpu_to_be16p(const __u16 *p)
54341  {
54342 -       return (__force __be16)__swab16p(p);
54343 +       return (__force const __be16)__swab16p(p);
54344  }
54345  static inline __u16 __be16_to_cpup(const __be16 *p)
54346  {
54347 -       return __swab16p((__u16 *)p);
54348 +       return __swab16p((const __u16 *)p);
54349  }
54350  #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54351  #define __le64_to_cpus(x) do { (void)(x); } while (0)
54352 diff -urNp linux-3.0.4/include/linux/cache.h linux-3.0.4/include/linux/cache.h
54353 --- linux-3.0.4/include/linux/cache.h   2011-07-21 22:17:23.000000000 -0400
54354 +++ linux-3.0.4/include/linux/cache.h   2011-08-23 21:47:56.000000000 -0400
54355 @@ -16,6 +16,10 @@
54356  #define __read_mostly
54357  #endif
54358  
54359 +#ifndef __read_only
54360 +#define __read_only __read_mostly
54361 +#endif
54362 +
54363  #ifndef ____cacheline_aligned
54364  #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54365  #endif
54366 diff -urNp linux-3.0.4/include/linux/capability.h linux-3.0.4/include/linux/capability.h
54367 --- linux-3.0.4/include/linux/capability.h      2011-07-21 22:17:23.000000000 -0400
54368 +++ linux-3.0.4/include/linux/capability.h      2011-08-23 21:48:14.000000000 -0400
54369 @@ -547,6 +547,9 @@ extern bool capable(int cap);
54370  extern bool ns_capable(struct user_namespace *ns, int cap);
54371  extern bool task_ns_capable(struct task_struct *t, int cap);
54372  extern bool nsown_capable(int cap);
54373 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
54374 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
54375 +extern bool capable_nolog(int cap);
54376  
54377  /* audit system wants to get cap info from files as well */
54378  extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
54379 diff -urNp linux-3.0.4/include/linux/cleancache.h linux-3.0.4/include/linux/cleancache.h
54380 --- linux-3.0.4/include/linux/cleancache.h      2011-07-21 22:17:23.000000000 -0400
54381 +++ linux-3.0.4/include/linux/cleancache.h      2011-08-23 21:47:56.000000000 -0400
54382 @@ -31,7 +31,7 @@ struct cleancache_ops {
54383         void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
54384         void (*flush_inode)(int, struct cleancache_filekey);
54385         void (*flush_fs)(int);
54386 -};
54387 +} __no_const;
54388  
54389  extern struct cleancache_ops
54390         cleancache_register_ops(struct cleancache_ops *ops);
54391 diff -urNp linux-3.0.4/include/linux/compiler-gcc4.h linux-3.0.4/include/linux/compiler-gcc4.h
54392 --- linux-3.0.4/include/linux/compiler-gcc4.h   2011-07-21 22:17:23.000000000 -0400
54393 +++ linux-3.0.4/include/linux/compiler-gcc4.h   2011-08-26 19:49:56.000000000 -0400
54394 @@ -31,6 +31,12 @@
54395  
54396  
54397  #if __GNUC_MINOR__ >= 5
54398 +
54399 +#ifdef CONSTIFY_PLUGIN
54400 +#define __no_const __attribute__((no_const))
54401 +#define __do_const __attribute__((do_const))
54402 +#endif
54403 +
54404  /*
54405   * Mark a position in code as unreachable.  This can be used to
54406   * suppress control flow warnings after asm blocks that transfer
54407 @@ -46,6 +52,11 @@
54408  #define __noclone      __attribute__((__noclone__))
54409  
54410  #endif
54411 +
54412 +#define __alloc_size(...)      __attribute((alloc_size(__VA_ARGS__)))
54413 +#define __bos(ptr, arg)                __builtin_object_size((ptr), (arg))
54414 +#define __bos0(ptr)            __bos((ptr), 0)
54415 +#define __bos1(ptr)            __bos((ptr), 1)
54416  #endif
54417  
54418  #if __GNUC_MINOR__ > 0
54419 diff -urNp linux-3.0.4/include/linux/compiler.h linux-3.0.4/include/linux/compiler.h
54420 --- linux-3.0.4/include/linux/compiler.h        2011-07-21 22:17:23.000000000 -0400
54421 +++ linux-3.0.4/include/linux/compiler.h        2011-08-26 19:49:56.000000000 -0400
54422 @@ -264,6 +264,14 @@ void ftrace_likely_update(struct ftrace_
54423  # define __attribute_const__   /* unimplemented */
54424  #endif
54425  
54426 +#ifndef __no_const
54427 +# define __no_const
54428 +#endif
54429 +
54430 +#ifndef __do_const
54431 +# define __do_const
54432 +#endif
54433 +
54434  /*
54435   * Tell gcc if a function is cold. The compiler will assume any path
54436   * directly leading to the call is unlikely.
54437 @@ -273,6 +281,22 @@ void ftrace_likely_update(struct ftrace_
54438  #define __cold
54439  #endif
54440  
54441 +#ifndef __alloc_size
54442 +#define __alloc_size(...)
54443 +#endif
54444 +
54445 +#ifndef __bos
54446 +#define __bos(ptr, arg)
54447 +#endif
54448 +
54449 +#ifndef __bos0
54450 +#define __bos0(ptr)
54451 +#endif
54452 +
54453 +#ifndef __bos1
54454 +#define __bos1(ptr)
54455 +#endif
54456 +
54457  /* Simple shorthand for a section definition */
54458  #ifndef __section
54459  # define __section(S) __attribute__ ((__section__(#S)))
54460 @@ -306,6 +330,7 @@ void ftrace_likely_update(struct ftrace_
54461   * use is to mediate communication between process-level code and irq/NMI
54462   * handlers, all running on the same CPU.
54463   */
54464 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
54465 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
54466 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
54467  
54468  #endif /* __LINUX_COMPILER_H */
54469 diff -urNp linux-3.0.4/include/linux/cpuset.h linux-3.0.4/include/linux/cpuset.h
54470 --- linux-3.0.4/include/linux/cpuset.h  2011-07-21 22:17:23.000000000 -0400
54471 +++ linux-3.0.4/include/linux/cpuset.h  2011-08-23 21:47:56.000000000 -0400
54472 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
54473          * nodemask.
54474          */
54475         smp_mb();
54476 -       --ACCESS_ONCE(current->mems_allowed_change_disable);
54477 +       --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
54478  }
54479  
54480  static inline void set_mems_allowed(nodemask_t nodemask)
54481 diff -urNp linux-3.0.4/include/linux/crypto.h linux-3.0.4/include/linux/crypto.h
54482 --- linux-3.0.4/include/linux/crypto.h  2011-07-21 22:17:23.000000000 -0400
54483 +++ linux-3.0.4/include/linux/crypto.h  2011-08-23 21:47:56.000000000 -0400
54484 @@ -361,7 +361,7 @@ struct cipher_tfm {
54485                           const u8 *key, unsigned int keylen);
54486         void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
54487         void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
54488 -};
54489 +} __no_const;
54490  
54491  struct hash_tfm {
54492         int (*init)(struct hash_desc *desc);
54493 @@ -382,13 +382,13 @@ struct compress_tfm {
54494         int (*cot_decompress)(struct crypto_tfm *tfm,
54495                               const u8 *src, unsigned int slen,
54496                               u8 *dst, unsigned int *dlen);
54497 -};
54498 +} __no_const;
54499  
54500  struct rng_tfm {
54501         int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
54502                               unsigned int dlen);
54503         int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
54504 -};
54505 +} __no_const;
54506  
54507  #define crt_ablkcipher crt_u.ablkcipher
54508  #define crt_aead       crt_u.aead
54509 diff -urNp linux-3.0.4/include/linux/decompress/mm.h linux-3.0.4/include/linux/decompress/mm.h
54510 --- linux-3.0.4/include/linux/decompress/mm.h   2011-07-21 22:17:23.000000000 -0400
54511 +++ linux-3.0.4/include/linux/decompress/mm.h   2011-08-23 21:47:56.000000000 -0400
54512 @@ -77,7 +77,7 @@ static void free(void *where)
54513   * warnings when not needed (indeed large_malloc / large_free are not
54514   * needed by inflate */
54515  
54516 -#define malloc(a) kmalloc(a, GFP_KERNEL)
54517 +#define malloc(a) kmalloc((a), GFP_KERNEL)
54518  #define free(a) kfree(a)
54519  
54520  #define large_malloc(a) vmalloc(a)
54521 diff -urNp linux-3.0.4/include/linux/dma-mapping.h linux-3.0.4/include/linux/dma-mapping.h
54522 --- linux-3.0.4/include/linux/dma-mapping.h     2011-07-21 22:17:23.000000000 -0400
54523 +++ linux-3.0.4/include/linux/dma-mapping.h     2011-08-26 19:49:56.000000000 -0400
54524 @@ -50,7 +50,7 @@ struct dma_map_ops {
54525         int (*dma_supported)(struct device *dev, u64 mask);
54526         int (*set_dma_mask)(struct device *dev, u64 mask);
54527         int is_phys;
54528 -};
54529 +} __do_const;
54530  
54531  #define DMA_BIT_MASK(n)        (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
54532  
54533 diff -urNp linux-3.0.4/include/linux/efi.h linux-3.0.4/include/linux/efi.h
54534 --- linux-3.0.4/include/linux/efi.h     2011-07-21 22:17:23.000000000 -0400
54535 +++ linux-3.0.4/include/linux/efi.h     2011-08-23 21:47:56.000000000 -0400
54536 @@ -410,7 +410,7 @@ struct efivar_operations {
54537         efi_get_variable_t *get_variable;
54538         efi_get_next_variable_t *get_next_variable;
54539         efi_set_variable_t *set_variable;
54540 -};
54541 +} __no_const;
54542  
54543  struct efivars {
54544         /*
54545 diff -urNp linux-3.0.4/include/linux/elf.h linux-3.0.4/include/linux/elf.h
54546 --- linux-3.0.4/include/linux/elf.h     2011-07-21 22:17:23.000000000 -0400
54547 +++ linux-3.0.4/include/linux/elf.h     2011-08-23 21:47:56.000000000 -0400
54548 @@ -49,6 +49,17 @@ typedef __s64        Elf64_Sxword;
54549  #define PT_GNU_EH_FRAME                0x6474e550
54550  
54551  #define PT_GNU_STACK   (PT_LOOS + 0x474e551)
54552 +#define PT_GNU_RELRO   (PT_LOOS + 0x474e552)
54553 +
54554 +#define PT_PAX_FLAGS   (PT_LOOS + 0x5041580)
54555 +
54556 +/* Constants for the e_flags field */
54557 +#define EF_PAX_PAGEEXEC                1       /* Paging based non-executable pages */
54558 +#define EF_PAX_EMUTRAMP                2       /* Emulate trampolines */
54559 +#define EF_PAX_MPROTECT                4       /* Restrict mprotect() */
54560 +#define EF_PAX_RANDMMAP                8       /* Randomize mmap() base */
54561 +/*#define EF_PAX_RANDEXEC              16*/    /* Randomize ET_EXEC base */
54562 +#define EF_PAX_SEGMEXEC                32      /* Segmentation based non-executable pages */
54563  
54564  /*
54565   * Extended Numbering
54566 @@ -106,6 +117,8 @@ typedef __s64       Elf64_Sxword;
54567  #define DT_DEBUG       21
54568  #define DT_TEXTREL     22
54569  #define DT_JMPREL      23
54570 +#define DT_FLAGS       30
54571 +  #define DF_TEXTREL  0x00000004
54572  #define DT_ENCODING    32
54573  #define OLD_DT_LOOS    0x60000000
54574  #define DT_LOOS                0x6000000d
54575 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
54576  #define PF_W           0x2
54577  #define PF_X           0x1
54578  
54579 +#define PF_PAGEEXEC    (1U << 4)       /* Enable  PAGEEXEC */
54580 +#define PF_NOPAGEEXEC  (1U << 5)       /* Disable PAGEEXEC */
54581 +#define PF_SEGMEXEC    (1U << 6)       /* Enable  SEGMEXEC */
54582 +#define PF_NOSEGMEXEC  (1U << 7)       /* Disable SEGMEXEC */
54583 +#define PF_MPROTECT    (1U << 8)       /* Enable  MPROTECT */
54584 +#define PF_NOMPROTECT  (1U << 9)       /* Disable MPROTECT */
54585 +/*#define PF_RANDEXEC  (1U << 10)*/    /* Enable  RANDEXEC */
54586 +/*#define PF_NORANDEXEC        (1U << 11)*/    /* Disable RANDEXEC */
54587 +#define PF_EMUTRAMP    (1U << 12)      /* Enable  EMUTRAMP */
54588 +#define PF_NOEMUTRAMP  (1U << 13)      /* Disable EMUTRAMP */
54589 +#define PF_RANDMMAP    (1U << 14)      /* Enable  RANDMMAP */
54590 +#define PF_NORANDMMAP  (1U << 15)      /* Disable RANDMMAP */
54591 +
54592  typedef struct elf32_phdr{
54593    Elf32_Word   p_type;
54594    Elf32_Off    p_offset;
54595 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
54596  #define        EI_OSABI        7
54597  #define        EI_PAD          8
54598  
54599 +#define        EI_PAX          14
54600 +
54601  #define        ELFMAG0         0x7f            /* EI_MAG */
54602  #define        ELFMAG1         'E'
54603  #define        ELFMAG2         'L'
54604 @@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
54605  #define elf_note       elf32_note
54606  #define elf_addr_t     Elf32_Off
54607  #define Elf_Half       Elf32_Half
54608 +#define elf_dyn                Elf32_Dyn
54609  
54610  #else
54611  
54612 @@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
54613  #define elf_note       elf64_note
54614  #define elf_addr_t     Elf64_Off
54615  #define Elf_Half       Elf64_Half
54616 +#define elf_dyn                Elf64_Dyn
54617  
54618  #endif
54619  
54620 diff -urNp linux-3.0.4/include/linux/firewire.h linux-3.0.4/include/linux/firewire.h
54621 --- linux-3.0.4/include/linux/firewire.h        2011-07-21 22:17:23.000000000 -0400
54622 +++ linux-3.0.4/include/linux/firewire.h        2011-08-23 21:47:56.000000000 -0400
54623 @@ -428,7 +428,7 @@ struct fw_iso_context {
54624         union {
54625                 fw_iso_callback_t sc;
54626                 fw_iso_mc_callback_t mc;
54627 -       } callback;
54628 +       } __no_const callback;
54629         void *callback_data;
54630  };
54631  
54632 diff -urNp linux-3.0.4/include/linux/fscache-cache.h linux-3.0.4/include/linux/fscache-cache.h
54633 --- linux-3.0.4/include/linux/fscache-cache.h   2011-07-21 22:17:23.000000000 -0400
54634 +++ linux-3.0.4/include/linux/fscache-cache.h   2011-08-23 21:47:56.000000000 -0400
54635 @@ -102,7 +102,7 @@ struct fscache_operation {
54636         fscache_operation_release_t release;
54637  };
54638  
54639 -extern atomic_t fscache_op_debug_id;
54640 +extern atomic_unchecked_t fscache_op_debug_id;
54641  extern void fscache_op_work_func(struct work_struct *work);
54642  
54643  extern void fscache_enqueue_operation(struct fscache_operation *);
54644 @@ -122,7 +122,7 @@ static inline void fscache_operation_ini
54645  {
54646         INIT_WORK(&op->work, fscache_op_work_func);
54647         atomic_set(&op->usage, 1);
54648 -       op->debug_id = atomic_inc_return(&fscache_op_debug_id);
54649 +       op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
54650         op->processor = processor;
54651         op->release = release;
54652         INIT_LIST_HEAD(&op->pend_link);
54653 diff -urNp linux-3.0.4/include/linux/fs.h linux-3.0.4/include/linux/fs.h
54654 --- linux-3.0.4/include/linux/fs.h      2011-07-21 22:17:23.000000000 -0400
54655 +++ linux-3.0.4/include/linux/fs.h      2011-08-26 19:49:56.000000000 -0400
54656 @@ -109,6 +109,11 @@ struct inodes_stat_t {
54657  /* File was opened by fanotify and shouldn't generate fanotify events */
54658  #define FMODE_NONOTIFY         ((__force fmode_t)0x1000000)
54659  
54660 +/* Hack for grsec so as not to require read permission simply to execute
54661 + * a binary
54662 + */
54663 +#define FMODE_GREXEC           ((__force fmode_t)0x2000000)
54664 +
54665  /*
54666   * The below are the various read and write types that we support. Some of
54667   * them include behavioral modifiers that send information down to the
54668 @@ -1571,7 +1576,8 @@ struct file_operations {
54669         int (*setlease)(struct file *, long, struct file_lock **);
54670         long (*fallocate)(struct file *file, int mode, loff_t offset,
54671                           loff_t len);
54672 -};
54673 +} __do_const;
54674 +typedef struct file_operations __no_const file_operations_no_const;
54675  
54676  #define IPERM_FLAG_RCU 0x0001
54677  
54678 diff -urNp linux-3.0.4/include/linux/fsnotify.h linux-3.0.4/include/linux/fsnotify.h
54679 --- linux-3.0.4/include/linux/fsnotify.h        2011-07-21 22:17:23.000000000 -0400
54680 +++ linux-3.0.4/include/linux/fsnotify.h        2011-08-24 18:10:29.000000000 -0400
54681 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
54682   */
54683  static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
54684  {
54685 -       return kstrdup(name, GFP_KERNEL);
54686 +       return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
54687  }
54688  
54689  /*
54690 diff -urNp linux-3.0.4/include/linux/fs_struct.h linux-3.0.4/include/linux/fs_struct.h
54691 --- linux-3.0.4/include/linux/fs_struct.h       2011-07-21 22:17:23.000000000 -0400
54692 +++ linux-3.0.4/include/linux/fs_struct.h       2011-08-23 21:47:56.000000000 -0400
54693 @@ -6,7 +6,7 @@
54694  #include <linux/seqlock.h>
54695  
54696  struct fs_struct {
54697 -       int users;
54698 +       atomic_t users;
54699         spinlock_t lock;
54700         seqcount_t seq;
54701         int umask;
54702 diff -urNp linux-3.0.4/include/linux/ftrace_event.h linux-3.0.4/include/linux/ftrace_event.h
54703 --- linux-3.0.4/include/linux/ftrace_event.h    2011-07-21 22:17:23.000000000 -0400
54704 +++ linux-3.0.4/include/linux/ftrace_event.h    2011-08-23 21:47:56.000000000 -0400
54705 @@ -96,7 +96,7 @@ struct trace_event_functions {
54706         trace_print_func        raw;
54707         trace_print_func        hex;
54708         trace_print_func        binary;
54709 -};
54710 +} __no_const;
54711  
54712  struct trace_event {
54713         struct hlist_node               node;
54714 @@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
54715  extern int trace_add_event_call(struct ftrace_event_call *call);
54716  extern void trace_remove_event_call(struct ftrace_event_call *call);
54717  
54718 -#define is_signed_type(type)   (((type)(-1)) < 0)
54719 +#define is_signed_type(type)   (((type)(-1)) < (type)1)
54720  
54721  int trace_set_clr_event(const char *system, const char *event, int set);
54722  
54723 diff -urNp linux-3.0.4/include/linux/genhd.h linux-3.0.4/include/linux/genhd.h
54724 --- linux-3.0.4/include/linux/genhd.h   2011-07-21 22:17:23.000000000 -0400
54725 +++ linux-3.0.4/include/linux/genhd.h   2011-08-23 21:47:56.000000000 -0400
54726 @@ -184,7 +184,7 @@ struct gendisk {
54727         struct kobject *slave_dir;
54728  
54729         struct timer_rand_state *random;
54730 -       atomic_t sync_io;               /* RAID */
54731 +       atomic_unchecked_t sync_io;     /* RAID */
54732         struct disk_events *ev;
54733  #ifdef  CONFIG_BLK_DEV_INTEGRITY
54734         struct blk_integrity *integrity;
54735 diff -urNp linux-3.0.4/include/linux/gracl.h linux-3.0.4/include/linux/gracl.h
54736 --- linux-3.0.4/include/linux/gracl.h   1969-12-31 19:00:00.000000000 -0500
54737 +++ linux-3.0.4/include/linux/gracl.h   2011-08-23 21:48:14.000000000 -0400
54738 @@ -0,0 +1,317 @@
54739 +#ifndef GR_ACL_H
54740 +#define GR_ACL_H
54741 +
54742 +#include <linux/grdefs.h>
54743 +#include <linux/resource.h>
54744 +#include <linux/capability.h>
54745 +#include <linux/dcache.h>
54746 +#include <asm/resource.h>
54747 +
54748 +/* Major status information */
54749 +
54750 +#define GR_VERSION  "grsecurity 2.2.2"
54751 +#define GRSECURITY_VERSION 0x2202
54752 +
54753 +enum {
54754 +       GR_SHUTDOWN = 0,
54755 +       GR_ENABLE = 1,
54756 +       GR_SPROLE = 2,
54757 +       GR_RELOAD = 3,
54758 +       GR_SEGVMOD = 4,
54759 +       GR_STATUS = 5,
54760 +       GR_UNSPROLE = 6,
54761 +       GR_PASSSET = 7,
54762 +       GR_SPROLEPAM = 8,
54763 +};
54764 +
54765 +/* Password setup definitions
54766 + * kernel/grhash.c */
54767 +enum {
54768 +       GR_PW_LEN = 128,
54769 +       GR_SALT_LEN = 16,
54770 +       GR_SHA_LEN = 32,
54771 +};
54772 +
54773 +enum {
54774 +       GR_SPROLE_LEN = 64,
54775 +};
54776 +
54777 +enum {
54778 +       GR_NO_GLOB = 0,
54779 +       GR_REG_GLOB,
54780 +       GR_CREATE_GLOB
54781 +};
54782 +
54783 +#define GR_NLIMITS 32
54784 +
54785 +/* Begin Data Structures */
54786 +
54787 +struct sprole_pw {
54788 +       unsigned char *rolename;
54789 +       unsigned char salt[GR_SALT_LEN];
54790 +       unsigned char sum[GR_SHA_LEN];  /* 256-bit SHA hash of the password */
54791 +};
54792 +
54793 +struct name_entry {
54794 +       __u32 key;
54795 +       ino_t inode;
54796 +       dev_t device;
54797 +       char *name;
54798 +       __u16 len;
54799 +       __u8 deleted;
54800 +       struct name_entry *prev;
54801 +       struct name_entry *next;
54802 +};
54803 +
54804 +struct inodev_entry {
54805 +       struct name_entry *nentry;
54806 +       struct inodev_entry *prev;
54807 +       struct inodev_entry *next;
54808 +};
54809 +
54810 +struct acl_role_db {
54811 +       struct acl_role_label **r_hash;
54812 +       __u32 r_size;
54813 +};
54814 +
54815 +struct inodev_db {
54816 +       struct inodev_entry **i_hash;
54817 +       __u32 i_size;
54818 +};
54819 +
54820 +struct name_db {
54821 +       struct name_entry **n_hash;
54822 +       __u32 n_size;
54823 +};
54824 +
54825 +struct crash_uid {
54826 +       uid_t uid;
54827 +       unsigned long expires;
54828 +};
54829 +
54830 +struct gr_hash_struct {
54831 +       void **table;
54832 +       void **nametable;
54833 +       void *first;
54834 +       __u32 table_size;
54835 +       __u32 used_size;
54836 +       int type;
54837 +};
54838 +
54839 +/* Userspace Grsecurity ACL data structures */
54840 +
54841 +struct acl_subject_label {
54842 +       char *filename;
54843 +       ino_t inode;
54844 +       dev_t device;
54845 +       __u32 mode;
54846 +       kernel_cap_t cap_mask;
54847 +       kernel_cap_t cap_lower;
54848 +       kernel_cap_t cap_invert_audit;
54849 +
54850 +       struct rlimit res[GR_NLIMITS];
54851 +       __u32 resmask;
54852 +
54853 +       __u8 user_trans_type;
54854 +       __u8 group_trans_type;
54855 +       uid_t *user_transitions;
54856 +       gid_t *group_transitions;
54857 +       __u16 user_trans_num;
54858 +       __u16 group_trans_num;
54859 +
54860 +       __u32 sock_families[2];
54861 +       __u32 ip_proto[8];
54862 +       __u32 ip_type;
54863 +       struct acl_ip_label **ips;
54864 +       __u32 ip_num;
54865 +       __u32 inaddr_any_override;
54866 +
54867 +       __u32 crashes;
54868 +       unsigned long expires;
54869 +
54870 +       struct acl_subject_label *parent_subject;
54871 +       struct gr_hash_struct *hash;
54872 +       struct acl_subject_label *prev;
54873 +       struct acl_subject_label *next;
54874 +
54875 +       struct acl_object_label **obj_hash;
54876 +       __u32 obj_hash_size;
54877 +       __u16 pax_flags;
54878 +};
54879 +
54880 +struct role_allowed_ip {
54881 +       __u32 addr;
54882 +       __u32 netmask;
54883 +
54884 +       struct role_allowed_ip *prev;
54885 +       struct role_allowed_ip *next;
54886 +};
54887 +
54888 +struct role_transition {
54889 +       char *rolename;
54890 +
54891 +       struct role_transition *prev;
54892 +       struct role_transition *next;
54893 +};
54894 +
54895 +struct acl_role_label {
54896 +       char *rolename;
54897 +       uid_t uidgid;
54898 +       __u16 roletype;
54899 +
54900 +       __u16 auth_attempts;
54901 +       unsigned long expires;
54902 +
54903 +       struct acl_subject_label *root_label;
54904 +       struct gr_hash_struct *hash;
54905 +
54906 +       struct acl_role_label *prev;
54907 +       struct acl_role_label *next;
54908 +
54909 +       struct role_transition *transitions;
54910 +       struct role_allowed_ip *allowed_ips;
54911 +       uid_t *domain_children;
54912 +       __u16 domain_child_num;
54913 +
54914 +       struct acl_subject_label **subj_hash;
54915 +       __u32 subj_hash_size;
54916 +};
54917 +
54918 +struct user_acl_role_db {
54919 +       struct acl_role_label **r_table;
54920 +       __u32 num_pointers;             /* Number of allocations to track */
54921 +       __u32 num_roles;                /* Number of roles */
54922 +       __u32 num_domain_children;      /* Number of domain children */
54923 +       __u32 num_subjects;             /* Number of subjects */
54924 +       __u32 num_objects;              /* Number of objects */
54925 +};
54926 +
54927 +struct acl_object_label {
54928 +       char *filename;
54929 +       ino_t inode;
54930 +       dev_t device;
54931 +       __u32 mode;
54932 +
54933 +       struct acl_subject_label *nested;
54934 +       struct acl_object_label *globbed;
54935 +
54936 +       /* next two structures not used */
54937 +
54938 +       struct acl_object_label *prev;
54939 +       struct acl_object_label *next;
54940 +};
54941 +
54942 +struct acl_ip_label {
54943 +       char *iface;
54944 +       __u32 addr;
54945 +       __u32 netmask;
54946 +       __u16 low, high;
54947 +       __u8 mode;
54948 +       __u32 type;
54949 +       __u32 proto[8];
54950 +
54951 +       /* next two structures not used */
54952 +
54953 +       struct acl_ip_label *prev;
54954 +       struct acl_ip_label *next;
54955 +};
54956 +
54957 +struct gr_arg {
54958 +       struct user_acl_role_db role_db;
54959 +       unsigned char pw[GR_PW_LEN];
54960 +       unsigned char salt[GR_SALT_LEN];
54961 +       unsigned char sum[GR_SHA_LEN];
54962 +       unsigned char sp_role[GR_SPROLE_LEN];
54963 +       struct sprole_pw *sprole_pws;
54964 +       dev_t segv_device;
54965 +       ino_t segv_inode;
54966 +       uid_t segv_uid;
54967 +       __u16 num_sprole_pws;
54968 +       __u16 mode;
54969 +};
54970 +
54971 +struct gr_arg_wrapper {
54972 +       struct gr_arg *arg;
54973 +       __u32 version;
54974 +       __u32 size;
54975 +};
54976 +
54977 +struct subject_map {
54978 +       struct acl_subject_label *user;
54979 +       struct acl_subject_label *kernel;
54980 +       struct subject_map *prev;
54981 +       struct subject_map *next;
54982 +};
54983 +
54984 +struct acl_subj_map_db {
54985 +       struct subject_map **s_hash;
54986 +       __u32 s_size;
54987 +};
54988 +
54989 +/* End Data Structures Section */
54990 +
54991 +/* Hash functions generated by empirical testing by Brad Spengler
54992 +   Makes good use of the low bits of the inode.  Generally 0-1 times
54993 +   in loop for successful match.  0-3 for unsuccessful match.
54994 +   Shift/add algorithm with modulus of table size and an XOR*/
54995 +
54996 +static __inline__ unsigned int
54997 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
54998 +{
54999 +       return ((((uid + type) << (16 + type)) ^ uid) % sz);
55000 +}
55001 +
55002 + static __inline__ unsigned int
55003 +shash(const struct acl_subject_label *userp, const unsigned int sz)
55004 +{
55005 +       return ((const unsigned long)userp % sz);
55006 +}
55007 +
55008 +static __inline__ unsigned int
55009 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55010 +{
55011 +       return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55012 +}
55013 +
55014 +static __inline__ unsigned int
55015 +nhash(const char *name, const __u16 len, const unsigned int sz)
55016 +{
55017 +       return full_name_hash((const unsigned char *)name, len) % sz;
55018 +}
55019 +
55020 +#define FOR_EACH_ROLE_START(role) \
55021 +       role = role_list; \
55022 +       while (role) {
55023 +
55024 +#define FOR_EACH_ROLE_END(role) \
55025 +               role = role->prev; \
55026 +       }
55027 +
55028 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55029 +       subj = NULL; \
55030 +       iter = 0; \
55031 +       while (iter < role->subj_hash_size) { \
55032 +               if (subj == NULL) \
55033 +                       subj = role->subj_hash[iter]; \
55034 +               if (subj == NULL) { \
55035 +                       iter++; \
55036 +                       continue; \
55037 +               }
55038 +
55039 +#define FOR_EACH_SUBJECT_END(subj,iter) \
55040 +               subj = subj->next; \
55041 +               if (subj == NULL) \
55042 +                       iter++; \
55043 +       }
55044 +
55045 +
55046 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55047 +       subj = role->hash->first; \
55048 +       while (subj != NULL) {
55049 +
55050 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55051 +               subj = subj->next; \
55052 +       }
55053 +
55054 +#endif
55055 +
55056 diff -urNp linux-3.0.4/include/linux/gralloc.h linux-3.0.4/include/linux/gralloc.h
55057 --- linux-3.0.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55058 +++ linux-3.0.4/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
55059 @@ -0,0 +1,9 @@
55060 +#ifndef __GRALLOC_H
55061 +#define __GRALLOC_H
55062 +
55063 +void acl_free_all(void);
55064 +int acl_alloc_stack_init(unsigned long size);
55065 +void *acl_alloc(unsigned long len);
55066 +void *acl_alloc_num(unsigned long num, unsigned long len);
55067 +
55068 +#endif
55069 diff -urNp linux-3.0.4/include/linux/grdefs.h linux-3.0.4/include/linux/grdefs.h
55070 --- linux-3.0.4/include/linux/grdefs.h  1969-12-31 19:00:00.000000000 -0500
55071 +++ linux-3.0.4/include/linux/grdefs.h  2011-08-23 21:48:14.000000000 -0400
55072 @@ -0,0 +1,140 @@
55073 +#ifndef GRDEFS_H
55074 +#define GRDEFS_H
55075 +
55076 +/* Begin grsecurity status declarations */
55077 +
55078 +enum {
55079 +       GR_READY = 0x01,
55080 +       GR_STATUS_INIT = 0x00   // disabled state
55081 +};
55082 +
55083 +/* Begin  ACL declarations */
55084 +
55085 +/* Role flags */
55086 +
55087 +enum {
55088 +       GR_ROLE_USER = 0x0001,
55089 +       GR_ROLE_GROUP = 0x0002,
55090 +       GR_ROLE_DEFAULT = 0x0004,
55091 +       GR_ROLE_SPECIAL = 0x0008,
55092 +       GR_ROLE_AUTH = 0x0010,
55093 +       GR_ROLE_NOPW = 0x0020,
55094 +       GR_ROLE_GOD = 0x0040,
55095 +       GR_ROLE_LEARN = 0x0080,
55096 +       GR_ROLE_TPE = 0x0100,
55097 +       GR_ROLE_DOMAIN = 0x0200,
55098 +       GR_ROLE_PAM = 0x0400,
55099 +       GR_ROLE_PERSIST = 0x0800
55100 +};
55101 +
55102 +/* ACL Subject and Object mode flags */
55103 +enum {
55104 +       GR_DELETED = 0x80000000
55105 +};
55106 +
55107 +/* ACL Object-only mode flags */
55108 +enum {
55109 +       GR_READ         = 0x00000001,
55110 +       GR_APPEND       = 0x00000002,
55111 +       GR_WRITE        = 0x00000004,
55112 +       GR_EXEC         = 0x00000008,
55113 +       GR_FIND         = 0x00000010,
55114 +       GR_INHERIT      = 0x00000020,
55115 +       GR_SETID        = 0x00000040,
55116 +       GR_CREATE       = 0x00000080,
55117 +       GR_DELETE       = 0x00000100,
55118 +       GR_LINK         = 0x00000200,
55119 +       GR_AUDIT_READ   = 0x00000400,
55120 +       GR_AUDIT_APPEND = 0x00000800,
55121 +       GR_AUDIT_WRITE  = 0x00001000,
55122 +       GR_AUDIT_EXEC   = 0x00002000,
55123 +       GR_AUDIT_FIND   = 0x00004000,
55124 +       GR_AUDIT_INHERIT= 0x00008000,
55125 +       GR_AUDIT_SETID  = 0x00010000,
55126 +       GR_AUDIT_CREATE = 0x00020000,
55127 +       GR_AUDIT_DELETE = 0x00040000,
55128 +       GR_AUDIT_LINK   = 0x00080000,
55129 +       GR_PTRACERD     = 0x00100000,
55130 +       GR_NOPTRACE     = 0x00200000,
55131 +       GR_SUPPRESS     = 0x00400000,
55132 +       GR_NOLEARN      = 0x00800000,
55133 +       GR_INIT_TRANSFER= 0x01000000
55134 +};
55135 +
55136 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55137 +                  GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55138 +                  GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55139 +
55140 +/* ACL subject-only mode flags */
55141 +enum {
55142 +       GR_KILL         = 0x00000001,
55143 +       GR_VIEW         = 0x00000002,
55144 +       GR_PROTECTED    = 0x00000004,
55145 +       GR_LEARN        = 0x00000008,
55146 +       GR_OVERRIDE     = 0x00000010,
55147 +       /* just a placeholder, this mode is only used in userspace */
55148 +       GR_DUMMY        = 0x00000020,
55149 +       GR_PROTSHM      = 0x00000040,
55150 +       GR_KILLPROC     = 0x00000080,
55151 +       GR_KILLIPPROC   = 0x00000100,
55152 +       /* just a placeholder, this mode is only used in userspace */
55153 +       GR_NOTROJAN     = 0x00000200,
55154 +       GR_PROTPROCFD   = 0x00000400,
55155 +       GR_PROCACCT     = 0x00000800,
55156 +       GR_RELAXPTRACE  = 0x00001000,
55157 +       GR_NESTED       = 0x00002000,
55158 +       GR_INHERITLEARN = 0x00004000,
55159 +       GR_PROCFIND     = 0x00008000,
55160 +       GR_POVERRIDE    = 0x00010000,
55161 +       GR_KERNELAUTH   = 0x00020000,
55162 +       GR_ATSECURE     = 0x00040000,
55163 +       GR_SHMEXEC      = 0x00080000
55164 +};
55165 +
55166 +enum {
55167 +       GR_PAX_ENABLE_SEGMEXEC  = 0x0001,
55168 +       GR_PAX_ENABLE_PAGEEXEC  = 0x0002,
55169 +       GR_PAX_ENABLE_MPROTECT  = 0x0004,
55170 +       GR_PAX_ENABLE_RANDMMAP  = 0x0008,
55171 +       GR_PAX_ENABLE_EMUTRAMP  = 0x0010,
55172 +       GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55173 +       GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55174 +       GR_PAX_DISABLE_MPROTECT = 0x0400,
55175 +       GR_PAX_DISABLE_RANDMMAP = 0x0800,
55176 +       GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55177 +};
55178 +
55179 +enum {
55180 +       GR_ID_USER      = 0x01,
55181 +       GR_ID_GROUP     = 0x02,
55182 +};
55183 +
55184 +enum {
55185 +       GR_ID_ALLOW     = 0x01,
55186 +       GR_ID_DENY      = 0x02,
55187 +};
55188 +
55189 +#define GR_CRASH_RES   31
55190 +#define GR_UIDTABLE_MAX 500
55191 +
55192 +/* begin resource learning section */
55193 +enum {
55194 +       GR_RLIM_CPU_BUMP = 60,
55195 +       GR_RLIM_FSIZE_BUMP = 50000,
55196 +       GR_RLIM_DATA_BUMP = 10000,
55197 +       GR_RLIM_STACK_BUMP = 1000,
55198 +       GR_RLIM_CORE_BUMP = 10000,
55199 +       GR_RLIM_RSS_BUMP = 500000,
55200 +       GR_RLIM_NPROC_BUMP = 1,
55201 +       GR_RLIM_NOFILE_BUMP = 5,
55202 +       GR_RLIM_MEMLOCK_BUMP = 50000,
55203 +       GR_RLIM_AS_BUMP = 500000,
55204 +       GR_RLIM_LOCKS_BUMP = 2,
55205 +       GR_RLIM_SIGPENDING_BUMP = 5,
55206 +       GR_RLIM_MSGQUEUE_BUMP = 10000,
55207 +       GR_RLIM_NICE_BUMP = 1,
55208 +       GR_RLIM_RTPRIO_BUMP = 1,
55209 +       GR_RLIM_RTTIME_BUMP = 1000000
55210 +};
55211 +
55212 +#endif
55213 diff -urNp linux-3.0.4/include/linux/grinternal.h linux-3.0.4/include/linux/grinternal.h
55214 --- linux-3.0.4/include/linux/grinternal.h      1969-12-31 19:00:00.000000000 -0500
55215 +++ linux-3.0.4/include/linux/grinternal.h      2011-08-23 21:48:14.000000000 -0400
55216 @@ -0,0 +1,219 @@
55217 +#ifndef __GRINTERNAL_H
55218 +#define __GRINTERNAL_H
55219 +
55220 +#ifdef CONFIG_GRKERNSEC
55221 +
55222 +#include <linux/fs.h>
55223 +#include <linux/mnt_namespace.h>
55224 +#include <linux/nsproxy.h>
55225 +#include <linux/gracl.h>
55226 +#include <linux/grdefs.h>
55227 +#include <linux/grmsg.h>
55228 +
55229 +void gr_add_learn_entry(const char *fmt, ...)
55230 +       __attribute__ ((format (printf, 1, 2)));
55231 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55232 +                           const struct vfsmount *mnt);
55233 +__u32 gr_check_create(const struct dentry *new_dentry,
55234 +                            const struct dentry *parent,
55235 +                            const struct vfsmount *mnt, const __u32 mode);
55236 +int gr_check_protected_task(const struct task_struct *task);
55237 +__u32 to_gr_audit(const __u32 reqmode);
55238 +int gr_set_acls(const int type);
55239 +int gr_apply_subject_to_task(struct task_struct *task);
55240 +int gr_acl_is_enabled(void);
55241 +char gr_roletype_to_char(void);
55242 +
55243 +void gr_handle_alertkill(struct task_struct *task);
55244 +char *gr_to_filename(const struct dentry *dentry,
55245 +                           const struct vfsmount *mnt);
55246 +char *gr_to_filename1(const struct dentry *dentry,
55247 +                           const struct vfsmount *mnt);
55248 +char *gr_to_filename2(const struct dentry *dentry,
55249 +                           const struct vfsmount *mnt);
55250 +char *gr_to_filename3(const struct dentry *dentry,
55251 +                           const struct vfsmount *mnt);
55252 +
55253 +extern int grsec_enable_harden_ptrace;
55254 +extern int grsec_enable_link;
55255 +extern int grsec_enable_fifo;
55256 +extern int grsec_enable_execve;
55257 +extern int grsec_enable_shm;
55258 +extern int grsec_enable_execlog;
55259 +extern int grsec_enable_signal;
55260 +extern int grsec_enable_audit_ptrace;
55261 +extern int grsec_enable_forkfail;
55262 +extern int grsec_enable_time;
55263 +extern int grsec_enable_rofs;
55264 +extern int grsec_enable_chroot_shmat;
55265 +extern int grsec_enable_chroot_mount;
55266 +extern int grsec_enable_chroot_double;
55267 +extern int grsec_enable_chroot_pivot;
55268 +extern int grsec_enable_chroot_chdir;
55269 +extern int grsec_enable_chroot_chmod;
55270 +extern int grsec_enable_chroot_mknod;
55271 +extern int grsec_enable_chroot_fchdir;
55272 +extern int grsec_enable_chroot_nice;
55273 +extern int grsec_enable_chroot_execlog;
55274 +extern int grsec_enable_chroot_caps;
55275 +extern int grsec_enable_chroot_sysctl;
55276 +extern int grsec_enable_chroot_unix;
55277 +extern int grsec_enable_tpe;
55278 +extern int grsec_tpe_gid;
55279 +extern int grsec_enable_tpe_all;
55280 +extern int grsec_enable_tpe_invert;
55281 +extern int grsec_enable_socket_all;
55282 +extern int grsec_socket_all_gid;
55283 +extern int grsec_enable_socket_client;
55284 +extern int grsec_socket_client_gid;
55285 +extern int grsec_enable_socket_server;
55286 +extern int grsec_socket_server_gid;
55287 +extern int grsec_audit_gid;
55288 +extern int grsec_enable_group;
55289 +extern int grsec_enable_audit_textrel;
55290 +extern int grsec_enable_log_rwxmaps;
55291 +extern int grsec_enable_mount;
55292 +extern int grsec_enable_chdir;
55293 +extern int grsec_resource_logging;
55294 +extern int grsec_enable_blackhole;
55295 +extern int grsec_lastack_retries;
55296 +extern int grsec_enable_brute;
55297 +extern int grsec_lock;
55298 +
55299 +extern spinlock_t grsec_alert_lock;
55300 +extern unsigned long grsec_alert_wtime;
55301 +extern unsigned long grsec_alert_fyet;
55302 +
55303 +extern spinlock_t grsec_audit_lock;
55304 +
55305 +extern rwlock_t grsec_exec_file_lock;
55306 +
55307 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55308 +                       gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55309 +                       (tsk)->exec_file->f_vfsmnt) : "/")
55310 +
55311 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55312 +                       gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55313 +                       (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55314 +
55315 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55316 +                       gr_to_filename((tsk)->exec_file->f_path.dentry, \
55317 +                       (tsk)->exec_file->f_vfsmnt) : "/")
55318 +
55319 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55320 +                       gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55321 +                       (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55322 +
55323 +#define proc_is_chrooted(tsk_a)  ((tsk_a)->gr_is_chrooted)
55324 +
55325 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55326 +
55327 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55328 +                      (task)->pid, (cred)->uid, \
55329 +                      (cred)->euid, (cred)->gid, (cred)->egid, \
55330 +                      gr_parent_task_fullpath(task), \
55331 +                      (task)->real_parent->comm, (task)->real_parent->pid, \
55332 +                      (pcred)->uid, (pcred)->euid, \
55333 +                      (pcred)->gid, (pcred)->egid
55334 +
55335 +#define GR_CHROOT_CAPS {{ \
55336 +       CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55337 +       CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55338 +       CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55339 +       CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55340 +       CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55341 +       CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
55342 +
55343 +#define security_learn(normal_msg,args...) \
55344 +({ \
55345 +       read_lock(&grsec_exec_file_lock); \
55346 +       gr_add_learn_entry(normal_msg "\n", ## args); \
55347 +       read_unlock(&grsec_exec_file_lock); \
55348 +})
55349 +
55350 +enum {
55351 +       GR_DO_AUDIT,
55352 +       GR_DONT_AUDIT,
55353 +       /* used for non-audit messages that we shouldn't kill the task on */
55354 +       GR_DONT_AUDIT_GOOD
55355 +};
55356 +
55357 +enum {
55358 +       GR_TTYSNIFF,
55359 +       GR_RBAC,
55360 +       GR_RBAC_STR,
55361 +       GR_STR_RBAC,
55362 +       GR_RBAC_MODE2,
55363 +       GR_RBAC_MODE3,
55364 +       GR_FILENAME,
55365 +       GR_SYSCTL_HIDDEN,
55366 +       GR_NOARGS,
55367 +       GR_ONE_INT,
55368 +       GR_ONE_INT_TWO_STR,
55369 +       GR_ONE_STR,
55370 +       GR_STR_INT,
55371 +       GR_TWO_STR_INT,
55372 +       GR_TWO_INT,
55373 +       GR_TWO_U64,
55374 +       GR_THREE_INT,
55375 +       GR_FIVE_INT_TWO_STR,
55376 +       GR_TWO_STR,
55377 +       GR_THREE_STR,
55378 +       GR_FOUR_STR,
55379 +       GR_STR_FILENAME,
55380 +       GR_FILENAME_STR,
55381 +       GR_FILENAME_TWO_INT,
55382 +       GR_FILENAME_TWO_INT_STR,
55383 +       GR_TEXTREL,
55384 +       GR_PTRACE,
55385 +       GR_RESOURCE,
55386 +       GR_CAP,
55387 +       GR_SIG,
55388 +       GR_SIG2,
55389 +       GR_CRASH1,
55390 +       GR_CRASH2,
55391 +       GR_PSACCT,
55392 +       GR_RWXMAP
55393 +};
55394 +
55395 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55396 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55397 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55398 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55399 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55400 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55401 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55402 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55403 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55404 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55405 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55406 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55407 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55408 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55409 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55410 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55411 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55412 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55413 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
55414 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
55415 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
55416 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
55417 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
55418 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
55419 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
55420 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
55421 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
55422 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
55423 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
55424 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
55425 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
55426 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
55427 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
55428 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
55429 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
55430 +
55431 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
55432 +
55433 +#endif
55434 +
55435 +#endif
55436 diff -urNp linux-3.0.4/include/linux/grmsg.h linux-3.0.4/include/linux/grmsg.h
55437 --- linux-3.0.4/include/linux/grmsg.h   1969-12-31 19:00:00.000000000 -0500
55438 +++ linux-3.0.4/include/linux/grmsg.h   2011-08-25 17:27:26.000000000 -0400
55439 @@ -0,0 +1,107 @@
55440 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
55441 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
55442 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
55443 +#define GR_STOPMOD_MSG "denied modification of module state by "
55444 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
55445 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
55446 +#define GR_IOPERM_MSG "denied use of ioperm() by "
55447 +#define GR_IOPL_MSG "denied use of iopl() by "
55448 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
55449 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
55450 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
55451 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
55452 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
55453 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
55454 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
55455 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
55456 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
55457 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
55458 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
55459 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
55460 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
55461 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
55462 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
55463 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
55464 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
55465 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
55466 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
55467 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
55468 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
55469 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
55470 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
55471 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
55472 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
55473 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
55474 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
55475 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
55476 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
55477 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
55478 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
55479 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
55480 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
55481 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
55482 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
55483 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
55484 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
55485 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
55486 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
55487 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
55488 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
55489 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
55490 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
55491 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
55492 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
55493 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
55494 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
55495 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
55496 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
55497 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
55498 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
55499 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
55500 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
55501 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
55502 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
55503 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
55504 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
55505 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
55506 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
55507 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
55508 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
55509 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
55510 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
55511 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
55512 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
55513 +#define GR_NICE_CHROOT_MSG "denied priority change by "
55514 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
55515 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
55516 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
55517 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
55518 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
55519 +#define GR_TIME_MSG "time set by "
55520 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
55521 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
55522 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
55523 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
55524 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
55525 +#define GR_BIND_MSG "denied bind() by "
55526 +#define GR_CONNECT_MSG "denied connect() by "
55527 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
55528 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
55529 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
55530 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
55531 +#define GR_CAP_ACL_MSG "use of %s denied for "
55532 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
55533 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
55534 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
55535 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
55536 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
55537 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
55538 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
55539 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
55540 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
55541 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
55542 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
55543 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
55544 +#define GR_VM86_MSG "denied use of vm86 by "
55545 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
55546 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
55547 diff -urNp linux-3.0.4/include/linux/grsecurity.h linux-3.0.4/include/linux/grsecurity.h
55548 --- linux-3.0.4/include/linux/grsecurity.h      1969-12-31 19:00:00.000000000 -0500
55549 +++ linux-3.0.4/include/linux/grsecurity.h      2011-08-25 17:27:36.000000000 -0400
55550 @@ -0,0 +1,227 @@
55551 +#ifndef GR_SECURITY_H
55552 +#define GR_SECURITY_H
55553 +#include <linux/fs.h>
55554 +#include <linux/fs_struct.h>
55555 +#include <linux/binfmts.h>
55556 +#include <linux/gracl.h>
55557 +
55558 +/* notify of brain-dead configs */
55559 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55560 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
55561 +#endif
55562 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
55563 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
55564 +#endif
55565 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55566 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55567 +#endif
55568 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55569 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55570 +#endif
55571 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
55572 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
55573 +#endif
55574 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
55575 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
55576 +#endif
55577 +
55578 +#include <linux/compat.h>
55579 +
55580 +struct user_arg_ptr {
55581 +#ifdef CONFIG_COMPAT
55582 +       bool is_compat;
55583 +#endif
55584 +       union {
55585 +               const char __user *const __user *native;
55586 +#ifdef CONFIG_COMPAT
55587 +               compat_uptr_t __user *compat;
55588 +#endif
55589 +       } ptr;
55590 +};
55591 +
55592 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
55593 +void gr_handle_brute_check(void);
55594 +void gr_handle_kernel_exploit(void);
55595 +int gr_process_user_ban(void);
55596 +
55597 +char gr_roletype_to_char(void);
55598 +
55599 +int gr_acl_enable_at_secure(void);
55600 +
55601 +int gr_check_user_change(int real, int effective, int fs);
55602 +int gr_check_group_change(int real, int effective, int fs);
55603 +
55604 +void gr_del_task_from_ip_table(struct task_struct *p);
55605 +
55606 +int gr_pid_is_chrooted(struct task_struct *p);
55607 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
55608 +int gr_handle_chroot_nice(void);
55609 +int gr_handle_chroot_sysctl(const int op);
55610 +int gr_handle_chroot_setpriority(struct task_struct *p,
55611 +                                       const int niceval);
55612 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
55613 +int gr_handle_chroot_chroot(const struct dentry *dentry,
55614 +                                  const struct vfsmount *mnt);
55615 +int gr_handle_chroot_caps(struct path *path);
55616 +void gr_handle_chroot_chdir(struct path *path);
55617 +int gr_handle_chroot_chmod(const struct dentry *dentry,
55618 +                                 const struct vfsmount *mnt, const int mode);
55619 +int gr_handle_chroot_mknod(const struct dentry *dentry,
55620 +                                 const struct vfsmount *mnt, const int mode);
55621 +int gr_handle_chroot_mount(const struct dentry *dentry,
55622 +                                 const struct vfsmount *mnt,
55623 +                                 const char *dev_name);
55624 +int gr_handle_chroot_pivot(void);
55625 +int gr_handle_chroot_unix(const pid_t pid);
55626 +
55627 +int gr_handle_rawio(const struct inode *inode);
55628 +
55629 +void gr_handle_ioperm(void);
55630 +void gr_handle_iopl(void);
55631 +
55632 +int gr_tpe_allow(const struct file *file);
55633 +
55634 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
55635 +void gr_clear_chroot_entries(struct task_struct *task);
55636 +
55637 +void gr_log_forkfail(const int retval);
55638 +void gr_log_timechange(void);
55639 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
55640 +void gr_log_chdir(const struct dentry *dentry,
55641 +                        const struct vfsmount *mnt);
55642 +void gr_log_chroot_exec(const struct dentry *dentry,
55643 +                              const struct vfsmount *mnt);
55644 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
55645 +void gr_log_remount(const char *devname, const int retval);
55646 +void gr_log_unmount(const char *devname, const int retval);
55647 +void gr_log_mount(const char *from, const char *to, const int retval);
55648 +void gr_log_textrel(struct vm_area_struct *vma);
55649 +void gr_log_rwxmmap(struct file *file);
55650 +void gr_log_rwxmprotect(struct file *file);
55651 +
55652 +int gr_handle_follow_link(const struct inode *parent,
55653 +                                const struct inode *inode,
55654 +                                const struct dentry *dentry,
55655 +                                const struct vfsmount *mnt);
55656 +int gr_handle_fifo(const struct dentry *dentry,
55657 +                         const struct vfsmount *mnt,
55658 +                         const struct dentry *dir, const int flag,
55659 +                         const int acc_mode);
55660 +int gr_handle_hardlink(const struct dentry *dentry,
55661 +                             const struct vfsmount *mnt,
55662 +                             struct inode *inode,
55663 +                             const int mode, const char *to);
55664 +
55665 +int gr_is_capable(const int cap);
55666 +int gr_is_capable_nolog(const int cap);
55667 +void gr_learn_resource(const struct task_struct *task, const int limit,
55668 +                             const unsigned long wanted, const int gt);
55669 +void gr_copy_label(struct task_struct *tsk);
55670 +void gr_handle_crash(struct task_struct *task, const int sig);
55671 +int gr_handle_signal(const struct task_struct *p, const int sig);
55672 +int gr_check_crash_uid(const uid_t uid);
55673 +int gr_check_protected_task(const struct task_struct *task);
55674 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
55675 +int gr_acl_handle_mmap(const struct file *file,
55676 +                             const unsigned long prot);
55677 +int gr_acl_handle_mprotect(const struct file *file,
55678 +                                 const unsigned long prot);
55679 +int gr_check_hidden_task(const struct task_struct *tsk);
55680 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
55681 +                                   const struct vfsmount *mnt);
55682 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
55683 +                                const struct vfsmount *mnt);
55684 +__u32 gr_acl_handle_access(const struct dentry *dentry,
55685 +                                 const struct vfsmount *mnt, const int fmode);
55686 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
55687 +                                 const struct vfsmount *mnt, mode_t mode);
55688 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
55689 +                                const struct vfsmount *mnt, mode_t mode);
55690 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
55691 +                                const struct vfsmount *mnt);
55692 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
55693 +                                const struct vfsmount *mnt);
55694 +int gr_handle_ptrace(struct task_struct *task, const long request);
55695 +int gr_handle_proc_ptrace(struct task_struct *task);
55696 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
55697 +                                 const struct vfsmount *mnt);
55698 +int gr_check_crash_exec(const struct file *filp);
55699 +int gr_acl_is_enabled(void);
55700 +void gr_set_kernel_label(struct task_struct *task);
55701 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
55702 +                             const gid_t gid);
55703 +int gr_set_proc_label(const struct dentry *dentry,
55704 +                       const struct vfsmount *mnt,
55705 +                       const int unsafe_share);
55706 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
55707 +                               const struct vfsmount *mnt);
55708 +__u32 gr_acl_handle_open(const struct dentry *dentry,
55709 +                               const struct vfsmount *mnt, const int fmode);
55710 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
55711 +                                const struct dentry *p_dentry,
55712 +                                const struct vfsmount *p_mnt, const int fmode,
55713 +                                const int imode);
55714 +void gr_handle_create(const struct dentry *dentry,
55715 +                            const struct vfsmount *mnt);
55716 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
55717 +                                const struct dentry *parent_dentry,
55718 +                                const struct vfsmount *parent_mnt,
55719 +                                const int mode);
55720 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
55721 +                                const struct dentry *parent_dentry,
55722 +                                const struct vfsmount *parent_mnt);
55723 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
55724 +                                const struct vfsmount *mnt);
55725 +void gr_handle_delete(const ino_t ino, const dev_t dev);
55726 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
55727 +                                 const struct vfsmount *mnt);
55728 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
55729 +                                  const struct dentry *parent_dentry,
55730 +                                  const struct vfsmount *parent_mnt,
55731 +                                  const char *from);
55732 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
55733 +                               const struct dentry *parent_dentry,
55734 +                               const struct vfsmount *parent_mnt,
55735 +                               const struct dentry *old_dentry,
55736 +                               const struct vfsmount *old_mnt, const char *to);
55737 +int gr_acl_handle_rename(struct dentry *new_dentry,
55738 +                               struct dentry *parent_dentry,
55739 +                               const struct vfsmount *parent_mnt,
55740 +                               struct dentry *old_dentry,
55741 +                               struct inode *old_parent_inode,
55742 +                               struct vfsmount *old_mnt, const char *newname);
55743 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55744 +                               struct dentry *old_dentry,
55745 +                               struct dentry *new_dentry,
55746 +                               struct vfsmount *mnt, const __u8 replace);
55747 +__u32 gr_check_link(const struct dentry *new_dentry,
55748 +                          const struct dentry *parent_dentry,
55749 +                          const struct vfsmount *parent_mnt,
55750 +                          const struct dentry *old_dentry,
55751 +                          const struct vfsmount *old_mnt);
55752 +int gr_acl_handle_filldir(const struct file *file, const char *name,
55753 +                                const unsigned int namelen, const ino_t ino);
55754 +
55755 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
55756 +                               const struct vfsmount *mnt);
55757 +void gr_acl_handle_exit(void);
55758 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
55759 +int gr_acl_handle_procpidmem(const struct task_struct *task);
55760 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
55761 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
55762 +void gr_audit_ptrace(struct task_struct *task);
55763 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
55764 +
55765 +#ifdef CONFIG_GRKERNSEC
55766 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
55767 +void gr_handle_vm86(void);
55768 +void gr_handle_mem_readwrite(u64 from, u64 to);
55769 +
55770 +extern int grsec_enable_dmesg;
55771 +extern int grsec_disable_privio;
55772 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55773 +extern int grsec_enable_chroot_findtask;
55774 +#endif
55775 +#endif
55776 +
55777 +#endif
55778 diff -urNp linux-3.0.4/include/linux/grsock.h linux-3.0.4/include/linux/grsock.h
55779 --- linux-3.0.4/include/linux/grsock.h  1969-12-31 19:00:00.000000000 -0500
55780 +++ linux-3.0.4/include/linux/grsock.h  2011-08-23 21:48:14.000000000 -0400
55781 @@ -0,0 +1,19 @@
55782 +#ifndef __GRSOCK_H
55783 +#define __GRSOCK_H
55784 +
55785 +extern void gr_attach_curr_ip(const struct sock *sk);
55786 +extern int gr_handle_sock_all(const int family, const int type,
55787 +                             const int protocol);
55788 +extern int gr_handle_sock_server(const struct sockaddr *sck);
55789 +extern int gr_handle_sock_server_other(const struct sock *sck);
55790 +extern int gr_handle_sock_client(const struct sockaddr *sck);
55791 +extern int gr_search_connect(struct socket * sock,
55792 +                            struct sockaddr_in * addr);
55793 +extern int gr_search_bind(struct socket * sock,
55794 +                         struct sockaddr_in * addr);
55795 +extern int gr_search_listen(struct socket * sock);
55796 +extern int gr_search_accept(struct socket * sock);
55797 +extern int gr_search_socket(const int domain, const int type,
55798 +                           const int protocol);
55799 +
55800 +#endif
55801 diff -urNp linux-3.0.4/include/linux/hid.h linux-3.0.4/include/linux/hid.h
55802 --- linux-3.0.4/include/linux/hid.h     2011-07-21 22:17:23.000000000 -0400
55803 +++ linux-3.0.4/include/linux/hid.h     2011-08-23 21:47:56.000000000 -0400
55804 @@ -675,7 +675,7 @@ struct hid_ll_driver {
55805                         unsigned int code, int value);
55806  
55807         int (*parse)(struct hid_device *hdev);
55808 -};
55809 +} __no_const;
55810  
55811  #define        PM_HINT_FULLON  1<<5
55812  #define PM_HINT_NORMAL 1<<1
55813 diff -urNp linux-3.0.4/include/linux/highmem.h linux-3.0.4/include/linux/highmem.h
55814 --- linux-3.0.4/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
55815 +++ linux-3.0.4/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
55816 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
55817         kunmap_atomic(kaddr, KM_USER0);
55818  }
55819  
55820 +static inline void sanitize_highpage(struct page *page)
55821 +{
55822 +       void *kaddr;
55823 +       unsigned long flags;
55824 +
55825 +       local_irq_save(flags);
55826 +       kaddr = kmap_atomic(page, KM_CLEARPAGE);
55827 +       clear_page(kaddr);
55828 +       kunmap_atomic(kaddr, KM_CLEARPAGE);
55829 +       local_irq_restore(flags);
55830 +}
55831 +
55832  static inline void zero_user_segments(struct page *page,
55833         unsigned start1, unsigned end1,
55834         unsigned start2, unsigned end2)
55835 diff -urNp linux-3.0.4/include/linux/i2c.h linux-3.0.4/include/linux/i2c.h
55836 --- linux-3.0.4/include/linux/i2c.h     2011-07-21 22:17:23.000000000 -0400
55837 +++ linux-3.0.4/include/linux/i2c.h     2011-08-23 21:47:56.000000000 -0400
55838 @@ -346,6 +346,7 @@ struct i2c_algorithm {
55839         /* To determine what the adapter supports */
55840         u32 (*functionality) (struct i2c_adapter *);
55841  };
55842 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
55843  
55844  /*
55845   * i2c_adapter is the structure used to identify a physical i2c bus along
55846 diff -urNp linux-3.0.4/include/linux/i2o.h linux-3.0.4/include/linux/i2o.h
55847 --- linux-3.0.4/include/linux/i2o.h     2011-07-21 22:17:23.000000000 -0400
55848 +++ linux-3.0.4/include/linux/i2o.h     2011-08-23 21:47:56.000000000 -0400
55849 @@ -564,7 +564,7 @@ struct i2o_controller {
55850         struct i2o_device *exec;        /* Executive */
55851  #if BITS_PER_LONG == 64
55852         spinlock_t context_list_lock;   /* lock for context_list */
55853 -       atomic_t context_list_counter;  /* needed for unique contexts */
55854 +       atomic_unchecked_t context_list_counter;        /* needed for unique contexts */
55855         struct list_head context_list;  /* list of context id's
55856                                            and pointers */
55857  #endif
55858 diff -urNp linux-3.0.4/include/linux/init.h linux-3.0.4/include/linux/init.h
55859 --- linux-3.0.4/include/linux/init.h    2011-07-21 22:17:23.000000000 -0400
55860 +++ linux-3.0.4/include/linux/init.h    2011-08-23 21:47:56.000000000 -0400
55861 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
55862  
55863  /* Each module must use one module_init(). */
55864  #define module_init(initfn)                                    \
55865 -       static inline initcall_t __inittest(void)               \
55866 +       static inline __used initcall_t __inittest(void)        \
55867         { return initfn; }                                      \
55868         int init_module(void) __attribute__((alias(#initfn)));
55869  
55870  /* This is only required if you want to be unloadable. */
55871  #define module_exit(exitfn)                                    \
55872 -       static inline exitcall_t __exittest(void)               \
55873 +       static inline __used exitcall_t __exittest(void)        \
55874         { return exitfn; }                                      \
55875         void cleanup_module(void) __attribute__((alias(#exitfn)));
55876  
55877 diff -urNp linux-3.0.4/include/linux/init_task.h linux-3.0.4/include/linux/init_task.h
55878 --- linux-3.0.4/include/linux/init_task.h       2011-07-21 22:17:23.000000000 -0400
55879 +++ linux-3.0.4/include/linux/init_task.h       2011-08-23 21:47:56.000000000 -0400
55880 @@ -126,6 +126,12 @@ extern struct cred init_cred;
55881  # define INIT_PERF_EVENTS(tsk)
55882  #endif
55883  
55884 +#ifdef CONFIG_X86
55885 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
55886 +#else
55887 +#define INIT_TASK_THREAD_INFO
55888 +#endif
55889 +
55890  /*
55891   *  INIT_TASK is used to set up the first task table, touch at
55892   * your own risk!. Base=0, limit=0x1fffff (=2MB)
55893 @@ -164,6 +170,7 @@ extern struct cred init_cred;
55894         RCU_INIT_POINTER(.cred, &init_cred),                            \
55895         .comm           = "swapper",                                    \
55896         .thread         = INIT_THREAD,                                  \
55897 +       INIT_TASK_THREAD_INFO                                           \
55898         .fs             = &init_fs,                                     \
55899         .files          = &init_files,                                  \
55900         .signal         = &init_signals,                                \
55901 diff -urNp linux-3.0.4/include/linux/intel-iommu.h linux-3.0.4/include/linux/intel-iommu.h
55902 --- linux-3.0.4/include/linux/intel-iommu.h     2011-07-21 22:17:23.000000000 -0400
55903 +++ linux-3.0.4/include/linux/intel-iommu.h     2011-08-23 21:47:56.000000000 -0400
55904 @@ -296,7 +296,7 @@ struct iommu_flush {
55905                               u8 fm, u64 type);
55906         void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
55907                             unsigned int size_order, u64 type);
55908 -};
55909 +} __no_const;
55910  
55911  enum {
55912         SR_DMAR_FECTL_REG,
55913 diff -urNp linux-3.0.4/include/linux/interrupt.h linux-3.0.4/include/linux/interrupt.h
55914 --- linux-3.0.4/include/linux/interrupt.h       2011-07-21 22:17:23.000000000 -0400
55915 +++ linux-3.0.4/include/linux/interrupt.h       2011-08-23 21:47:56.000000000 -0400
55916 @@ -422,7 +422,7 @@ enum
55917  /* map softirq index to softirq name. update 'softirq_to_name' in
55918   * kernel/softirq.c when adding a new softirq.
55919   */
55920 -extern char *softirq_to_name[NR_SOFTIRQS];
55921 +extern const char * const softirq_to_name[NR_SOFTIRQS];
55922  
55923  /* softirq mask and active fields moved to irq_cpustat_t in
55924   * asm/hardirq.h to get better cache usage.  KAO
55925 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
55926  
55927  struct softirq_action
55928  {
55929 -       void    (*action)(struct softirq_action *);
55930 +       void    (*action)(void);
55931  };
55932  
55933  asmlinkage void do_softirq(void);
55934  asmlinkage void __do_softirq(void);
55935 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
55936 +extern void open_softirq(int nr, void (*action)(void));
55937  extern void softirq_init(void);
55938  static inline void __raise_softirq_irqoff(unsigned int nr)
55939  {
55940 diff -urNp linux-3.0.4/include/linux/kallsyms.h linux-3.0.4/include/linux/kallsyms.h
55941 --- linux-3.0.4/include/linux/kallsyms.h        2011-07-21 22:17:23.000000000 -0400
55942 +++ linux-3.0.4/include/linux/kallsyms.h        2011-08-23 21:48:14.000000000 -0400
55943 @@ -15,7 +15,8 @@
55944  
55945  struct module;
55946  
55947 -#ifdef CONFIG_KALLSYMS
55948 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
55949 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55950  /* Lookup the address for a symbol. Returns 0 if not found. */
55951  unsigned long kallsyms_lookup_name(const char *name);
55952  
55953 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
55954  /* Stupid that this does nothing, but I didn't create this mess. */
55955  #define __print_symbol(fmt, addr)
55956  #endif /*CONFIG_KALLSYMS*/
55957 +#else /* when included by kallsyms.c, vsnprintf.c, or
55958 +        arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
55959 +extern void __print_symbol(const char *fmt, unsigned long address);
55960 +extern int sprint_backtrace(char *buffer, unsigned long address);
55961 +extern int sprint_symbol(char *buffer, unsigned long address);
55962 +const char *kallsyms_lookup(unsigned long addr,
55963 +                           unsigned long *symbolsize,
55964 +                           unsigned long *offset,
55965 +                           char **modname, char *namebuf);
55966 +#endif
55967  
55968  /* This macro allows us to keep printk typechecking */
55969  static void __check_printsym_format(const char *fmt, ...)
55970 diff -urNp linux-3.0.4/include/linux/kgdb.h linux-3.0.4/include/linux/kgdb.h
55971 --- linux-3.0.4/include/linux/kgdb.h    2011-07-21 22:17:23.000000000 -0400
55972 +++ linux-3.0.4/include/linux/kgdb.h    2011-08-26 19:49:56.000000000 -0400
55973 @@ -53,7 +53,7 @@ extern int kgdb_connected;
55974  extern int kgdb_io_module_registered;
55975  
55976  extern atomic_t                        kgdb_setting_breakpoint;
55977 -extern atomic_t                        kgdb_cpu_doing_single_step;
55978 +extern atomic_unchecked_t      kgdb_cpu_doing_single_step;
55979  
55980  extern struct task_struct      *kgdb_usethread;
55981  extern struct task_struct      *kgdb_contthread;
55982 @@ -251,7 +251,7 @@ struct kgdb_arch {
55983         void    (*disable_hw_break)(struct pt_regs *regs);
55984         void    (*remove_all_hw_break)(void);
55985         void    (*correct_hw_break)(void);
55986 -};
55987 +} __do_const;
55988  
55989  /**
55990   * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
55991 @@ -276,7 +276,7 @@ struct kgdb_io {
55992         void                    (*pre_exception) (void);
55993         void                    (*post_exception) (void);
55994         int                     is_console;
55995 -};
55996 +} __do_const;
55997  
55998  extern struct kgdb_arch                arch_kgdb_ops;
55999  
56000 diff -urNp linux-3.0.4/include/linux/kmod.h linux-3.0.4/include/linux/kmod.h
56001 --- linux-3.0.4/include/linux/kmod.h    2011-07-21 22:17:23.000000000 -0400
56002 +++ linux-3.0.4/include/linux/kmod.h    2011-08-23 21:48:14.000000000 -0400
56003 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
56004   * usually useless though. */
56005  extern int __request_module(bool wait, const char *name, ...) \
56006         __attribute__((format(printf, 2, 3)));
56007 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56008 +       __attribute__((format(printf, 3, 4)));
56009  #define request_module(mod...) __request_module(true, mod)
56010  #define request_module_nowait(mod...) __request_module(false, mod)
56011  #define try_then_request_module(x, mod...) \
56012 diff -urNp linux-3.0.4/include/linux/kvm_host.h linux-3.0.4/include/linux/kvm_host.h
56013 --- linux-3.0.4/include/linux/kvm_host.h        2011-07-21 22:17:23.000000000 -0400
56014 +++ linux-3.0.4/include/linux/kvm_host.h        2011-08-23 21:47:56.000000000 -0400
56015 @@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56016  void vcpu_load(struct kvm_vcpu *vcpu);
56017  void vcpu_put(struct kvm_vcpu *vcpu);
56018  
56019 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56020 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56021                   struct module *module);
56022  void kvm_exit(void);
56023  
56024 @@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56025                                         struct kvm_guest_debug *dbg);
56026  int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56027  
56028 -int kvm_arch_init(void *opaque);
56029 +int kvm_arch_init(const void *opaque);
56030  void kvm_arch_exit(void);
56031  
56032  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56033 diff -urNp linux-3.0.4/include/linux/libata.h linux-3.0.4/include/linux/libata.h
56034 --- linux-3.0.4/include/linux/libata.h  2011-07-21 22:17:23.000000000 -0400
56035 +++ linux-3.0.4/include/linux/libata.h  2011-08-26 19:49:56.000000000 -0400
56036 @@ -899,7 +899,7 @@ struct ata_port_operations {
56037          * fields must be pointers.
56038          */
56039         const struct ata_port_operations        *inherits;
56040 -};
56041 +} __do_const;
56042  
56043  struct ata_port_info {
56044         unsigned long           flags;
56045 diff -urNp linux-3.0.4/include/linux/mca.h linux-3.0.4/include/linux/mca.h
56046 --- linux-3.0.4/include/linux/mca.h     2011-07-21 22:17:23.000000000 -0400
56047 +++ linux-3.0.4/include/linux/mca.h     2011-08-23 21:47:56.000000000 -0400
56048 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
56049                                                   int region);
56050         void *          (*mca_transform_memory)(struct mca_device *,
56051                                                 void *memory);
56052 -};
56053 +} __no_const;
56054  
56055  struct mca_bus {
56056         u64                     default_dma_mask;
56057 diff -urNp linux-3.0.4/include/linux/memory.h linux-3.0.4/include/linux/memory.h
56058 --- linux-3.0.4/include/linux/memory.h  2011-07-21 22:17:23.000000000 -0400
56059 +++ linux-3.0.4/include/linux/memory.h  2011-08-23 21:47:56.000000000 -0400
56060 @@ -144,7 +144,7 @@ struct memory_accessor {
56061                         size_t count);
56062         ssize_t (*write)(struct memory_accessor *, const char *buf,
56063                          off_t offset, size_t count);
56064 -};
56065 +} __no_const;
56066  
56067  /*
56068   * Kernel text modification mutex, used for code patching. Users of this lock
56069 diff -urNp linux-3.0.4/include/linux/mfd/abx500.h linux-3.0.4/include/linux/mfd/abx500.h
56070 --- linux-3.0.4/include/linux/mfd/abx500.h      2011-07-21 22:17:23.000000000 -0400
56071 +++ linux-3.0.4/include/linux/mfd/abx500.h      2011-08-23 21:47:56.000000000 -0400
56072 @@ -234,6 +234,7 @@ struct abx500_ops {
56073         int (*event_registers_startup_state_get) (struct device *, u8 *);
56074         int (*startup_irq_enabled) (struct device *, unsigned int);
56075  };
56076 +typedef struct abx500_ops __no_const abx500_ops_no_const;
56077  
56078  int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
56079  void abx500_remove_ops(struct device *dev);
56080 diff -urNp linux-3.0.4/include/linux/mm.h linux-3.0.4/include/linux/mm.h
56081 --- linux-3.0.4/include/linux/mm.h      2011-08-23 21:44:40.000000000 -0400
56082 +++ linux-3.0.4/include/linux/mm.h      2011-08-23 21:47:56.000000000 -0400
56083 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void 
56084  
56085  #define VM_CAN_NONLINEAR 0x08000000    /* Has ->fault & does nonlinear pages */
56086  #define VM_MIXEDMAP    0x10000000      /* Can contain "struct page" and pure PFN pages */
56087 +
56088 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56089 +#define VM_SAO         0x00000000      /* Strong Access Ordering (powerpc) */
56090 +#define VM_PAGEEXEC    0x20000000      /* vma->vm_page_prot needs special handling */
56091 +#else
56092  #define VM_SAO         0x20000000      /* Strong Access Ordering (powerpc) */
56093 +#endif
56094 +
56095  #define VM_PFN_AT_MMAP 0x40000000      /* PFNMAP vma that is fully mapped at mmap time */
56096  #define VM_MERGEABLE   0x80000000      /* KSM may merge identical pages */
56097  
56098 @@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
56099  int set_page_dirty_lock(struct page *page);
56100  int clear_page_dirty_for_io(struct page *page);
56101  
56102 -/* Is the vma a continuation of the stack vma above it? */
56103 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
56104 -{
56105 -       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56106 -}
56107 -
56108 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
56109 -                                            unsigned long addr)
56110 -{
56111 -       return (vma->vm_flags & VM_GROWSDOWN) &&
56112 -               (vma->vm_start == addr) &&
56113 -               !vma_growsdown(vma->vm_prev, addr);
56114 -}
56115 -
56116 -/* Is the vma a continuation of the stack vma below it? */
56117 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
56118 -{
56119 -       return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
56120 -}
56121 -
56122 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
56123 -                                          unsigned long addr)
56124 -{
56125 -       return (vma->vm_flags & VM_GROWSUP) &&
56126 -               (vma->vm_end == addr) &&
56127 -               !vma_growsup(vma->vm_next, addr);
56128 -}
56129 -
56130  extern unsigned long move_page_tables(struct vm_area_struct *vma,
56131                 unsigned long old_addr, struct vm_area_struct *new_vma,
56132                 unsigned long new_addr, unsigned long len);
56133 @@ -1169,6 +1148,15 @@ struct shrinker {
56134  extern void register_shrinker(struct shrinker *);
56135  extern void unregister_shrinker(struct shrinker *);
56136  
56137 +#ifdef CONFIG_MMU
56138 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
56139 +#else
56140 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
56141 +{
56142 +       return __pgprot(0);
56143 +}
56144 +#endif
56145 +
56146  int vma_wants_writenotify(struct vm_area_struct *vma);
56147  
56148  extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
56149 @@ -1452,6 +1440,7 @@ out:
56150  }
56151  
56152  extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56153 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56154  
56155  extern unsigned long do_brk(unsigned long, unsigned long);
56156  
56157 @@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
56158  extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56159                                              struct vm_area_struct **pprev);
56160  
56161 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56162 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56163 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56164 +
56165  /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56166     NULL if none.  Assume start_addr < end_addr. */
56167  static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56168 @@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
56169         return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56170  }
56171  
56172 -#ifdef CONFIG_MMU
56173 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
56174 -#else
56175 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
56176 -{
56177 -       return __pgprot(0);
56178 -}
56179 -#endif
56180 -
56181  struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56182  int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56183                         unsigned long pfn, unsigned long size, pgprot_t);
56184 @@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
56185  extern int sysctl_memory_failure_early_kill;
56186  extern int sysctl_memory_failure_recovery;
56187  extern void shake_page(struct page *p, int access);
56188 -extern atomic_long_t mce_bad_pages;
56189 +extern atomic_long_unchecked_t mce_bad_pages;
56190  extern int soft_offline_page(struct page *page, int flags);
56191  
56192  extern void dump_page(struct page *page);
56193 @@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
56194                                 unsigned int pages_per_huge_page);
56195  #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
56196  
56197 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56198 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56199 +#else
56200 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56201 +#endif
56202 +
56203  #endif /* __KERNEL__ */
56204  #endif /* _LINUX_MM_H */
56205 diff -urNp linux-3.0.4/include/linux/mm_types.h linux-3.0.4/include/linux/mm_types.h
56206 --- linux-3.0.4/include/linux/mm_types.h        2011-07-21 22:17:23.000000000 -0400
56207 +++ linux-3.0.4/include/linux/mm_types.h        2011-08-23 21:47:56.000000000 -0400
56208 @@ -184,6 +184,8 @@ struct vm_area_struct {
56209  #ifdef CONFIG_NUMA
56210         struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
56211  #endif
56212 +
56213 +       struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56214  };
56215  
56216  struct core_thread {
56217 @@ -316,6 +318,24 @@ struct mm_struct {
56218  #ifdef CONFIG_CPUMASK_OFFSTACK
56219         struct cpumask cpumask_allocation;
56220  #endif
56221 +
56222 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56223 +       unsigned long pax_flags;
56224 +#endif
56225 +
56226 +#ifdef CONFIG_PAX_DLRESOLVE
56227 +       unsigned long call_dl_resolve;
56228 +#endif
56229 +
56230 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56231 +       unsigned long call_syscall;
56232 +#endif
56233 +
56234 +#ifdef CONFIG_PAX_ASLR
56235 +       unsigned long delta_mmap;               /* randomized offset */
56236 +       unsigned long delta_stack;              /* randomized offset */
56237 +#endif
56238 +
56239  };
56240  
56241  static inline void mm_init_cpumask(struct mm_struct *mm)
56242 diff -urNp linux-3.0.4/include/linux/mmu_notifier.h linux-3.0.4/include/linux/mmu_notifier.h
56243 --- linux-3.0.4/include/linux/mmu_notifier.h    2011-07-21 22:17:23.000000000 -0400
56244 +++ linux-3.0.4/include/linux/mmu_notifier.h    2011-08-23 21:47:56.000000000 -0400
56245 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
56246   */
56247  #define ptep_clear_flush_notify(__vma, __address, __ptep)              \
56248  ({                                                                     \
56249 -       pte_t __pte;                                                    \
56250 +       pte_t ___pte;                                                   \
56251         struct vm_area_struct *___vma = __vma;                          \
56252         unsigned long ___address = __address;                           \
56253 -       __pte = ptep_clear_flush(___vma, ___address, __ptep);           \
56254 +       ___pte = ptep_clear_flush(___vma, ___address, __ptep);          \
56255         mmu_notifier_invalidate_page(___vma->vm_mm, ___address);        \
56256 -       __pte;                                                          \
56257 +       ___pte;                                                         \
56258  })
56259  
56260  #define pmdp_clear_flush_notify(__vma, __address, __pmdp)              \
56261 diff -urNp linux-3.0.4/include/linux/mmzone.h linux-3.0.4/include/linux/mmzone.h
56262 --- linux-3.0.4/include/linux/mmzone.h  2011-07-21 22:17:23.000000000 -0400
56263 +++ linux-3.0.4/include/linux/mmzone.h  2011-08-23 21:47:56.000000000 -0400
56264 @@ -350,7 +350,7 @@ struct zone {
56265         unsigned long           flags;             /* zone flags, see below */
56266  
56267         /* Zone statistics */
56268 -       atomic_long_t           vm_stat[NR_VM_ZONE_STAT_ITEMS];
56269 +       atomic_long_unchecked_t         vm_stat[NR_VM_ZONE_STAT_ITEMS];
56270  
56271         /*
56272          * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
56273 diff -urNp linux-3.0.4/include/linux/mod_devicetable.h linux-3.0.4/include/linux/mod_devicetable.h
56274 --- linux-3.0.4/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
56275 +++ linux-3.0.4/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
56276 @@ -12,7 +12,7 @@
56277  typedef unsigned long kernel_ulong_t;
56278  #endif
56279  
56280 -#define PCI_ANY_ID (~0)
56281 +#define PCI_ANY_ID ((__u16)~0)
56282  
56283  struct pci_device_id {
56284         __u32 vendor, device;           /* Vendor and device ID or PCI_ANY_ID*/
56285 @@ -131,7 +131,7 @@ struct usb_device_id {
56286  #define USB_DEVICE_ID_MATCH_INT_SUBCLASS       0x0100
56287  #define USB_DEVICE_ID_MATCH_INT_PROTOCOL       0x0200
56288  
56289 -#define HID_ANY_ID                             (~0)
56290 +#define HID_ANY_ID                             (~0U)
56291  
56292  struct hid_device_id {
56293         __u16 bus;
56294 diff -urNp linux-3.0.4/include/linux/module.h linux-3.0.4/include/linux/module.h
56295 --- linux-3.0.4/include/linux/module.h  2011-07-21 22:17:23.000000000 -0400
56296 +++ linux-3.0.4/include/linux/module.h  2011-08-23 21:47:56.000000000 -0400
56297 @@ -16,6 +16,7 @@
56298  #include <linux/kobject.h>
56299  #include <linux/moduleparam.h>
56300  #include <linux/tracepoint.h>
56301 +#include <linux/fs.h>
56302  
56303  #include <linux/percpu.h>
56304  #include <asm/module.h>
56305 @@ -325,19 +326,16 @@ struct module
56306         int (*init)(void);
56307  
56308         /* If this is non-NULL, vfree after init() returns */
56309 -       void *module_init;
56310 +       void *module_init_rx, *module_init_rw;
56311  
56312         /* Here is the actual code + data, vfree'd on unload. */
56313 -       void *module_core;
56314 +       void *module_core_rx, *module_core_rw;
56315  
56316         /* Here are the sizes of the init and core sections */
56317 -       unsigned int init_size, core_size;
56318 +       unsigned int init_size_rw, core_size_rw;
56319  
56320         /* The size of the executable code in each section.  */
56321 -       unsigned int init_text_size, core_text_size;
56322 -
56323 -       /* Size of RO sections of the module (text+rodata) */
56324 -       unsigned int init_ro_size, core_ro_size;
56325 +       unsigned int init_size_rx, core_size_rx;
56326  
56327         /* Arch-specific module values */
56328         struct mod_arch_specific arch;
56329 @@ -393,6 +391,10 @@ struct module
56330  #ifdef CONFIG_EVENT_TRACING
56331         struct ftrace_event_call **trace_events;
56332         unsigned int num_trace_events;
56333 +       struct file_operations trace_id;
56334 +       struct file_operations trace_enable;
56335 +       struct file_operations trace_format;
56336 +       struct file_operations trace_filter;
56337  #endif
56338  #ifdef CONFIG_FTRACE_MCOUNT_RECORD
56339         unsigned int num_ftrace_callsites;
56340 @@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
56341  bool is_module_percpu_address(unsigned long addr);
56342  bool is_module_text_address(unsigned long addr);
56343  
56344 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56345 +{
56346 +
56347 +#ifdef CONFIG_PAX_KERNEXEC
56348 +       if (ktla_ktva(addr) >= (unsigned long)start &&
56349 +           ktla_ktva(addr) < (unsigned long)start + size)
56350 +               return 1;
56351 +#endif
56352 +
56353 +       return ((void *)addr >= start && (void *)addr < start + size);
56354 +}
56355 +
56356 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56357 +{
56358 +       return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56359 +}
56360 +
56361 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56362 +{
56363 +       return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56364 +}
56365 +
56366 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56367 +{
56368 +       return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56369 +}
56370 +
56371 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56372 +{
56373 +       return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56374 +}
56375 +
56376  static inline int within_module_core(unsigned long addr, struct module *mod)
56377  {
56378 -       return (unsigned long)mod->module_core <= addr &&
56379 -              addr < (unsigned long)mod->module_core + mod->core_size;
56380 +       return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56381  }
56382  
56383  static inline int within_module_init(unsigned long addr, struct module *mod)
56384  {
56385 -       return (unsigned long)mod->module_init <= addr &&
56386 -              addr < (unsigned long)mod->module_init + mod->init_size;
56387 +       return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56388  }
56389  
56390  /* Search for module by name: must hold module_mutex. */
56391 diff -urNp linux-3.0.4/include/linux/moduleloader.h linux-3.0.4/include/linux/moduleloader.h
56392 --- linux-3.0.4/include/linux/moduleloader.h    2011-07-21 22:17:23.000000000 -0400
56393 +++ linux-3.0.4/include/linux/moduleloader.h    2011-08-23 21:47:56.000000000 -0400
56394 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56395     sections.  Returns NULL on failure. */
56396  void *module_alloc(unsigned long size);
56397  
56398 +#ifdef CONFIG_PAX_KERNEXEC
56399 +void *module_alloc_exec(unsigned long size);
56400 +#else
56401 +#define module_alloc_exec(x) module_alloc(x)
56402 +#endif
56403 +
56404  /* Free memory returned from module_alloc. */
56405  void module_free(struct module *mod, void *module_region);
56406  
56407 +#ifdef CONFIG_PAX_KERNEXEC
56408 +void module_free_exec(struct module *mod, void *module_region);
56409 +#else
56410 +#define module_free_exec(x, y) module_free((x), (y))
56411 +#endif
56412 +
56413  /* Apply the given relocation to the (simplified) ELF.  Return -error
56414     or 0. */
56415  int apply_relocate(Elf_Shdr *sechdrs,
56416 diff -urNp linux-3.0.4/include/linux/moduleparam.h linux-3.0.4/include/linux/moduleparam.h
56417 --- linux-3.0.4/include/linux/moduleparam.h     2011-07-21 22:17:23.000000000 -0400
56418 +++ linux-3.0.4/include/linux/moduleparam.h     2011-08-23 21:47:56.000000000 -0400
56419 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
56420   * @len is usually just sizeof(string).
56421   */
56422  #define module_param_string(name, string, len, perm)                   \
56423 -       static const struct kparam_string __param_string_##name         \
56424 +       static const struct kparam_string __param_string_##name __used  \
56425                 = { len, string };                                      \
56426         __module_param_call(MODULE_PARAM_PREFIX, name,                  \
56427                             &param_ops_string,                          \
56428 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
56429   * module_param_named() for why this might be necessary.
56430   */
56431  #define module_param_array_named(name, array, type, nump, perm)                \
56432 -       static const struct kparam_array __param_arr_##name             \
56433 +       static const struct kparam_array __param_arr_##name __used      \
56434         = { .max = ARRAY_SIZE(array), .num = nump,                      \
56435             .ops = &param_ops_##type,                                   \
56436             .elemsize = sizeof(array[0]), .elem = array };              \
56437 diff -urNp linux-3.0.4/include/linux/namei.h linux-3.0.4/include/linux/namei.h
56438 --- linux-3.0.4/include/linux/namei.h   2011-07-21 22:17:23.000000000 -0400
56439 +++ linux-3.0.4/include/linux/namei.h   2011-08-23 21:47:56.000000000 -0400
56440 @@ -24,7 +24,7 @@ struct nameidata {
56441         unsigned        seq;
56442         int             last_type;
56443         unsigned        depth;
56444 -       char *saved_names[MAX_NESTED_LINKS + 1];
56445 +       const char *saved_names[MAX_NESTED_LINKS + 1];
56446  
56447         /* Intent data */
56448         union {
56449 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
56450  extern struct dentry *lock_rename(struct dentry *, struct dentry *);
56451  extern void unlock_rename(struct dentry *, struct dentry *);
56452  
56453 -static inline void nd_set_link(struct nameidata *nd, char *path)
56454 +static inline void nd_set_link(struct nameidata *nd, const char *path)
56455  {
56456         nd->saved_names[nd->depth] = path;
56457  }
56458  
56459 -static inline char *nd_get_link(struct nameidata *nd)
56460 +static inline const char *nd_get_link(const struct nameidata *nd)
56461  {
56462         return nd->saved_names[nd->depth];
56463  }
56464 diff -urNp linux-3.0.4/include/linux/netdevice.h linux-3.0.4/include/linux/netdevice.h
56465 --- linux-3.0.4/include/linux/netdevice.h       2011-08-23 21:44:40.000000000 -0400
56466 +++ linux-3.0.4/include/linux/netdevice.h       2011-08-23 21:47:56.000000000 -0400
56467 @@ -979,6 +979,7 @@ struct net_device_ops {
56468         int                     (*ndo_set_features)(struct net_device *dev,
56469                                                     u32 features);
56470  };
56471 +typedef struct net_device_ops __no_const net_device_ops_no_const;
56472  
56473  /*
56474   *     The DEVICE structure.
56475 diff -urNp linux-3.0.4/include/linux/netfilter/xt_gradm.h linux-3.0.4/include/linux/netfilter/xt_gradm.h
56476 --- linux-3.0.4/include/linux/netfilter/xt_gradm.h      1969-12-31 19:00:00.000000000 -0500
56477 +++ linux-3.0.4/include/linux/netfilter/xt_gradm.h      2011-08-23 21:48:14.000000000 -0400
56478 @@ -0,0 +1,9 @@
56479 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
56480 +#define _LINUX_NETFILTER_XT_GRADM_H 1
56481 +
56482 +struct xt_gradm_mtinfo {
56483 +       __u16 flags;
56484 +       __u16 invflags;
56485 +};
56486 +
56487 +#endif
56488 diff -urNp linux-3.0.4/include/linux/oprofile.h linux-3.0.4/include/linux/oprofile.h
56489 --- linux-3.0.4/include/linux/oprofile.h        2011-07-21 22:17:23.000000000 -0400
56490 +++ linux-3.0.4/include/linux/oprofile.h        2011-08-23 21:47:56.000000000 -0400
56491 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
56492  int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
56493         char const * name, ulong * val);
56494   
56495 -/** Create a file for read-only access to an atomic_t. */
56496 +/** Create a file for read-only access to an atomic_unchecked_t. */
56497  int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
56498 -       char const * name, atomic_t * val);
56499 +       char const * name, atomic_unchecked_t * val);
56500   
56501  /** create a directory */
56502  struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
56503 diff -urNp linux-3.0.4/include/linux/padata.h linux-3.0.4/include/linux/padata.h
56504 --- linux-3.0.4/include/linux/padata.h  2011-07-21 22:17:23.000000000 -0400
56505 +++ linux-3.0.4/include/linux/padata.h  2011-08-23 21:47:56.000000000 -0400
56506 @@ -129,7 +129,7 @@ struct parallel_data {
56507         struct padata_instance          *pinst;
56508         struct padata_parallel_queue    __percpu *pqueue;
56509         struct padata_serial_queue      __percpu *squeue;
56510 -       atomic_t                        seq_nr;
56511 +       atomic_unchecked_t              seq_nr;
56512         atomic_t                        reorder_objects;
56513         atomic_t                        refcnt;
56514         unsigned int                    max_seq_nr;
56515 diff -urNp linux-3.0.4/include/linux/perf_event.h linux-3.0.4/include/linux/perf_event.h
56516 --- linux-3.0.4/include/linux/perf_event.h      2011-07-21 22:17:23.000000000 -0400
56517 +++ linux-3.0.4/include/linux/perf_event.h      2011-08-23 21:47:56.000000000 -0400
56518 @@ -761,8 +761,8 @@ struct perf_event {
56519  
56520         enum perf_event_active_state    state;
56521         unsigned int                    attach_state;
56522 -       local64_t                       count;
56523 -       atomic64_t                      child_count;
56524 +       local64_t                       count; /* PaX: fix it one day */
56525 +       atomic64_unchecked_t            child_count;
56526  
56527         /*
56528          * These are the total time in nanoseconds that the event
56529 @@ -813,8 +813,8 @@ struct perf_event {
56530          * These accumulate total time (in nanoseconds) that children
56531          * events have been enabled and running, respectively.
56532          */
56533 -       atomic64_t                      child_total_time_enabled;
56534 -       atomic64_t                      child_total_time_running;
56535 +       atomic64_unchecked_t            child_total_time_enabled;
56536 +       atomic64_unchecked_t            child_total_time_running;
56537  
56538         /*
56539          * Protect attach/detach and child_list:
56540 diff -urNp linux-3.0.4/include/linux/pipe_fs_i.h linux-3.0.4/include/linux/pipe_fs_i.h
56541 --- linux-3.0.4/include/linux/pipe_fs_i.h       2011-07-21 22:17:23.000000000 -0400
56542 +++ linux-3.0.4/include/linux/pipe_fs_i.h       2011-08-23 21:47:56.000000000 -0400
56543 @@ -46,9 +46,9 @@ struct pipe_buffer {
56544  struct pipe_inode_info {
56545         wait_queue_head_t wait;
56546         unsigned int nrbufs, curbuf, buffers;
56547 -       unsigned int readers;
56548 -       unsigned int writers;
56549 -       unsigned int waiting_writers;
56550 +       atomic_t readers;
56551 +       atomic_t writers;
56552 +       atomic_t waiting_writers;
56553         unsigned int r_counter;
56554         unsigned int w_counter;
56555         struct page *tmp_page;
56556 diff -urNp linux-3.0.4/include/linux/pm_runtime.h linux-3.0.4/include/linux/pm_runtime.h
56557 --- linux-3.0.4/include/linux/pm_runtime.h      2011-07-21 22:17:23.000000000 -0400
56558 +++ linux-3.0.4/include/linux/pm_runtime.h      2011-08-23 21:47:56.000000000 -0400
56559 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
56560  
56561  static inline void pm_runtime_mark_last_busy(struct device *dev)
56562  {
56563 -       ACCESS_ONCE(dev->power.last_busy) = jiffies;
56564 +       ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
56565  }
56566  
56567  #else /* !CONFIG_PM_RUNTIME */
56568 diff -urNp linux-3.0.4/include/linux/poison.h linux-3.0.4/include/linux/poison.h
56569 --- linux-3.0.4/include/linux/poison.h  2011-07-21 22:17:23.000000000 -0400
56570 +++ linux-3.0.4/include/linux/poison.h  2011-08-23 21:47:56.000000000 -0400
56571 @@ -19,8 +19,8 @@
56572   * under normal circumstances, used to verify that nobody uses
56573   * non-initialized list entries.
56574   */
56575 -#define LIST_POISON1  ((void *) 0x00100100 + POISON_POINTER_DELTA)
56576 -#define LIST_POISON2  ((void *) 0x00200200 + POISON_POINTER_DELTA)
56577 +#define LIST_POISON1  ((void *) (long)0xFFFFFF01)
56578 +#define LIST_POISON2  ((void *) (long)0xFFFFFF02)
56579  
56580  /********** include/linux/timer.h **********/
56581  /*
56582 diff -urNp linux-3.0.4/include/linux/preempt.h linux-3.0.4/include/linux/preempt.h
56583 --- linux-3.0.4/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
56584 +++ linux-3.0.4/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
56585 @@ -115,7 +115,7 @@ struct preempt_ops {
56586         void (*sched_in)(struct preempt_notifier *notifier, int cpu);
56587         void (*sched_out)(struct preempt_notifier *notifier,
56588                           struct task_struct *next);
56589 -};
56590 +} __no_const;
56591  
56592  /**
56593   * preempt_notifier - key for installing preemption notifiers
56594 diff -urNp linux-3.0.4/include/linux/proc_fs.h linux-3.0.4/include/linux/proc_fs.h
56595 --- linux-3.0.4/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
56596 +++ linux-3.0.4/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
56597 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
56598         return proc_create_data(name, mode, parent, proc_fops, NULL);
56599  }
56600  
56601 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
56602 +       struct proc_dir_entry *parent, const struct file_operations *proc_fops)
56603 +{
56604 +#ifdef CONFIG_GRKERNSEC_PROC_USER
56605 +       return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
56606 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56607 +       return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
56608 +#else
56609 +       return proc_create_data(name, mode, parent, proc_fops, NULL);
56610 +#endif
56611 +}
56612 +       
56613 +
56614  static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
56615         mode_t mode, struct proc_dir_entry *base, 
56616         read_proc_t *read_proc, void * data)
56617 @@ -278,7 +278,7 @@ union proc_op {
56618         int (*proc_vs_read)(char *page);
56619         int (*proc_vxi_read)(struct vx_info *vxi, char *page);
56620         int (*proc_nxi_read)(struct nx_info *nxi, char *page);
56621 -};
56622 +} __no_const;
56623  
56624  struct ctl_table_header;
56625  struct ctl_table;
56626 diff -urNp linux-3.0.4/include/linux/ptrace.h linux-3.0.4/include/linux/ptrace.h
56627 --- linux-3.0.4/include/linux/ptrace.h  2011-07-21 22:17:23.000000000 -0400
56628 +++ linux-3.0.4/include/linux/ptrace.h  2011-08-23 21:48:14.000000000 -0400
56629 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
56630  extern void exit_ptrace(struct task_struct *tracer);
56631  #define PTRACE_MODE_READ   1
56632  #define PTRACE_MODE_ATTACH 2
56633 -/* Returns 0 on success, -errno on denial. */
56634 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
56635  /* Returns true on success, false on denial. */
56636  extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
56637 +/* Returns true on success, false on denial. */
56638 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
56639  
56640  static inline int ptrace_reparented(struct task_struct *child)
56641  {
56642 diff -urNp linux-3.0.4/include/linux/random.h linux-3.0.4/include/linux/random.h
56643 --- linux-3.0.4/include/linux/random.h  2011-08-23 21:44:40.000000000 -0400
56644 +++ linux-3.0.4/include/linux/random.h  2011-08-23 21:47:56.000000000 -0400
56645 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
56646  
56647  u32 prandom32(struct rnd_state *);
56648  
56649 +static inline unsigned long pax_get_random_long(void)
56650 +{
56651 +       return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
56652 +}
56653 +
56654  /*
56655   * Handle minimum values for seeds
56656   */
56657  static inline u32 __seed(u32 x, u32 m)
56658  {
56659 -       return (x < m) ? x + m : x;
56660 +       return (x <= m) ? x + m + 1 : x;
56661  }
56662  
56663  /**
56664 diff -urNp linux-3.0.4/include/linux/reboot.h linux-3.0.4/include/linux/reboot.h
56665 --- linux-3.0.4/include/linux/reboot.h  2011-07-21 22:17:23.000000000 -0400
56666 +++ linux-3.0.4/include/linux/reboot.h  2011-08-23 21:47:56.000000000 -0400
56667 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
56668   * Architecture-specific implementations of sys_reboot commands.
56669   */
56670  
56671 -extern void machine_restart(char *cmd);
56672 -extern void machine_halt(void);
56673 -extern void machine_power_off(void);
56674 +extern void machine_restart(char *cmd) __noreturn;
56675 +extern void machine_halt(void) __noreturn;
56676 +extern void machine_power_off(void) __noreturn;
56677  
56678  extern void machine_shutdown(void);
56679  struct pt_regs;
56680 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
56681   */
56682  
56683  extern void kernel_restart_prepare(char *cmd);
56684 -extern void kernel_restart(char *cmd);
56685 -extern void kernel_halt(void);
56686 -extern void kernel_power_off(void);
56687 +extern void kernel_restart(char *cmd) __noreturn;
56688 +extern void kernel_halt(void) __noreturn;
56689 +extern void kernel_power_off(void) __noreturn;
56690  
56691  extern int C_A_D; /* for sysctl */
56692  void ctrl_alt_del(void);
56693 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
56694   * Emergency restart, callable from an interrupt handler.
56695   */
56696  
56697 -extern void emergency_restart(void);
56698 +extern void emergency_restart(void) __noreturn;
56699  #include <asm/emergency-restart.h>
56700  
56701  #endif
56702 diff -urNp linux-3.0.4/include/linux/reiserfs_fs.h linux-3.0.4/include/linux/reiserfs_fs.h
56703 --- linux-3.0.4/include/linux/reiserfs_fs.h     2011-07-21 22:17:23.000000000 -0400
56704 +++ linux-3.0.4/include/linux/reiserfs_fs.h     2011-08-23 21:47:56.000000000 -0400
56705 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
56706  #define REISERFS_USER_MEM              1       /* reiserfs user memory mode            */
56707  
56708  #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56709 -#define get_generation(s) atomic_read (&fs_generation(s))
56710 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56711  #define FILESYSTEM_CHANGED_TB(tb)  (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56712  #define __fs_changed(gen,s) (gen != get_generation (s))
56713  #define fs_changed(gen,s)              \
56714 diff -urNp linux-3.0.4/include/linux/reiserfs_fs_sb.h linux-3.0.4/include/linux/reiserfs_fs_sb.h
56715 --- linux-3.0.4/include/linux/reiserfs_fs_sb.h  2011-07-21 22:17:23.000000000 -0400
56716 +++ linux-3.0.4/include/linux/reiserfs_fs_sb.h  2011-08-23 21:47:56.000000000 -0400
56717 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
56718         /* Comment? -Hans */
56719         wait_queue_head_t s_wait;
56720         /* To be obsoleted soon by per buffer seals.. -Hans */
56721 -       atomic_t s_generation_counter;  // increased by one every time the
56722 +       atomic_unchecked_t s_generation_counter;        // increased by one every time the
56723         // tree gets re-balanced
56724         unsigned long s_properties;     /* File system properties. Currently holds
56725                                            on-disk FS format */
56726 diff -urNp linux-3.0.4/include/linux/relay.h linux-3.0.4/include/linux/relay.h
56727 --- linux-3.0.4/include/linux/relay.h   2011-07-21 22:17:23.000000000 -0400
56728 +++ linux-3.0.4/include/linux/relay.h   2011-08-23 21:47:56.000000000 -0400
56729 @@ -159,7 +159,7 @@ struct rchan_callbacks
56730          * The callback should return 0 if successful, negative if not.
56731          */
56732         int (*remove_buf_file)(struct dentry *dentry);
56733 -};
56734 +} __no_const;
56735  
56736  /*
56737   * CONFIG_RELAY kernel API, kernel/relay.c
56738 diff -urNp linux-3.0.4/include/linux/rfkill.h linux-3.0.4/include/linux/rfkill.h
56739 --- linux-3.0.4/include/linux/rfkill.h  2011-07-21 22:17:23.000000000 -0400
56740 +++ linux-3.0.4/include/linux/rfkill.h  2011-08-23 21:47:56.000000000 -0400
56741 @@ -147,6 +147,7 @@ struct rfkill_ops {
56742         void    (*query)(struct rfkill *rfkill, void *data);
56743         int     (*set_block)(void *data, bool blocked);
56744  };
56745 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
56746  
56747  #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
56748  /**
56749 diff -urNp linux-3.0.4/include/linux/rmap.h linux-3.0.4/include/linux/rmap.h
56750 --- linux-3.0.4/include/linux/rmap.h    2011-07-21 22:17:23.000000000 -0400
56751 +++ linux-3.0.4/include/linux/rmap.h    2011-08-23 21:47:56.000000000 -0400
56752 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
56753  void anon_vma_init(void);      /* create anon_vma_cachep */
56754  int  anon_vma_prepare(struct vm_area_struct *);
56755  void unlink_anon_vmas(struct vm_area_struct *);
56756 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
56757 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
56758 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
56759 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
56760  void __anon_vma_link(struct vm_area_struct *);
56761  
56762  static inline void anon_vma_merge(struct vm_area_struct *vma,
56763 diff -urNp linux-3.0.4/include/linux/sched.h linux-3.0.4/include/linux/sched.h
56764 --- linux-3.0.4/include/linux/sched.h   2011-07-21 22:17:23.000000000 -0400
56765 +++ linux-3.0.4/include/linux/sched.h   2011-08-25 17:22:27.000000000 -0400
56766 @@ -100,6 +100,7 @@ struct bio_list;
56767  struct fs_struct;
56768  struct perf_event_context;
56769  struct blk_plug;
56770 +struct linux_binprm;
56771  
56772  /*
56773   * List of flags we want to share for kernel threads,
56774 @@ -380,10 +381,13 @@ struct user_namespace;
56775  #define DEFAULT_MAX_MAP_COUNT  (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
56776  
56777  extern int sysctl_max_map_count;
56778 +extern unsigned long sysctl_heap_stack_gap;
56779  
56780  #include <linux/aio.h>
56781  
56782  #ifdef CONFIG_MMU
56783 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
56784 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
56785  extern void arch_pick_mmap_layout(struct mm_struct *mm);
56786  extern unsigned long
56787  arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
56788 @@ -629,6 +633,17 @@ struct signal_struct {
56789  #ifdef CONFIG_TASKSTATS
56790         struct taskstats *stats;
56791  #endif
56792 +
56793 +#ifdef CONFIG_GRKERNSEC
56794 +       u32 curr_ip;
56795 +       u32 saved_ip;
56796 +       u32 gr_saddr;
56797 +       u32 gr_daddr;
56798 +       u16 gr_sport;
56799 +       u16 gr_dport;
56800 +       u8 used_accept:1;
56801 +#endif
56802 +
56803  #ifdef CONFIG_AUDIT
56804         unsigned audit_tty;
56805         struct tty_audit_buf *tty_audit_buf;
56806 @@ -710,6 +725,11 @@ struct user_struct {
56807         struct key *session_keyring;    /* UID's default session keyring */
56808  #endif
56809  
56810 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56811 +       unsigned int banned;
56812 +       unsigned long ban_expires;
56813 +#endif
56814 +
56815         /* Hash table maintenance information */
56816         struct hlist_node uidhash_node;
56817         uid_t uid;
56818 @@ -1340,8 +1360,8 @@ struct task_struct {
56819         struct list_head thread_group;
56820  
56821         struct completion *vfork_done;          /* for vfork() */
56822 -       int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
56823 -       int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
56824 +       pid_t __user *set_child_tid;            /* CLONE_CHILD_SETTID */
56825 +       pid_t __user *clear_child_tid;          /* CLONE_CHILD_CLEARTID */
56826  
56827         cputime_t utime, stime, utimescaled, stimescaled;
56828         cputime_t gtime;
56829 @@ -1357,13 +1377,6 @@ struct task_struct {
56830         struct task_cputime cputime_expires;
56831         struct list_head cpu_timers[3];
56832  
56833 -/* process credentials */
56834 -       const struct cred __rcu *real_cred; /* objective and real subjective task
56835 -                                        * credentials (COW) */
56836 -       const struct cred __rcu *cred;  /* effective (overridable) subjective task
56837 -                                        * credentials (COW) */
56838 -       struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56839 -
56840         char comm[TASK_COMM_LEN]; /* executable name excluding path
56841                                      - access with [gs]et_task_comm (which lock
56842                                        it with task_lock())
56843 @@ -1380,8 +1393,16 @@ struct task_struct {
56844  #endif
56845  /* CPU-specific state of this task */
56846         struct thread_struct thread;
56847 +/* thread_info moved to task_struct */
56848 +#ifdef CONFIG_X86
56849 +       struct thread_info tinfo;
56850 +#endif
56851  /* filesystem information */
56852         struct fs_struct *fs;
56853 +
56854 +       const struct cred __rcu *cred;  /* effective (overridable) subjective task
56855 +                                        * credentials (COW) */
56856 +
56857  /* open file information */
56858         struct files_struct *files;
56859  /* namespaces */
56860 @@ -1428,6 +1449,11 @@ struct task_struct {
56861         struct rt_mutex_waiter *pi_blocked_on;
56862  #endif
56863  
56864 +/* process credentials */
56865 +       const struct cred __rcu *real_cred; /* objective and real subjective task
56866 +                                        * credentials (COW) */
56867 +       struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56868 +
56869  #ifdef CONFIG_DEBUG_MUTEXES
56870         /* mutex deadlock detection */
56871         struct mutex_waiter *blocked_on;
56872 @@ -1538,6 +1564,21 @@ struct task_struct {
56873         unsigned long default_timer_slack_ns;
56874  
56875         struct list_head        *scm_work_list;
56876 +
56877 +#ifdef CONFIG_GRKERNSEC
56878 +       /* grsecurity */
56879 +       struct dentry *gr_chroot_dentry;
56880 +       struct acl_subject_label *acl;
56881 +       struct acl_role_label *role;
56882 +       struct file *exec_file;
56883 +       u16 acl_role_id;
56884 +       /* is this the task that authenticated to the special role */
56885 +       u8 acl_sp_role;
56886 +       u8 is_writable;
56887 +       u8 brute;
56888 +       u8 gr_is_chrooted;
56889 +#endif
56890 +
56891  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
56892         /* Index of current stored address in ret_stack */
56893         int curr_ret_stack;
56894 @@ -1572,6 +1613,57 @@ struct task_struct {
56895  #endif
56896  };
56897  
56898 +#define MF_PAX_PAGEEXEC                0x01000000      /* Paging based non-executable pages */
56899 +#define MF_PAX_EMUTRAMP                0x02000000      /* Emulate trampolines */
56900 +#define MF_PAX_MPROTECT                0x04000000      /* Restrict mprotect() */
56901 +#define MF_PAX_RANDMMAP                0x08000000      /* Randomize mmap() base */
56902 +/*#define MF_PAX_RANDEXEC              0x10000000*/    /* Randomize ET_EXEC base */
56903 +#define MF_PAX_SEGMEXEC                0x20000000      /* Segmentation based non-executable pages */
56904 +
56905 +#ifdef CONFIG_PAX_SOFTMODE
56906 +extern int pax_softmode;
56907 +#endif
56908 +
56909 +extern int pax_check_flags(unsigned long *);
56910 +
56911 +/* if tsk != current then task_lock must be held on it */
56912 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56913 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
56914 +{
56915 +       if (likely(tsk->mm))
56916 +               return tsk->mm->pax_flags;
56917 +       else
56918 +               return 0UL;
56919 +}
56920 +
56921 +/* if tsk != current then task_lock must be held on it */
56922 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
56923 +{
56924 +       if (likely(tsk->mm)) {
56925 +               tsk->mm->pax_flags = flags;
56926 +               return 0;
56927 +       }
56928 +       return -EINVAL;
56929 +}
56930 +#endif
56931 +
56932 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56933 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
56934 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
56935 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
56936 +#endif
56937 +
56938 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
56939 +extern void pax_report_insns(void *pc, void *sp);
56940 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
56941 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
56942 +
56943 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
56944 +extern void pax_track_stack(void);
56945 +#else
56946 +static inline void pax_track_stack(void) {}
56947 +#endif
56948 +
56949  /* Future-safe accessor for struct task_struct's cpus_allowed. */
56950  #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
56951  
56952 @@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
56953  #define PF_DUMPCORE    0x00000200      /* dumped core */
56954  #define PF_SIGNALED    0x00000400      /* killed by a signal */
56955  #define PF_MEMALLOC    0x00000800      /* Allocating memory */
56956 +#define PF_NPROC_EXCEEDED 0x00001000   /* set_user noticed that RLIMIT_NPROC was exceeded */
56957  #define PF_USED_MATH   0x00002000      /* if unset the fpu must be initialized before use */
56958  #define PF_FREEZING    0x00004000      /* freeze in progress. do not account to load */
56959  #define PF_NOFREEZE    0x00008000      /* this thread should not be frozen */
56960 @@ -2056,7 +2149,9 @@ void yield(void);
56961  extern struct exec_domain      default_exec_domain;
56962  
56963  union thread_union {
56964 +#ifndef CONFIG_X86
56965         struct thread_info thread_info;
56966 +#endif
56967         unsigned long stack[THREAD_SIZE/sizeof(long)];
56968  };
56969  
56970 @@ -2089,6 +2184,7 @@ extern struct pid_namespace init_pid_ns;
56971   */
56972  
56973  extern struct task_struct *find_task_by_vpid(pid_t nr);
56974 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
56975  extern struct task_struct *find_task_by_pid_ns(pid_t nr,
56976                 struct pid_namespace *ns);
56977  
56978 @@ -2225,7 +2321,7 @@ extern void __cleanup_sighand(struct sig
56979  extern void exit_itimers(struct signal_struct *);
56980  extern void flush_itimer_signals(void);
56981  
56982 -extern NORET_TYPE void do_group_exit(int);
56983 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
56984  
56985  extern void daemonize(const char *, ...);
56986  extern int allow_signal(int);
56987 @@ -2393,13 +2489,17 @@ static inline unsigned long *end_of_stac
56988  
56989  #endif
56990  
56991 -static inline int object_is_on_stack(void *obj)
56992 +static inline int object_starts_on_stack(void *obj)
56993  {
56994 -       void *stack = task_stack_page(current);
56995 +       const void *stack = task_stack_page(current);
56996  
56997         return (obj >= stack) && (obj < (stack + THREAD_SIZE));
56998  }
56999  
57000 +#ifdef CONFIG_PAX_USERCOPY
57001 +extern int object_is_on_stack(const void *obj, unsigned long len);
57002 +#endif
57003 +
57004  extern void thread_info_cache_init(void);
57005  
57006  #ifdef CONFIG_DEBUG_STACK_USAGE
57007 diff -urNp linux-3.0.4/include/linux/screen_info.h linux-3.0.4/include/linux/screen_info.h
57008 --- linux-3.0.4/include/linux/screen_info.h     2011-07-21 22:17:23.000000000 -0400
57009 +++ linux-3.0.4/include/linux/screen_info.h     2011-08-23 21:47:56.000000000 -0400
57010 @@ -43,7 +43,8 @@ struct screen_info {
57011         __u16 pages;            /* 0x32 */
57012         __u16 vesa_attributes;  /* 0x34 */
57013         __u32 capabilities;     /* 0x36 */
57014 -       __u8  _reserved[6];     /* 0x3a */
57015 +       __u16 vesapm_size;      /* 0x3a */
57016 +       __u8  _reserved[4];     /* 0x3c */
57017  } __attribute__((packed));
57018  
57019  #define VIDEO_TYPE_MDA         0x10    /* Monochrome Text Display      */
57020 diff -urNp linux-3.0.4/include/linux/security.h linux-3.0.4/include/linux/security.h
57021 --- linux-3.0.4/include/linux/security.h        2011-07-21 22:17:23.000000000 -0400
57022 +++ linux-3.0.4/include/linux/security.h        2011-08-23 21:48:14.000000000 -0400
57023 @@ -36,6 +36,7 @@
57024  #include <linux/key.h>
57025  #include <linux/xfrm.h>
57026  #include <linux/slab.h>
57027 +#include <linux/grsecurity.h>
57028  #include <net/flow.h>
57029  
57030  /* Maximum number of letters for an LSM name string */
57031 diff -urNp linux-3.0.4/include/linux/seq_file.h linux-3.0.4/include/linux/seq_file.h
57032 --- linux-3.0.4/include/linux/seq_file.h        2011-07-21 22:17:23.000000000 -0400
57033 +++ linux-3.0.4/include/linux/seq_file.h        2011-08-23 21:47:56.000000000 -0400
57034 @@ -32,6 +32,7 @@ struct seq_operations {
57035         void * (*next) (struct seq_file *m, void *v, loff_t *pos);
57036         int (*show) (struct seq_file *m, void *v);
57037  };
57038 +typedef struct seq_operations __no_const seq_operations_no_const;
57039  
57040  #define SEQ_SKIP 1
57041  
57042 diff -urNp linux-3.0.4/include/linux/shmem_fs.h linux-3.0.4/include/linux/shmem_fs.h
57043 --- linux-3.0.4/include/linux/shmem_fs.h        2011-07-21 22:17:23.000000000 -0400
57044 +++ linux-3.0.4/include/linux/shmem_fs.h        2011-08-23 21:47:56.000000000 -0400
57045 @@ -10,7 +10,7 @@
57046  
57047  #define SHMEM_NR_DIRECT 16
57048  
57049 -#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
57050 +#define SHMEM_SYMLINK_INLINE_LEN 64
57051  
57052  struct shmem_inode_info {
57053         spinlock_t              lock;
57054 diff -urNp linux-3.0.4/include/linux/shm.h linux-3.0.4/include/linux/shm.h
57055 --- linux-3.0.4/include/linux/shm.h     2011-07-21 22:17:23.000000000 -0400
57056 +++ linux-3.0.4/include/linux/shm.h     2011-08-23 21:48:14.000000000 -0400
57057 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57058         pid_t                   shm_cprid;
57059         pid_t                   shm_lprid;
57060         struct user_struct      *mlock_user;
57061 +#ifdef CONFIG_GRKERNSEC
57062 +       time_t                  shm_createtime;
57063 +       pid_t                   shm_lapid;
57064 +#endif
57065  };
57066  
57067  /* shm_mode upper byte flags */
57068 diff -urNp linux-3.0.4/include/linux/skbuff.h linux-3.0.4/include/linux/skbuff.h
57069 --- linux-3.0.4/include/linux/skbuff.h  2011-07-21 22:17:23.000000000 -0400
57070 +++ linux-3.0.4/include/linux/skbuff.h  2011-08-23 21:47:56.000000000 -0400
57071 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
57072   */
57073  static inline int skb_queue_empty(const struct sk_buff_head *list)
57074  {
57075 -       return list->next == (struct sk_buff *)list;
57076 +       return list->next == (const struct sk_buff *)list;
57077  }
57078  
57079  /**
57080 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const 
57081  static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57082                                      const struct sk_buff *skb)
57083  {
57084 -       return skb->next == (struct sk_buff *)list;
57085 +       return skb->next == (const struct sk_buff *)list;
57086  }
57087  
57088  /**
57089 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
57090  static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57091                                       const struct sk_buff *skb)
57092  {
57093 -       return skb->prev == (struct sk_buff *)list;
57094 +       return skb->prev == (const struct sk_buff *)list;
57095  }
57096  
57097  /**
57098 @@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
57099   * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
57100   */
57101  #ifndef NET_SKB_PAD
57102 -#define NET_SKB_PAD    max(32, L1_CACHE_BYTES)
57103 +#define NET_SKB_PAD    max(_AC(32,UL), L1_CACHE_BYTES)
57104  #endif
57105  
57106  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57107 diff -urNp linux-3.0.4/include/linux/slab_def.h linux-3.0.4/include/linux/slab_def.h
57108 --- linux-3.0.4/include/linux/slab_def.h        2011-07-21 22:17:23.000000000 -0400
57109 +++ linux-3.0.4/include/linux/slab_def.h        2011-08-23 21:47:56.000000000 -0400
57110 @@ -96,10 +96,10 @@ struct kmem_cache {
57111         unsigned long node_allocs;
57112         unsigned long node_frees;
57113         unsigned long node_overflow;
57114 -       atomic_t allochit;
57115 -       atomic_t allocmiss;
57116 -       atomic_t freehit;
57117 -       atomic_t freemiss;
57118 +       atomic_unchecked_t allochit;
57119 +       atomic_unchecked_t allocmiss;
57120 +       atomic_unchecked_t freehit;
57121 +       atomic_unchecked_t freemiss;
57122  
57123         /*
57124          * If debugging is enabled, then the allocator can add additional
57125 diff -urNp linux-3.0.4/include/linux/slab.h linux-3.0.4/include/linux/slab.h
57126 --- linux-3.0.4/include/linux/slab.h    2011-07-21 22:17:23.000000000 -0400
57127 +++ linux-3.0.4/include/linux/slab.h    2011-08-23 21:47:56.000000000 -0400
57128 @@ -11,12 +11,20 @@
57129  
57130  #include <linux/gfp.h>
57131  #include <linux/types.h>
57132 +#include <linux/err.h>
57133  
57134  /*
57135   * Flags to pass to kmem_cache_create().
57136   * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57137   */
57138  #define SLAB_DEBUG_FREE                0x00000100UL    /* DEBUG: Perform (expensive) checks on free */
57139 +
57140 +#ifdef CONFIG_PAX_USERCOPY
57141 +#define SLAB_USERCOPY          0x00000200UL    /* PaX: Allow copying objs to/from userland */
57142 +#else
57143 +#define SLAB_USERCOPY          0x00000000UL
57144 +#endif
57145 +
57146  #define SLAB_RED_ZONE          0x00000400UL    /* DEBUG: Red zone objs in a cache */
57147  #define SLAB_POISON            0x00000800UL    /* DEBUG: Poison objects */
57148  #define SLAB_HWCACHE_ALIGN     0x00002000UL    /* Align objs on cache lines */
57149 @@ -87,10 +95,13 @@
57150   * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57151   * Both make kfree a no-op.
57152   */
57153 -#define ZERO_SIZE_PTR ((void *)16)
57154 +#define ZERO_SIZE_PTR                          \
57155 +({                                             \
57156 +       BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57157 +       (void *)(-MAX_ERRNO-1L);                \
57158 +})
57159  
57160 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57161 -                               (unsigned long)ZERO_SIZE_PTR)
57162 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57163  
57164  /*
57165   * struct kmem_cache related prototypes
57166 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void 
57167  void kfree(const void *);
57168  void kzfree(const void *);
57169  size_t ksize(const void *);
57170 +void check_object_size(const void *ptr, unsigned long n, bool to);
57171  
57172  /*
57173   * Allocator specific definitions. These are mainly used to establish optimized
57174 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t 
57175  
57176  void __init kmem_cache_init_late(void);
57177  
57178 +#define kmalloc(x, y)                                          \
57179 +({                                                             \
57180 +       void *___retval;                                        \
57181 +       intoverflow_t ___x = (intoverflow_t)x;                  \
57182 +       if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))  \
57183 +               ___retval = NULL;                               \
57184 +       else                                                    \
57185 +               ___retval = kmalloc((size_t)___x, (y));         \
57186 +       ___retval;                                              \
57187 +})
57188 +
57189 +#define kmalloc_node(x, y, z)                                  \
57190 +({                                                             \
57191 +       void *___retval;                                        \
57192 +       intoverflow_t ___x = (intoverflow_t)x;                  \
57193 +       if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57194 +               ___retval = NULL;                               \
57195 +       else                                                    \
57196 +               ___retval = kmalloc_node((size_t)___x, (y), (z));\
57197 +       ___retval;                                              \
57198 +})
57199 +
57200 +#define kzalloc(x, y)                                          \
57201 +({                                                             \
57202 +       void *___retval;                                        \
57203 +       intoverflow_t ___x = (intoverflow_t)x;                  \
57204 +       if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))  \
57205 +               ___retval = NULL;                               \
57206 +       else                                                    \
57207 +               ___retval = kzalloc((size_t)___x, (y));         \
57208 +       ___retval;                                              \
57209 +})
57210 +
57211 +#define __krealloc(x, y, z)                                    \
57212 +({                                                             \
57213 +       void *___retval;                                        \
57214 +       intoverflow_t ___y = (intoverflow_t)y;                  \
57215 +       if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
57216 +               ___retval = NULL;                               \
57217 +       else                                                    \
57218 +               ___retval = __krealloc((x), (size_t)___y, (z)); \
57219 +       ___retval;                                              \
57220 +})
57221 +
57222 +#define krealloc(x, y, z)                                      \
57223 +({                                                             \
57224 +       void *___retval;                                        \
57225 +       intoverflow_t ___y = (intoverflow_t)y;                  \
57226 +       if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
57227 +               ___retval = NULL;                               \
57228 +       else                                                    \
57229 +               ___retval = krealloc((x), (size_t)___y, (z));   \
57230 +       ___retval;                                              \
57231 +})
57232 +
57233  #endif /* _LINUX_SLAB_H */
57234 diff -urNp linux-3.0.4/include/linux/slub_def.h linux-3.0.4/include/linux/slub_def.h
57235 --- linux-3.0.4/include/linux/slub_def.h        2011-07-21 22:17:23.000000000 -0400
57236 +++ linux-3.0.4/include/linux/slub_def.h        2011-08-23 21:47:56.000000000 -0400
57237 @@ -82,7 +82,7 @@ struct kmem_cache {
57238         struct kmem_cache_order_objects max;
57239         struct kmem_cache_order_objects min;
57240         gfp_t allocflags;       /* gfp flags to use on each alloc */
57241 -       int refcount;           /* Refcount for slab cache destroy */
57242 +       atomic_t refcount;      /* Refcount for slab cache destroy */
57243         void (*ctor)(void *);
57244         int inuse;              /* Offset to metadata */
57245         int align;              /* Alignment */
57246 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
57247  }
57248  
57249  void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
57250 -void *__kmalloc(size_t size, gfp_t flags);
57251 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
57252  
57253  static __always_inline void *
57254  kmalloc_order(size_t size, gfp_t flags, unsigned int order)
57255 diff -urNp linux-3.0.4/include/linux/sonet.h linux-3.0.4/include/linux/sonet.h
57256 --- linux-3.0.4/include/linux/sonet.h   2011-07-21 22:17:23.000000000 -0400
57257 +++ linux-3.0.4/include/linux/sonet.h   2011-08-23 21:47:56.000000000 -0400
57258 @@ -61,7 +61,7 @@ struct sonet_stats {
57259  #include <asm/atomic.h>
57260  
57261  struct k_sonet_stats {
57262 -#define __HANDLE_ITEM(i) atomic_t i
57263 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57264         __SONET_ITEMS
57265  #undef __HANDLE_ITEM
57266  };
57267 diff -urNp linux-3.0.4/include/linux/sunrpc/clnt.h linux-3.0.4/include/linux/sunrpc/clnt.h
57268 --- linux-3.0.4/include/linux/sunrpc/clnt.h     2011-07-21 22:17:23.000000000 -0400
57269 +++ linux-3.0.4/include/linux/sunrpc/clnt.h     2011-08-23 21:47:56.000000000 -0400
57270 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
57271  {
57272         switch (sap->sa_family) {
57273         case AF_INET:
57274 -               return ntohs(((struct sockaddr_in *)sap)->sin_port);
57275 +               return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57276         case AF_INET6:
57277 -               return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57278 +               return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57279         }
57280         return 0;
57281  }
57282 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
57283  static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57284                                     const struct sockaddr *src)
57285  {
57286 -       const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57287 +       const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57288         struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57289  
57290         dsin->sin_family = ssin->sin_family;
57291 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
57292         if (sa->sa_family != AF_INET6)
57293                 return 0;
57294  
57295 -       return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57296 +       return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57297  }
57298  
57299  #endif /* __KERNEL__ */
57300 diff -urNp linux-3.0.4/include/linux/sunrpc/svc_rdma.h linux-3.0.4/include/linux/sunrpc/svc_rdma.h
57301 --- linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
57302 +++ linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
57303 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57304  extern unsigned int svcrdma_max_requests;
57305  extern unsigned int svcrdma_max_req_size;
57306  
57307 -extern atomic_t rdma_stat_recv;
57308 -extern atomic_t rdma_stat_read;
57309 -extern atomic_t rdma_stat_write;
57310 -extern atomic_t rdma_stat_sq_starve;
57311 -extern atomic_t rdma_stat_rq_starve;
57312 -extern atomic_t rdma_stat_rq_poll;
57313 -extern atomic_t rdma_stat_rq_prod;
57314 -extern atomic_t rdma_stat_sq_poll;
57315 -extern atomic_t rdma_stat_sq_prod;
57316 +extern atomic_unchecked_t rdma_stat_recv;
57317 +extern atomic_unchecked_t rdma_stat_read;
57318 +extern atomic_unchecked_t rdma_stat_write;
57319 +extern atomic_unchecked_t rdma_stat_sq_starve;
57320 +extern atomic_unchecked_t rdma_stat_rq_starve;
57321 +extern atomic_unchecked_t rdma_stat_rq_poll;
57322 +extern atomic_unchecked_t rdma_stat_rq_prod;
57323 +extern atomic_unchecked_t rdma_stat_sq_poll;
57324 +extern atomic_unchecked_t rdma_stat_sq_prod;
57325  
57326  #define RPCRDMA_VERSION 1
57327  
57328 diff -urNp linux-3.0.4/include/linux/sysctl.h linux-3.0.4/include/linux/sysctl.h
57329 --- linux-3.0.4/include/linux/sysctl.h  2011-07-21 22:17:23.000000000 -0400
57330 +++ linux-3.0.4/include/linux/sysctl.h  2011-08-23 21:48:14.000000000 -0400
57331 @@ -155,7 +155,11 @@ enum
57332         KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57333  };
57334  
57335 -
57336 +#ifdef CONFIG_PAX_SOFTMODE
57337 +enum {
57338 +       PAX_SOFTMODE=1          /* PaX: disable/enable soft mode */
57339 +};
57340 +#endif
57341  
57342  /* CTL_VM names: */
57343  enum
57344 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
57345  
57346  extern int proc_dostring(struct ctl_table *, int,
57347                          void __user *, size_t *, loff_t *);
57348 +extern int proc_dostring_modpriv(struct ctl_table *, int,
57349 +                        void __user *, size_t *, loff_t *);
57350  extern int proc_dointvec(struct ctl_table *, int,
57351                          void __user *, size_t *, loff_t *);
57352  extern int proc_dointvec_minmax(struct ctl_table *, int,
57353 diff -urNp linux-3.0.4/include/linux/tty_ldisc.h linux-3.0.4/include/linux/tty_ldisc.h
57354 --- linux-3.0.4/include/linux/tty_ldisc.h       2011-07-21 22:17:23.000000000 -0400
57355 +++ linux-3.0.4/include/linux/tty_ldisc.h       2011-08-23 21:47:56.000000000 -0400
57356 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
57357  
57358         struct  module *owner;
57359         
57360 -       int refcount;
57361 +       atomic_t refcount;
57362  };
57363  
57364  struct tty_ldisc {
57365 diff -urNp linux-3.0.4/include/linux/types.h linux-3.0.4/include/linux/types.h
57366 --- linux-3.0.4/include/linux/types.h   2011-07-21 22:17:23.000000000 -0400
57367 +++ linux-3.0.4/include/linux/types.h   2011-08-23 21:47:56.000000000 -0400
57368 @@ -213,10 +213,26 @@ typedef struct {
57369         int counter;
57370  } atomic_t;
57371  
57372 +#ifdef CONFIG_PAX_REFCOUNT
57373 +typedef struct {
57374 +       int counter;
57375 +} atomic_unchecked_t;
57376 +#else
57377 +typedef atomic_t atomic_unchecked_t;
57378 +#endif
57379 +
57380  #ifdef CONFIG_64BIT
57381  typedef struct {
57382         long counter;
57383  } atomic64_t;
57384 +
57385 +#ifdef CONFIG_PAX_REFCOUNT
57386 +typedef struct {
57387 +       long counter;
57388 +} atomic64_unchecked_t;
57389 +#else
57390 +typedef atomic64_t atomic64_unchecked_t;
57391 +#endif
57392  #endif
57393  
57394  struct list_head {
57395 diff -urNp linux-3.0.4/include/linux/uaccess.h linux-3.0.4/include/linux/uaccess.h
57396 --- linux-3.0.4/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
57397 +++ linux-3.0.4/include/linux/uaccess.h 2011-08-23 21:47:56.000000000 -0400
57398 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57399                 long ret;                               \
57400                 mm_segment_t old_fs = get_fs();         \
57401                                                         \
57402 -               set_fs(KERNEL_DS);                      \
57403                 pagefault_disable();                    \
57404 +               set_fs(KERNEL_DS);                      \
57405                 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval));            \
57406 -               pagefault_enable();                     \
57407                 set_fs(old_fs);                         \
57408 +               pagefault_enable();                     \
57409                 ret;                                    \
57410         })
57411  
57412 diff -urNp linux-3.0.4/include/linux/unaligned/access_ok.h linux-3.0.4/include/linux/unaligned/access_ok.h
57413 --- linux-3.0.4/include/linux/unaligned/access_ok.h     2011-07-21 22:17:23.000000000 -0400
57414 +++ linux-3.0.4/include/linux/unaligned/access_ok.h     2011-08-23 21:47:56.000000000 -0400
57415 @@ -6,32 +6,32 @@
57416  
57417  static inline u16 get_unaligned_le16(const void *p)
57418  {
57419 -       return le16_to_cpup((__le16 *)p);
57420 +       return le16_to_cpup((const __le16 *)p);
57421  }
57422  
57423  static inline u32 get_unaligned_le32(const void *p)
57424  {
57425 -       return le32_to_cpup((__le32 *)p);
57426 +       return le32_to_cpup((const __le32 *)p);
57427  }
57428  
57429  static inline u64 get_unaligned_le64(const void *p)
57430  {
57431 -       return le64_to_cpup((__le64 *)p);
57432 +       return le64_to_cpup((const __le64 *)p);
57433  }
57434  
57435  static inline u16 get_unaligned_be16(const void *p)
57436  {
57437 -       return be16_to_cpup((__be16 *)p);
57438 +       return be16_to_cpup((const __be16 *)p);
57439  }
57440  
57441  static inline u32 get_unaligned_be32(const void *p)
57442  {
57443 -       return be32_to_cpup((__be32 *)p);
57444 +       return be32_to_cpup((const __be32 *)p);
57445  }
57446  
57447  static inline u64 get_unaligned_be64(const void *p)
57448  {
57449 -       return be64_to_cpup((__be64 *)p);
57450 +       return be64_to_cpup((const __be64 *)p);
57451  }
57452  
57453  static inline void put_unaligned_le16(u16 val, void *p)
57454 diff -urNp linux-3.0.4/include/linux/vmalloc.h linux-3.0.4/include/linux/vmalloc.h
57455 --- linux-3.0.4/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
57456 +++ linux-3.0.4/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
57457 @@ -13,6 +13,11 @@ struct vm_area_struct;               /* vma defining 
57458  #define VM_MAP         0x00000004      /* vmap()ed pages */
57459  #define VM_USERMAP     0x00000008      /* suitable for remap_vmalloc_range */
57460  #define VM_VPAGES      0x00000010      /* buffer for pages was vmalloc'ed */
57461 +
57462 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
57463 +#define VM_KERNEXEC    0x00000020      /* allocate from executable kernel memory range */
57464 +#endif
57465 +
57466  /* bits [20..32] reserved for arch specific ioremap internals */
57467  
57468  /*
57469 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
57470  # endif
57471  #endif
57472  
57473 +#define vmalloc(x)                                             \
57474 +({                                                             \
57475 +       void *___retval;                                        \
57476 +       intoverflow_t ___x = (intoverflow_t)x;                  \
57477 +       if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n"))  \
57478 +               ___retval = NULL;                               \
57479 +       else                                                    \
57480 +               ___retval = vmalloc((unsigned long)___x);       \
57481 +       ___retval;                                              \
57482 +})
57483 +
57484 +#define vzalloc(x)                                             \
57485 +({                                                             \
57486 +       void *___retval;                                        \
57487 +       intoverflow_t ___x = (intoverflow_t)x;                  \
57488 +       if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n"))  \
57489 +               ___retval = NULL;                               \
57490 +       else                                                    \
57491 +               ___retval = vzalloc((unsigned long)___x);       \
57492 +       ___retval;                                              \
57493 +})
57494 +
57495 +#define __vmalloc(x, y, z)                                     \
57496 +({                                                             \
57497 +       void *___retval;                                        \
57498 +       intoverflow_t ___x = (intoverflow_t)x;                  \
57499 +       if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
57500 +               ___retval = NULL;                               \
57501 +       else                                                    \
57502 +               ___retval = __vmalloc((unsigned long)___x, (y), (z));\
57503 +       ___retval;                                              \
57504 +})
57505 +
57506 +#define vmalloc_user(x)                                                \
57507 +({                                                             \
57508 +       void *___retval;                                        \
57509 +       intoverflow_t ___x = (intoverflow_t)x;                  \
57510 +       if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
57511 +               ___retval = NULL;                               \
57512 +       else                                                    \
57513 +               ___retval = vmalloc_user((unsigned long)___x);  \
57514 +       ___retval;                                              \
57515 +})
57516 +
57517 +#define vmalloc_exec(x)                                                \
57518 +({                                                             \
57519 +       void *___retval;                                        \
57520 +       intoverflow_t ___x = (intoverflow_t)x;                  \
57521 +       if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
57522 +               ___retval = NULL;                               \
57523 +       else                                                    \
57524 +               ___retval = vmalloc_exec((unsigned long)___x);  \
57525 +       ___retval;                                              \
57526 +})
57527 +
57528 +#define vmalloc_node(x, y)                                     \
57529 +({                                                             \
57530 +       void *___retval;                                        \
57531 +       intoverflow_t ___x = (intoverflow_t)x;                  \
57532 +       if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
57533 +               ___retval = NULL;                               \
57534 +       else                                                    \
57535 +               ___retval = vmalloc_node((unsigned long)___x, (y));\
57536 +       ___retval;                                              \
57537 +})
57538 +
57539 +#define vzalloc_node(x, y)                                     \
57540 +({                                                             \
57541 +       void *___retval;                                        \
57542 +       intoverflow_t ___x = (intoverflow_t)x;                  \
57543 +       if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
57544 +               ___retval = NULL;                               \
57545 +       else                                                    \
57546 +               ___retval = vzalloc_node((unsigned long)___x, (y));\
57547 +       ___retval;                                              \
57548 +})
57549 +
57550 +#define vmalloc_32(x)                                          \
57551 +({                                                             \
57552 +       void *___retval;                                        \
57553 +       intoverflow_t ___x = (intoverflow_t)x;                  \
57554 +       if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
57555 +               ___retval = NULL;                               \
57556 +       else                                                    \
57557 +               ___retval = vmalloc_32((unsigned long)___x);    \
57558 +       ___retval;                                              \
57559 +})
57560 +
57561 +#define vmalloc_32_user(x)                                     \
57562 +({                                                             \
57563 +       void *___retval;                                        \
57564 +       intoverflow_t ___x = (intoverflow_t)x;                  \
57565 +       if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
57566 +               ___retval = NULL;                               \
57567 +       else                                                    \
57568 +               ___retval = vmalloc_32_user((unsigned long)___x);\
57569 +       ___retval;                                              \
57570 +})
57571 +
57572  #endif /* _LINUX_VMALLOC_H */
57573 diff -urNp linux-3.0.4/include/linux/vmstat.h linux-3.0.4/include/linux/vmstat.h
57574 --- linux-3.0.4/include/linux/vmstat.h  2011-07-21 22:17:23.000000000 -0400
57575 +++ linux-3.0.4/include/linux/vmstat.h  2011-08-23 21:47:56.000000000 -0400
57576 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
57577  /*
57578   * Zone based page accounting with per cpu differentials.
57579   */
57580 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57581 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57582  
57583  static inline void zone_page_state_add(long x, struct zone *zone,
57584                                  enum zone_stat_item item)
57585  {
57586 -       atomic_long_add(x, &zone->vm_stat[item]);
57587 -       atomic_long_add(x, &vm_stat[item]);
57588 +       atomic_long_add_unchecked(x, &zone->vm_stat[item]);
57589 +       atomic_long_add_unchecked(x, &vm_stat[item]);
57590  }
57591  
57592  static inline unsigned long global_page_state(enum zone_stat_item item)
57593  {
57594 -       long x = atomic_long_read(&vm_stat[item]);
57595 +       long x = atomic_long_read_unchecked(&vm_stat[item]);
57596  #ifdef CONFIG_SMP
57597         if (x < 0)
57598                 x = 0;
57599 @@ -109,7 +109,7 @@ static inline unsigned long global_page_
57600  static inline unsigned long zone_page_state(struct zone *zone,
57601                                         enum zone_stat_item item)
57602  {
57603 -       long x = atomic_long_read(&zone->vm_stat[item]);
57604 +       long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57605  #ifdef CONFIG_SMP
57606         if (x < 0)
57607                 x = 0;
57608 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
57609  static inline unsigned long zone_page_state_snapshot(struct zone *zone,
57610                                         enum zone_stat_item item)
57611  {
57612 -       long x = atomic_long_read(&zone->vm_stat[item]);
57613 +       long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57614  
57615  #ifdef CONFIG_SMP
57616         int cpu;
57617 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
57618  
57619  static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
57620  {
57621 -       atomic_long_inc(&zone->vm_stat[item]);
57622 -       atomic_long_inc(&vm_stat[item]);
57623 +       atomic_long_inc_unchecked(&zone->vm_stat[item]);
57624 +       atomic_long_inc_unchecked(&vm_stat[item]);
57625  }
57626  
57627  static inline void __inc_zone_page_state(struct page *page,
57628 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
57629  
57630  static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
57631  {
57632 -       atomic_long_dec(&zone->vm_stat[item]);
57633 -       atomic_long_dec(&vm_stat[item]);
57634 +       atomic_long_dec_unchecked(&zone->vm_stat[item]);
57635 +       atomic_long_dec_unchecked(&vm_stat[item]);
57636  }
57637  
57638  static inline void __dec_zone_page_state(struct page *page,
57639 diff -urNp linux-3.0.4/include/media/saa7146_vv.h linux-3.0.4/include/media/saa7146_vv.h
57640 --- linux-3.0.4/include/media/saa7146_vv.h      2011-07-21 22:17:23.000000000 -0400
57641 +++ linux-3.0.4/include/media/saa7146_vv.h      2011-08-24 18:26:09.000000000 -0400
57642 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
57643         int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
57644  
57645         /* the extension can override this */
57646 -       struct v4l2_ioctl_ops ops;
57647 +       v4l2_ioctl_ops_no_const ops;
57648         /* pointer to the saa7146 core ops */
57649         const struct v4l2_ioctl_ops *core_ops;
57650  
57651 diff -urNp linux-3.0.4/include/media/v4l2-ioctl.h linux-3.0.4/include/media/v4l2-ioctl.h
57652 --- linux-3.0.4/include/media/v4l2-ioctl.h      2011-07-21 22:17:23.000000000 -0400
57653 +++ linux-3.0.4/include/media/v4l2-ioctl.h      2011-08-24 18:25:45.000000000 -0400
57654 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
57655         long (*vidioc_default)         (struct file *file, void *fh,
57656                                         bool valid_prio, int cmd, void *arg);
57657  };
57658 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
57659  
57660  
57661  /* v4l debugging and diagnostics */
57662 diff -urNp linux-3.0.4/include/net/caif/cfctrl.h linux-3.0.4/include/net/caif/cfctrl.h
57663 --- linux-3.0.4/include/net/caif/cfctrl.h       2011-07-21 22:17:23.000000000 -0400
57664 +++ linux-3.0.4/include/net/caif/cfctrl.h       2011-08-23 21:47:56.000000000 -0400
57665 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
57666         void (*radioset_rsp)(void);
57667         void (*reject_rsp)(struct cflayer *layer, u8 linkid,
57668                                 struct cflayer *client_layer);
57669 -};
57670 +} __no_const;
57671  
57672  /* Link Setup Parameters for CAIF-Links. */
57673  struct cfctrl_link_param {
57674 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
57675  struct cfctrl {
57676         struct cfsrvl serv;
57677         struct cfctrl_rsp res;
57678 -       atomic_t req_seq_no;
57679 -       atomic_t rsp_seq_no;
57680 +       atomic_unchecked_t req_seq_no;
57681 +       atomic_unchecked_t rsp_seq_no;
57682         struct list_head list;
57683         /* Protects from simultaneous access to first_req list */
57684         spinlock_t info_list_lock;
57685 diff -urNp linux-3.0.4/include/net/flow.h linux-3.0.4/include/net/flow.h
57686 --- linux-3.0.4/include/net/flow.h      2011-07-21 22:17:23.000000000 -0400
57687 +++ linux-3.0.4/include/net/flow.h      2011-08-23 21:47:56.000000000 -0400
57688 @@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
57689                 u8 dir, flow_resolve_t resolver, void *ctx);
57690  
57691  extern void flow_cache_flush(void);
57692 -extern atomic_t flow_cache_genid;
57693 +extern atomic_unchecked_t flow_cache_genid;
57694  
57695  #endif
57696 diff -urNp linux-3.0.4/include/net/inetpeer.h linux-3.0.4/include/net/inetpeer.h
57697 --- linux-3.0.4/include/net/inetpeer.h  2011-07-21 22:17:23.000000000 -0400
57698 +++ linux-3.0.4/include/net/inetpeer.h  2011-08-23 21:47:56.000000000 -0400
57699 @@ -43,8 +43,8 @@ struct inet_peer {
57700          */
57701         union {
57702                 struct {
57703 -                       atomic_t                        rid;            /* Frag reception counter */
57704 -                       atomic_t                        ip_id_count;    /* IP ID for the next packet */
57705 +                       atomic_unchecked_t              rid;            /* Frag reception counter */
57706 +                       atomic_unchecked_t              ip_id_count;    /* IP ID for the next packet */
57707                         __u32                           tcp_ts;
57708                         __u32                           tcp_ts_stamp;
57709                         u32                             metrics[RTAX_MAX];
57710 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
57711  {
57712         more++;
57713         inet_peer_refcheck(p);
57714 -       return atomic_add_return(more, &p->ip_id_count) - more;
57715 +       return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
57716  }
57717  
57718  #endif /* _NET_INETPEER_H */
57719 diff -urNp linux-3.0.4/include/net/ip_fib.h linux-3.0.4/include/net/ip_fib.h
57720 --- linux-3.0.4/include/net/ip_fib.h    2011-07-21 22:17:23.000000000 -0400
57721 +++ linux-3.0.4/include/net/ip_fib.h    2011-08-23 21:47:56.000000000 -0400
57722 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
57723  
57724  #define FIB_RES_SADDR(net, res)                                \
57725         ((FIB_RES_NH(res).nh_saddr_genid ==             \
57726 -         atomic_read(&(net)->ipv4.dev_addr_genid)) ?   \
57727 +         atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
57728          FIB_RES_NH(res).nh_saddr :                     \
57729          fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
57730  #define FIB_RES_GW(res)                        (FIB_RES_NH(res).nh_gw)
57731 diff -urNp linux-3.0.4/include/net/ip_vs.h linux-3.0.4/include/net/ip_vs.h
57732 --- linux-3.0.4/include/net/ip_vs.h     2011-07-21 22:17:23.000000000 -0400
57733 +++ linux-3.0.4/include/net/ip_vs.h     2011-08-23 21:47:56.000000000 -0400
57734 @@ -509,7 +509,7 @@ struct ip_vs_conn {
57735         struct ip_vs_conn       *control;       /* Master control connection */
57736         atomic_t                n_control;      /* Number of controlled ones */
57737         struct ip_vs_dest       *dest;          /* real server */
57738 -       atomic_t                in_pkts;        /* incoming packet counter */
57739 +       atomic_unchecked_t      in_pkts;        /* incoming packet counter */
57740  
57741         /* packet transmitter for different forwarding methods.  If it
57742            mangles the packet, it must return NF_DROP or better NF_STOLEN,
57743 @@ -647,7 +647,7 @@ struct ip_vs_dest {
57744         __be16                  port;           /* port number of the server */
57745         union nf_inet_addr      addr;           /* IP address of the server */
57746         volatile unsigned       flags;          /* dest status flags */
57747 -       atomic_t                conn_flags;     /* flags to copy to conn */
57748 +       atomic_unchecked_t      conn_flags;     /* flags to copy to conn */
57749         atomic_t                weight;         /* server weight */
57750  
57751         atomic_t                refcnt;         /* reference counter */
57752 diff -urNp linux-3.0.4/include/net/irda/ircomm_core.h linux-3.0.4/include/net/irda/ircomm_core.h
57753 --- linux-3.0.4/include/net/irda/ircomm_core.h  2011-07-21 22:17:23.000000000 -0400
57754 +++ linux-3.0.4/include/net/irda/ircomm_core.h  2011-08-23 21:47:56.000000000 -0400
57755 @@ -51,7 +51,7 @@ typedef struct {
57756         int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
57757         int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *, 
57758                                   struct ircomm_info *);        
57759 -} call_t;
57760 +} __no_const call_t;
57761  
57762  struct ircomm_cb {
57763         irda_queue_t queue;
57764 diff -urNp linux-3.0.4/include/net/irda/ircomm_tty.h linux-3.0.4/include/net/irda/ircomm_tty.h
57765 --- linux-3.0.4/include/net/irda/ircomm_tty.h   2011-07-21 22:17:23.000000000 -0400
57766 +++ linux-3.0.4/include/net/irda/ircomm_tty.h   2011-08-23 21:47:56.000000000 -0400
57767 @@ -35,6 +35,7 @@
57768  #include <linux/termios.h>
57769  #include <linux/timer.h>
57770  #include <linux/tty.h>         /* struct tty_struct */
57771 +#include <asm/local.h>
57772  
57773  #include <net/irda/irias_object.h>
57774  #include <net/irda/ircomm_core.h>
57775 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
57776          unsigned short    close_delay;
57777          unsigned short    closing_wait; /* time to wait before closing */
57778  
57779 -       int  open_count;
57780 -       int  blocked_open;      /* # of blocked opens */
57781 +       local_t open_count;
57782 +       local_t blocked_open;   /* # of blocked opens */
57783  
57784         /* Protect concurent access to :
57785          *      o self->open_count
57786 diff -urNp linux-3.0.4/include/net/iucv/af_iucv.h linux-3.0.4/include/net/iucv/af_iucv.h
57787 --- linux-3.0.4/include/net/iucv/af_iucv.h      2011-07-21 22:17:23.000000000 -0400
57788 +++ linux-3.0.4/include/net/iucv/af_iucv.h      2011-08-23 21:47:56.000000000 -0400
57789 @@ -87,7 +87,7 @@ struct iucv_sock {
57790  struct iucv_sock_list {
57791         struct hlist_head head;
57792         rwlock_t          lock;
57793 -       atomic_t          autobind_name;
57794 +       atomic_unchecked_t autobind_name;
57795  };
57796  
57797  unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
57798 diff -urNp linux-3.0.4/include/net/lapb.h linux-3.0.4/include/net/lapb.h
57799 --- linux-3.0.4/include/net/lapb.h      2011-07-21 22:17:23.000000000 -0400
57800 +++ linux-3.0.4/include/net/lapb.h      2011-08-23 21:47:56.000000000 -0400
57801 @@ -95,7 +95,7 @@ struct lapb_cb {
57802         struct sk_buff_head     write_queue;
57803         struct sk_buff_head     ack_queue;
57804         unsigned char           window;
57805 -       struct lapb_register_struct callbacks;
57806 +       struct lapb_register_struct *callbacks;
57807  
57808         /* FRMR control information */
57809         struct lapb_frame       frmr_data;
57810 diff -urNp linux-3.0.4/include/net/neighbour.h linux-3.0.4/include/net/neighbour.h
57811 --- linux-3.0.4/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
57812 +++ linux-3.0.4/include/net/neighbour.h 2011-08-26 19:49:56.000000000 -0400
57813 @@ -117,14 +117,14 @@ struct neighbour {
57814  };
57815  
57816  struct neigh_ops {
57817 -       int                     family;
57818 +       const int               family;
57819         void                    (*solicit)(struct neighbour *, struct sk_buff*);
57820         void                    (*error_report)(struct neighbour *, struct sk_buff*);
57821         int                     (*output)(struct sk_buff*);
57822         int                     (*connected_output)(struct sk_buff*);
57823         int                     (*hh_output)(struct sk_buff*);
57824         int                     (*queue_xmit)(struct sk_buff*);
57825 -};
57826 +} __do_const;
57827  
57828  struct pneigh_entry {
57829         struct pneigh_entry     *next;
57830 diff -urNp linux-3.0.4/include/net/netlink.h linux-3.0.4/include/net/netlink.h
57831 --- linux-3.0.4/include/net/netlink.h   2011-07-21 22:17:23.000000000 -0400
57832 +++ linux-3.0.4/include/net/netlink.h   2011-08-23 21:47:56.000000000 -0400
57833 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
57834  static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
57835  {
57836         if (mark)
57837 -               skb_trim(skb, (unsigned char *) mark - skb->data);
57838 +               skb_trim(skb, (const unsigned char *) mark - skb->data);
57839  }
57840  
57841  /**
57842 diff -urNp linux-3.0.4/include/net/netns/ipv4.h linux-3.0.4/include/net/netns/ipv4.h
57843 --- linux-3.0.4/include/net/netns/ipv4.h        2011-07-21 22:17:23.000000000 -0400
57844 +++ linux-3.0.4/include/net/netns/ipv4.h        2011-08-23 21:47:56.000000000 -0400
57845 @@ -56,8 +56,8 @@ struct netns_ipv4 {
57846  
57847         unsigned int sysctl_ping_group_range[2];
57848  
57849 -       atomic_t rt_genid;
57850 -       atomic_t dev_addr_genid;
57851 +       atomic_unchecked_t rt_genid;
57852 +       atomic_unchecked_t dev_addr_genid;
57853  
57854  #ifdef CONFIG_IP_MROUTE
57855  #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
57856 diff -urNp linux-3.0.4/include/net/sctp/sctp.h linux-3.0.4/include/net/sctp/sctp.h
57857 --- linux-3.0.4/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
57858 +++ linux-3.0.4/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
57859 @@ -315,9 +315,9 @@ do {                                                                        \
57860  
57861  #else  /* SCTP_DEBUG */
57862  
57863 -#define SCTP_DEBUG_PRINTK(whatever...)
57864 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
57865 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
57866 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
57867 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
57868 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
57869  #define SCTP_ENABLE_DEBUG
57870  #define SCTP_DISABLE_DEBUG
57871  #define SCTP_ASSERT(expr, str, func)
57872 diff -urNp linux-3.0.4/include/net/sock.h linux-3.0.4/include/net/sock.h
57873 --- linux-3.0.4/include/net/sock.h      2011-07-21 22:17:23.000000000 -0400
57874 +++ linux-3.0.4/include/net/sock.h      2011-08-23 21:47:56.000000000 -0400
57875 @@ -277,7 +277,7 @@ struct sock {
57876  #ifdef CONFIG_RPS
57877         __u32                   sk_rxhash;
57878  #endif
57879 -       atomic_t                sk_drops;
57880 +       atomic_unchecked_t      sk_drops;
57881         int                     sk_rcvbuf;
57882  
57883         struct sk_filter __rcu  *sk_filter;
57884 @@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct 
57885  }
57886  
57887  static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
57888 -                                          char __user *from, char *to,
57889 +                                          char __user *from, unsigned char *to,
57890                                            int copy, int offset)
57891  {
57892         if (skb->ip_summed == CHECKSUM_NONE) {
57893 diff -urNp linux-3.0.4/include/net/tcp.h linux-3.0.4/include/net/tcp.h
57894 --- linux-3.0.4/include/net/tcp.h       2011-07-21 22:17:23.000000000 -0400
57895 +++ linux-3.0.4/include/net/tcp.h       2011-08-23 21:47:56.000000000 -0400
57896 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
57897  struct tcp_seq_afinfo {
57898         char                    *name;
57899         sa_family_t             family;
57900 -       struct file_operations  seq_fops;
57901 -       struct seq_operations   seq_ops;
57902 +       file_operations_no_const        seq_fops;
57903 +       seq_operations_no_const seq_ops;
57904  };
57905  
57906  struct tcp_iter_state {
57907 diff -urNp linux-3.0.4/include/net/udp.h linux-3.0.4/include/net/udp.h
57908 --- linux-3.0.4/include/net/udp.h       2011-07-21 22:17:23.000000000 -0400
57909 +++ linux-3.0.4/include/net/udp.h       2011-08-23 21:47:56.000000000 -0400
57910 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
57911         char                    *name;
57912         sa_family_t             family;
57913         struct udp_table        *udp_table;
57914 -       struct file_operations  seq_fops;
57915 -       struct seq_operations   seq_ops;
57916 +       file_operations_no_const        seq_fops;
57917 +       seq_operations_no_const seq_ops;
57918  };
57919  
57920  struct udp_iter_state {
57921 diff -urNp linux-3.0.4/include/net/xfrm.h linux-3.0.4/include/net/xfrm.h
57922 --- linux-3.0.4/include/net/xfrm.h      2011-07-21 22:17:23.000000000 -0400
57923 +++ linux-3.0.4/include/net/xfrm.h      2011-08-23 21:47:56.000000000 -0400
57924 @@ -505,7 +505,7 @@ struct xfrm_policy {
57925         struct timer_list       timer;
57926  
57927         struct flow_cache_object flo;
57928 -       atomic_t                genid;
57929 +       atomic_unchecked_t      genid;
57930         u32                     priority;
57931         u32                     index;
57932         struct xfrm_mark        mark;
57933 diff -urNp linux-3.0.4/include/rdma/iw_cm.h linux-3.0.4/include/rdma/iw_cm.h
57934 --- linux-3.0.4/include/rdma/iw_cm.h    2011-07-21 22:17:23.000000000 -0400
57935 +++ linux-3.0.4/include/rdma/iw_cm.h    2011-08-23 21:47:56.000000000 -0400
57936 @@ -120,7 +120,7 @@ struct iw_cm_verbs {
57937                                          int backlog);
57938  
57939         int             (*destroy_listen)(struct iw_cm_id *cm_id);
57940 -};
57941 +} __no_const;
57942  
57943  /**
57944   * iw_create_cm_id - Create an IW CM identifier.
57945 diff -urNp linux-3.0.4/include/scsi/libfc.h linux-3.0.4/include/scsi/libfc.h
57946 --- linux-3.0.4/include/scsi/libfc.h    2011-07-21 22:17:23.000000000 -0400
57947 +++ linux-3.0.4/include/scsi/libfc.h    2011-08-23 21:47:56.000000000 -0400
57948 @@ -750,6 +750,7 @@ struct libfc_function_template {
57949          */
57950         void (*disc_stop_final) (struct fc_lport *);
57951  };
57952 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
57953  
57954  /**
57955   * struct fc_disc - Discovery context
57956 @@ -853,7 +854,7 @@ struct fc_lport {
57957         struct fc_vport                *vport;
57958  
57959         /* Operational Information */
57960 -       struct libfc_function_template tt;
57961 +       libfc_function_template_no_const tt;
57962         u8                             link_up;
57963         u8                             qfull;
57964         enum fc_lport_state            state;
57965 diff -urNp linux-3.0.4/include/scsi/scsi_device.h linux-3.0.4/include/scsi/scsi_device.h
57966 --- linux-3.0.4/include/scsi/scsi_device.h      2011-07-21 22:17:23.000000000 -0400
57967 +++ linux-3.0.4/include/scsi/scsi_device.h      2011-08-23 21:47:56.000000000 -0400
57968 @@ -161,9 +161,9 @@ struct scsi_device {
57969         unsigned int max_device_blocked; /* what device_blocked counts down from  */
57970  #define SCSI_DEFAULT_DEVICE_BLOCKED    3
57971  
57972 -       atomic_t iorequest_cnt;
57973 -       atomic_t iodone_cnt;
57974 -       atomic_t ioerr_cnt;
57975 +       atomic_unchecked_t iorequest_cnt;
57976 +       atomic_unchecked_t iodone_cnt;
57977 +       atomic_unchecked_t ioerr_cnt;
57978  
57979         struct device           sdev_gendev,
57980                                 sdev_dev;
57981 diff -urNp linux-3.0.4/include/scsi/scsi_transport_fc.h linux-3.0.4/include/scsi/scsi_transport_fc.h
57982 --- linux-3.0.4/include/scsi/scsi_transport_fc.h        2011-07-21 22:17:23.000000000 -0400
57983 +++ linux-3.0.4/include/scsi/scsi_transport_fc.h        2011-08-26 19:49:56.000000000 -0400
57984 @@ -711,7 +711,7 @@ struct fc_function_template {
57985         unsigned long   show_host_system_hostname:1;
57986  
57987         unsigned long   disable_target_scan:1;
57988 -};
57989 +} __do_const;
57990  
57991  
57992  /**
57993 diff -urNp linux-3.0.4/include/sound/ak4xxx-adda.h linux-3.0.4/include/sound/ak4xxx-adda.h
57994 --- linux-3.0.4/include/sound/ak4xxx-adda.h     2011-07-21 22:17:23.000000000 -0400
57995 +++ linux-3.0.4/include/sound/ak4xxx-adda.h     2011-08-23 21:47:56.000000000 -0400
57996 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
57997         void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
57998                       unsigned char val);
57999         void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
58000 -};
58001 +} __no_const;
58002  
58003  #define AK4XXX_IMAGE_SIZE      (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
58004  
58005 diff -urNp linux-3.0.4/include/sound/hwdep.h linux-3.0.4/include/sound/hwdep.h
58006 --- linux-3.0.4/include/sound/hwdep.h   2011-07-21 22:17:23.000000000 -0400
58007 +++ linux-3.0.4/include/sound/hwdep.h   2011-08-23 21:47:56.000000000 -0400
58008 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
58009                           struct snd_hwdep_dsp_status *status);
58010         int (*dsp_load)(struct snd_hwdep *hw,
58011                         struct snd_hwdep_dsp_image *image);
58012 -};
58013 +} __no_const;
58014  
58015  struct snd_hwdep {
58016         struct snd_card *card;
58017 diff -urNp linux-3.0.4/include/sound/info.h linux-3.0.4/include/sound/info.h
58018 --- linux-3.0.4/include/sound/info.h    2011-07-21 22:17:23.000000000 -0400
58019 +++ linux-3.0.4/include/sound/info.h    2011-08-23 21:47:56.000000000 -0400
58020 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
58021                      struct snd_info_buffer *buffer);
58022         void (*write)(struct snd_info_entry *entry,
58023                       struct snd_info_buffer *buffer);
58024 -};
58025 +} __no_const;
58026  
58027  struct snd_info_entry_ops {
58028         int (*open)(struct snd_info_entry *entry,
58029 diff -urNp linux-3.0.4/include/sound/pcm.h linux-3.0.4/include/sound/pcm.h
58030 --- linux-3.0.4/include/sound/pcm.h     2011-07-21 22:17:23.000000000 -0400
58031 +++ linux-3.0.4/include/sound/pcm.h     2011-08-23 21:47:56.000000000 -0400
58032 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
58033         int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
58034         int (*ack)(struct snd_pcm_substream *substream);
58035  };
58036 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
58037  
58038  /*
58039   *
58040 diff -urNp linux-3.0.4/include/sound/sb16_csp.h linux-3.0.4/include/sound/sb16_csp.h
58041 --- linux-3.0.4/include/sound/sb16_csp.h        2011-07-21 22:17:23.000000000 -0400
58042 +++ linux-3.0.4/include/sound/sb16_csp.h        2011-08-23 21:47:56.000000000 -0400
58043 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
58044         int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
58045         int (*csp_stop) (struct snd_sb_csp * p);
58046         int (*csp_qsound_transfer) (struct snd_sb_csp * p);
58047 -};
58048 +} __no_const;
58049  
58050  /*
58051   * CSP private data
58052 diff -urNp linux-3.0.4/include/sound/soc.h linux-3.0.4/include/sound/soc.h
58053 --- linux-3.0.4/include/sound/soc.h     2011-07-21 22:17:23.000000000 -0400
58054 +++ linux-3.0.4/include/sound/soc.h     2011-08-26 19:49:56.000000000 -0400
58055 @@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
58056  
58057         /* platform stream ops */
58058         struct snd_pcm_ops *ops;
58059 -};
58060 +} __do_const;
58061  
58062  struct snd_soc_platform {
58063         const char *name;
58064 diff -urNp linux-3.0.4/include/sound/ymfpci.h linux-3.0.4/include/sound/ymfpci.h
58065 --- linux-3.0.4/include/sound/ymfpci.h  2011-07-21 22:17:23.000000000 -0400
58066 +++ linux-3.0.4/include/sound/ymfpci.h  2011-08-23 21:47:56.000000000 -0400
58067 @@ -358,7 +358,7 @@ struct snd_ymfpci {
58068         spinlock_t reg_lock;
58069         spinlock_t voice_lock;
58070         wait_queue_head_t interrupt_sleep;
58071 -       atomic_t interrupt_sleep_count;
58072 +       atomic_unchecked_t interrupt_sleep_count;
58073         struct snd_info_entry *proc_entry;
58074         const struct firmware *dsp_microcode;
58075         const struct firmware *controller_microcode;
58076 diff -urNp linux-3.0.4/include/target/target_core_base.h linux-3.0.4/include/target/target_core_base.h
58077 --- linux-3.0.4/include/target/target_core_base.h       2011-07-21 22:17:23.000000000 -0400
58078 +++ linux-3.0.4/include/target/target_core_base.h       2011-08-23 21:47:56.000000000 -0400
58079 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
58080         int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
58081         int (*t10_pr_register)(struct se_cmd *);
58082         int (*t10_pr_clear)(struct se_cmd *);
58083 -};
58084 +} __no_const;
58085  
58086  struct t10_reservation_template {
58087         /* Reservation effects all target ports */
58088 @@ -432,8 +432,8 @@ struct se_transport_task {
58089         atomic_t                t_task_cdbs_left;
58090         atomic_t                t_task_cdbs_ex_left;
58091         atomic_t                t_task_cdbs_timeout_left;
58092 -       atomic_t                t_task_cdbs_sent;
58093 -       atomic_t                t_transport_aborted;
58094 +       atomic_unchecked_t      t_task_cdbs_sent;
58095 +       atomic_unchecked_t      t_transport_aborted;
58096         atomic_t                t_transport_active;
58097         atomic_t                t_transport_complete;
58098         atomic_t                t_transport_queue_active;
58099 @@ -774,7 +774,7 @@ struct se_device {
58100         atomic_t                active_cmds;
58101         atomic_t                simple_cmds;
58102         atomic_t                depth_left;
58103 -       atomic_t                dev_ordered_id;
58104 +       atomic_unchecked_t      dev_ordered_id;
58105         atomic_t                dev_tur_active;
58106         atomic_t                execute_tasks;
58107         atomic_t                dev_status_thr_count;
58108 diff -urNp linux-3.0.4/include/trace/events/irq.h linux-3.0.4/include/trace/events/irq.h
58109 --- linux-3.0.4/include/trace/events/irq.h      2011-07-21 22:17:23.000000000 -0400
58110 +++ linux-3.0.4/include/trace/events/irq.h      2011-08-23 21:47:56.000000000 -0400
58111 @@ -36,7 +36,7 @@ struct softirq_action;
58112   */
58113  TRACE_EVENT(irq_handler_entry,
58114  
58115 -       TP_PROTO(int irq, struct irqaction *action),
58116 +       TP_PROTO(int irq, const struct irqaction *action),
58117  
58118         TP_ARGS(irq, action),
58119  
58120 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
58121   */
58122  TRACE_EVENT(irq_handler_exit,
58123  
58124 -       TP_PROTO(int irq, struct irqaction *action, int ret),
58125 +       TP_PROTO(int irq, const struct irqaction *action, int ret),
58126  
58127         TP_ARGS(irq, action, ret),
58128  
58129 diff -urNp linux-3.0.4/include/video/udlfb.h linux-3.0.4/include/video/udlfb.h
58130 --- linux-3.0.4/include/video/udlfb.h   2011-07-21 22:17:23.000000000 -0400
58131 +++ linux-3.0.4/include/video/udlfb.h   2011-08-23 21:47:56.000000000 -0400
58132 @@ -51,10 +51,10 @@ struct dlfb_data {
58133         int base8;
58134         u32 pseudo_palette[256];
58135         /* blit-only rendering path metrics, exposed through sysfs */
58136 -       atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
58137 -       atomic_t bytes_identical; /* saved effort with backbuffer comparison */
58138 -       atomic_t bytes_sent; /* to usb, after compression including overhead */
58139 -       atomic_t cpu_kcycles_used; /* transpired during pixel processing */
58140 +       atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
58141 +       atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
58142 +       atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
58143 +       atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
58144  };
58145  
58146  #define NR_USB_REQUEST_I2C_SUB_IO 0x02
58147 diff -urNp linux-3.0.4/include/video/uvesafb.h linux-3.0.4/include/video/uvesafb.h
58148 --- linux-3.0.4/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
58149 +++ linux-3.0.4/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
58150 @@ -177,6 +177,7 @@ struct uvesafb_par {
58151         u8 ypan;                        /* 0 - nothing, 1 - ypan, 2 - ywrap */
58152         u8 pmi_setpal;                  /* PMI for palette changes */
58153         u16 *pmi_base;                  /* protected mode interface location */
58154 +       u8 *pmi_code;                   /* protected mode code location */
58155         void *pmi_start;
58156         void *pmi_pal;
58157         u8 *vbe_state_orig;             /*
58158 diff -urNp linux-3.0.4/init/do_mounts.c linux-3.0.4/init/do_mounts.c
58159 --- linux-3.0.4/init/do_mounts.c        2011-07-21 22:17:23.000000000 -0400
58160 +++ linux-3.0.4/init/do_mounts.c        2011-08-23 21:47:56.000000000 -0400
58161 @@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
58162  
58163  static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58164  {
58165 -       int err = sys_mount(name, "/root", fs, flags, data);
58166 +       int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58167         if (err)
58168                 return err;
58169  
58170 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
58171         va_start(args, fmt);
58172         vsprintf(buf, fmt, args);
58173         va_end(args);
58174 -       fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58175 +       fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58176         if (fd >= 0) {
58177                 sys_ioctl(fd, FDEJECT, 0);
58178                 sys_close(fd);
58179         }
58180         printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58181 -       fd = sys_open("/dev/console", O_RDWR, 0);
58182 +       fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
58183         if (fd >= 0) {
58184                 sys_ioctl(fd, TCGETS, (long)&termios);
58185                 termios.c_lflag &= ~ICANON;
58186                 sys_ioctl(fd, TCSETSF, (long)&termios);
58187 -               sys_read(fd, &c, 1);
58188 +               sys_read(fd, (char __user *)&c, 1);
58189                 termios.c_lflag |= ICANON;
58190                 sys_ioctl(fd, TCSETSF, (long)&termios);
58191                 sys_close(fd);
58192 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
58193         mount_root();
58194  out:
58195         devtmpfs_mount("dev");
58196 -       sys_mount(".", "/", NULL, MS_MOVE, NULL);
58197 +       sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58198         sys_chroot((const char __user __force *)".");
58199  }
58200 diff -urNp linux-3.0.4/init/do_mounts.h linux-3.0.4/init/do_mounts.h
58201 --- linux-3.0.4/init/do_mounts.h        2011-07-21 22:17:23.000000000 -0400
58202 +++ linux-3.0.4/init/do_mounts.h        2011-08-23 21:47:56.000000000 -0400
58203 @@ -15,15 +15,15 @@ extern int root_mountflags;
58204  
58205  static inline int create_dev(char *name, dev_t dev)
58206  {
58207 -       sys_unlink(name);
58208 -       return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58209 +       sys_unlink((__force char __user *)name);
58210 +       return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58211  }
58212  
58213  #if BITS_PER_LONG == 32
58214  static inline u32 bstat(char *name)
58215  {
58216         struct stat64 stat;
58217 -       if (sys_stat64(name, &stat) != 0)
58218 +       if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58219                 return 0;
58220         if (!S_ISBLK(stat.st_mode))
58221                 return 0;
58222 diff -urNp linux-3.0.4/init/do_mounts_initrd.c linux-3.0.4/init/do_mounts_initrd.c
58223 --- linux-3.0.4/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
58224 +++ linux-3.0.4/init/do_mounts_initrd.c 2011-08-23 21:47:56.000000000 -0400
58225 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
58226         create_dev("/dev/root.old", Root_RAM0);
58227         /* mount initrd on rootfs' /root */
58228         mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58229 -       sys_mkdir("/old", 0700);
58230 -       root_fd = sys_open("/", 0, 0);
58231 -       old_fd = sys_open("/old", 0, 0);
58232 +       sys_mkdir((__force const char __user *)"/old", 0700);
58233 +       root_fd = sys_open((__force const char __user *)"/", 0, 0);
58234 +       old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58235         /* move initrd over / and chdir/chroot in initrd root */
58236 -       sys_chdir("/root");
58237 -       sys_mount(".", "/", NULL, MS_MOVE, NULL);
58238 -       sys_chroot(".");
58239 +       sys_chdir((__force const char __user *)"/root");
58240 +       sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58241 +       sys_chroot((__force const char __user *)".");
58242  
58243         /*
58244          * In case that a resume from disk is carried out by linuxrc or one of
58245 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
58246  
58247         /* move initrd to rootfs' /old */
58248         sys_fchdir(old_fd);
58249 -       sys_mount("/", ".", NULL, MS_MOVE, NULL);
58250 +       sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58251         /* switch root and cwd back to / of rootfs */
58252         sys_fchdir(root_fd);
58253 -       sys_chroot(".");
58254 +       sys_chroot((__force const char __user *)".");
58255         sys_close(old_fd);
58256         sys_close(root_fd);
58257  
58258         if (new_decode_dev(real_root_dev) == Root_RAM0) {
58259 -               sys_chdir("/old");
58260 +               sys_chdir((__force const char __user *)"/old");
58261                 return;
58262         }
58263  
58264 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
58265         mount_root();
58266  
58267         printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58268 -       error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58269 +       error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58270         if (!error)
58271                 printk("okay\n");
58272         else {
58273 -               int fd = sys_open("/dev/root.old", O_RDWR, 0);
58274 +               int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58275                 if (error == -ENOENT)
58276                         printk("/initrd does not exist. Ignored.\n");
58277                 else
58278                         printk("failed\n");
58279                 printk(KERN_NOTICE "Unmounting old root\n");
58280 -               sys_umount("/old", MNT_DETACH);
58281 +               sys_umount((__force char __user *)"/old", MNT_DETACH);
58282                 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58283                 if (fd < 0) {
58284                         error = fd;
58285 @@ -116,11 +116,11 @@ int __init initrd_load(void)
58286                  * mounted in the normal path.
58287                  */
58288                 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58289 -                       sys_unlink("/initrd.image");
58290 +                       sys_unlink((__force const char __user *)"/initrd.image");
58291                         handle_initrd();
58292                         return 1;
58293                 }
58294         }
58295 -       sys_unlink("/initrd.image");
58296 +       sys_unlink((__force const char __user *)"/initrd.image");
58297         return 0;
58298  }
58299 diff -urNp linux-3.0.4/init/do_mounts_md.c linux-3.0.4/init/do_mounts_md.c
58300 --- linux-3.0.4/init/do_mounts_md.c     2011-07-21 22:17:23.000000000 -0400
58301 +++ linux-3.0.4/init/do_mounts_md.c     2011-08-23 21:47:56.000000000 -0400
58302 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58303                         partitioned ? "_d" : "", minor,
58304                         md_setup_args[ent].device_names);
58305  
58306 -               fd = sys_open(name, 0, 0);
58307 +               fd = sys_open((__force char __user *)name, 0, 0);
58308                 if (fd < 0) {
58309                         printk(KERN_ERR "md: open failed - cannot start "
58310                                         "array %s\n", name);
58311 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58312                          * array without it
58313                          */
58314                         sys_close(fd);
58315 -                       fd = sys_open(name, 0, 0);
58316 +                       fd = sys_open((__force char __user *)name, 0, 0);
58317                         sys_ioctl(fd, BLKRRPART, 0);
58318                 }
58319                 sys_close(fd);
58320 diff -urNp linux-3.0.4/init/initramfs.c linux-3.0.4/init/initramfs.c
58321 --- linux-3.0.4/init/initramfs.c        2011-07-21 22:17:23.000000000 -0400
58322 +++ linux-3.0.4/init/initramfs.c        2011-08-23 21:47:56.000000000 -0400
58323 @@ -74,7 +74,7 @@ static void __init free_hash(void)
58324         }
58325  }
58326  
58327 -static long __init do_utime(char __user *filename, time_t mtime)
58328 +static long __init do_utime(__force char __user *filename, time_t mtime)
58329  {
58330         struct timespec t[2];
58331  
58332 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
58333         struct dir_entry *de, *tmp;
58334         list_for_each_entry_safe(de, tmp, &dir_list, list) {
58335                 list_del(&de->list);
58336 -               do_utime(de->name, de->mtime);
58337 +               do_utime((__force char __user *)de->name, de->mtime);
58338                 kfree(de->name);
58339                 kfree(de);
58340         }
58341 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
58342         if (nlink >= 2) {
58343                 char *old = find_link(major, minor, ino, mode, collected);
58344                 if (old)
58345 -                       return (sys_link(old, collected) < 0) ? -1 : 1;
58346 +                       return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58347         }
58348         return 0;
58349  }
58350 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
58351  {
58352         struct stat st;
58353  
58354 -       if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58355 +       if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58356                 if (S_ISDIR(st.st_mode))
58357 -                       sys_rmdir(path);
58358 +                       sys_rmdir((__force char __user *)path);
58359                 else
58360 -                       sys_unlink(path);
58361 +                       sys_unlink((__force char __user *)path);
58362         }
58363  }
58364  
58365 @@ -305,7 +305,7 @@ static int __init do_name(void)
58366                         int openflags = O_WRONLY|O_CREAT;
58367                         if (ml != 1)
58368                                 openflags |= O_TRUNC;
58369 -                       wfd = sys_open(collected, openflags, mode);
58370 +                       wfd = sys_open((__force char __user *)collected, openflags, mode);
58371  
58372                         if (wfd >= 0) {
58373                                 sys_fchown(wfd, uid, gid);
58374 @@ -317,17 +317,17 @@ static int __init do_name(void)
58375                         }
58376                 }
58377         } else if (S_ISDIR(mode)) {
58378 -               sys_mkdir(collected, mode);
58379 -               sys_chown(collected, uid, gid);
58380 -               sys_chmod(collected, mode);
58381 +               sys_mkdir((__force char __user *)collected, mode);
58382 +               sys_chown((__force char __user *)collected, uid, gid);
58383 +               sys_chmod((__force char __user *)collected, mode);
58384                 dir_add(collected, mtime);
58385         } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58386                    S_ISFIFO(mode) || S_ISSOCK(mode)) {
58387                 if (maybe_link() == 0) {
58388 -                       sys_mknod(collected, mode, rdev);
58389 -                       sys_chown(collected, uid, gid);
58390 -                       sys_chmod(collected, mode);
58391 -                       do_utime(collected, mtime);
58392 +                       sys_mknod((__force char __user *)collected, mode, rdev);
58393 +                       sys_chown((__force char __user *)collected, uid, gid);
58394 +                       sys_chmod((__force char __user *)collected, mode);
58395 +                       do_utime((__force char __user *)collected, mtime);
58396                 }
58397         }
58398         return 0;
58399 @@ -336,15 +336,15 @@ static int __init do_name(void)
58400  static int __init do_copy(void)
58401  {
58402         if (count >= body_len) {
58403 -               sys_write(wfd, victim, body_len);
58404 +               sys_write(wfd, (__force char __user *)victim, body_len);
58405                 sys_close(wfd);
58406 -               do_utime(vcollected, mtime);
58407 +               do_utime((__force char __user *)vcollected, mtime);
58408                 kfree(vcollected);
58409                 eat(body_len);
58410                 state = SkipIt;
58411                 return 0;
58412         } else {
58413 -               sys_write(wfd, victim, count);
58414 +               sys_write(wfd, (__force char __user *)victim, count);
58415                 body_len -= count;
58416                 eat(count);
58417                 return 1;
58418 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
58419  {
58420         collected[N_ALIGN(name_len) + body_len] = '\0';
58421         clean_path(collected, 0);
58422 -       sys_symlink(collected + N_ALIGN(name_len), collected);
58423 -       sys_lchown(collected, uid, gid);
58424 -       do_utime(collected, mtime);
58425 +       sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
58426 +       sys_lchown((__force char __user *)collected, uid, gid);
58427 +       do_utime((__force char __user *)collected, mtime);
58428         state = SkipIt;
58429         next_state = Reset;
58430         return 0;
58431 diff -urNp linux-3.0.4/init/Kconfig linux-3.0.4/init/Kconfig
58432 --- linux-3.0.4/init/Kconfig    2011-07-21 22:17:23.000000000 -0400
58433 +++ linux-3.0.4/init/Kconfig    2011-08-23 21:47:56.000000000 -0400
58434 @@ -1195,7 +1195,7 @@ config SLUB_DEBUG
58435  
58436  config COMPAT_BRK
58437         bool "Disable heap randomization"
58438 -       default y
58439 +       default n
58440         help
58441           Randomizing heap placement makes heap exploits harder, but it
58442           also breaks ancient binaries (including anything libc5 based).
58443 diff -urNp linux-3.0.4/init/main.c linux-3.0.4/init/main.c
58444 --- linux-3.0.4/init/main.c     2011-07-21 22:17:23.000000000 -0400
58445 +++ linux-3.0.4/init/main.c     2011-08-23 21:48:14.000000000 -0400
58446 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) 
58447  extern void tc_init(void);
58448  #endif
58449  
58450 +extern void grsecurity_init(void);
58451 +
58452  /*
58453   * Debug helper: via this flag we know that we are in 'early bootup code'
58454   * where only the boot processor is running with IRQ disabled.  This means
58455 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
58456  
58457  __setup("reset_devices", set_reset_devices);
58458  
58459 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
58460 +extern char pax_enter_kernel_user[];
58461 +extern char pax_exit_kernel_user[];
58462 +extern pgdval_t clone_pgd_mask;
58463 +#endif
58464 +
58465 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58466 +static int __init setup_pax_nouderef(char *str)
58467 +{
58468 +#ifdef CONFIG_X86_32
58469 +       unsigned int cpu;
58470 +       struct desc_struct *gdt;
58471 +
58472 +       for (cpu = 0; cpu < NR_CPUS; cpu++) {
58473 +               gdt = get_cpu_gdt_table(cpu);
58474 +               gdt[GDT_ENTRY_KERNEL_DS].type = 3;
58475 +               gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
58476 +               gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
58477 +               gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58478 +       }
58479 +       asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
58480 +#else
58481 +       memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
58482 +       memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
58483 +       clone_pgd_mask = ~(pgdval_t)0UL;
58484 +#endif
58485 +
58486 +       return 0;
58487 +}
58488 +early_param("pax_nouderef", setup_pax_nouderef);
58489 +#endif
58490 +
58491 +#ifdef CONFIG_PAX_SOFTMODE
58492 +int pax_softmode;
58493 +
58494 +static int __init setup_pax_softmode(char *str)
58495 +{
58496 +       get_option(&str, &pax_softmode);
58497 +       return 1;
58498 +}
58499 +__setup("pax_softmode=", setup_pax_softmode);
58500 +#endif
58501 +
58502  static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
58503  const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58504  static const char *panic_later, *panic_param;
58505 @@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
58506  {
58507         int count = preempt_count();
58508         int ret;
58509 +       const char *msg1 = "", *msg2 = "";
58510  
58511         if (initcall_debug)
58512                 ret = do_one_initcall_debug(fn);
58513 @@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
58514                 sprintf(msgbuf, "error code %d ", ret);
58515  
58516         if (preempt_count() != count) {
58517 -               strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
58518 +               msg1 = " preemption imbalance";
58519                 preempt_count() = count;
58520         }
58521         if (irqs_disabled()) {
58522 -               strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
58523 +               msg2 = " disabled interrupts";
58524                 local_irq_enable();
58525         }
58526 -       if (msgbuf[0]) {
58527 -               printk("initcall %pF returned with %s\n", fn, msgbuf);
58528 +       if (msgbuf[0] || *msg1 || *msg2) {
58529 +               printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
58530         }
58531  
58532         return ret;
58533 @@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
58534         do_basic_setup();
58535  
58536         /* Open the /dev/console on the rootfs, this should never fail */
58537 -       if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
58538 +       if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
58539                 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
58540  
58541         (void) sys_dup(0);
58542 @@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
58543         if (!ramdisk_execute_command)
58544                 ramdisk_execute_command = "/init";
58545  
58546 -       if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
58547 +       if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
58548                 ramdisk_execute_command = NULL;
58549                 prepare_namespace();
58550         }
58551  
58552 +       grsecurity_init();
58553 +
58554         /*
58555          * Ok, we have completed the initial bootup, and
58556          * we're essentially up and running. Get rid of the
58557 diff -urNp linux-3.0.4/ipc/mqueue.c linux-3.0.4/ipc/mqueue.c
58558 --- linux-3.0.4/ipc/mqueue.c    2011-07-21 22:17:23.000000000 -0400
58559 +++ linux-3.0.4/ipc/mqueue.c    2011-08-23 21:48:14.000000000 -0400
58560 @@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
58561                         mq_bytes = (mq_msg_tblsz +
58562                                 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
58563  
58564 +                       gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
58565                         spin_lock(&mq_lock);
58566                         if (u->mq_bytes + mq_bytes < u->mq_bytes ||
58567                             u->mq_bytes + mq_bytes >
58568 diff -urNp linux-3.0.4/ipc/msg.c linux-3.0.4/ipc/msg.c
58569 --- linux-3.0.4/ipc/msg.c       2011-07-21 22:17:23.000000000 -0400
58570 +++ linux-3.0.4/ipc/msg.c       2011-08-23 21:47:56.000000000 -0400
58571 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
58572         return security_msg_queue_associate(msq, msgflg);
58573  }
58574  
58575 +static struct ipc_ops msg_ops = {
58576 +       .getnew         = newque,
58577 +       .associate      = msg_security,
58578 +       .more_checks    = NULL
58579 +};
58580 +
58581  SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
58582  {
58583         struct ipc_namespace *ns;
58584 -       struct ipc_ops msg_ops;
58585         struct ipc_params msg_params;
58586  
58587         ns = current->nsproxy->ipc_ns;
58588  
58589 -       msg_ops.getnew = newque;
58590 -       msg_ops.associate = msg_security;
58591 -       msg_ops.more_checks = NULL;
58592 -
58593         msg_params.key = key;
58594         msg_params.flg = msgflg;
58595  
58596 diff -urNp linux-3.0.4/ipc/sem.c linux-3.0.4/ipc/sem.c
58597 --- linux-3.0.4/ipc/sem.c       2011-08-23 21:44:40.000000000 -0400
58598 +++ linux-3.0.4/ipc/sem.c       2011-08-23 21:48:14.000000000 -0400
58599 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
58600         return 0;
58601  }
58602  
58603 +static struct ipc_ops sem_ops = {
58604 +       .getnew         = newary,
58605 +       .associate      = sem_security,
58606 +       .more_checks    = sem_more_checks
58607 +};
58608 +
58609  SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
58610  {
58611         struct ipc_namespace *ns;
58612 -       struct ipc_ops sem_ops;
58613         struct ipc_params sem_params;
58614  
58615         ns = current->nsproxy->ipc_ns;
58616 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
58617         if (nsems < 0 || nsems > ns->sc_semmsl)
58618                 return -EINVAL;
58619  
58620 -       sem_ops.getnew = newary;
58621 -       sem_ops.associate = sem_security;
58622 -       sem_ops.more_checks = sem_more_checks;
58623 -
58624         sem_params.key = key;
58625         sem_params.flg = semflg;
58626         sem_params.u.nsems = nsems;
58627 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
58628         int nsems;
58629         struct list_head tasks;
58630  
58631 +       pax_track_stack();
58632 +
58633         sma = sem_lock_check(ns, semid);
58634         if (IS_ERR(sma))
58635                 return PTR_ERR(sma);
58636 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, 
58637         struct ipc_namespace *ns;
58638         struct list_head tasks;
58639  
58640 +       pax_track_stack();
58641 +
58642         ns = current->nsproxy->ipc_ns;
58643  
58644         if (nsops < 1 || semid < 0)
58645 diff -urNp linux-3.0.4/ipc/shm.c linux-3.0.4/ipc/shm.c
58646 --- linux-3.0.4/ipc/shm.c       2011-07-21 22:17:23.000000000 -0400
58647 +++ linux-3.0.4/ipc/shm.c       2011-08-23 21:48:14.000000000 -0400
58648 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
58649  static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
58650  #endif
58651  
58652 +#ifdef CONFIG_GRKERNSEC
58653 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58654 +                          const time_t shm_createtime, const uid_t cuid,
58655 +                          const int shmid);
58656 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58657 +                          const time_t shm_createtime);
58658 +#endif
58659 +
58660  void shm_init_ns(struct ipc_namespace *ns)
58661  {
58662         ns->shm_ctlmax = SHMMAX;
58663 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
58664         shp->shm_lprid = 0;
58665         shp->shm_atim = shp->shm_dtim = 0;
58666         shp->shm_ctim = get_seconds();
58667 +#ifdef CONFIG_GRKERNSEC
58668 +       {
58669 +               struct timespec timeval;
58670 +               do_posix_clock_monotonic_gettime(&timeval);
58671 +
58672 +               shp->shm_createtime = timeval.tv_sec;
58673 +       }
58674 +#endif
58675         shp->shm_segsz = size;
58676         shp->shm_nattch = 0;
58677         shp->shm_file = file;
58678 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
58679         return 0;
58680  }
58681  
58682 +static struct ipc_ops shm_ops = {
58683 +       .getnew         = newseg,
58684 +       .associate      = shm_security,
58685 +       .more_checks    = shm_more_checks
58686 +};
58687 +
58688  SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
58689  {
58690         struct ipc_namespace *ns;
58691 -       struct ipc_ops shm_ops;
58692         struct ipc_params shm_params;
58693  
58694         ns = current->nsproxy->ipc_ns;
58695  
58696 -       shm_ops.getnew = newseg;
58697 -       shm_ops.associate = shm_security;
58698 -       shm_ops.more_checks = shm_more_checks;
58699 -
58700         shm_params.key = key;
58701         shm_params.flg = shmflg;
58702         shm_params.u.size = size;
58703 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
58704         case SHM_LOCK:
58705         case SHM_UNLOCK:
58706         {
58707 -               struct file *uninitialized_var(shm_file);
58708 -
58709                 lru_add_drain_all();  /* drain pagevecs to lru lists */
58710  
58711                 shp = shm_lock_check(ns, shmid);
58712 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
58713         if (err)
58714                 goto out_unlock;
58715  
58716 +#ifdef CONFIG_GRKERNSEC
58717 +       if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
58718 +                            shp->shm_perm.cuid, shmid) ||
58719 +           !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
58720 +               err = -EACCES;
58721 +               goto out_unlock;
58722 +       }
58723 +#endif
58724 +
58725         path = shp->shm_file->f_path;
58726         path_get(&path);
58727         shp->shm_nattch++;
58728 +#ifdef CONFIG_GRKERNSEC
58729 +       shp->shm_lapid = current->pid;
58730 +#endif
58731         size = i_size_read(path.dentry->d_inode);
58732         shm_unlock(shp);
58733  
58734 diff -urNp linux-3.0.4/kernel/acct.c linux-3.0.4/kernel/acct.c
58735 --- linux-3.0.4/kernel/acct.c   2011-07-21 22:17:23.000000000 -0400
58736 +++ linux-3.0.4/kernel/acct.c   2011-08-23 21:47:56.000000000 -0400
58737 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
58738          */
58739         flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
58740         current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
58741 -       file->f_op->write(file, (char *)&ac,
58742 +       file->f_op->write(file, (__force char __user *)&ac,
58743                                sizeof(acct_t), &file->f_pos);
58744         current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
58745         set_fs(fs);
58746 diff -urNp linux-3.0.4/kernel/audit.c linux-3.0.4/kernel/audit.c
58747 --- linux-3.0.4/kernel/audit.c  2011-07-21 22:17:23.000000000 -0400
58748 +++ linux-3.0.4/kernel/audit.c  2011-08-23 21:47:56.000000000 -0400
58749 @@ -112,7 +112,7 @@ u32         audit_sig_sid = 0;
58750     3) suppressed due to audit_rate_limit
58751     4) suppressed due to audit_backlog_limit
58752  */
58753 -static atomic_t    audit_lost = ATOMIC_INIT(0);
58754 +static atomic_unchecked_t    audit_lost = ATOMIC_INIT(0);
58755  
58756  /* The netlink socket. */
58757  static struct sock *audit_sock;
58758 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
58759         unsigned long           now;
58760         int                     print;
58761  
58762 -       atomic_inc(&audit_lost);
58763 +       atomic_inc_unchecked(&audit_lost);
58764  
58765         print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
58766  
58767 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
58768                         printk(KERN_WARNING
58769                                 "audit: audit_lost=%d audit_rate_limit=%d "
58770                                 "audit_backlog_limit=%d\n",
58771 -                               atomic_read(&audit_lost),
58772 +                               atomic_read_unchecked(&audit_lost),
58773                                 audit_rate_limit,
58774                                 audit_backlog_limit);
58775                 audit_panic(message);
58776 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
58777                 status_set.pid           = audit_pid;
58778                 status_set.rate_limit    = audit_rate_limit;
58779                 status_set.backlog_limit = audit_backlog_limit;
58780 -               status_set.lost          = atomic_read(&audit_lost);
58781 +               status_set.lost          = atomic_read_unchecked(&audit_lost);
58782                 status_set.backlog       = skb_queue_len(&audit_skb_queue);
58783                 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
58784                                  &status_set, sizeof(status_set));
58785 diff -urNp linux-3.0.4/kernel/auditsc.c linux-3.0.4/kernel/auditsc.c
58786 --- linux-3.0.4/kernel/auditsc.c        2011-07-21 22:17:23.000000000 -0400
58787 +++ linux-3.0.4/kernel/auditsc.c        2011-08-23 21:47:56.000000000 -0400
58788 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
58789  }
58790  
58791  /* global counter which is incremented every time something logs in */
58792 -static atomic_t session_id = ATOMIC_INIT(0);
58793 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
58794  
58795  /**
58796   * audit_set_loginuid - set a task's audit_context loginuid
58797 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
58798   */
58799  int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
58800  {
58801 -       unsigned int sessionid = atomic_inc_return(&session_id);
58802 +       unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
58803         struct audit_context *context = task->audit_context;
58804  
58805         if (context && context->in_syscall) {
58806 diff -urNp linux-3.0.4/kernel/capability.c linux-3.0.4/kernel/capability.c
58807 --- linux-3.0.4/kernel/capability.c     2011-07-21 22:17:23.000000000 -0400
58808 +++ linux-3.0.4/kernel/capability.c     2011-08-23 21:48:14.000000000 -0400
58809 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
58810                  * before modification is attempted and the application
58811                  * fails.
58812                  */
58813 +               if (tocopy > ARRAY_SIZE(kdata))
58814 +                       return -EFAULT;
58815 +
58816                 if (copy_to_user(dataptr, kdata, tocopy
58817                                  * sizeof(struct __user_cap_data_struct))) {
58818                         return -EFAULT;
58819 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
58820                 BUG();
58821         }
58822  
58823 -       if (security_capable(ns, current_cred(), cap) == 0) {
58824 +       if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
58825                 current->flags |= PF_SUPERPRIV;
58826                 return true;
58827         }
58828 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
58829  }
58830  EXPORT_SYMBOL(ns_capable);
58831  
58832 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
58833 +{
58834 +       if (unlikely(!cap_valid(cap))) {
58835 +               printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
58836 +               BUG();
58837 +       }
58838 +
58839 +       if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
58840 +               current->flags |= PF_SUPERPRIV;
58841 +               return true;
58842 +       }
58843 +       return false;
58844 +}
58845 +EXPORT_SYMBOL(ns_capable_nolog);
58846 +
58847 +bool capable_nolog(int cap)
58848 +{
58849 +       return ns_capable_nolog(&init_user_ns, cap);
58850 +}
58851 +EXPORT_SYMBOL(capable_nolog);
58852 +
58853  /**
58854   * task_ns_capable - Determine whether current task has a superior
58855   * capability targeted at a specific task's user namespace.
58856 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct 
58857  }
58858  EXPORT_SYMBOL(task_ns_capable);
58859  
58860 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
58861 +{
58862 +       return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
58863 +}
58864 +EXPORT_SYMBOL(task_ns_capable_nolog);
58865 +
58866  /**
58867   * nsown_capable - Check superior capability to one's own user_ns
58868   * @cap: The capability in question
58869 diff -urNp linux-3.0.4/kernel/cgroup.c linux-3.0.4/kernel/cgroup.c
58870 --- linux-3.0.4/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
58871 +++ linux-3.0.4/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
58872 @@ -593,6 +593,8 @@ static struct css_set *find_css_set(
58873         struct hlist_head *hhead;
58874         struct cg_cgroup_link *link;
58875  
58876 +       pax_track_stack();
58877 +
58878         /* First see if we already have a cgroup group that matches
58879          * the desired set */
58880         read_lock(&css_set_lock);
58881 diff -urNp linux-3.0.4/kernel/compat.c linux-3.0.4/kernel/compat.c
58882 --- linux-3.0.4/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
58883 +++ linux-3.0.4/kernel/compat.c 2011-08-23 21:48:14.000000000 -0400
58884 @@ -13,6 +13,7 @@
58885  
58886  #include <linux/linkage.h>
58887  #include <linux/compat.h>
58888 +#include <linux/module.h>
58889  #include <linux/errno.h>
58890  #include <linux/time.h>
58891  #include <linux/signal.h>
58892 diff -urNp linux-3.0.4/kernel/configs.c linux-3.0.4/kernel/configs.c
58893 --- linux-3.0.4/kernel/configs.c        2011-07-21 22:17:23.000000000 -0400
58894 +++ linux-3.0.4/kernel/configs.c        2011-08-23 21:48:14.000000000 -0400
58895 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
58896         struct proc_dir_entry *entry;
58897  
58898         /* create the current config file */
58899 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58900 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
58901 +       entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
58902 +                           &ikconfig_file_ops);
58903 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58904 +       entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
58905 +                           &ikconfig_file_ops);
58906 +#endif
58907 +#else
58908         entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
58909                             &ikconfig_file_ops);
58910 +#endif
58911 +
58912         if (!entry)
58913                 return -ENOMEM;
58914  
58915 diff -urNp linux-3.0.4/kernel/cred.c linux-3.0.4/kernel/cred.c
58916 --- linux-3.0.4/kernel/cred.c   2011-07-21 22:17:23.000000000 -0400
58917 +++ linux-3.0.4/kernel/cred.c   2011-08-25 17:23:03.000000000 -0400
58918 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
58919   */
58920  void __put_cred(struct cred *cred)
58921  {
58922 +       pax_track_stack();
58923 +
58924         kdebug("__put_cred(%p{%d,%d})", cred,
58925                atomic_read(&cred->usage),
58926                read_cred_subscribers(cred));
58927 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
58928  {
58929         struct cred *cred;
58930  
58931 +       pax_track_stack();
58932 +
58933         kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
58934                atomic_read(&tsk->cred->usage),
58935                read_cred_subscribers(tsk->cred));
58936 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct 
58937  {
58938         const struct cred *cred;
58939  
58940 +       pax_track_stack();
58941 +
58942         rcu_read_lock();
58943  
58944         do {
58945 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
58946  {
58947         struct cred *new;
58948  
58949 +       pax_track_stack();
58950 +
58951         new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
58952         if (!new)
58953                 return NULL;
58954 @@ -268,6 +268,8 @@ struct cred *__prepare_creds(const struc
58955  {
58956         struct cred *new;
58957  
58958 +       pax_track_stack();
58959 +
58960         new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58961         if (!new)
58962                 return NULL;
58963 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
58964         struct thread_group_cred *tgcred = NULL;
58965         struct cred *new;
58966  
58967 +       pax_track_stack();
58968 +
58969  #ifdef CONFIG_KEYS
58970         tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
58971         if (!tgcred)
58972 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
58973         struct cred *new;
58974         int ret;
58975  
58976 +       pax_track_stack();
58977 +
58978         if (
58979  #ifdef CONFIG_KEYS
58980                 !p->cred->thread_keyring &&
58981 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
58982         struct task_struct *task = current;
58983         const struct cred *old = task->real_cred;
58984  
58985 +       pax_track_stack();
58986 +
58987         kdebug("commit_creds(%p{%d,%d})", new,
58988                atomic_read(&new->usage),
58989                read_cred_subscribers(new));
58990 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
58991  
58992         get_cred(new); /* we will require a ref for the subj creds too */
58993  
58994 +       gr_set_role_label(task, new->uid, new->gid);
58995 +
58996         /* dumpability changes */
58997         if (old->euid != new->euid ||
58998             old->egid != new->egid ||
58999 @@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
59000                 key_fsgid_changed(task);
59001  
59002         /* do it
59003 -        * - What if a process setreuid()'s and this brings the
59004 -        *   new uid over his NPROC rlimit?  We can check this now
59005 -        *   cheaply with the new uid cache, so if it matters
59006 -        *   we should be checking for it.  -DaveM
59007 +        * RLIMIT_NPROC limits on user->processes have already been checked
59008 +        * in set_user().
59009          */
59010         alter_cred_subscribers(new, 2);
59011         if (new->user != old->user)
59012 @@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
59013   */
59014  void abort_creds(struct cred *new)
59015  {
59016 +       pax_track_stack();
59017 +
59018         kdebug("abort_creds(%p{%d,%d})", new,
59019                atomic_read(&new->usage),
59020                read_cred_subscribers(new));
59021 @@ -574,6 +592,8 @@ const struct cred *override_creds(const 
59022  {
59023         const struct cred *old = current->cred;
59024  
59025 +       pax_track_stack();
59026 +
59027         kdebug("override_creds(%p{%d,%d})", new,
59028                atomic_read(&new->usage),
59029                read_cred_subscribers(new));
59030 @@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
59031  {
59032         const struct cred *override = current->cred;
59033  
59034 +       pax_track_stack();
59035 +
59036         kdebug("revert_creds(%p{%d,%d})", old,
59037                atomic_read(&old->usage),
59038                read_cred_subscribers(old));
59039 @@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct 
59040         const struct cred *old;
59041         struct cred *new;
59042  
59043 +       pax_track_stack();
59044 +
59045         new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59046         if (!new)
59047                 return NULL;
59048 @@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
59049   */
59050  int set_security_override(struct cred *new, u32 secid)
59051  {
59052 +       pax_track_stack();
59053 +
59054         return security_kernel_act_as(new, secid);
59055  }
59056  EXPORT_SYMBOL(set_security_override);
59057 @@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
59058         u32 secid;
59059         int ret;
59060  
59061 +       pax_track_stack();
59062 +
59063         ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
59064         if (ret < 0)
59065                 return ret;
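The cred.c hunks above sprinkle pax_track_stack() markers over the credential helpers, hook gr_set_role_label() into commit_creds() at the one point where new credentials take effect, and replace the old DaveM comment with a note that the RLIMIT_NPROC check already happens in set_user(). As a small illustrative aside (not part of the patch), the limit that comment refers to can be read from userspace with getrlimit():

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;

        /* RLIMIT_NPROC is the per-user process limit the new comment
         * says is enforced in set_user() rather than in commit_creds(). */
        if (getrlimit(RLIMIT_NPROC, &rl) != 0) {
                perror("getrlimit");
                return 1;
        }
        printf("RLIMIT_NPROC soft=%llu hard=%llu\n",
               (unsigned long long)rl.rlim_cur,
               (unsigned long long)rl.rlim_max);
        return 0;
}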
59066 diff -urNp linux-3.0.4/kernel/debug/debug_core.c linux-3.0.4/kernel/debug/debug_core.c
59067 --- linux-3.0.4/kernel/debug/debug_core.c       2011-07-21 22:17:23.000000000 -0400
59068 +++ linux-3.0.4/kernel/debug/debug_core.c       2011-08-23 21:47:56.000000000 -0400
59069 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
59070   */
59071  static atomic_t                        masters_in_kgdb;
59072  static atomic_t                        slaves_in_kgdb;
59073 -static atomic_t                        kgdb_break_tasklet_var;
59074 +static atomic_unchecked_t      kgdb_break_tasklet_var;
59075  atomic_t                       kgdb_setting_breakpoint;
59076  
59077  struct task_struct             *kgdb_usethread;
59078 @@ -129,7 +129,7 @@ int                         kgdb_single_step;
59079  static pid_t                   kgdb_sstep_pid;
59080  
59081  /* to keep track of the CPU which is doing the single stepping*/
59082 -atomic_t                       kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59083 +atomic_unchecked_t             kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59084  
59085  /*
59086   * If you are debugging a problem where roundup (the collection of
59087 @@ -542,7 +542,7 @@ return_normal:
59088          * kernel will only try for the value of sstep_tries before
59089          * giving up and continuing on.
59090          */
59091 -       if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59092 +       if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59093             (kgdb_info[cpu].task &&
59094              kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
59095                 atomic_set(&kgdb_active, -1);
59096 @@ -636,8 +636,8 @@ cpu_master_loop:
59097         }
59098  
59099  kgdb_restore:
59100 -       if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
59101 -               int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
59102 +       if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
59103 +               int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
59104                 if (kgdb_info[sstep_cpu].task)
59105                         kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
59106                 else
59107 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
59108  static void kgdb_tasklet_bpt(unsigned long ing)
59109  {
59110         kgdb_breakpoint();
59111 -       atomic_set(&kgdb_break_tasklet_var, 0);
59112 +       atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
59113  }
59114  
59115  static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
59116  
59117  void kgdb_schedule_breakpoint(void)
59118  {
59119 -       if (atomic_read(&kgdb_break_tasklet_var) ||
59120 +       if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
59121                 atomic_read(&kgdb_active) != -1 ||
59122                 atomic_read(&kgdb_setting_breakpoint))
59123                 return;
59124 -       atomic_inc(&kgdb_break_tasklet_var);
59125 +       atomic_inc_unchecked(&kgdb_break_tasklet_var);
59126         tasklet_schedule(&kgdb_tasklet_breakpoint);
59127  }
59128  EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
59129 diff -urNp linux-3.0.4/kernel/debug/kdb/kdb_main.c linux-3.0.4/kernel/debug/kdb/kdb_main.c
59130 --- linux-3.0.4/kernel/debug/kdb/kdb_main.c     2011-07-21 22:17:23.000000000 -0400
59131 +++ linux-3.0.4/kernel/debug/kdb/kdb_main.c     2011-08-23 21:47:56.000000000 -0400
59132 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
59133         list_for_each_entry(mod, kdb_modules, list) {
59134  
59135                 kdb_printf("%-20s%8u  0x%p ", mod->name,
59136 -                          mod->core_size, (void *)mod);
59137 +                          mod->core_size_rx + mod->core_size_rw, (void *)mod);
59138  #ifdef CONFIG_MODULE_UNLOAD
59139                 kdb_printf("%4d ", module_refcount(mod));
59140  #endif
59141 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
59142                         kdb_printf(" (Loading)");
59143                 else
59144                         kdb_printf(" (Live)");
59145 -               kdb_printf(" 0x%p", mod->module_core);
59146 +               kdb_printf(" 0x%p 0x%p", mod->module_core_rx,  mod->module_core_rw);
59147  
59148  #ifdef CONFIG_MODULE_UNLOAD
59149                 {
59150 diff -urNp linux-3.0.4/kernel/events/core.c linux-3.0.4/kernel/events/core.c
59151 --- linux-3.0.4/kernel/events/core.c    2011-08-23 21:44:40.000000000 -0400
59152 +++ linux-3.0.4/kernel/events/core.c    2011-08-23 21:47:56.000000000 -0400
59153 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
59154         return 0;
59155  }
59156  
59157 -static atomic64_t perf_event_id;
59158 +static atomic64_unchecked_t perf_event_id;
59159  
59160  static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
59161                               enum event_type_t event_type);
59162 @@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
59163  
59164  static inline u64 perf_event_count(struct perf_event *event)
59165  {
59166 -       return local64_read(&event->count) + atomic64_read(&event->child_count);
59167 +       return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
59168  }
59169  
59170  static u64 perf_event_read(struct perf_event *event)
59171 @@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
59172         mutex_lock(&event->child_mutex);
59173         total += perf_event_read(event);
59174         *enabled += event->total_time_enabled +
59175 -                       atomic64_read(&event->child_total_time_enabled);
59176 +                       atomic64_read_unchecked(&event->child_total_time_enabled);
59177         *running += event->total_time_running +
59178 -                       atomic64_read(&event->child_total_time_running);
59179 +                       atomic64_read_unchecked(&event->child_total_time_running);
59180  
59181         list_for_each_entry(child, &event->child_list, child_list) {
59182                 total += perf_event_read(child);
59183 @@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
59184                 userpg->offset -= local64_read(&event->hw.prev_count);
59185  
59186         userpg->time_enabled = event->total_time_enabled +
59187 -                       atomic64_read(&event->child_total_time_enabled);
59188 +                       atomic64_read_unchecked(&event->child_total_time_enabled);
59189  
59190         userpg->time_running = event->total_time_running +
59191 -                       atomic64_read(&event->child_total_time_running);
59192 +                       atomic64_read_unchecked(&event->child_total_time_running);
59193  
59194         barrier();
59195         ++userpg->lock;
59196 @@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct 
59197         values[n++] = perf_event_count(event);
59198         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
59199                 values[n++] = enabled +
59200 -                       atomic64_read(&event->child_total_time_enabled);
59201 +                       atomic64_read_unchecked(&event->child_total_time_enabled);
59202         }
59203         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
59204                 values[n++] = running +
59205 -                       atomic64_read(&event->child_total_time_running);
59206 +                       atomic64_read_unchecked(&event->child_total_time_running);
59207         }
59208         if (read_format & PERF_FORMAT_ID)
59209                 values[n++] = primary_event_id(event);
59210 @@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr 
59211         event->parent           = parent_event;
59212  
59213         event->ns               = get_pid_ns(current->nsproxy->pid_ns);
59214 -       event->id               = atomic64_inc_return(&perf_event_id);
59215 +       event->id               = atomic64_inc_return_unchecked(&perf_event_id);
59216  
59217         event->state            = PERF_EVENT_STATE_INACTIVE;
59218  
59219 @@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
59220         /*
59221          * Add back the child's count to the parent's count:
59222          */
59223 -       atomic64_add(child_val, &parent_event->child_count);
59224 -       atomic64_add(child_event->total_time_enabled,
59225 +       atomic64_add_unchecked(child_val, &parent_event->child_count);
59226 +       atomic64_add_unchecked(child_event->total_time_enabled,
59227                      &parent_event->child_total_time_enabled);
59228 -       atomic64_add(child_event->total_time_running,
59229 +       atomic64_add_unchecked(child_event->total_time_running,
59230                      &parent_event->child_total_time_running);
59231  
59232         /*
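The debug_core.c and perf hunks above switch counters that may legitimately wrap (the kgdb tasklet flag, the single-step CPU marker, perf_event_id and the child time totals) from atomic_t/atomic64_t to the *_unchecked variants, which PaX's REFCOUNT overflow detection leaves alone. The userspace sketch below is not the PaX implementation; it only contrasts an overflow-refusing increment with a deliberately wrapping one, using C11 atomics and a GCC/Clang builtin:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hardened flavour: refuse to wrap, the way a protected refcount would.
 * (Simplified: the load/check/store here is not one atomic step.) */
static int checked_inc(atomic_int *v)
{
        int old = atomic_load(v);
        int next;

        if (__builtin_add_overflow(old, 1, &next))
                return -1;              /* refuse instead of wrapping */
        atomic_store(v, next);
        return 0;
}

/* Unchecked flavour: two's-complement wrap-around is acceptable here,
 * e.g. for an ever-growing event id or a statistics counter. */
static void unchecked_inc(atomic_int *v)
{
        atomic_fetch_add(v, 1);         /* C11 defines silent wrap for atomics */
}

int main(void)
{
        atomic_int refcount = INT_MAX;
        atomic_int event_id = INT_MAX;

        if (checked_inc(&refcount) < 0)
                puts("checked counter refused to overflow");

        unchecked_inc(&event_id);
        printf("unchecked counter wrapped to %d\n", atomic_load(&event_id));
        return 0;
}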
59233 diff -urNp linux-3.0.4/kernel/exit.c linux-3.0.4/kernel/exit.c
59234 --- linux-3.0.4/kernel/exit.c   2011-07-21 22:17:23.000000000 -0400
59235 +++ linux-3.0.4/kernel/exit.c   2011-08-23 21:48:14.000000000 -0400
59236 @@ -57,6 +57,10 @@
59237  #include <asm/pgtable.h>
59238  #include <asm/mmu_context.h>
59239  
59240 +#ifdef CONFIG_GRKERNSEC
59241 +extern rwlock_t grsec_exec_file_lock;
59242 +#endif
59243 +
59244  static void exit_mm(struct task_struct * tsk);
59245  
59246  static void __unhash_process(struct task_struct *p, bool group_dead)
59247 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p
59248         struct task_struct *leader;
59249         int zap_leader;
59250  repeat:
59251 +#ifdef CONFIG_NET
59252 +       gr_del_task_from_ip_table(p);
59253 +#endif
59254 +
59255         tracehook_prepare_release_task(p);
59256         /* don't need to get the RCU readlock here - the process is dead and
59257          * can't be modifying its own credentials. But shut RCU-lockdep up */
59258 @@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
59259  {
59260         write_lock_irq(&tasklist_lock);
59261  
59262 +#ifdef CONFIG_GRKERNSEC
59263 +       write_lock(&grsec_exec_file_lock);
59264 +       if (current->exec_file) {
59265 +               fput(current->exec_file);
59266 +               current->exec_file = NULL;
59267 +       }
59268 +       write_unlock(&grsec_exec_file_lock);
59269 +#endif
59270 +
59271         ptrace_unlink(current);
59272         /* Reparent to init */
59273         current->real_parent = current->parent = kthreadd_task;
59274         list_move_tail(&current->sibling, &current->real_parent->children);
59275  
59276 +       gr_set_kernel_label(current);
59277 +
59278         /* Set the exit signal to SIGCHLD so we signal init on exit */
59279         current->exit_signal = SIGCHLD;
59280  
59281 @@ -394,7 +413,7 @@ int allow_signal(int sig)
59282          * know it'll be handled, so that they don't get converted to
59283          * SIGKILL or just silently dropped.
59284          */
59285 -       current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59286 +       current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59287         recalc_sigpending();
59288         spin_unlock_irq(&current->sighand->siglock);
59289         return 0;
59290 @@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
59291         vsnprintf(current->comm, sizeof(current->comm), name, args);
59292         va_end(args);
59293  
59294 +#ifdef CONFIG_GRKERNSEC
59295 +       write_lock(&grsec_exec_file_lock);
59296 +       if (current->exec_file) {
59297 +               fput(current->exec_file);
59298 +               current->exec_file = NULL;
59299 +       }
59300 +       write_unlock(&grsec_exec_file_lock);
59301 +#endif
59302 +
59303 +       gr_set_kernel_label(current);
59304 +
59305         /*
59306          * If we were started as result of loading a module, close all of the
59307          * user space pages.  We don't need them, and if we didn't close them
59308 @@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
59309         struct task_struct *tsk = current;
59310         int group_dead;
59311  
59312 -       profile_task_exit(tsk);
59313 -
59314 -       WARN_ON(atomic_read(&tsk->fs_excl));
59315 -       WARN_ON(blk_needs_flush_plug(tsk));
59316 -
59317         if (unlikely(in_interrupt()))
59318                 panic("Aiee, killing interrupt handler!");
59319 -       if (unlikely(!tsk->pid))
59320 -               panic("Attempted to kill the idle task!");
59321  
59322         /*
59323          * If do_exit is called because this processes oopsed, it's possible
59324 @@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
59325          */
59326         set_fs(USER_DS);
59327  
59328 +       profile_task_exit(tsk);
59329 +
59330 +       WARN_ON(atomic_read(&tsk->fs_excl));
59331 +       WARN_ON(blk_needs_flush_plug(tsk));
59332 +
59333 +       if (unlikely(!tsk->pid))
59334 +               panic("Attempted to kill the idle task!");
59335 +
59336         tracehook_report_exit(&code);
59337  
59338         validate_creds_for_do_exit(tsk);
59339 @@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
59340         tsk->exit_code = code;
59341         taskstats_exit(tsk, group_dead);
59342  
59343 +       gr_acl_handle_psacct(tsk, code);
59344 +       gr_acl_handle_exit();
59345 +
59346         exit_mm(tsk);
59347  
59348         if (group_dead)
59349 diff -urNp linux-3.0.4/kernel/fork.c linux-3.0.4/kernel/fork.c
59350 --- linux-3.0.4/kernel/fork.c   2011-07-21 22:17:23.000000000 -0400
59351 +++ linux-3.0.4/kernel/fork.c   2011-08-25 17:23:36.000000000 -0400
59352 @@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
59353         *stackend = STACK_END_MAGIC;    /* for overflow detection */
59354  
59355  #ifdef CONFIG_CC_STACKPROTECTOR
59356 -       tsk->stack_canary = get_random_int();
59357 +       tsk->stack_canary = pax_get_random_long();
59358  #endif
59359  
59360         /* One for us, one for whoever does the "release_task()" (usually parent) */
59361 @@ -308,13 +308,77 @@ out:
59362  }
59363  
59364  #ifdef CONFIG_MMU
59365 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
59366 +{
59367 +       struct vm_area_struct *tmp;
59368 +       unsigned long charge;
59369 +       struct mempolicy *pol;
59370 +       struct file *file;
59371 +
59372 +       charge = 0;
59373 +       if (mpnt->vm_flags & VM_ACCOUNT) {
59374 +               unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
59375 +               if (security_vm_enough_memory(len))
59376 +                       goto fail_nomem;
59377 +               charge = len;
59378 +       }
59379 +       tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
59380 +       if (!tmp)
59381 +               goto fail_nomem;
59382 +       *tmp = *mpnt;
59383 +       tmp->vm_mm = mm;
59384 +       INIT_LIST_HEAD(&tmp->anon_vma_chain);
59385 +       pol = mpol_dup(vma_policy(mpnt));
59386 +       if (IS_ERR(pol))
59387 +               goto fail_nomem_policy;
59388 +       vma_set_policy(tmp, pol);
59389 +       if (anon_vma_fork(tmp, mpnt))
59390 +               goto fail_nomem_anon_vma_fork;
59391 +       tmp->vm_flags &= ~VM_LOCKED;
59392 +       tmp->vm_next = tmp->vm_prev = NULL;
59393 +       tmp->vm_mirror = NULL;
59394 +       file = tmp->vm_file;
59395 +       if (file) {
59396 +               struct inode *inode = file->f_path.dentry->d_inode;
59397 +               struct address_space *mapping = file->f_mapping;
59398 +
59399 +               get_file(file);
59400 +               if (tmp->vm_flags & VM_DENYWRITE)
59401 +                       atomic_dec(&inode->i_writecount);
59402 +               mutex_lock(&mapping->i_mmap_mutex);
59403 +               if (tmp->vm_flags & VM_SHARED)
59404 +                       mapping->i_mmap_writable++;
59405 +               flush_dcache_mmap_lock(mapping);
59406 +               /* insert tmp into the share list, just after mpnt */
59407 +               vma_prio_tree_add(tmp, mpnt);
59408 +               flush_dcache_mmap_unlock(mapping);
59409 +               mutex_unlock(&mapping->i_mmap_mutex);
59410 +       }
59411 +
59412 +       /*
59413 +        * Clear hugetlb-related page reserves for children. This only
59414 +        * affects MAP_PRIVATE mappings. Faults generated by the child
59415 +        * are not guaranteed to succeed, even if read-only
59416 +        */
59417 +       if (is_vm_hugetlb_page(tmp))
59418 +               reset_vma_resv_huge_pages(tmp);
59419 +
59420 +       return tmp;
59421 +
59422 +fail_nomem_anon_vma_fork:
59423 +       mpol_put(pol);
59424 +fail_nomem_policy:
59425 +       kmem_cache_free(vm_area_cachep, tmp);
59426 +fail_nomem:
59427 +       vm_unacct_memory(charge);
59428 +       return NULL;
59429 +}
59430 +
59431  static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
59432  {
59433         struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
59434         struct rb_node **rb_link, *rb_parent;
59435         int retval;
59436 -       unsigned long charge;
59437 -       struct mempolicy *pol;
59438  
59439         down_write(&oldmm->mmap_sem);
59440         flush_cache_dup_mm(oldmm);
59441 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
59442         mm->locked_vm = 0;
59443         mm->mmap = NULL;
59444         mm->mmap_cache = NULL;
59445 -       mm->free_area_cache = oldmm->mmap_base;
59446 -       mm->cached_hole_size = ~0UL;
59447 +       mm->free_area_cache = oldmm->free_area_cache;
59448 +       mm->cached_hole_size = oldmm->cached_hole_size;
59449         mm->map_count = 0;
59450         cpumask_clear(mm_cpumask(mm));
59451         mm->mm_rb = RB_ROOT;
59452 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
59453  
59454         prev = NULL;
59455         for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
59456 -               struct file *file;
59457 -
59458                 if (mpnt->vm_flags & VM_DONTCOPY) {
59459                         long pages = vma_pages(mpnt);
59460                         mm->total_vm -= pages;
59461 @@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
59462                                                                 -pages);
59463                         continue;
59464                 }
59465 -               charge = 0;
59466 -               if (mpnt->vm_flags & VM_ACCOUNT) {
59467 -                       unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
59468 -                       if (security_vm_enough_memory(len))
59469 -                               goto fail_nomem;
59470 -                       charge = len;
59471 -               }
59472 -               tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
59473 -               if (!tmp)
59474 -                       goto fail_nomem;
59475 -               *tmp = *mpnt;
59476 -               INIT_LIST_HEAD(&tmp->anon_vma_chain);
59477 -               pol = mpol_dup(vma_policy(mpnt));
59478 -               retval = PTR_ERR(pol);
59479 -               if (IS_ERR(pol))
59480 -                       goto fail_nomem_policy;
59481 -               vma_set_policy(tmp, pol);
59482 -               tmp->vm_mm = mm;
59483 -               if (anon_vma_fork(tmp, mpnt))
59484 -                       goto fail_nomem_anon_vma_fork;
59485 -               tmp->vm_flags &= ~VM_LOCKED;
59486 -               tmp->vm_next = tmp->vm_prev = NULL;
59487 -               file = tmp->vm_file;
59488 -               if (file) {
59489 -                       struct inode *inode = file->f_path.dentry->d_inode;
59490 -                       struct address_space *mapping = file->f_mapping;
59491 -
59492 -                       get_file(file);
59493 -                       if (tmp->vm_flags & VM_DENYWRITE)
59494 -                               atomic_dec(&inode->i_writecount);
59495 -                       mutex_lock(&mapping->i_mmap_mutex);
59496 -                       if (tmp->vm_flags & VM_SHARED)
59497 -                               mapping->i_mmap_writable++;
59498 -                       flush_dcache_mmap_lock(mapping);
59499 -                       /* insert tmp into the share list, just after mpnt */
59500 -                       vma_prio_tree_add(tmp, mpnt);
59501 -                       flush_dcache_mmap_unlock(mapping);
59502 -                       mutex_unlock(&mapping->i_mmap_mutex);
59503 +               tmp = dup_vma(mm, mpnt);
59504 +               if (!tmp) {
59505 +                       retval = -ENOMEM;
59506 +                       goto out;
59507                 }
59508  
59509                 /*
59510 -                * Clear hugetlb-related page reserves for children. This only
59511 -                * affects MAP_PRIVATE mappings. Faults generated by the child
59512 -                * are not guaranteed to succeed, even if read-only
59513 -                */
59514 -               if (is_vm_hugetlb_page(tmp))
59515 -                       reset_vma_resv_huge_pages(tmp);
59516 -
59517 -               /*
59518                  * Link in the new vma and copy the page table entries.
59519                  */
59520                 *pprev = tmp;
59521 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
59522                 if (retval)
59523                         goto out;
59524         }
59525 +
59526 +#ifdef CONFIG_PAX_SEGMEXEC
59527 +       if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59528 +               struct vm_area_struct *mpnt_m;
59529 +
59530 +               for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59531 +                       BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59532 +
59533 +                       if (!mpnt->vm_mirror)
59534 +                               continue;
59535 +
59536 +                       if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59537 +                               BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59538 +                               mpnt->vm_mirror = mpnt_m;
59539 +                       } else {
59540 +                               BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59541 +                               mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59542 +                               mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59543 +                               mpnt->vm_mirror->vm_mirror = mpnt;
59544 +                       }
59545 +               }
59546 +               BUG_ON(mpnt_m);
59547 +       }
59548 +#endif
59549 +
59550         /* a new mm has just been created */
59551         arch_dup_mmap(oldmm, mm);
59552         retval = 0;
59553 @@ -429,14 +474,6 @@ out:
59554         flush_tlb_mm(oldmm);
59555         up_write(&oldmm->mmap_sem);
59556         return retval;
59557 -fail_nomem_anon_vma_fork:
59558 -       mpol_put(pol);
59559 -fail_nomem_policy:
59560 -       kmem_cache_free(vm_area_cachep, tmp);
59561 -fail_nomem:
59562 -       retval = -ENOMEM;
59563 -       vm_unacct_memory(charge);
59564 -       goto out;
59565  }
59566  
59567  static inline int mm_alloc_pgd(struct mm_struct * mm)
59568 @@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
59569                         spin_unlock(&fs->lock);
59570                         return -EAGAIN;
59571                 }
59572 -               fs->users++;
59573 +               atomic_inc(&fs->users);
59574                 spin_unlock(&fs->lock);
59575                 return 0;
59576         }
59577         tsk->fs = copy_fs_struct(fs);
59578         if (!tsk->fs)
59579                 return -ENOMEM;
59580 +       gr_set_chroot_entries(tsk, &tsk->fs->root);
59581         return 0;
59582  }
59583  
59584 @@ -1156,15 +1156,18 @@ static struct task_struct *copy_process(
59585         init_vx_info(&p->vx_info, current_vx_info());
59586         init_nx_info(&p->nx_info, current_nx_info());
59587  
59588 +       gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59589 +
59590         retval = -EAGAIN;
59591         if (!vx_nproc_avail(1))
59592                 goto bad_fork_free;
59593         if (atomic_read(&p->real_cred->user->processes) >=
59594                         task_rlimit(p, RLIMIT_NPROC)) {
59595 -               if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59596 -                   p->real_cred->user != INIT_USER)
59597 +               if (p->real_cred->user != INIT_USER &&
59598 +                   !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
59599                         goto bad_fork_free;
59600         }
59601 +       current->flags &= ~PF_NPROC_EXCEEDED;
59602  
59603         retval = copy_creds(p, clone_flags);
59604         if (retval < 0)
59605 @@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
59606         if (clone_flags & CLONE_THREAD)
59607                 p->tgid = current->tgid;
59608  
59609 +       gr_copy_label(p);
59610 +
59611         p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59612         /*
59613          * Clear TID on mm_release()?
59614 @@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
59615  bad_fork_free:
59616         free_task(p);
59617  fork_out:
59618 +       gr_log_forkfail(retval);
59619 +
59620         return ERR_PTR(retval);
59621  }
59622  
59623 @@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
59624                 if (clone_flags & CLONE_PARENT_SETTID)
59625                         put_user(nr, parent_tidptr);
59626  
59627 +               gr_handle_brute_check();
59628 +
59629                 if (clone_flags & CLONE_VFORK) {
59630                         p->vfork_done = &vfork;
59631                         init_completion(&vfork);
59632 @@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
59633                 return 0;
59634  
59635         /* don't need lock here; in the worst case we'll do useless copy */
59636 -       if (fs->users == 1)
59637 +       if (atomic_read(&fs->users) == 1)
59638                 return 0;
59639  
59640         *new_fsp = copy_fs_struct(fs);
59641 @@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, 
59642                         fs = current->fs;
59643                         spin_lock(&fs->lock);
59644                         current->fs = new_fs;
59645 -                       if (--fs->users)
59646 +                       gr_set_chroot_entries(current, &current->fs->root);
59647 +                       if (atomic_dec_return(&fs->users))
59648                                 new_fs = NULL;
59649                         else
59650                                 new_fs = fs;
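The fork.c change above pulls the per-VMA body of dup_mmap() out into a dup_vma() helper that either returns a fully set-up copy or unwinds its own partial work through the usual chain of goto labels, which is what lets the SEGMEXEC mirror pass be added afterwards without bloating the loop. Below is a minimal userspace sketch of that cleanup idiom with invented names; it shows the shape of the pattern only, not the kernel code:

#include <stdlib.h>
#include <string.h>

struct item {
        char *name;
        char *payload;
};

/* Returns a fully-constructed copy, or NULL after undoing any partial
 * allocation - the same "goto fail_*" unwinding style dup_vma() uses. */
static struct item *item_dup(const struct item *src)
{
        struct item *it;

        it = malloc(sizeof(*it));
        if (!it)
                goto fail;
        it->name = strdup(src->name);
        if (!it->name)
                goto fail_free_item;
        it->payload = strdup(src->payload);
        if (!it->payload)
                goto fail_free_name;
        return it;

fail_free_name:
        free(it->name);
fail_free_item:
        free(it);
fail:
        return NULL;
}

int main(void)
{
        struct item orig = { "vma", "contents" };
        struct item *copy = item_dup(&orig);

        if (copy) {
                free(copy->payload);
                free(copy->name);
                free(copy);
        }
        return 0;
}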
59651 diff -urNp linux-3.0.4/kernel/futex.c linux-3.0.4/kernel/futex.c
59652 --- linux-3.0.4/kernel/futex.c  2011-08-23 21:44:40.000000000 -0400
59653 +++ linux-3.0.4/kernel/futex.c  2011-08-23 21:48:14.000000000 -0400
59654 @@ -54,6 +54,7 @@
59655  #include <linux/mount.h>
59656  #include <linux/pagemap.h>
59657  #include <linux/syscalls.h>
59658 +#include <linux/ptrace.h>
59659  #include <linux/signal.h>
59660  #include <linux/module.h>
59661  #include <linux/magic.h>
59662 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59663         struct page *page, *page_head;
59664         int err, ro = 0;
59665  
59666 +#ifdef CONFIG_PAX_SEGMEXEC
59667 +       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59668 +               return -EFAULT;
59669 +#endif
59670 +
59671         /*
59672          * The futex address must be "naturally" aligned.
59673          */
59674 @@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
59675         struct futex_q q = futex_q_init;
59676         int ret;
59677  
59678 +       pax_track_stack();
59679 +
59680         if (!bitset)
59681                 return -EINVAL;
59682         q.bitset = bitset;
59683 @@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
59684         struct futex_q q = futex_q_init;
59685         int res, ret;
59686  
59687 +       pax_track_stack();
59688 +
59689         if (!bitset)
59690                 return -EINVAL;
59691  
59692 @@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59693  {
59694         struct robust_list_head __user *head;
59695         unsigned long ret;
59696 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59697         const struct cred *cred = current_cred(), *pcred;
59698 +#endif
59699  
59700         if (!futex_cmpxchg_enabled)
59701                 return -ENOSYS;
59702 @@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59703                 if (!p)
59704                         goto err_unlock;
59705                 ret = -EPERM;
59706 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59707 +               if (!ptrace_may_access(p, PTRACE_MODE_READ))
59708 +                       goto err_unlock;
59709 +#else
59710                 pcred = __task_cred(p);
59711                 /* If victim is in different user_ns, then uids are not
59712                    comparable, so we must have CAP_SYS_PTRACE */
59713 @@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59714                     !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
59715                         goto err_unlock;
59716  ok:
59717 +#endif
59718                 head = p->robust_list;
59719                 rcu_read_unlock();
59720         }
59721 @@ -2712,6 +2729,7 @@ static int __init futex_init(void)
59722  {
59723         u32 curval;
59724         int i;
59725 +       mm_segment_t oldfs;
59726  
59727         /*
59728          * This will fail and we want it. Some arch implementations do
59729 @@ -2723,8 +2741,11 @@ static int __init futex_init(void)
59730          * implementation, the non-functional ones will return
59731          * -ENOSYS.
59732          */
59733 +       oldfs = get_fs();
59734 +       set_fs(USER_DS);
59735         if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
59736                 futex_cmpxchg_enabled = 1;
59737 +       set_fs(oldfs);
59738  
59739         for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
59740                 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
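The get_futex_key() hunk above rejects, under SEGMEXEC, any futex address at or beyond SEGMEXEC_TASK_SIZE before the key lookup does real work, and the get_robust_list() hunk replaces the scattered uid comparisons with a single ptrace_may_access() gate when GRKERNSEC_PROC_MEMMAP is set. The snippet below is only an illustrative userspace analogue of the first idea, an early, overflow-safe range check against an invented limit, not kernel code:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_TASK_SIZE 0x60000000UL     /* invented limit, for illustration only */

/* Reject an address range up front when it cannot possibly be valid,
 * before doing any expensive lookup on it.  The second clause is
 * written so that addr + len cannot overflow. */
static int check_user_range(uintptr_t addr, size_t len)
{
        if (addr >= FAKE_TASK_SIZE || len > FAKE_TASK_SIZE - addr)
                return -EFAULT;
        return 0;
}

int main(void)
{
        printf("%d\n", check_user_range(0x5ffffff8UL, 4));     /* accepted: 0 */
        printf("%d\n", check_user_range(0x5ffffffcUL, 8));     /* rejected (-EFAULT) */
        printf("%d\n", check_user_range(0x70000000UL, 4));     /* rejected (-EFAULT) */
        return 0;
}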
59741 diff -urNp linux-3.0.4/kernel/futex_compat.c linux-3.0.4/kernel/futex_compat.c
59742 --- linux-3.0.4/kernel/futex_compat.c   2011-07-21 22:17:23.000000000 -0400
59743 +++ linux-3.0.4/kernel/futex_compat.c   2011-08-23 21:48:14.000000000 -0400
59744 @@ -10,6 +10,7 @@
59745  #include <linux/compat.h>
59746  #include <linux/nsproxy.h>
59747  #include <linux/futex.h>
59748 +#include <linux/ptrace.h>
59749  
59750  #include <asm/uaccess.h>
59751  
59752 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
59753  {
59754         struct compat_robust_list_head __user *head;
59755         unsigned long ret;
59756 -       const struct cred *cred = current_cred(), *pcred;
59757 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59758 +       const struct cred *cred = current_cred();
59759 +       const struct cred *pcred;
59760 +#endif
59761  
59762         if (!futex_cmpxchg_enabled)
59763                 return -ENOSYS;
59764 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
59765                 if (!p)
59766                         goto err_unlock;
59767                 ret = -EPERM;
59768 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59769 +               if (!ptrace_may_access(p, PTRACE_MODE_READ))
59770 +                       goto err_unlock;
59771 +#else
59772                 pcred = __task_cred(p);
59773                 /* If victim is in different user_ns, then uids are not
59774                    comparable, so we must have CAP_SYS_PTRACE */
59775 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
59776                     !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
59777                         goto err_unlock;
59778  ok:
59779 +#endif
59780                 head = p->compat_robust_list;
59781                 rcu_read_unlock();
59782         }
59783 diff -urNp linux-3.0.4/kernel/gcov/base.c linux-3.0.4/kernel/gcov/base.c
59784 --- linux-3.0.4/kernel/gcov/base.c      2011-07-21 22:17:23.000000000 -0400
59785 +++ linux-3.0.4/kernel/gcov/base.c      2011-08-23 21:47:56.000000000 -0400
59786 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
59787  }
59788  
59789  #ifdef CONFIG_MODULES
59790 -static inline int within(void *addr, void *start, unsigned long size)
59791 -{
59792 -       return ((addr >= start) && (addr < start + size));
59793 -}
59794 -
59795  /* Update list and generate events when modules are unloaded. */
59796  static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
59797                                 void *data)
59798 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
59799         prev = NULL;
59800         /* Remove entries located in module from linked list. */
59801         for (info = gcov_info_head; info; info = info->next) {
59802 -               if (within(info, mod->module_core, mod->core_size)) {
59803 +               if (within_module_core_rw((unsigned long)info, mod)) {
59804                         if (prev)
59805                                 prev->next = info->next;
59806                         else
59807 diff -urNp linux-3.0.4/kernel/hrtimer.c linux-3.0.4/kernel/hrtimer.c
59808 --- linux-3.0.4/kernel/hrtimer.c        2011-07-21 22:17:23.000000000 -0400
59809 +++ linux-3.0.4/kernel/hrtimer.c        2011-08-23 21:47:56.000000000 -0400
59810 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
59811         local_irq_restore(flags);
59812  }
59813  
59814 -static void run_hrtimer_softirq(struct softirq_action *h)
59815 +static void run_hrtimer_softirq(void)
59816  {
59817         hrtimer_peek_ahead_timers();
59818  }
59819 diff -urNp linux-3.0.4/kernel/jump_label.c linux-3.0.4/kernel/jump_label.c
59820 --- linux-3.0.4/kernel/jump_label.c     2011-07-21 22:17:23.000000000 -0400
59821 +++ linux-3.0.4/kernel/jump_label.c     2011-08-23 21:47:56.000000000 -0400
59822 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
59823  
59824         size = (((unsigned long)stop - (unsigned long)start)
59825                                         / sizeof(struct jump_entry));
59826 +       pax_open_kernel();
59827         sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
59828 +       pax_close_kernel();
59829  }
59830  
59831  static void jump_label_update(struct jump_label_key *key, int enable);
59832 @@ -297,10 +299,12 @@ static void jump_label_invalidate_module
59833         struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
59834         struct jump_entry *iter;
59835  
59836 +       pax_open_kernel();
59837         for (iter = iter_start; iter < iter_stop; iter++) {
59838                 if (within_module_init(iter->code, mod))
59839                         iter->code = 0;
59840         }
59841 +       pax_close_kernel();
59842  }
59843  
59844  static int
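The jump_label hunks above wrap the sort of the jump-entry table and the module-init invalidation loop in pax_open_kernel()/pax_close_kernel(), which temporarily lift the write protection that KERNEXEC keeps on otherwise read-only kernel memory. A rough userspace analogue of that open/write/close discipline, using mprotect() on an ordinary page, is sketched below; it is illustrative only:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p;

        /* An anonymous page standing in for data the kernel keeps read-only. */
        p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "original");
        mprotect(p, pagesz, PROT_READ);                 /* normally locked down */

        /* "open": briefly allow writes, in the spirit of pax_open_kernel() */
        mprotect(p, pagesz, PROT_READ | PROT_WRITE);
        strcpy(p, "patched");
        /* "close": straight back to read-only, like pax_close_kernel() */
        mprotect(p, pagesz, PROT_READ);

        puts(p);
        munmap(p, pagesz);
        return 0;
}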
59845 diff -urNp linux-3.0.4/kernel/kallsyms.c linux-3.0.4/kernel/kallsyms.c
59846 --- linux-3.0.4/kernel/kallsyms.c       2011-07-21 22:17:23.000000000 -0400
59847 +++ linux-3.0.4/kernel/kallsyms.c       2011-08-23 21:48:14.000000000 -0400
59848 @@ -11,6 +11,9 @@
59849   *      Changed the compression method from stem compression to "table lookup"
59850   *      compression (see scripts/kallsyms.c for a more complete description)
59851   */
59852 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59853 +#define __INCLUDED_BY_HIDESYM 1
59854 +#endif
59855  #include <linux/kallsyms.h>
59856  #include <linux/module.h>
59857  #include <linux/init.h>
59858 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
59859  
59860  static inline int is_kernel_inittext(unsigned long addr)
59861  {
59862 +       if (system_state != SYSTEM_BOOTING)
59863 +               return 0;
59864 +
59865         if (addr >= (unsigned long)_sinittext
59866             && addr <= (unsigned long)_einittext)
59867                 return 1;
59868         return 0;
59869  }
59870  
59871 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59872 +#ifdef CONFIG_MODULES
59873 +static inline int is_module_text(unsigned long addr)
59874 +{
59875 +       if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
59876 +               return 1;
59877 +
59878 +       addr = ktla_ktva(addr);
59879 +       return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
59880 +}
59881 +#else
59882 +static inline int is_module_text(unsigned long addr)
59883 +{
59884 +       return 0;
59885 +}
59886 +#endif
59887 +#endif
59888 +
59889  static inline int is_kernel_text(unsigned long addr)
59890  {
59891         if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
59892 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
59893  
59894  static inline int is_kernel(unsigned long addr)
59895  {
59896 +
59897 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59898 +       if (is_kernel_text(addr) || is_kernel_inittext(addr))
59899 +               return 1;
59900 +
59901 +       if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
59902 +#else
59903         if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
59904 +#endif
59905 +
59906                 return 1;
59907         return in_gate_area_no_mm(addr);
59908  }
59909  
59910  static int is_ksym_addr(unsigned long addr)
59911  {
59912 +
59913 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59914 +       if (is_module_text(addr))
59915 +               return 0;
59916 +#endif
59917 +
59918         if (all_var)
59919                 return is_kernel(addr);
59920  
59921 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
59922  
59923  static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
59924  {
59925 -       iter->name[0] = '\0';
59926         iter->nameoff = get_symbol_offset(new_pos);
59927         iter->pos = new_pos;
59928  }
59929 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
59930  {
59931         struct kallsym_iter *iter = m->private;
59932  
59933 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59934 +       if (current_uid())
59935 +               return 0;
59936 +#endif
59937 +
59938         /* Some debugging symbols have no name.  Ignore them. */
59939         if (!iter->name[0])
59940                 return 0;
59941 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
59942         struct kallsym_iter *iter;
59943         int ret;
59944  
59945 -       iter = kmalloc(sizeof(*iter), GFP_KERNEL);
59946 +       iter = kzalloc(sizeof(*iter), GFP_KERNEL);
59947         if (!iter)
59948                 return -ENOMEM;
59949         reset_iter(iter, 0);
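Besides hiding /proc/kallsyms entries from non-root readers under GRKERNSEC_HIDESYM, the kallsyms hunk above drops the manual iter->name[0] = '\0' in reset_iter() and allocates the iterator with kzalloc() instead of kmalloc(), so every byte of the structure starts out zeroed before any of it can reach userspace through the seq_file. A userspace cousin of that choice, calloc() over malloc(), is sketched below with invented names:

#include <stdlib.h>

struct sym_iter {
        char name[128];
        unsigned long value;
        int pos;
};

/* malloc() hands back whatever was on the heap; if a field (or even
 * struct padding) is later exposed before being written, stale bytes
 * leak out.  calloc() starts from all-zero, like kzalloc() above. */
static struct sym_iter *sym_iter_new(void)
{
        return calloc(1, sizeof(struct sym_iter));
}

int main(void)
{
        struct sym_iter *it = sym_iter_new();

        if (!it)
                return 1;
        /* it->name[0] is already '\0'; no separate reset is needed */
        free(it);
        return 0;
}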
59950 diff -urNp linux-3.0.4/kernel/kmod.c linux-3.0.4/kernel/kmod.c
59951 --- linux-3.0.4/kernel/kmod.c   2011-07-21 22:17:23.000000000 -0400
59952 +++ linux-3.0.4/kernel/kmod.c   2011-08-23 21:48:14.000000000 -0400
59953 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
59954   * If module auto-loading support is disabled then this function
59955   * becomes a no-operation.
59956   */
59957 -int __request_module(bool wait, const char *fmt, ...)
59958 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
59959  {
59960 -       va_list args;
59961         char module_name[MODULE_NAME_LEN];
59962         unsigned int max_modprobes;
59963         int ret;
59964 -       char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
59965 +       char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
59966         static char *envp[] = { "HOME=/",
59967                                 "TERM=linux",
59968                                 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
59969 @@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
59970  #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
59971         static int kmod_loop_msg;
59972  
59973 -       va_start(args, fmt);
59974 -       ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
59975 -       va_end(args);
59976 +       ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
59977         if (ret >= MODULE_NAME_LEN)
59978                 return -ENAMETOOLONG;
59979  
59980 @@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
59981         if (ret)
59982                 return ret;
59983  
59984 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59985 +       if (!current_uid()) {
59986 +               /* hack to workaround consolekit/udisks stupidity */
59987 +               read_lock(&tasklist_lock);
59988 +               if (!strcmp(current->comm, "mount") &&
59989 +                   current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
59990 +                       read_unlock(&tasklist_lock);
59991 +                       printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
59992 +                       return -EPERM;
59993 +               }
59994 +               read_unlock(&tasklist_lock);
59995 +       }
59996 +#endif
59997 +
59998         /* If modprobe needs a service that is in a module, we get a recursive
59999          * loop.  Limit the number of running kmod threads to max_threads/2 or
60000          * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
60001 @@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
60002         atomic_dec(&kmod_concurrent);
60003         return ret;
60004  }
60005 +
60006 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
60007 +{
60008 +       va_list args;
60009 +       int ret;
60010 +
60011 +       va_start(args, fmt);
60012 +       ret = ____request_module(wait, module_param, fmt, args);
60013 +       va_end(args);
60014 +
60015 +       return ret;
60016 +}
60017 +
60018 +int __request_module(bool wait, const char *fmt, ...)
60019 +{
60020 +       va_list args;
60021 +       int ret;
60022 +
60023 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60024 +       if (current_uid()) {
60025 +               char module_param[MODULE_NAME_LEN];
60026 +
60027 +               memset(module_param, 0, sizeof(module_param));
60028 +
60029 +               snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
60030 +
60031 +               va_start(args, fmt);
60032 +               ret = ____request_module(wait, module_param, fmt, args);
60033 +               va_end(args);
60034 +
60035 +               return ret;
60036 +       }
60037 +#endif
60038 +
60039 +       va_start(args, fmt);
60040 +       ret = ____request_module(wait, NULL, fmt, args);
60041 +       va_end(args);
60042 +
60043 +       return ret;
60044 +}
60045 +
60046  EXPORT_SYMBOL(__request_module);
60047  #endif /* CONFIG_MODULES */
60048  
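To let MODHARDEN append an extra modprobe argument, the kmod.c change above splits __request_module() into a va_list-based ____request_module() core plus thin variadic wrappers that only re-pack their arguments. The standard C shape of that split, with invented names, looks like the sketch below:

#include <stdarg.h>
#include <stdio.h>

/* Core helper: does the real work and takes a va_list so callers can
 * forward their variable arguments (compare ____request_module). */
static int build_name(char *buf, size_t len, const char *extra,
                      const char *fmt, va_list ap)
{
        int n = vsnprintf(buf, len, fmt, ap);

        if (n < 0 || (size_t)n >= len)
                return -1;
        if (extra)
                printf("would also pass: %s\n", extra);
        return 0;
}

/* Public variadic wrapper: packs the arguments and forwards them
 * (compare __request_module / ___request_module). */
static int request(const char *extra, const char *fmt, ...)
{
        char name[64];
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = build_name(name, sizeof(name), extra, fmt, ap);
        va_end(ap);
        if (ret == 0)
                printf("module name: %s\n", name);
        return ret;
}

int main(void)
{
        request(NULL, "fs-%s", "ext4");
        request("grsec_modharden_normal1000_", "fs-%s", "vfat");
        return 0;
}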
60049 diff -urNp linux-3.0.4/kernel/kprobes.c linux-3.0.4/kernel/kprobes.c
60050 --- linux-3.0.4/kernel/kprobes.c        2011-07-21 22:17:23.000000000 -0400
60051 +++ linux-3.0.4/kernel/kprobes.c        2011-08-23 21:47:56.000000000 -0400
60052 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
60053          * kernel image and loaded module images reside. This is required
60054          * so x86_64 can correctly handle the %rip-relative fixups.
60055          */
60056 -       kip->insns = module_alloc(PAGE_SIZE);
60057 +       kip->insns = module_alloc_exec(PAGE_SIZE);
60058         if (!kip->insns) {
60059                 kfree(kip);
60060                 return NULL;
60061 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
60062                  */
60063                 if (!list_is_singular(&kip->list)) {
60064                         list_del(&kip->list);
60065 -                       module_free(NULL, kip->insns);
60066 +                       module_free_exec(NULL, kip->insns);
60067                         kfree(kip);
60068                 }
60069                 return 1;
60070 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
60071  {
60072         int i, err = 0;
60073         unsigned long offset = 0, size = 0;
60074 -       char *modname, namebuf[128];
60075 +       char *modname, namebuf[KSYM_NAME_LEN];
60076         const char *symbol_name;
60077         void *addr;
60078         struct kprobe_blackpoint *kb;
60079 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
60080         const char *sym = NULL;
60081         unsigned int i = *(loff_t *) v;
60082         unsigned long offset = 0;
60083 -       char *modname, namebuf[128];
60084 +       char *modname, namebuf[KSYM_NAME_LEN];
60085  
60086         head = &kprobe_table[i];
60087         preempt_disable();
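The kprobes hunks above take instruction slots from the executable module area (module_alloc_exec/module_free_exec, helpers this patch adds elsewhere) and size the symbol-name buffers with KSYM_NAME_LEN rather than a bare 128, so they stay correct if the limit ever changes. The short sketch below, with a stand-in constant, shows the sizeof-driven pattern that keeps such a buffer and its bound in sync:

#include <stdio.h>

#define NAME_MAX_LEN 128        /* stand-in for KSYM_NAME_LEN */

static void copy_symbol_name(char *dst, size_t dstlen, const char *src)
{
        /* bound the copy by the destination's real size, never a magic number */
        snprintf(dst, dstlen, "%s", src);
}

int main(void)
{
        char namebuf[NAME_MAX_LEN];

        copy_symbol_name(namebuf, sizeof(namebuf), "kmem_cache_alloc");
        printf("%s\n", namebuf);
        return 0;
}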
60088 diff -urNp linux-3.0.4/kernel/lockdep.c linux-3.0.4/kernel/lockdep.c
60089 --- linux-3.0.4/kernel/lockdep.c        2011-07-21 22:17:23.000000000 -0400
60090 +++ linux-3.0.4/kernel/lockdep.c        2011-08-23 21:47:56.000000000 -0400
60091 @@ -583,6 +583,10 @@ static int static_obj(void *obj)
60092                       end   = (unsigned long) &_end,
60093                       addr  = (unsigned long) obj;
60094  
60095 +#ifdef CONFIG_PAX_KERNEXEC
60096 +       start = ktla_ktva(start);
60097 +#endif
60098 +
60099         /*
60100          * static variable?
60101          */
60102 @@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
60103         if (!static_obj(lock->key)) {
60104                 debug_locks_off();
60105                 printk("INFO: trying to register non-static key.\n");
60106 +               printk("lock:%pS key:%pS.\n", lock, lock->key);
60107                 printk("the code is fine but needs lockdep annotation.\n");
60108                 printk("turning off the locking correctness validator.\n");
60109                 dump_stack();
60110 @@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
60111                 if (!class)
60112                         return 0;
60113         }
60114 -       atomic_inc((atomic_t *)&class->ops);
60115 +       atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
60116         if (very_verbose(class)) {
60117                 printk("\nacquire class [%p] %s", class->key, class->name);
60118                 if (class->name_version > 1)
60119 diff -urNp linux-3.0.4/kernel/lockdep_proc.c linux-3.0.4/kernel/lockdep_proc.c
60120 --- linux-3.0.4/kernel/lockdep_proc.c   2011-07-21 22:17:23.000000000 -0400
60121 +++ linux-3.0.4/kernel/lockdep_proc.c   2011-08-23 21:47:56.000000000 -0400
60122 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
60123  
60124  static void print_name(struct seq_file *m, struct lock_class *class)
60125  {
60126 -       char str[128];
60127 +       char str[KSYM_NAME_LEN];
60128         const char *name = class->name;
60129  
60130         if (!name) {
60131 diff -urNp linux-3.0.4/kernel/module.c linux-3.0.4/kernel/module.c
60132 --- linux-3.0.4/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
60133 +++ linux-3.0.4/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
60134 @@ -58,6 +58,7 @@
60135  #include <linux/jump_label.h>
60136  #include <linux/pfn.h>
60137  #include <linux/bsearch.h>
60138 +#include <linux/grsecurity.h>
60139  
60140  #define CREATE_TRACE_POINTS
60141  #include <trace/events/module.h>
60142 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
60143  
60144  /* Bounds of module allocation, for speeding __module_address.
60145   * Protected by module_mutex. */
60146 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
60147 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
60148 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
60149  
60150  int register_module_notifier(struct notifier_block * nb)
60151  {
60152 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
60153                 return true;
60154  
60155         list_for_each_entry_rcu(mod, &modules, list) {
60156 -               struct symsearch arr[] = {
60157 +               struct symsearch modarr[] = {
60158                         { mod->syms, mod->syms + mod->num_syms, mod->crcs,
60159                           NOT_GPL_ONLY, false },
60160                         { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
60161 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
60162  #endif
60163                 };
60164  
60165 -               if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
60166 +               if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
60167                         return true;
60168         }
60169         return false;
60170 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
60171  static int percpu_modalloc(struct module *mod,
60172                            unsigned long size, unsigned long align)
60173  {
60174 -       if (align > PAGE_SIZE) {
60175 +       if (align-1 >= PAGE_SIZE) {
60176                 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
60177                        mod->name, align, PAGE_SIZE);
60178                 align = PAGE_SIZE;
60179 @@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
60180   */
60181  #ifdef CONFIG_SYSFS
60182  
60183 -#ifdef CONFIG_KALLSYMS
60184 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60185  static inline bool sect_empty(const Elf_Shdr *sect)
60186  {
60187         return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
60188 @@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
60189  
60190  static void unset_module_core_ro_nx(struct module *mod)
60191  {
60192 -       set_page_attributes(mod->module_core + mod->core_text_size,
60193 -               mod->module_core + mod->core_size,
60194 +       set_page_attributes(mod->module_core_rw,
60195 +               mod->module_core_rw + mod->core_size_rw,
60196                 set_memory_x);
60197 -       set_page_attributes(mod->module_core,
60198 -               mod->module_core + mod->core_ro_size,
60199 +       set_page_attributes(mod->module_core_rx,
60200 +               mod->module_core_rx + mod->core_size_rx,
60201                 set_memory_rw);
60202  }
60203  
60204  static void unset_module_init_ro_nx(struct module *mod)
60205  {
60206 -       set_page_attributes(mod->module_init + mod->init_text_size,
60207 -               mod->module_init + mod->init_size,
60208 +       set_page_attributes(mod->module_init_rw,
60209 +               mod->module_init_rw + mod->init_size_rw,
60210                 set_memory_x);
60211 -       set_page_attributes(mod->module_init,
60212 -               mod->module_init + mod->init_ro_size,
60213 +       set_page_attributes(mod->module_init_rx,
60214 +               mod->module_init_rx + mod->init_size_rx,
60215                 set_memory_rw);
60216  }
60217  
60218 @@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
60219  
60220         mutex_lock(&module_mutex);
60221         list_for_each_entry_rcu(mod, &modules, list) {
60222 -               if ((mod->module_core) && (mod->core_text_size)) {
60223 -                       set_page_attributes(mod->module_core,
60224 -                                               mod->module_core + mod->core_text_size,
60225 +               if ((mod->module_core_rx) && (mod->core_size_rx)) {
60226 +                       set_page_attributes(mod->module_core_rx,
60227 +                                               mod->module_core_rx + mod->core_size_rx,
60228                                                 set_memory_rw);
60229                 }
60230 -               if ((mod->module_init) && (mod->init_text_size)) {
60231 -                       set_page_attributes(mod->module_init,
60232 -                                               mod->module_init + mod->init_text_size,
60233 +               if ((mod->module_init_rx) && (mod->init_size_rx)) {
60234 +                       set_page_attributes(mod->module_init_rx,
60235 +                                               mod->module_init_rx + mod->init_size_rx,
60236                                                 set_memory_rw);
60237                 }
60238         }
60239 @@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
60240  
60241         mutex_lock(&module_mutex);
60242         list_for_each_entry_rcu(mod, &modules, list) {
60243 -               if ((mod->module_core) && (mod->core_text_size)) {
60244 -                       set_page_attributes(mod->module_core,
60245 -                                               mod->module_core + mod->core_text_size,
60246 +               if ((mod->module_core_rx) && (mod->core_size_rx)) {
60247 +                       set_page_attributes(mod->module_core_rx,
60248 +                                               mod->module_core_rx + mod->core_size_rx,
60249                                                 set_memory_ro);
60250                 }
60251 -               if ((mod->module_init) && (mod->init_text_size)) {
60252 -                       set_page_attributes(mod->module_init,
60253 -                                               mod->module_init + mod->init_text_size,
60254 +               if ((mod->module_init_rx) && (mod->init_size_rx)) {
60255 +                       set_page_attributes(mod->module_init_rx,
60256 +                                               mod->module_init_rx + mod->init_size_rx,
60257                                                 set_memory_ro);
60258                 }
60259         }
60260 @@ -1722,16 +1724,19 @@ static void free_module(struct module *m
60261  
60262         /* This may be NULL, but that's OK */
60263         unset_module_init_ro_nx(mod);
60264 -       module_free(mod, mod->module_init);
60265 +       module_free(mod, mod->module_init_rw);
60266 +       module_free_exec(mod, mod->module_init_rx);
60267         kfree(mod->args);
60268         percpu_modfree(mod);
60269  
60270         /* Free lock-classes: */
60271 -       lockdep_free_key_range(mod->module_core, mod->core_size);
60272 +       lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
60273 +       lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
60274  
60275         /* Finally, free the core (containing the module structure) */
60276         unset_module_core_ro_nx(mod);
60277 -       module_free(mod, mod->module_core);
60278 +       module_free_exec(mod, mod->module_core_rx);
60279 +       module_free(mod, mod->module_core_rw);
60280  
60281  #ifdef CONFIG_MPU
60282         update_protections(current->mm);
60283 @@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
60284         unsigned int i;
60285         int ret = 0;
60286         const struct kernel_symbol *ksym;
60287 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60288 +       int is_fs_load = 0;
60289 +       int register_filesystem_found = 0;
60290 +       char *p;
60291 +
60292 +       p = strstr(mod->args, "grsec_modharden_fs");
60293 +       if (p) {
60294 +               char *endptr = p + strlen("grsec_modharden_fs");
60295 +               /* copy \0 as well */
60296 +               memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
60297 +               is_fs_load = 1;
60298 +       }
60299 +#endif
60300  
60301         for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
60302                 const char *name = info->strtab + sym[i].st_name;
60303  
60304 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60305 +               /* it's a real shame this will never get ripped and copied
60306 +                  upstream! ;(
60307 +               */
60308 +               if (is_fs_load && !strcmp(name, "register_filesystem"))
60309 +                       register_filesystem_found = 1;
60310 +#endif
60311 +
60312                 switch (sym[i].st_shndx) {
60313                 case SHN_COMMON:
60314                         /* We compiled with -fno-common.  These are not
60315 @@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
60316                         ksym = resolve_symbol_wait(mod, info, name);
60317                         /* Ok if resolved.  */
60318                         if (ksym && !IS_ERR(ksym)) {
60319 +                               pax_open_kernel();
60320                                 sym[i].st_value = ksym->value;
60321 +                               pax_close_kernel();
60322                                 break;
60323                         }
60324  
60325 @@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
60326                                 secbase = (unsigned long)mod_percpu(mod);
60327                         else
60328                                 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
60329 +                       pax_open_kernel();
60330                         sym[i].st_value += secbase;
60331 +                       pax_close_kernel();
60332                         break;
60333                 }
60334         }
60335  
60336 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60337 +       if (is_fs_load && !register_filesystem_found) {
60338 +               printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60339 +               ret = -EPERM;
60340 +       }
60341 +#endif
60342 +
60343         return ret;
60344  }
60345  
60346 @@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
60347                             || s->sh_entsize != ~0UL
60348                             || strstarts(sname, ".init"))
60349                                 continue;
60350 -                       s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60351 +                       if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60352 +                               s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60353 +                       else
60354 +                               s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60355                         DEBUGP("\t%s\n", name);
60356                 }
60357 -               switch (m) {
60358 -               case 0: /* executable */
60359 -                       mod->core_size = debug_align(mod->core_size);
60360 -                       mod->core_text_size = mod->core_size;
60361 -                       break;
60362 -               case 1: /* RO: text and ro-data */
60363 -                       mod->core_size = debug_align(mod->core_size);
60364 -                       mod->core_ro_size = mod->core_size;
60365 -                       break;
60366 -               case 3: /* whole core */
60367 -                       mod->core_size = debug_align(mod->core_size);
60368 -                       break;
60369 -               }
60370         }
60371  
60372         DEBUGP("Init section allocation order:\n");
60373 @@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
60374                             || s->sh_entsize != ~0UL
60375                             || !strstarts(sname, ".init"))
60376                                 continue;
60377 -                       s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60378 -                                        | INIT_OFFSET_MASK);
60379 +                       if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60380 +                               s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60381 +                       else
60382 +                               s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60383 +                       s->sh_entsize |= INIT_OFFSET_MASK;
60384                         DEBUGP("\t%s\n", sname);
60385                 }
60386 -               switch (m) {
60387 -               case 0: /* executable */
60388 -                       mod->init_size = debug_align(mod->init_size);
60389 -                       mod->init_text_size = mod->init_size;
60390 -                       break;
60391 -               case 1: /* RO: text and ro-data */
60392 -                       mod->init_size = debug_align(mod->init_size);
60393 -                       mod->init_ro_size = mod->init_size;
60394 -                       break;
60395 -               case 3: /* whole init */
60396 -                       mod->init_size = debug_align(mod->init_size);
60397 -                       break;
60398 -               }
60399         }
60400  }
60401  
60402 @@ -2141,7 +2158,7 @@ static void layout_symtab(struct module 
60403  
60404         /* Put symbol section at end of init part of module. */
60405         symsect->sh_flags |= SHF_ALLOC;
60406 -       symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60407 +       symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60408                                          info->index.sym) | INIT_OFFSET_MASK;
60409         DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
60410  
60411 @@ -2158,19 +2175,19 @@ static void layout_symtab(struct module 
60412                 }
60413  
60414         /* Append room for core symbols at end of core part. */
60415 -       info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
60416 -       mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
60417 +       info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
60418 +       mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
60419  
60420         /* Put string table section at end of init part of module. */
60421         strsect->sh_flags |= SHF_ALLOC;
60422 -       strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
60423 +       strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
60424                                          info->index.str) | INIT_OFFSET_MASK;
60425         DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
60426  
60427         /* Append room for core symbols' strings at end of core part. */
60428 -       info->stroffs = mod->core_size;
60429 +       info->stroffs = mod->core_size_rx;
60430         __set_bit(0, info->strmap);
60431 -       mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
60432 +       mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
60433  }
60434  
60435  static void add_kallsyms(struct module *mod, const struct load_info *info)
60436 @@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
60437         /* Make sure we get permanent strtab: don't use info->strtab. */
60438         mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
60439  
60440 +       pax_open_kernel();
60441 +
60442         /* Set types up while we still have access to sections. */
60443         for (i = 0; i < mod->num_symtab; i++)
60444                 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
60445  
60446 -       mod->core_symtab = dst = mod->module_core + info->symoffs;
60447 +       mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
60448         src = mod->symtab;
60449         *dst = *src;
60450         for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
60451 @@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
60452         }
60453         mod->core_num_syms = ndst;
60454  
60455 -       mod->core_strtab = s = mod->module_core + info->stroffs;
60456 +       mod->core_strtab = s = mod->module_core_rx + info->stroffs;
60457         for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
60458                 if (test_bit(i, info->strmap))
60459                         *++s = mod->strtab[i];
60460 +
60461 +       pax_close_kernel();
60462  }
60463  #else
60464  static inline void layout_symtab(struct module *mod, struct load_info *info)
60465 @@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct 
60466                 ddebug_remove_module(debug->modname);
60467  }
60468  
60469 -static void *module_alloc_update_bounds(unsigned long size)
60470 +static void *module_alloc_update_bounds_rw(unsigned long size)
60471  {
60472         void *ret = module_alloc(size);
60473  
60474         if (ret) {
60475                 mutex_lock(&module_mutex);
60476                 /* Update module bounds. */
60477 -               if ((unsigned long)ret < module_addr_min)
60478 -                       module_addr_min = (unsigned long)ret;
60479 -               if ((unsigned long)ret + size > module_addr_max)
60480 -                       module_addr_max = (unsigned long)ret + size;
60481 +               if ((unsigned long)ret < module_addr_min_rw)
60482 +                       module_addr_min_rw = (unsigned long)ret;
60483 +               if ((unsigned long)ret + size > module_addr_max_rw)
60484 +                       module_addr_max_rw = (unsigned long)ret + size;
60485 +               mutex_unlock(&module_mutex);
60486 +       }
60487 +       return ret;
60488 +}
60489 +
60490 +static void *module_alloc_update_bounds_rx(unsigned long size)
60491 +{
60492 +       void *ret = module_alloc_exec(size);
60493 +
60494 +       if (ret) {
60495 +               mutex_lock(&module_mutex);
60496 +               /* Update module bounds. */
60497 +               if ((unsigned long)ret < module_addr_min_rx)
60498 +                       module_addr_min_rx = (unsigned long)ret;
60499 +               if ((unsigned long)ret + size > module_addr_max_rx)
60500 +                       module_addr_max_rx = (unsigned long)ret + size;
60501                 mutex_unlock(&module_mutex);
60502         }
60503         return ret;
60504 @@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
60505         void *ptr;
60506  
60507         /* Do the allocs. */
60508 -       ptr = module_alloc_update_bounds(mod->core_size);
60509 +       ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60510         /*
60511          * The pointer to this block is stored in the module structure
60512          * which is inside the block. Just mark it as not being a
60513 @@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
60514         if (!ptr)
60515                 return -ENOMEM;
60516  
60517 -       memset(ptr, 0, mod->core_size);
60518 -       mod->module_core = ptr;
60519 +       memset(ptr, 0, mod->core_size_rw);
60520 +       mod->module_core_rw = ptr;
60521  
60522 -       ptr = module_alloc_update_bounds(mod->init_size);
60523 +       ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60524         /*
60525          * The pointer to this block is stored in the module structure
60526          * which is inside the block. This block doesn't need to be
60527          * scanned as it contains data and code that will be freed
60528          * after the module is initialized.
60529          */
60530 -       kmemleak_ignore(ptr);
60531 -       if (!ptr && mod->init_size) {
60532 -               module_free(mod, mod->module_core);
60533 +       kmemleak_not_leak(ptr);
60534 +       if (!ptr && mod->init_size_rw) {
60535 +               module_free(mod, mod->module_core_rw);
60536                 return -ENOMEM;
60537         }
60538 -       memset(ptr, 0, mod->init_size);
60539 -       mod->module_init = ptr;
60540 +       memset(ptr, 0, mod->init_size_rw);
60541 +       mod->module_init_rw = ptr;
60542 +
60543 +       ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60544 +       kmemleak_not_leak(ptr);
60545 +       if (!ptr) {
60546 +               module_free(mod, mod->module_init_rw);
60547 +               module_free(mod, mod->module_core_rw);
60548 +               return -ENOMEM;
60549 +       }
60550 +
60551 +       pax_open_kernel();
60552 +       memset(ptr, 0, mod->core_size_rx);
60553 +       pax_close_kernel();
60554 +       mod->module_core_rx = ptr;
60555 +
60556 +       ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60557 +       kmemleak_not_leak(ptr);
60558 +       if (!ptr && mod->init_size_rx) {
60559 +               module_free_exec(mod, mod->module_core_rx);
60560 +               module_free(mod, mod->module_init_rw);
60561 +               module_free(mod, mod->module_core_rw);
60562 +               return -ENOMEM;
60563 +       }
60564 +
60565 +       pax_open_kernel();
60566 +       memset(ptr, 0, mod->init_size_rx);
60567 +       pax_close_kernel();
60568 +       mod->module_init_rx = ptr;
60569  
60570         /* Transfer each section which specifies SHF_ALLOC */
60571         DEBUGP("final section addresses:\n");
60572 @@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
60573                 if (!(shdr->sh_flags & SHF_ALLOC))
60574                         continue;
60575  
60576 -               if (shdr->sh_entsize & INIT_OFFSET_MASK)
60577 -                       dest = mod->module_init
60578 -                               + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
60579 -               else
60580 -                       dest = mod->module_core + shdr->sh_entsize;
60581 +               if (shdr->sh_entsize & INIT_OFFSET_MASK) {
60582 +                       if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
60583 +                               dest = mod->module_init_rw
60584 +                                       + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
60585 +                       else
60586 +                               dest = mod->module_init_rx
60587 +                                       + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
60588 +               } else {
60589 +                       if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
60590 +                               dest = mod->module_core_rw + shdr->sh_entsize;
60591 +                       else
60592 +                               dest = mod->module_core_rx + shdr->sh_entsize;
60593 +               }
60594 +
60595 +               if (shdr->sh_type != SHT_NOBITS) {
60596 +
60597 +#ifdef CONFIG_PAX_KERNEXEC
60598 +#ifdef CONFIG_X86_64
60599 +                       if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
60600 +                               set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
60601 +#endif
60602 +                       if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
60603 +                               pax_open_kernel();
60604 +                               memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
60605 +                               pax_close_kernel();
60606 +                       } else
60607 +#endif
60608  
60609 -               if (shdr->sh_type != SHT_NOBITS)
60610                         memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
60611 +               }
60612                 /* Update sh_addr to point to copy in image. */
60613 -               shdr->sh_addr = (unsigned long)dest;
60614 +
60615 +#ifdef CONFIG_PAX_KERNEXEC
60616 +               if (shdr->sh_flags & SHF_EXECINSTR)
60617 +                       shdr->sh_addr = ktva_ktla((unsigned long)dest);
60618 +               else
60619 +#endif
60620 +
60621 +                       shdr->sh_addr = (unsigned long)dest;
60622                 DEBUGP("\t0x%lx %s\n",
60623                        shdr->sh_addr, info->secstrings + shdr->sh_name);
60624         }
60625 @@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
60626          * Do it before processing of module parameters, so the module
60627          * can provide parameter accessor functions of its own.
60628          */
60629 -       if (mod->module_init)
60630 -               flush_icache_range((unsigned long)mod->module_init,
60631 -                                  (unsigned long)mod->module_init
60632 -                                  + mod->init_size);
60633 -       flush_icache_range((unsigned long)mod->module_core,
60634 -                          (unsigned long)mod->module_core + mod->core_size);
60635 +       if (mod->module_init_rx)
60636 +               flush_icache_range((unsigned long)mod->module_init_rx,
60637 +                                  (unsigned long)mod->module_init_rx
60638 +                                  + mod->init_size_rx);
60639 +       flush_icache_range((unsigned long)mod->module_core_rx,
60640 +                          (unsigned long)mod->module_core_rx + mod->core_size_rx);
60641  
60642         set_fs(old_fs);
60643  }
60644 @@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
60645  {
60646         kfree(info->strmap);
60647         percpu_modfree(mod);
60648 -       module_free(mod, mod->module_init);
60649 -       module_free(mod, mod->module_core);
60650 +       module_free_exec(mod, mod->module_init_rx);
60651 +       module_free_exec(mod, mod->module_core_rx);
60652 +       module_free(mod, mod->module_init_rw);
60653 +       module_free(mod, mod->module_core_rw);
60654  }
60655  
60656  static int post_relocation(struct module *mod, const struct load_info *info)
60657 @@ -2770,9 +2865,38 @@ static struct module *load_module(void _
60658         if (err)
60659                 goto free_unload;
60660  
60661 +       /* Now copy in args */
60662 +       mod->args = strndup_user(uargs, ~0UL >> 1);
60663 +       if (IS_ERR(mod->args)) {
60664 +               err = PTR_ERR(mod->args);
60665 +               goto free_unload;
60666 +       }
60667 +
60668         /* Set up MODINFO_ATTR fields */
60669         setup_modinfo(mod, &info);
60670  
60671 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60672 +       {
60673 +               char *p, *p2;
60674 +
60675 +               if (strstr(mod->args, "grsec_modharden_netdev")) {
60676 +                       printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
60677 +                       err = -EPERM;
60678 +                       goto free_modinfo;
60679 +               } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
60680 +                       p += strlen("grsec_modharden_normal");
60681 +                       p2 = strstr(p, "_");
60682 +                       if (p2) {
60683 +                               *p2 = '\0';
60684 +                               printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
60685 +                               *p2 = '_';
60686 +                       }
60687 +                       err = -EPERM;
60688 +                       goto free_modinfo;
60689 +               }
60690 +       }
60691 +#endif
60692 +
60693         /* Fix up syms, so that st_value is a pointer to location. */
60694         err = simplify_symbols(mod, &info);
60695         if (err < 0)
60696 @@ -2788,13 +2912,6 @@ static struct module *load_module(void _
60697  
60698         flush_module_icache(mod);
60699  
60700 -       /* Now copy in args */
60701 -       mod->args = strndup_user(uargs, ~0UL >> 1);
60702 -       if (IS_ERR(mod->args)) {
60703 -               err = PTR_ERR(mod->args);
60704 -               goto free_arch_cleanup;
60705 -       }
60706 -
60707         /* Mark state as coming so strong_try_module_get() ignores us. */
60708         mod->state = MODULE_STATE_COMING;
60709  
60710 @@ -2854,11 +2971,10 @@ static struct module *load_module(void _
60711   unlock:
60712         mutex_unlock(&module_mutex);
60713         synchronize_sched();
60714 -       kfree(mod->args);
60715 - free_arch_cleanup:
60716         module_arch_cleanup(mod);
60717   free_modinfo:
60718         free_modinfo(mod);
60719 +       kfree(mod->args);
60720   free_unload:
60721         module_unload_free(mod);
60722   free_module:
60723 @@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
60724                         MODULE_STATE_COMING, mod);
60725  
60726         /* Set RO and NX regions for core */
60727 -       set_section_ro_nx(mod->module_core,
60728 -                               mod->core_text_size,
60729 -                               mod->core_ro_size,
60730 -                               mod->core_size);
60731 +       set_section_ro_nx(mod->module_core_rx,
60732 +                               mod->core_size_rx,
60733 +                               mod->core_size_rx,
60734 +                               mod->core_size_rx);
60735  
60736         /* Set RO and NX regions for init */
60737 -       set_section_ro_nx(mod->module_init,
60738 -                               mod->init_text_size,
60739 -                               mod->init_ro_size,
60740 -                               mod->init_size);
60741 +       set_section_ro_nx(mod->module_init_rx,
60742 +                               mod->init_size_rx,
60743 +                               mod->init_size_rx,
60744 +                               mod->init_size_rx);
60745  
60746         do_mod_ctors(mod);
60747         /* Start the module */
60748 @@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
60749         mod->strtab = mod->core_strtab;
60750  #endif
60751         unset_module_init_ro_nx(mod);
60752 -       module_free(mod, mod->module_init);
60753 -       mod->module_init = NULL;
60754 -       mod->init_size = 0;
60755 -       mod->init_ro_size = 0;
60756 -       mod->init_text_size = 0;
60757 +       module_free(mod, mod->module_init_rw);
60758 +       module_free_exec(mod, mod->module_init_rx);
60759 +       mod->module_init_rw = NULL;
60760 +       mod->module_init_rx = NULL;
60761 +       mod->init_size_rw = 0;
60762 +       mod->init_size_rx = 0;
60763         mutex_unlock(&module_mutex);
60764  
60765         return 0;
60766 @@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
60767         unsigned long nextval;
60768  
60769         /* At worse, next value is at end of module */
60770 -       if (within_module_init(addr, mod))
60771 -               nextval = (unsigned long)mod->module_init+mod->init_text_size;
60772 +       if (within_module_init_rx(addr, mod))
60773 +               nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
60774 +       else if (within_module_init_rw(addr, mod))
60775 +               nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
60776 +       else if (within_module_core_rx(addr, mod))
60777 +               nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
60778 +       else if (within_module_core_rw(addr, mod))
60779 +               nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
60780         else
60781 -               nextval = (unsigned long)mod->module_core+mod->core_text_size;
60782 +               return NULL;
60783  
60784         /* Scan for closest preceding symbol, and next symbol. (ELF
60785            starts real symbols at 1). */
60786 @@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
60787         char buf[8];
60788  
60789         seq_printf(m, "%s %u",
60790 -                  mod->name, mod->init_size + mod->core_size);
60791 +                  mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
60792         print_unload_info(m, mod);
60793  
60794         /* Informative for users. */
60795 @@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
60796                    mod->state == MODULE_STATE_COMING ? "Loading":
60797                    "Live");
60798         /* Used by oprofile and other similar tools. */
60799 -       seq_printf(m, " 0x%pK", mod->module_core);
60800 +       seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
60801  
60802         /* Taints info */
60803         if (mod->taints)
60804 @@ -3283,7 +3406,17 @@ static const struct file_operations proc
60805  
60806  static int __init proc_modules_init(void)
60807  {
60808 +#ifndef CONFIG_GRKERNSEC_HIDESYM
60809 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60810 +       proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60811 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60812 +       proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
60813 +#else
60814         proc_create("modules", 0, NULL, &proc_modules_operations);
60815 +#endif
60816 +#else
60817 +       proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60818 +#endif
60819         return 0;
60820  }
60821  module_init(proc_modules_init);
60822 @@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
60823  {
60824         struct module *mod;
60825  
60826 -       if (addr < module_addr_min || addr > module_addr_max)
60827 +       if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
60828 +           (addr < module_addr_min_rw || addr > module_addr_max_rw))
60829                 return NULL;
60830  
60831         list_for_each_entry_rcu(mod, &modules, list)
60832 -               if (within_module_core(addr, mod)
60833 -                   || within_module_init(addr, mod))
60834 +               if (within_module_init(addr, mod) || within_module_core(addr, mod))
60835                         return mod;
60836         return NULL;
60837  }
60838 @@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
60839   */
60840  struct module *__module_text_address(unsigned long addr)
60841  {
60842 -       struct module *mod = __module_address(addr);
60843 +       struct module *mod;
60844 +
60845 +#ifdef CONFIG_X86_32
60846 +       addr = ktla_ktva(addr);
60847 +#endif
60848 +
60849 +       if (addr < module_addr_min_rx || addr > module_addr_max_rx)
60850 +               return NULL;
60851 +
60852 +       mod = __module_address(addr);
60853 +
60854         if (mod) {
60855                 /* Make sure it's within the text section. */
60856 -               if (!within(addr, mod->module_init, mod->init_text_size)
60857 -                   && !within(addr, mod->module_core, mod->core_text_size))
60858 +               if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
60859                         mod = NULL;
60860         }
60861         return mod;
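
The kernel/module.c hunks above replace the single module_core/module_init allocations with paired RW and RX regions (module_core_rw, module_core_rx and friends), so module data is never executable and module text is never writable once it has been copied in. A minimal userspace model of the same W^X idea, using mmap/mprotect instead of the kernel's module_alloc()/module_alloc_exec() and pax_open_kernel() helpers (an illustrative sketch only, not the patch's implementation):

/* Userspace model of the RW/RX split applied to module memory above:
 * data lives in a writable, non-executable mapping, code is staged in a
 * second mapping and then sealed read-only + executable. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;

    /* "core_rw": module data, never executable */
    void *rw = mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    /* "core_rx": module text, staged writable, then sealed R+X */
    void *rx = mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (rw == MAP_FAILED || rx == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    memset(rw, 0, len);            /* plays the role of module_core_rw */
    memset(rx, 0xc3, len);         /* pretend code bytes (x86 'ret')    */

    /* once the text is copied in, drop write permission for good */
    if (mprotect(rx, len, PROT_READ | PROT_EXEC) != 0) {
        perror("mprotect");
        return 1;
    }

    printf("rw region %p (RW, NX), rx region %p (R+X, no write)\n", rw, rx);

    munmap(rw, len);
    munmap(rx, len);
    return 0;
}

The kernel version additionally tracks separate module_addr_min/max bounds per region and frees each region with the matching allocator, as the free_module() and module_deallocate() hunks show.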
60862 diff -urNp linux-3.0.4/kernel/mutex.c linux-3.0.4/kernel/mutex.c
60863 --- linux-3.0.4/kernel/mutex.c  2011-07-21 22:17:23.000000000 -0400
60864 +++ linux-3.0.4/kernel/mutex.c  2011-08-23 21:47:56.000000000 -0400
60865 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, 
60866         spin_lock_mutex(&lock->wait_lock, flags);
60867  
60868         debug_mutex_lock_common(lock, &waiter);
60869 -       debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
60870 +       debug_mutex_add_waiter(lock, &waiter, task);
60871  
60872         /* add waiting tasks to the end of the waitqueue (FIFO): */
60873         list_add_tail(&waiter.list, &lock->wait_list);
60874 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, 
60875                  * TASK_UNINTERRUPTIBLE case.)
60876                  */
60877                 if (unlikely(signal_pending_state(state, task))) {
60878 -                       mutex_remove_waiter(lock, &waiter,
60879 -                                           task_thread_info(task));
60880 +                       mutex_remove_waiter(lock, &waiter, task);
60881                         mutex_release(&lock->dep_map, 1, ip);
60882                         spin_unlock_mutex(&lock->wait_lock, flags);
60883  
60884 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, 
60885  done:
60886         lock_acquired(&lock->dep_map, ip);
60887         /* got the lock - rejoice! */
60888 -       mutex_remove_waiter(lock, &waiter, current_thread_info());
60889 +       mutex_remove_waiter(lock, &waiter, task);
60890         mutex_set_owner(lock);
60891  
60892         /* set it to 0 if there are no waiters left: */
60893 diff -urNp linux-3.0.4/kernel/mutex-debug.c linux-3.0.4/kernel/mutex-debug.c
60894 --- linux-3.0.4/kernel/mutex-debug.c    2011-07-21 22:17:23.000000000 -0400
60895 +++ linux-3.0.4/kernel/mutex-debug.c    2011-08-23 21:47:56.000000000 -0400
60896 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
60897  }
60898  
60899  void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60900 -                           struct thread_info *ti)
60901 +                           struct task_struct *task)
60902  {
60903         SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
60904  
60905         /* Mark the current thread as blocked on the lock: */
60906 -       ti->task->blocked_on = waiter;
60907 +       task->blocked_on = waiter;
60908  }
60909  
60910  void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60911 -                        struct thread_info *ti)
60912 +                        struct task_struct *task)
60913  {
60914         DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
60915 -       DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
60916 -       DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
60917 -       ti->task->blocked_on = NULL;
60918 +       DEBUG_LOCKS_WARN_ON(waiter->task != task);
60919 +       DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
60920 +       task->blocked_on = NULL;
60921  
60922         list_del_init(&waiter->list);
60923         waiter->task = NULL;
60924 diff -urNp linux-3.0.4/kernel/mutex-debug.h linux-3.0.4/kernel/mutex-debug.h
60925 --- linux-3.0.4/kernel/mutex-debug.h    2011-07-21 22:17:23.000000000 -0400
60926 +++ linux-3.0.4/kernel/mutex-debug.h    2011-08-23 21:47:56.000000000 -0400
60927 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
60928  extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
60929  extern void debug_mutex_add_waiter(struct mutex *lock,
60930                                    struct mutex_waiter *waiter,
60931 -                                  struct thread_info *ti);
60932 +                                  struct task_struct *task);
60933  extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60934 -                               struct thread_info *ti);
60935 +                               struct task_struct *task);
60936  extern void debug_mutex_unlock(struct mutex *lock);
60937  extern void debug_mutex_init(struct mutex *lock, const char *name,
60938                              struct lock_class_key *key);
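
The mutex hunks above change the debug helpers to take the task_struct directly instead of its thread_info, dropping the ti->task indirection at every call site while keeping the same sanity checks. A tiny stand-in sketch of that interface change (struct and function names below are hypothetical, not the kernel's):

#include <assert.h>
#include <stddef.h>

struct task { struct waiter *blocked_on; };
struct waiter { struct task *task; };

/* old shape: add_waiter(..., struct thread_info *ti) { ti->task->blocked_on = w; } */
static void add_waiter(struct waiter *w, struct task *task)
{
    task->blocked_on = w;        /* mark the task as blocked on this waiter */
}

static void remove_waiter(struct waiter *w, struct task *task)
{
    assert(w->task == task);     /* same checks as before, one less dereference */
    assert(task->blocked_on == w);
    task->blocked_on = NULL;
}

int main(void)
{
    struct task t = { 0 };
    struct waiter w = { .task = &t };

    add_waiter(&w, &t);
    remove_waiter(&w, &t);
    return 0;
}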
60939 diff -urNp linux-3.0.4/kernel/padata.c linux-3.0.4/kernel/padata.c
60940 --- linux-3.0.4/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
60941 +++ linux-3.0.4/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
60942 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
60943         padata->pd = pd;
60944         padata->cb_cpu = cb_cpu;
60945  
60946 -       if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
60947 -               atomic_set(&pd->seq_nr, -1);
60948 +       if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
60949 +               atomic_set_unchecked(&pd->seq_nr, -1);
60950  
60951 -       padata->seq_nr = atomic_inc_return(&pd->seq_nr);
60952 +       padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
60953  
60954         target_cpu = padata_cpu_hash(padata);
60955         queue = per_cpu_ptr(pd->pqueue, target_cpu);
60956 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
60957         padata_init_pqueues(pd);
60958         padata_init_squeues(pd);
60959         setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
60960 -       atomic_set(&pd->seq_nr, -1);
60961 +       atomic_set_unchecked(&pd->seq_nr, -1);
60962         atomic_set(&pd->reorder_objects, 0);
60963         atomic_set(&pd->refcnt, 0);
60964         pd->pinst = pinst;
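
In the padata hunks above, the sequence counter is switched to atomic_unchecked_t; under the patch's REFCOUNT hardening, ordinary atomic_t operations are guarded against overflow, so counters that are expected to wrap are moved to the unchecked variant. A userspace model of that wrapping sequence number, with a plain C11 atomic standing in for atomic_unchecked_t (the names and the tiny maximum are illustrative only):

#include <stdatomic.h>
#include <stdio.h>

#define MAX_SEQ_NR 7             /* tiny maximum so the wrap is visible */

static atomic_int seq_nr = -1;   /* "unchecked": wrapping here is intentional */

static int next_seq(void)
{
    /* mirrors the padata logic: reset just before the maximum is exceeded */
    if (atomic_load(&seq_nr) == MAX_SEQ_NR)
        atomic_store(&seq_nr, -1);
    return atomic_fetch_add(&seq_nr, 1) + 1;
}

int main(void)
{
    for (int i = 0; i < 20; i++)
        printf("%d ", next_seq());
    printf("\n");                /* prints 0..7 repeating: deliberate wrap */
    return 0;
}

The later kernel/profile.c and kernel/rcutorture.c hunks apply the same conversion to profiling and torture-test counters for the same reason.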
60965 diff -urNp linux-3.0.4/kernel/panic.c linux-3.0.4/kernel/panic.c
60966 --- linux-3.0.4/kernel/panic.c  2011-07-21 22:17:23.000000000 -0400
60967 +++ linux-3.0.4/kernel/panic.c  2011-08-23 21:48:14.000000000 -0400
60968 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
60969         const char *board;
60970  
60971         printk(KERN_WARNING "------------[ cut here ]------------\n");
60972 -       printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
60973 +       printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
60974         board = dmi_get_system_info(DMI_PRODUCT_NAME);
60975         if (board)
60976                 printk(KERN_WARNING "Hardware name: %s\n", board);
60977 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
60978   */
60979  void __stack_chk_fail(void)
60980  {
60981 -       panic("stack-protector: Kernel stack is corrupted in: %p\n",
60982 +       dump_stack();
60983 +       panic("stack-protector: Kernel stack is corrupted in: %pA\n",
60984                 __builtin_return_address(0));
60985  }
60986  EXPORT_SYMBOL(__stack_chk_fail);
60987 diff -urNp linux-3.0.4/kernel/pid.c linux-3.0.4/kernel/pid.c
60988 --- linux-3.0.4/kernel/pid.c    2011-07-21 22:17:23.000000000 -0400
60989 +++ linux-3.0.4/kernel/pid.c    2011-08-23 21:48:14.000000000 -0400
60990 @@ -33,6 +33,7 @@
60991  #include <linux/rculist.h>
60992  #include <linux/bootmem.h>
60993  #include <linux/hash.h>
60994 +#include <linux/security.h>
60995  #include <linux/pid_namespace.h>
60996  #include <linux/init_task.h>
60997  #include <linux/syscalls.h>
60998 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
60999  
61000  int pid_max = PID_MAX_DEFAULT;
61001  
61002 -#define RESERVED_PIDS          300
61003 +#define RESERVED_PIDS          500
61004  
61005  int pid_max_min = RESERVED_PIDS + 1;
61006  int pid_max_max = PID_MAX_LIMIT;
61007 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
61008   */
61009  struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
61010  {
61011 +       struct task_struct *task;
61012 +
61013         rcu_lockdep_assert(rcu_read_lock_held());
61014 -       return pid_task(find_pid_ns(vx_rmap_pid(nr), ns), PIDTYPE_PID);
61015 +       task = pid_task(find_pid_ns(vx_rmap_pid(nr), ns), PIDTYPE_PID);
61016 +
61017 +       if (gr_pid_is_chrooted(task))
61018 +               return NULL;
61019 +
61020 +       return task;
61021  }
61022  
61023  struct task_struct *find_task_by_vpid(pid_t vnr)
61024 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
61025         return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
61026  }
61027  
61028 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
61029 +{
61030 +       rcu_lockdep_assert(rcu_read_lock_held());       
61031 +       return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
61032 +}
61033 +
61034  struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
61035  {
61036         struct pid *pid;
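
The kernel/pid.c hunks above make the ordinary PID lookup hide tasks that gr_pid_is_chrooted() objects to, and add find_task_by_vpid_unrestricted() for callers that must still see every task. A small stand-in model of that filtered/unfiltered lookup split (everything below is illustrative, not the kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int pid; bool chrooted; };

static struct task tasks[] = {
    { .pid = 1,  .chrooted = false },
    { .pid = 42, .chrooted = true  },
};

static struct task *raw_lookup(int pid)
{
    for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
        if (tasks[i].pid == pid)
            return &tasks[i];
    return NULL;
}

/* stands in for the gr_pid_is_chrooted() policy check */
static bool hidden_by_policy(const struct task *t)
{
    return t && t->chrooted;
}

static struct task *find_task(int pid)              /* filtered lookup */
{
    struct task *t = raw_lookup(pid);
    return hidden_by_policy(t) ? NULL : t;
}

static struct task *find_task_unrestricted(int pid) /* bypasses the filter */
{
    return raw_lookup(pid);
}

int main(void)
{
    printf("find_task(42): %p\n", (void *)find_task(42));   /* NULL: hidden */
    printf("find_task_unrestricted(42): %p\n",
           (void *)find_task_unrestricted(42));
    return 0;
}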
61037 diff -urNp linux-3.0.4/kernel/posix-cpu-timers.c linux-3.0.4/kernel/posix-cpu-timers.c
61038 --- linux-3.0.4/kernel/posix-cpu-timers.c       2011-07-21 22:17:23.000000000 -0400
61039 +++ linux-3.0.4/kernel/posix-cpu-timers.c       2011-08-23 21:48:14.000000000 -0400
61040 @@ -6,6 +6,7 @@
61041  #include <linux/posix-timers.h>
61042  #include <linux/errno.h>
61043  #include <linux/math64.h>
61044 +#include <linux/security.h>
61045  #include <asm/uaccess.h>
61046  #include <linux/kernel_stat.h>
61047  #include <trace/events/timer.h>
61048 @@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
61049  
61050  static __init int init_posix_cpu_timers(void)
61051  {
61052 -       struct k_clock process = {
61053 +       static struct k_clock process = {
61054                 .clock_getres   = process_cpu_clock_getres,
61055                 .clock_get      = process_cpu_clock_get,
61056                 .timer_create   = process_cpu_timer_create,
61057                 .nsleep         = process_cpu_nsleep,
61058                 .nsleep_restart = process_cpu_nsleep_restart,
61059         };
61060 -       struct k_clock thread = {
61061 +       static struct k_clock thread = {
61062                 .clock_getres   = thread_cpu_clock_getres,
61063                 .clock_get      = thread_cpu_clock_get,
61064                 .timer_create   = thread_cpu_timer_create,
61065 diff -urNp linux-3.0.4/kernel/posix-timers.c linux-3.0.4/kernel/posix-timers.c
61066 --- linux-3.0.4/kernel/posix-timers.c   2011-07-21 22:17:23.000000000 -0400
61067 +++ linux-3.0.4/kernel/posix-timers.c   2011-08-23 21:48:14.000000000 -0400
61068 @@ -43,6 +43,7 @@
61069  #include <linux/idr.h>
61070  #include <linux/posix-clock.h>
61071  #include <linux/posix-timers.h>
61072 +#include <linux/grsecurity.h>
61073  #include <linux/syscalls.h>
61074  #include <linux/wait.h>
61075  #include <linux/workqueue.h>
61076 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
61077   *         which we beg off on and pass to do_sys_settimeofday().
61078   */
61079  
61080 -static struct k_clock posix_clocks[MAX_CLOCKS];
61081 +static struct k_clock *posix_clocks[MAX_CLOCKS];
61082  
61083  /*
61084   * These ones are defined below.
61085 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
61086   */
61087  static __init int init_posix_timers(void)
61088  {
61089 -       struct k_clock clock_realtime = {
61090 +       static struct k_clock clock_realtime = {
61091                 .clock_getres   = hrtimer_get_res,
61092                 .clock_get      = posix_clock_realtime_get,
61093                 .clock_set      = posix_clock_realtime_set,
61094 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
61095                 .timer_get      = common_timer_get,
61096                 .timer_del      = common_timer_del,
61097         };
61098 -       struct k_clock clock_monotonic = {
61099 +       static struct k_clock clock_monotonic = {
61100                 .clock_getres   = hrtimer_get_res,
61101                 .clock_get      = posix_ktime_get_ts,
61102                 .nsleep         = common_nsleep,
61103 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
61104                 .timer_get      = common_timer_get,
61105                 .timer_del      = common_timer_del,
61106         };
61107 -       struct k_clock clock_monotonic_raw = {
61108 +       static struct k_clock clock_monotonic_raw = {
61109                 .clock_getres   = hrtimer_get_res,
61110                 .clock_get      = posix_get_monotonic_raw,
61111         };
61112 -       struct k_clock clock_realtime_coarse = {
61113 +       static struct k_clock clock_realtime_coarse = {
61114                 .clock_getres   = posix_get_coarse_res,
61115                 .clock_get      = posix_get_realtime_coarse,
61116         };
61117 -       struct k_clock clock_monotonic_coarse = {
61118 +       static struct k_clock clock_monotonic_coarse = {
61119                 .clock_getres   = posix_get_coarse_res,
61120                 .clock_get      = posix_get_monotonic_coarse,
61121         };
61122 -       struct k_clock clock_boottime = {
61123 +       static struct k_clock clock_boottime = {
61124                 .clock_getres   = hrtimer_get_res,
61125                 .clock_get      = posix_get_boottime,
61126                 .nsleep         = common_nsleep,
61127 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
61128                 .timer_del      = common_timer_del,
61129         };
61130  
61131 +       pax_track_stack();
61132 +
61133         posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
61134         posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
61135         posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
61136 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
61137                 return;
61138         }
61139  
61140 -       posix_clocks[clock_id] = *new_clock;
61141 +       posix_clocks[clock_id] = new_clock;
61142  }
61143  EXPORT_SYMBOL_GPL(posix_timers_register_clock);
61144  
61145 @@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
61146                 return (id & CLOCKFD_MASK) == CLOCKFD ?
61147                         &clock_posix_dynamic : &clock_posix_cpu;
61148  
61149 -       if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
61150 +       if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
61151                 return NULL;
61152 -       return &posix_clocks[id];
61153 +       return posix_clocks[id];
61154  }
61155  
61156  static int common_timer_create(struct k_itimer *new_timer)
61157 @@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
61158         if (copy_from_user(&new_tp, tp, sizeof (*tp)))
61159                 return -EFAULT;
61160  
61161 +       /* only the CLOCK_REALTIME clock can be set, all other clocks
61162 +          have their clock_set fptr set to a nosettime dummy function
61163 +          CLOCK_REALTIME has a NULL clock_set fptr which causes it to
61164 +          call common_clock_set, which calls do_sys_settimeofday, which
61165 +          we hook
61166 +       */
61167 +
61168         return kc->clock_set(which_clock, &new_tp);
61169  }
61170  
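
The posix-timers hunks above turn posix_clocks[] from an array of struct k_clock copies into an array of pointers: registration now stores the caller's pointer, which is why the k_clock objects in init_posix_timers() and init_posix_cpu_timers() become static (they must outlive the init functions), and why clockid_to_kclock() gains a NULL check on the slot. A compact sketch of that table-of-pointers pattern (struct and function names are stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

struct clock_ops {
    const char *name;
    long (*getres)(void);
};

#define MAX_CLOCKS 4

/* before: struct clock_ops table[MAX_CLOCKS];  entries copied by value */
/* after:  pointers to objects with static storage duration             */
static const struct clock_ops *table[MAX_CLOCKS];

static long realtime_getres(void) { return 1; }

static void register_clock(int id, const struct clock_ops *ops)
{
    if (id >= 0 && id < MAX_CLOCKS)
        table[id] = ops;                  /* store the pointer, no copy */
}

static const struct clock_ops *clockid_to_ops(int id)
{
    if (id < 0 || id >= MAX_CLOCKS || !table[id] || !table[id]->getres)
        return NULL;                      /* mirrors the patched NULL check */
    return table[id];
}

int main(void)
{
    static const struct clock_ops clock_realtime = {
        .name = "realtime", .getres = realtime_getres,
    };

    register_clock(0, &clock_realtime);

    const struct clock_ops *ops = clockid_to_ops(0);
    if (ops)
        printf("%s: res=%ld\n", ops->name, ops->getres());
    printf("unregistered id 2 -> %p\n", (void *)clockid_to_ops(2));
    return 0;
}

Storing pointers rather than copies also allows a registered clock description to live in read-only storage.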
61171 diff -urNp linux-3.0.4/kernel/power/poweroff.c linux-3.0.4/kernel/power/poweroff.c
61172 --- linux-3.0.4/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
61173 +++ linux-3.0.4/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
61174 @@ -37,7 +37,7 @@ static struct sysrq_key_op    sysrq_powerof
61175         .enable_mask    = SYSRQ_ENABLE_BOOT,
61176  };
61177  
61178 -static int pm_sysrq_init(void)
61179 +static int __init pm_sysrq_init(void)
61180  {
61181         register_sysrq_key('o', &sysrq_poweroff_op);
61182         return 0;
61183 diff -urNp linux-3.0.4/kernel/power/process.c linux-3.0.4/kernel/power/process.c
61184 --- linux-3.0.4/kernel/power/process.c  2011-07-21 22:17:23.000000000 -0400
61185 +++ linux-3.0.4/kernel/power/process.c  2011-08-23 21:47:56.000000000 -0400
61186 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
61187         u64 elapsed_csecs64;
61188         unsigned int elapsed_csecs;
61189         bool wakeup = false;
61190 +       bool timedout = false;
61191  
61192         do_gettimeofday(&start);
61193  
61194 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
61195  
61196         while (true) {
61197                 todo = 0;
61198 +               if (time_after(jiffies, end_time))
61199 +                       timedout = true;
61200                 read_lock(&tasklist_lock);
61201                 do_each_thread(g, p) {
61202                         if (frozen(p) || !freezable(p))
61203 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
61204                          * try_to_stop() after schedule() in ptrace/signal
61205                          * stop sees TIF_FREEZE.
61206                          */
61207 -                       if (!task_is_stopped_or_traced(p) &&
61208 -                           !freezer_should_skip(p))
61209 +                       if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61210                                 todo++;
61211 +                               if (timedout) {
61212 +                                       printk(KERN_ERR "Task refusing to freeze:\n");
61213 +                                       sched_show_task(p);
61214 +                               }
61215 +                       }
61216                 } while_each_thread(g, p);
61217                 read_unlock(&tasklist_lock);
61218  
61219 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
61220                         todo += wq_busy;
61221                 }
61222  
61223 -               if (!todo || time_after(jiffies, end_time))
61224 +               if (!todo || timedout)
61225                         break;
61226  
61227                 if (pm_wakeup_pending()) {
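
The kernel/power/process.c hunks above sample the freezer timeout once per pass and, on the pass that times out, print every task still refusing to freeze before giving up, instead of re-reading the clock only at the exit check. A generic sketch of that retry-loop pattern in plain C (the worker structure and timings are invented for the example):

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <time.h>

struct worker { const char *name; int remaining; };

static bool busy(struct worker *w)
{
    if (w->remaining < 0)
        return true;                     /* models a task that never freezes */
    if (w->remaining > 0)
        w->remaining--;
    return w->remaining > 0;
}

int main(void)
{
    struct worker workers[] = {
        { "fast",  2  },
        { "stuck", -1 },
    };
    time_t end_time = time(NULL) + 2;

    while (true) {
        bool timedout = time(NULL) > end_time;   /* sampled once per pass */
        int todo = 0;

        for (size_t i = 0; i < sizeof(workers) / sizeof(workers[0]); i++) {
            if (!busy(&workers[i]))
                continue;
            todo++;
            if (timedout)                /* report offenders in the same pass */
                fprintf(stderr, "worker refusing to finish: %s\n",
                        workers[i].name);
        }

        if (!todo || timedout)
            break;

        struct timespec pause = { 0, 100 * 1000 * 1000 };
        nanosleep(&pause, NULL);         /* back off before the next pass */
    }
    return 0;
}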
61228 diff -urNp linux-3.0.4/kernel/printk.c linux-3.0.4/kernel/printk.c
61229 --- linux-3.0.4/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
61230 +++ linux-3.0.4/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
61231 @@ -313,12 +313,17 @@ static int check_syslog_permissions(int 
61232         if (from_file && type != SYSLOG_ACTION_OPEN)
61233                 return 0;
61234  
61235 +#ifdef CONFIG_GRKERNSEC_DMESG
61236 +       if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
61237 +               return -EPERM;
61238 +#endif
61239 +
61240         if (syslog_action_restricted(type)) {
61241                 if (vx_capable(CAP_SYSLOG, VXC_SYSLOG))
61242                         return 0;
61243                 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
61244                 if (capable(CAP_SYS_ADMIN)) {
61245 -                       WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
61246 +                       printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
61247                                  "but no CAP_SYSLOG (deprecated).\n");
61248                         return 0;
61249                 }
61250 diff -urNp linux-3.0.4/kernel/profile.c linux-3.0.4/kernel/profile.c
61251 --- linux-3.0.4/kernel/profile.c        2011-07-21 22:17:23.000000000 -0400
61252 +++ linux-3.0.4/kernel/profile.c        2011-08-23 21:47:56.000000000 -0400
61253 @@ -39,7 +39,7 @@ struct profile_hit {
61254  /* Oprofile timer tick hook */
61255  static int (*timer_hook)(struct pt_regs *) __read_mostly;
61256  
61257 -static atomic_t *prof_buffer;
61258 +static atomic_unchecked_t *prof_buffer;
61259  static unsigned long prof_len, prof_shift;
61260  
61261  int prof_on __read_mostly;
61262 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
61263                                         hits[i].pc = 0;
61264                                 continue;
61265                         }
61266 -                       atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61267 +                       atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61268                         hits[i].hits = hits[i].pc = 0;
61269                 }
61270         }
61271 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
61272          * Add the current hit(s) and flush the write-queue out
61273          * to the global buffer:
61274          */
61275 -       atomic_add(nr_hits, &prof_buffer[pc]);
61276 +       atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61277         for (i = 0; i < NR_PROFILE_HIT; ++i) {
61278 -               atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61279 +               atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61280                 hits[i].pc = hits[i].hits = 0;
61281         }
61282  out:
61283 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
61284  {
61285         unsigned long pc;
61286         pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61287 -       atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61288 +       atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61289  }
61290  #endif /* !CONFIG_SMP */
61291  
61292 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61293                         return -EFAULT;
61294                 buf++; p++; count--; read++;
61295         }
61296 -       pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61297 +       pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61298         if (copy_to_user(buf, (void *)pnt, count))
61299                 return -EFAULT;
61300         read += count;
61301 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61302         }
61303  #endif
61304         profile_discard_flip_buffers();
61305 -       memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61306 +       memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61307         return count;
61308  }
61309  
61310 diff -urNp linux-3.0.4/kernel/ptrace.c linux-3.0.4/kernel/ptrace.c
61311 --- linux-3.0.4/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
61312 +++ linux-3.0.4/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
61313 @@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
61314         return ret;
61315  }
61316  
61317 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61318 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61319 +                              unsigned int log)
61320  {
61321         const struct cred *cred = current_cred(), *tcred;
61322  
61323 @@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
61324              cred->gid == tcred->sgid &&
61325              cred->gid == tcred->gid))
61326                 goto ok;
61327 -       if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
61328 +       if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
61329 +           (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
61330                 goto ok;
61331         rcu_read_unlock();
61332         return -EPERM;
61333 @@ -167,7 +169,9 @@ ok:
61334         smp_rmb();
61335         if (task->mm)
61336                 dumpable = get_dumpable(task->mm);
61337 -       if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
61338 +       if (!dumpable &&
61339 +               ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
61340 +                (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
61341                 return -EPERM;
61342  
61343         return security_ptrace_access_check(task, mode);
61344 @@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
61345  {
61346         int err;
61347         task_lock(task);
61348 -       err = __ptrace_may_access(task, mode);
61349 +       err = __ptrace_may_access(task, mode, 0);
61350 +       task_unlock(task);
61351 +       return !err;
61352 +}
61353 +
61354 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61355 +{
61356 +       int err;
61357 +       task_lock(task);
61358 +       err = __ptrace_may_access(task, mode, 1);
61359         task_unlock(task);
61360         return !err;
61361  }
61362 @@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
61363                 goto out;
61364  
61365         task_lock(task);
61366 -       retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61367 +       retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61368         task_unlock(task);
61369         if (retval)
61370                 goto unlock_creds;
61371 @@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
61372                 goto unlock_tasklist;
61373  
61374         task->ptrace = PT_PTRACED;
61375 -       if (task_ns_capable(task, CAP_SYS_PTRACE))
61376 +       if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
61377                 task->ptrace |= PT_PTRACE_CAP;
61378  
61379         __ptrace_link(task, current);
61380 @@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
61381  {
61382         int copied = 0;
61383  
61384 +       pax_track_stack();
61385 +
61386         while (len > 0) {
61387                 char buf[128];
61388                 int this_len, retval;
61389 @@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
61390                                 break;
61391                         return -EIO;
61392                 }
61393 -               if (copy_to_user(dst, buf, retval))
61394 +               if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
61395                         return -EFAULT;
61396                 copied += retval;
61397                 src += retval;
61398 @@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct 
61399  {
61400         int copied = 0;
61401  
61402 +       pax_track_stack();
61403 +
61404         while (len > 0) {
61405                 char buf[128];
61406                 int this_len, retval;
61407 @@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
61408  {
61409         int ret = -EIO;
61410         siginfo_t siginfo;
61411 -       void __user *datavp = (void __user *) data;
61412 +       void __user *datavp = (__force void __user *) data;
61413         unsigned long __user *datalp = datavp;
61414  
61415 +       pax_track_stack();
61416 +
61417         switch (request) {
61418         case PTRACE_PEEKTEXT:
61419         case PTRACE_PEEKDATA:
61420 @@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61421                 goto out;
61422         }
61423  
61424 +       if (gr_handle_ptrace(child, request)) {
61425 +               ret = -EPERM;
61426 +               goto out_put_task_struct;
61427 +       }
61428 +
61429         if (request == PTRACE_ATTACH) {
61430                 ret = ptrace_attach(child);
61431                 /*
61432                  * Some architectures need to do book-keeping after
61433                  * a ptrace attach.
61434                  */
61435 -               if (!ret)
61436 +               if (!ret) {
61437                         arch_ptrace_attach(child);
61438 +                       gr_audit_ptrace(child);
61439 +               }
61440                 goto out_put_task_struct;
61441         }
61442  
61443 @@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
61444         copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
61445         if (copied != sizeof(tmp))
61446                 return -EIO;
61447 -       return put_user(tmp, (unsigned long __user *)data);
61448 +       return put_user(tmp, (__force unsigned long __user *)data);
61449  }
61450  
61451  int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
61452 @@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
61453         siginfo_t siginfo;
61454         int ret;
61455  
61456 +       pax_track_stack();
61457 +
61458         switch (request) {
61459         case PTRACE_PEEKTEXT:
61460         case PTRACE_PEEKDATA:
61461 @@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
61462                 goto out;
61463         }
61464  
61465 +       if (gr_handle_ptrace(child, request)) {
61466 +               ret = -EPERM;
61467 +               goto out_put_task_struct;
61468 +       }
61469 +
61470         if (request == PTRACE_ATTACH) {
61471                 ret = ptrace_attach(child);
61472                 /*
61473                  * Some architectures need to do book-keeping after
61474                  * a ptrace attach.
61475                  */
61476 -               if (!ret)
61477 +               if (!ret) {
61478                         arch_ptrace_attach(child);
61479 +                       gr_audit_ptrace(child);
61480 +               }
61481                 goto out_put_task_struct;
61482         }
61483  
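
Among the kernel/ptrace.c changes above, ptrace_readdata() gains a check that the length reported back by access_process_vm() cannot exceed the on-stack buffer before copy_to_user() runs. A userspace sketch of that defensive bound on a chunked copy loop (the chunk source, sizes and names are stand-ins):

#include <stdio.h>
#include <string.h>

/* stands in for access_process_vm(): returns how many bytes it produced */
static int read_chunk(char *buf, int want)
{
    int got = want < 96 ? want : 96;     /* pretend the source is limited */
    memset(buf, 'A', got);
    return got;
}

static int read_data(char *dst, int len)
{
    int copied = 0;

    while (len > 0) {
        char buf[128];
        int this_len = len > (int)sizeof(buf) ? (int)sizeof(buf) : len;
        int retval = read_chunk(buf, this_len);

        if (retval <= 0)
            break;
        /* the hardening: never trust retval to fit the local buffer */
        if (retval > (int)sizeof(buf))
            return -1;

        memcpy(dst + copied, buf, retval);   /* copy_to_user() stand-in */
        copied += retval;
        len -= retval;
    }
    return copied;
}

int main(void)
{
    char out[512];
    printf("copied %d bytes\n", read_data(out, (int)sizeof(out)));
    return 0;
}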
61484 diff -urNp linux-3.0.4/kernel/rcutorture.c linux-3.0.4/kernel/rcutorture.c
61485 --- linux-3.0.4/kernel/rcutorture.c     2011-07-21 22:17:23.000000000 -0400
61486 +++ linux-3.0.4/kernel/rcutorture.c     2011-08-23 21:47:56.000000000 -0400
61487 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
61488         { 0 };
61489  static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
61490         { 0 };
61491 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61492 -static atomic_t n_rcu_torture_alloc;
61493 -static atomic_t n_rcu_torture_alloc_fail;
61494 -static atomic_t n_rcu_torture_free;
61495 -static atomic_t n_rcu_torture_mberror;
61496 -static atomic_t n_rcu_torture_error;
61497 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61498 +static atomic_unchecked_t n_rcu_torture_alloc;
61499 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
61500 +static atomic_unchecked_t n_rcu_torture_free;
61501 +static atomic_unchecked_t n_rcu_torture_mberror;
61502 +static atomic_unchecked_t n_rcu_torture_error;
61503  static long n_rcu_torture_boost_ktrerror;
61504  static long n_rcu_torture_boost_rterror;
61505  static long n_rcu_torture_boost_failure;
61506 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
61507  
61508         spin_lock_bh(&rcu_torture_lock);
61509         if (list_empty(&rcu_torture_freelist)) {
61510 -               atomic_inc(&n_rcu_torture_alloc_fail);
61511 +               atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
61512                 spin_unlock_bh(&rcu_torture_lock);
61513                 return NULL;
61514         }
61515 -       atomic_inc(&n_rcu_torture_alloc);
61516 +       atomic_inc_unchecked(&n_rcu_torture_alloc);
61517         p = rcu_torture_freelist.next;
61518         list_del_init(p);
61519         spin_unlock_bh(&rcu_torture_lock);
61520 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
61521  static void
61522  rcu_torture_free(struct rcu_torture *p)
61523  {
61524 -       atomic_inc(&n_rcu_torture_free);
61525 +       atomic_inc_unchecked(&n_rcu_torture_free);
61526         spin_lock_bh(&rcu_torture_lock);
61527         list_add_tail(&p->rtort_free, &rcu_torture_freelist);
61528         spin_unlock_bh(&rcu_torture_lock);
61529 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
61530         i = rp->rtort_pipe_count;
61531         if (i > RCU_TORTURE_PIPE_LEN)
61532                 i = RCU_TORTURE_PIPE_LEN;
61533 -       atomic_inc(&rcu_torture_wcount[i]);
61534 +       atomic_inc_unchecked(&rcu_torture_wcount[i]);
61535         if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61536                 rp->rtort_mbtest = 0;
61537                 rcu_torture_free(rp);
61538 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
61539                 i = rp->rtort_pipe_count;
61540                 if (i > RCU_TORTURE_PIPE_LEN)
61541                         i = RCU_TORTURE_PIPE_LEN;
61542 -               atomic_inc(&rcu_torture_wcount[i]);
61543 +               atomic_inc_unchecked(&rcu_torture_wcount[i]);
61544                 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61545                         rp->rtort_mbtest = 0;
61546                         list_del(&rp->rtort_free);
61547 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
61548                         i = old_rp->rtort_pipe_count;
61549                         if (i > RCU_TORTURE_PIPE_LEN)
61550                                 i = RCU_TORTURE_PIPE_LEN;
61551 -                       atomic_inc(&rcu_torture_wcount[i]);
61552 +                       atomic_inc_unchecked(&rcu_torture_wcount[i]);
61553                         old_rp->rtort_pipe_count++;
61554                         cur_ops->deferred_free(old_rp);
61555                 }
61556 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
61557                 return;
61558         }
61559         if (p->rtort_mbtest == 0)
61560 -               atomic_inc(&n_rcu_torture_mberror);
61561 +               atomic_inc_unchecked(&n_rcu_torture_mberror);
61562         spin_lock(&rand_lock);
61563         cur_ops->read_delay(&rand);
61564         n_rcu_torture_timers++;
61565 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
61566                         continue;
61567                 }
61568                 if (p->rtort_mbtest == 0)
61569 -                       atomic_inc(&n_rcu_torture_mberror);
61570 +                       atomic_inc_unchecked(&n_rcu_torture_mberror);
61571                 cur_ops->read_delay(&rand);
61572                 preempt_disable();
61573                 pipe_count = p->rtort_pipe_count;
61574 @@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
61575                        rcu_torture_current,
61576                        rcu_torture_current_version,
61577                        list_empty(&rcu_torture_freelist),
61578 -                      atomic_read(&n_rcu_torture_alloc),
61579 -                      atomic_read(&n_rcu_torture_alloc_fail),
61580 -                      atomic_read(&n_rcu_torture_free),
61581 -                      atomic_read(&n_rcu_torture_mberror),
61582 +                      atomic_read_unchecked(&n_rcu_torture_alloc),
61583 +                      atomic_read_unchecked(&n_rcu_torture_alloc_fail),
61584 +                      atomic_read_unchecked(&n_rcu_torture_free),
61585 +                      atomic_read_unchecked(&n_rcu_torture_mberror),
61586                        n_rcu_torture_boost_ktrerror,
61587                        n_rcu_torture_boost_rterror,
61588                        n_rcu_torture_boost_failure,
61589                        n_rcu_torture_boosts,
61590                        n_rcu_torture_timers);
61591 -       if (atomic_read(&n_rcu_torture_mberror) != 0 ||
61592 +       if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
61593             n_rcu_torture_boost_ktrerror != 0 ||
61594             n_rcu_torture_boost_rterror != 0 ||
61595             n_rcu_torture_boost_failure != 0)
61596 @@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
61597         cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
61598         if (i > 1) {
61599                 cnt += sprintf(&page[cnt], "!!! ");
61600 -               atomic_inc(&n_rcu_torture_error);
61601 +               atomic_inc_unchecked(&n_rcu_torture_error);
61602                 WARN_ON_ONCE(1);
61603         }
61604         cnt += sprintf(&page[cnt], "Reader Pipe: ");
61605 @@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
61606         cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
61607         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61608                 cnt += sprintf(&page[cnt], " %d",
61609 -                              atomic_read(&rcu_torture_wcount[i]));
61610 +                              atomic_read_unchecked(&rcu_torture_wcount[i]));
61611         }
61612         cnt += sprintf(&page[cnt], "\n");
61613         if (cur_ops->stats)
61614 @@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
61615  
61616         if (cur_ops->cleanup)
61617                 cur_ops->cleanup();
61618 -       if (atomic_read(&n_rcu_torture_error))
61619 +       if (atomic_read_unchecked(&n_rcu_torture_error))
61620                 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
61621         else
61622                 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
61623 @@ -1476,17 +1476,17 @@ rcu_torture_init(void)
61624  
61625         rcu_torture_current = NULL;
61626         rcu_torture_current_version = 0;
61627 -       atomic_set(&n_rcu_torture_alloc, 0);
61628 -       atomic_set(&n_rcu_torture_alloc_fail, 0);
61629 -       atomic_set(&n_rcu_torture_free, 0);
61630 -       atomic_set(&n_rcu_torture_mberror, 0);
61631 -       atomic_set(&n_rcu_torture_error, 0);
61632 +       atomic_set_unchecked(&n_rcu_torture_alloc, 0);
61633 +       atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
61634 +       atomic_set_unchecked(&n_rcu_torture_free, 0);
61635 +       atomic_set_unchecked(&n_rcu_torture_mberror, 0);
61636 +       atomic_set_unchecked(&n_rcu_torture_error, 0);
61637         n_rcu_torture_boost_ktrerror = 0;
61638         n_rcu_torture_boost_rterror = 0;
61639         n_rcu_torture_boost_failure = 0;
61640         n_rcu_torture_boosts = 0;
61641         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
61642 -               atomic_set(&rcu_torture_wcount[i], 0);
61643 +               atomic_set_unchecked(&rcu_torture_wcount[i], 0);
61644         for_each_possible_cpu(cpu) {
61645                 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61646                         per_cpu(rcu_torture_count, cpu)[i] = 0;
61647 diff -urNp linux-3.0.4/kernel/rcutree.c linux-3.0.4/kernel/rcutree.c
61648 --- linux-3.0.4/kernel/rcutree.c        2011-07-21 22:17:23.000000000 -0400
61649 +++ linux-3.0.4/kernel/rcutree.c        2011-08-23 21:47:56.000000000 -0400
61650 @@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
61651  /*
61652   * Do softirq processing for the current CPU.
61653   */
61654 -static void rcu_process_callbacks(struct softirq_action *unused)
61655 +static void rcu_process_callbacks(void)
61656  {
61657         __rcu_process_callbacks(&rcu_sched_state,
61658                                 &__get_cpu_var(rcu_sched_data));
61659 diff -urNp linux-3.0.4/kernel/rcutree_plugin.h linux-3.0.4/kernel/rcutree_plugin.h
61660 --- linux-3.0.4/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
61661 +++ linux-3.0.4/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
61662 @@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
61663  
61664         /* Clean up and exit. */
61665         smp_mb(); /* ensure expedited GP seen before counter increment. */
61666 -       ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
61667 +       ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
61668  unlock_mb_ret:
61669         mutex_unlock(&sync_rcu_preempt_exp_mutex);
61670  mb_ret:
61671 @@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
61672  
61673  #else /* #ifndef CONFIG_SMP */
61674  
61675 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
61676 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
61677 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
61678 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
61679  
61680  static int synchronize_sched_expedited_cpu_stop(void *data)
61681  {
61682 @@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
61683         int firstsnap, s, snap, trycount = 0;
61684  
61685         /* Note that atomic_inc_return() implies full memory barrier. */
61686 -       firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
61687 +       firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
61688         get_online_cpus();
61689  
61690         /*
61691 @@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
61692                 }
61693  
61694                 /* Check to see if someone else did our work for us. */
61695 -               s = atomic_read(&sync_sched_expedited_done);
61696 +               s = atomic_read_unchecked(&sync_sched_expedited_done);
61697                 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
61698                         smp_mb(); /* ensure test happens before caller kfree */
61699                         return;
61700 @@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
61701                  * grace period works for us.
61702                  */
61703                 get_online_cpus();
61704 -               snap = atomic_read(&sync_sched_expedited_started) - 1;
61705 +               snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
61706                 smp_mb(); /* ensure read is before try_stop_cpus(). */
61707         }
61708  
61709 @@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
61710          * than we did beat us to the punch.
61711          */
61712         do {
61713 -               s = atomic_read(&sync_sched_expedited_done);
61714 +               s = atomic_read_unchecked(&sync_sched_expedited_done);
61715                 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
61716                         smp_mb(); /* ensure test happens before caller kfree */
61717                         break;
61718                 }
61719 -       } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
61720 +       } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
61721  
61722         put_online_cpus();
61723  }
61724 diff -urNp linux-3.0.4/kernel/relay.c linux-3.0.4/kernel/relay.c
61725 --- linux-3.0.4/kernel/relay.c  2011-07-21 22:17:23.000000000 -0400
61726 +++ linux-3.0.4/kernel/relay.c  2011-08-23 21:48:14.000000000 -0400
61727 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
61728         };
61729         ssize_t ret;
61730  
61731 +       pax_track_stack();
61732 +
61733         if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
61734                 return 0;
61735         if (splice_grow_spd(pipe, &spd))
61736 diff -urNp linux-3.0.4/kernel/resource.c linux-3.0.4/kernel/resource.c
61737 --- linux-3.0.4/kernel/resource.c       2011-07-21 22:17:23.000000000 -0400
61738 +++ linux-3.0.4/kernel/resource.c       2011-08-23 21:48:14.000000000 -0400
61739 @@ -141,8 +141,18 @@ static const struct file_operations proc
61740  
61741  static int __init ioresources_init(void)
61742  {
61743 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
61744 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61745 +       proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
61746 +       proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
61747 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61748 +       proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
61749 +       proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
61750 +#endif
61751 +#else
61752         proc_create("ioports", 0, NULL, &proc_ioports_operations);
61753         proc_create("iomem", 0, NULL, &proc_iomem_operations);
61754 +#endif
61755         return 0;
61756  }
61757  __initcall(ioresources_init);
61758 diff -urNp linux-3.0.4/kernel/rtmutex-tester.c linux-3.0.4/kernel/rtmutex-tester.c
61759 --- linux-3.0.4/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
61760 +++ linux-3.0.4/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
61761 @@ -20,7 +20,7 @@
61762  #define MAX_RT_TEST_MUTEXES    8
61763  
61764  static spinlock_t rttest_lock;
61765 -static atomic_t rttest_event;
61766 +static atomic_unchecked_t rttest_event;
61767  
61768  struct test_thread_data {
61769         int                     opcode;
61770 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
61771  
61772         case RTTEST_LOCKCONT:
61773                 td->mutexes[td->opdata] = 1;
61774 -               td->event = atomic_add_return(1, &rttest_event);
61775 +               td->event = atomic_add_return_unchecked(1, &rttest_event);
61776                 return 0;
61777  
61778         case RTTEST_RESET:
61779 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
61780                 return 0;
61781  
61782         case RTTEST_RESETEVENT:
61783 -               atomic_set(&rttest_event, 0);
61784 +               atomic_set_unchecked(&rttest_event, 0);
61785                 return 0;
61786  
61787         default:
61788 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
61789                         return ret;
61790  
61791                 td->mutexes[id] = 1;
61792 -               td->event = atomic_add_return(1, &rttest_event);
61793 +               td->event = atomic_add_return_unchecked(1, &rttest_event);
61794                 rt_mutex_lock(&mutexes[id]);
61795 -               td->event = atomic_add_return(1, &rttest_event);
61796 +               td->event = atomic_add_return_unchecked(1, &rttest_event);
61797                 td->mutexes[id] = 4;
61798                 return 0;
61799  
61800 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
61801                         return ret;
61802  
61803                 td->mutexes[id] = 1;
61804 -               td->event = atomic_add_return(1, &rttest_event);
61805 +               td->event = atomic_add_return_unchecked(1, &rttest_event);
61806                 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
61807 -               td->event = atomic_add_return(1, &rttest_event);
61808 +               td->event = atomic_add_return_unchecked(1, &rttest_event);
61809                 td->mutexes[id] = ret ? 0 : 4;
61810                 return ret ? -EINTR : 0;
61811  
61812 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
61813                 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
61814                         return ret;
61815  
61816 -               td->event = atomic_add_return(1, &rttest_event);
61817 +               td->event = atomic_add_return_unchecked(1, &rttest_event);
61818                 rt_mutex_unlock(&mutexes[id]);
61819 -               td->event = atomic_add_return(1, &rttest_event);
61820 +               td->event = atomic_add_return_unchecked(1, &rttest_event);
61821                 td->mutexes[id] = 0;
61822                 return 0;
61823  
61824 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
61825                         break;
61826  
61827                 td->mutexes[dat] = 2;
61828 -               td->event = atomic_add_return(1, &rttest_event);
61829 +               td->event = atomic_add_return_unchecked(1, &rttest_event);
61830                 break;
61831  
61832         default:
61833 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
61834                         return;
61835  
61836                 td->mutexes[dat] = 3;
61837 -               td->event = atomic_add_return(1, &rttest_event);
61838 +               td->event = atomic_add_return_unchecked(1, &rttest_event);
61839                 break;
61840  
61841         case RTTEST_LOCKNOWAIT:
61842 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
61843                         return;
61844  
61845                 td->mutexes[dat] = 1;
61846 -               td->event = atomic_add_return(1, &rttest_event);
61847 +               td->event = atomic_add_return_unchecked(1, &rttest_event);
61848                 return;
61849  
61850         default:
61851 diff -urNp linux-3.0.4/kernel/sched_autogroup.c linux-3.0.4/kernel/sched_autogroup.c
61852 --- linux-3.0.4/kernel/sched_autogroup.c        2011-07-21 22:17:23.000000000 -0400
61853 +++ linux-3.0.4/kernel/sched_autogroup.c        2011-08-23 21:47:56.000000000 -0400
61854 @@ -7,7 +7,7 @@
61855  
61856  unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
61857  static struct autogroup autogroup_default;
61858 -static atomic_t autogroup_seq_nr;
61859 +static atomic_unchecked_t autogroup_seq_nr;
61860  
61861  static void __init autogroup_init(struct task_struct *init_task)
61862  {
61863 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
61864  
61865         kref_init(&ag->kref);
61866         init_rwsem(&ag->lock);
61867 -       ag->id = atomic_inc_return(&autogroup_seq_nr);
61868 +       ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
61869         ag->tg = tg;
61870  #ifdef CONFIG_RT_GROUP_SCHED
61871         /*
61872 diff -urNp linux-3.0.4/kernel/sched.c linux-3.0.4/kernel/sched.c
61873 --- linux-3.0.4/kernel/sched.c  2011-07-21 22:17:23.000000000 -0400
61874 +++ linux-3.0.4/kernel/sched.c  2011-08-23 21:48:14.000000000 -0400
61875 @@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
61876         struct rq *rq;
61877         int cpu;
61878  
61879 +       pax_track_stack();
61880 +
61881  need_resched:
61882         preempt_disable();
61883         cpu = smp_processor_id();
61884 @@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
61885         /* convert nice value [19,-20] to rlimit style value [1,40] */
61886         int nice_rlim = 20 - nice;
61887  
61888 +       gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
61889 +
61890         return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
61891                 capable(CAP_SYS_NICE));
61892  }
61893 @@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
61894         if (nice > 19)
61895                 nice = 19;
61896  
61897 -       if (increment < 0 && !can_nice(current, nice))
61898 +       if (increment < 0 && (!can_nice(current, nice) ||
61899 +                             gr_handle_chroot_nice()))
61900                 return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM;
61901  
61902         retval = security_task_setnice(current, nice);
61903 @@ -5111,6 +5116,7 @@ recheck:
61904                         unsigned long rlim_rtprio =
61905                                         task_rlimit(p, RLIMIT_RTPRIO);
61906  
61907 +                        gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
61908                         /* can't set/change the rt policy */
61909                         if (policy != p->policy && !rlim_rtprio)
61910                                 return -EPERM;
61911 diff -urNp linux-3.0.4/kernel/sched_fair.c linux-3.0.4/kernel/sched_fair.c
61912 --- linux-3.0.4/kernel/sched_fair.c     2011-07-21 22:17:23.000000000 -0400
61913 +++ linux-3.0.4/kernel/sched_fair.c     2011-08-23 21:47:56.000000000 -0400
61914 @@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
61915   * run_rebalance_domains is triggered when needed from the scheduler tick.
61916   * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
61917   */
61918 -static void run_rebalance_domains(struct softirq_action *h)
61919 +static void run_rebalance_domains(void)
61920  {
61921         int this_cpu = smp_processor_id();
61922         struct rq *this_rq = cpu_rq(this_cpu);
61923 diff -urNp linux-3.0.4/kernel/signal.c linux-3.0.4/kernel/signal.c
61924 --- linux-3.0.4/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
61925 +++ linux-3.0.4/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
61926 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
61927  
61928  int print_fatal_signals __read_mostly;
61929  
61930 -static void __user *sig_handler(struct task_struct *t, int sig)
61931 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
61932  {
61933         return t->sighand->action[sig - 1].sa.sa_handler;
61934  }
61935  
61936 -static int sig_handler_ignored(void __user *handler, int sig)
61937 +static int sig_handler_ignored(__sighandler_t handler, int sig)
61938  {
61939         /* Is it explicitly or implicitly ignored? */
61940         return handler == SIG_IGN ||
61941 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
61942  static int sig_task_ignored(struct task_struct *t, int sig,
61943                 int from_ancestor_ns)
61944  {
61945 -       void __user *handler;
61946 +       __sighandler_t handler;
61947  
61948         handler = sig_handler(t, sig);
61949  
61950 @@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
61951         atomic_inc(&user->sigpending);
61952         rcu_read_unlock();
61953  
61954 +       if (!override_rlimit)
61955 +               gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
61956 +
61957         if (override_rlimit ||
61958             atomic_read(&user->sigpending) <=
61959                         task_rlimit(t, RLIMIT_SIGPENDING)) {
61960 @@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
61961  
61962  int unhandled_signal(struct task_struct *tsk, int sig)
61963  {
61964 -       void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
61965 +       __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
61966         if (is_global_init(tsk))
61967                 return 1;
61968         if (handler != SIG_IGN && handler != SIG_DFL)
61969 @@ -797,6 +797,14 @@
61970                         sig, info, t, vx_task_xid(t), t->pid, current->xid);
61971                 return error;
61972         }
61973 +
61974 +       /* allow glibc communication via tgkill to other threads in our
61975 +          thread group */
61976 +       if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
61977 +            sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
61978 +           && gr_handle_signal(t, sig))
61979 +               return -EPERM;
61980 +
61981  /* skip: */
61982         return security_task_kill(t, info, sig, 0);
61983  }
61984 @@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
61985         return send_signal(sig, info, p, 1);
61986  }
61987  
61988 -static int
61989 +int
61990  specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
61991  {
61992         return send_signal(sig, info, t, 0);
61993 @@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
61994         unsigned long int flags;
61995         int ret, blocked, ignored;
61996         struct k_sigaction *action;
61997 +       int is_unhandled = 0;
61998  
61999         spin_lock_irqsave(&t->sighand->siglock, flags);
62000         action = &t->sighand->action[sig-1];
62001 @@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
62002         }
62003         if (action->sa.sa_handler == SIG_DFL)
62004                 t->signal->flags &= ~SIGNAL_UNKILLABLE;
62005 +       if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
62006 +               is_unhandled = 1;
62007         ret = specific_send_sig_info(sig, info, t);
62008         spin_unlock_irqrestore(&t->sighand->siglock, flags);
62009  
62010 +       /* only deal with unhandled signals, java etc trigger SIGSEGV during
62011 +          normal operation */
62012 +       if (is_unhandled) {
62013 +               gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
62014 +               gr_handle_crash(t, sig);
62015 +       }
62016 +
62017         return ret;
62018  }
62019  
62020 @@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct 
62021         ret = check_kill_permission(sig, info, p);
62022         rcu_read_unlock();
62023  
62024 -       if (!ret && sig)
62025 +       if (!ret && sig) {
62026                 ret = do_send_sig_info(sig, info, p, true);
62027 +               if (!ret)
62028 +                       gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
62029 +       }
62030  
62031         return ret;
62032  }
62033 @@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
62034  {
62035         siginfo_t info;
62036  
62037 +       pax_track_stack();
62038 +
62039         BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
62040  
62041         memset(&info, 0, sizeof info);
62042 @@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid, 
62043         int error = -ESRCH;
62044  
62045         rcu_read_lock();
62046 -       p = find_task_by_vpid(pid);
62047 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62048 +       /* allow glibc communication via tgkill to other threads in our
62049 +          thread group */
62050 +       if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
62051 +           sig == (SIGRTMIN+1) && tgid == info->si_pid)            
62052 +               p = find_task_by_vpid_unrestricted(pid);
62053 +       else
62054 +#endif
62055 +               p = find_task_by_vpid(pid);
62056         if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
62057                 error = check_kill_permission(sig, info, p);
62058                 /*
62059 diff -urNp linux-3.0.4/kernel/smp.c linux-3.0.4/kernel/smp.c
62060 --- linux-3.0.4/kernel/smp.c    2011-07-21 22:17:23.000000000 -0400
62061 +++ linux-3.0.4/kernel/smp.c    2011-08-23 21:47:56.000000000 -0400
62062 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
62063  }
62064  EXPORT_SYMBOL(smp_call_function);
62065  
62066 -void ipi_call_lock(void)
62067 +void ipi_call_lock(void) __acquires(call_function.lock)
62068  {
62069         raw_spin_lock(&call_function.lock);
62070  }
62071  
62072 -void ipi_call_unlock(void)
62073 +void ipi_call_unlock(void) __releases(call_function.lock)
62074  {
62075         raw_spin_unlock(&call_function.lock);
62076  }
62077  
62078 -void ipi_call_lock_irq(void)
62079 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
62080  {
62081         raw_spin_lock_irq(&call_function.lock);
62082  }
62083  
62084 -void ipi_call_unlock_irq(void)
62085 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
62086  {
62087         raw_spin_unlock_irq(&call_function.lock);
62088  }
62089 diff -urNp linux-3.0.4/kernel/softirq.c linux-3.0.4/kernel/softirq.c
62090 --- linux-3.0.4/kernel/softirq.c        2011-07-21 22:17:23.000000000 -0400
62091 +++ linux-3.0.4/kernel/softirq.c        2011-08-23 21:47:56.000000000 -0400
62092 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
62093  
62094  DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62095  
62096 -char *softirq_to_name[NR_SOFTIRQS] = {
62097 +const char * const softirq_to_name[NR_SOFTIRQS] = {
62098         "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62099         "TASKLET", "SCHED", "HRTIMER", "RCU"
62100  };
62101 @@ -235,7 +235,7 @@ restart:
62102                         kstat_incr_softirqs_this_cpu(vec_nr);
62103  
62104                         trace_softirq_entry(vec_nr);
62105 -                       h->action(h);
62106 +                       h->action();
62107                         trace_softirq_exit(vec_nr);
62108                         if (unlikely(prev_count != preempt_count())) {
62109                                 printk(KERN_ERR "huh, entered softirq %u %s %p"
62110 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
62111         local_irq_restore(flags);
62112  }
62113  
62114 -void open_softirq(int nr, void (*action)(struct softirq_action *))
62115 +void open_softirq(int nr, void (*action)(void))
62116  {
62117 -       softirq_vec[nr].action = action;
62118 +       pax_open_kernel();
62119 +       *(void **)&softirq_vec[nr].action = action;
62120 +       pax_close_kernel();
62121  }
62122  
62123  /*
62124 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct 
62125  
62126  EXPORT_SYMBOL(__tasklet_hi_schedule_first);
62127  
62128 -static void tasklet_action(struct softirq_action *a)
62129 +static void tasklet_action(void)
62130  {
62131         struct tasklet_struct *list;
62132  
62133 @@ -476,7 +478,7 @@ static void tasklet_action(struct softir
62134         }
62135  }
62136  
62137 -static void tasklet_hi_action(struct softirq_action *a)
62138 +static void tasklet_hi_action(void)
62139  {
62140         struct tasklet_struct *list;
62141  
62142 diff -urNp linux-3.0.4/kernel/sys.c linux-3.0.4/kernel/sys.c
62143 --- linux-3.0.4/kernel/sys.c    2011-08-29 23:26:14.000000000 -0400
62144 +++ linux-3.0.4/kernel/sys.c    2011-08-29 23:26:27.000000000 -0400
62145 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
62146                 error = -EACCES;
62147                 goto out;
62148         }
62149 +
62150 +       if (gr_handle_chroot_setpriority(p, niceval)) {
62151 +               error = -EACCES;
62152 +               goto out;
62153 +       }
62154 +
62155         no_nice = security_task_setnice(p, niceval);
62156         if (no_nice) {
62157                 error = no_nice;
62158 @@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
62159                         goto error;
62160         }
62161  
62162 +       if (gr_check_group_change(new->gid, new->egid, -1))
62163 +               goto error;
62164 +
62165         if (rgid != (gid_t) -1 ||
62166             (egid != (gid_t) -1 && egid != old->gid))
62167                 new->sgid = new->egid;
62168 @@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62169         old = current_cred();
62170  
62171         retval = -EPERM;
62172 +
62173 +       if (gr_check_group_change(gid, gid, gid))
62174 +               goto error;
62175 +
62176         if (nsown_capable(CAP_SETGID))
62177                 new->gid = new->egid = new->sgid = new->fsgid = gid;
62178         else if (gid == old->gid || gid == old->sgid)
62179 @@ -595,11 +608,18 @@ static int set_user(struct cred *new)
62180         if (!new_user)
62181                 return -EAGAIN;
62182  
62183 +       /*
62184 +        * We don't fail in case of NPROC limit excess here because too many
62185 +        * poorly written programs don't check set*uid() return code, assuming
62186 +        * it never fails if called by root.  We may still enforce NPROC limit
62187 +        * for programs doing set*uid()+execve() by harmlessly deferring the
62188 +        * failure to the execve() stage.
62189 +        */
62190         if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
62191 -                       new_user != INIT_USER) {
62192 -               free_uid(new_user);
62193 -               return -EAGAIN;
62194 -       }
62195 +                       new_user != INIT_USER)
62196 +               current->flags |= PF_NPROC_EXCEEDED;
62197 +       else
62198 +               current->flags &= ~PF_NPROC_EXCEEDED;
62199  
62200         free_uid(new->user);
62201         new->user = new_user;
62202 @@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62203                         goto error;
62204         }
62205  
62206 +       if (gr_check_user_change(new->uid, new->euid, -1))
62207 +               goto error;
62208 +
62209         if (new->uid != old->uid) {
62210                 retval = set_user(new);
62211                 if (retval < 0)
62212 @@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62213         old = current_cred();
62214  
62215         retval = -EPERM;
62216 +
62217 +       if (gr_check_crash_uid(uid))
62218 +               goto error;
62219 +       if (gr_check_user_change(uid, uid, uid))
62220 +               goto error;
62221 +
62222         if (nsown_capable(CAP_SETUID)) {
62223                 new->suid = new->uid = uid;
62224                 if (uid != old->uid) {
62225 @@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, 
62226                         goto error;
62227         }
62228  
62229 +       if (gr_check_user_change(ruid, euid, -1))
62230 +               goto error;
62231 +
62232         if (ruid != (uid_t) -1) {
62233                 new->uid = ruid;
62234                 if (ruid != old->uid) {
62235 @@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, 
62236                         goto error;
62237         }
62238  
62239 +       if (gr_check_group_change(rgid, egid, -1))
62240 +               goto error;
62241 +
62242         if (rgid != (gid_t) -1)
62243                 new->gid = rgid;
62244         if (egid != (gid_t) -1)
62245 @@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62246         old = current_cred();
62247         old_fsuid = old->fsuid;
62248  
62249 +       if (gr_check_user_change(-1, -1, uid))
62250 +               goto error;
62251 +
62252         if (uid == old->uid  || uid == old->euid  ||
62253             uid == old->suid || uid == old->fsuid ||
62254             nsown_capable(CAP_SETUID)) {
62255 @@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62256                 }
62257         }
62258  
62259 +error:
62260         abort_creds(new);
62261         return old_fsuid;
62262  
62263 @@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62264         if (gid == old->gid  || gid == old->egid  ||
62265             gid == old->sgid || gid == old->fsgid ||
62266             nsown_capable(CAP_SETGID)) {
62267 +               if (gr_check_group_change(-1, -1, gid))
62268 +                       goto error;
62269 +
62270                 if (gid != old_fsgid) {
62271                         new->fsgid = gid;
62272                         goto change_okay;
62273                 }
62274         }
62275  
62276 +error:
62277         abort_creds(new);
62278         return old_fsgid;
62279  
62280 @@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62281                         error = get_dumpable(me->mm);
62282                         break;
62283                 case PR_SET_DUMPABLE:
62284 -                       if (arg2 < 0 || arg2 > 1) {
62285 +                       if (arg2 > 1) {
62286                                 error = -EINVAL;
62287                                 break;
62288                         }
62289 diff -urNp linux-3.0.4/kernel/sysctl.c linux-3.0.4/kernel/sysctl.c
62290 --- linux-3.0.4/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
62291 +++ linux-3.0.4/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
62292 @@ -85,6 +85,13 @@
62293  
62294  
62295  #if defined(CONFIG_SYSCTL)
62296 +#include <linux/grsecurity.h>
62297 +#include <linux/grinternal.h>
62298 +
62299 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62300 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62301 +                               const int op);
62302 +extern int gr_handle_chroot_sysctl(const int op);
62303  
62304  /* External variables not in a header file. */
62305  extern int sysctl_overcommit_memory;
62306 @@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
62307  }
62308  
62309  #endif
62310 +extern struct ctl_table grsecurity_table[];
62311  
62312  static struct ctl_table root_table[];
62313  static struct ctl_table_root sysctl_table_root;
62314 @@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
62315  int sysctl_legacy_va_layout;
62316  #endif
62317  
62318 +#ifdef CONFIG_PAX_SOFTMODE
62319 +static ctl_table pax_table[] = {
62320 +       {
62321 +               .procname       = "softmode",
62322 +               .data           = &pax_softmode,
62323 +               .maxlen         = sizeof(unsigned int),
62324 +               .mode           = 0600,
62325 +               .proc_handler   = &proc_dointvec,
62326 +       },
62327 +
62328 +       { }
62329 +};
62330 +#endif
62331 +
62332  /* The default sysctl tables: */
62333  
62334  static struct ctl_table root_table[] = {
62335 @@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
62336  #endif
62337  
62338  static struct ctl_table kern_table[] = {
62339 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62340 +       {
62341 +               .procname       = "grsecurity",
62342 +               .mode           = 0500,
62343 +               .child          = grsecurity_table,
62344 +       },
62345 +#endif
62346 +
62347 +#ifdef CONFIG_PAX_SOFTMODE
62348 +       {
62349 +               .procname       = "pax",
62350 +               .mode           = 0500,
62351 +               .child          = pax_table,
62352 +       },
62353 +#endif
62354 +
62355         {
62356                 .procname       = "sched_child_runs_first",
62357                 .data           = &sysctl_sched_child_runs_first,
62358 @@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
62359                 .data           = &modprobe_path,
62360                 .maxlen         = KMOD_PATH_LEN,
62361                 .mode           = 0644,
62362 -               .proc_handler   = proc_dostring,
62363 +               .proc_handler   = proc_dostring_modpriv,
62364         },
62365         {
62366                 .procname       = "modules_disabled",
62367 @@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
62368                 .extra1         = &zero,
62369                 .extra2         = &one,
62370         },
62371 +#endif
62372         {
62373                 .procname       = "kptr_restrict",
62374                 .data           = &kptr_restrict,
62375                 .maxlen         = sizeof(int),
62376                 .mode           = 0644,
62377                 .proc_handler   = proc_dmesg_restrict,
62378 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62379 +               .extra1         = &two,
62380 +#else
62381                 .extra1         = &zero,
62382 +#endif
62383                 .extra2         = &two,
62384         },
62385 -#endif
62386         {
62387                 .procname       = "ngroups_max",
62388                 .data           = &ngroups_max,
62389 @@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
62390                 .proc_handler   = proc_dointvec_minmax,
62391                 .extra1         = &zero,
62392         },
62393 +       {
62394 +               .procname       = "heap_stack_gap",
62395 +               .data           = &sysctl_heap_stack_gap,
62396 +               .maxlen         = sizeof(sysctl_heap_stack_gap),
62397 +               .mode           = 0644,
62398 +               .proc_handler   = proc_doulongvec_minmax,
62399 +       },
62400  #else
62401         {
62402                 .procname       = "nr_trim_pages",
62403 @@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
62404  int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
62405  {
62406         int mode;
62407 +       int error;
62408 +
62409 +       if (table->parent != NULL && table->parent->procname != NULL &&
62410 +          table->procname != NULL &&
62411 +           gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62412 +               return -EACCES;
62413 +       if (gr_handle_chroot_sysctl(op))
62414 +               return -EACCES;
62415 +       error = gr_handle_sysctl(table, op);
62416 +       if (error)
62417 +               return error;
62418  
62419         if (root->permissions)
62420                 mode = root->permissions(root, current->nsproxy, table);
62421 @@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
62422                                buffer, lenp, ppos);
62423  }
62424  
62425 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62426 +                 void __user *buffer, size_t *lenp, loff_t *ppos)
62427 +{
62428 +       if (write && !capable(CAP_SYS_MODULE))
62429 +               return -EPERM;
62430 +
62431 +       return _proc_do_string(table->data, table->maxlen, write,
62432 +                              buffer, lenp, ppos);
62433 +}
62434 +
62435  static size_t proc_skip_spaces(char **buf)
62436  {
62437         size_t ret;
62438 @@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
62439         len = strlen(tmp);
62440         if (len > *size)
62441                 len = *size;
62442 +       if (len > sizeof(tmp))
62443 +               len = sizeof(tmp);
62444         if (copy_to_user(*buf, tmp, len))
62445                 return -EFAULT;
62446         *size -= len;
62447 @@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
62448                         *i = val;
62449                 } else {
62450                         val = convdiv * (*i) / convmul;
62451 -                       if (!first)
62452 +                       if (!first) {
62453                                 err = proc_put_char(&buffer, &left, '\t');
62454 +                               if (err)
62455 +                                       break;
62456 +                       }
62457                         err = proc_put_long(&buffer, &left, val, false);
62458                         if (err)
62459                                 break;
62460 @@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
62461         return -ENOSYS;
62462  }
62463  
62464 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62465 +                 void __user *buffer, size_t *lenp, loff_t *ppos)
62466 +{
62467 +       return -ENOSYS;
62468 +}
62469 +
62470  int proc_dointvec(struct ctl_table *table, int write,
62471                   void __user *buffer, size_t *lenp, loff_t *ppos)
62472  {
62473 @@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
62474  EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
62475  EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
62476  EXPORT_SYMBOL(proc_dostring);
62477 +EXPORT_SYMBOL(proc_dostring_modpriv);
62478  EXPORT_SYMBOL(proc_doulongvec_minmax);
62479  EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
62480  EXPORT_SYMBOL(register_sysctl_table);
62481 diff -urNp linux-3.0.4/kernel/sysctl_check.c linux-3.0.4/kernel/sysctl_check.c
62482 --- linux-3.0.4/kernel/sysctl_check.c   2011-07-21 22:17:23.000000000 -0400
62483 +++ linux-3.0.4/kernel/sysctl_check.c   2011-08-23 21:48:14.000000000 -0400
62484 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
62485                                 set_fail(&fail, table, "Directory with extra2");
62486                 } else {
62487                         if ((table->proc_handler == proc_dostring) ||
62488 +                           (table->proc_handler == proc_dostring_modpriv) ||
62489                             (table->proc_handler == proc_dointvec) ||
62490                             (table->proc_handler == proc_dointvec_minmax) ||
62491                             (table->proc_handler == proc_dointvec_jiffies) ||
62492 diff -urNp linux-3.0.4/kernel/taskstats.c linux-3.0.4/kernel/taskstats.c
62493 --- linux-3.0.4/kernel/taskstats.c      2011-07-21 22:17:23.000000000 -0400
62494 +++ linux-3.0.4/kernel/taskstats.c      2011-08-23 21:48:14.000000000 -0400
62495 @@ -27,9 +27,12 @@
62496  #include <linux/cgroup.h>
62497  #include <linux/fs.h>
62498  #include <linux/file.h>
62499 +#include <linux/grsecurity.h>
62500  #include <net/genetlink.h>
62501  #include <asm/atomic.h>
62502  
62503 +extern int gr_is_taskstats_denied(int pid);
62504 +
62505  /*
62506   * Maximum length of a cpumask that can be specified in
62507   * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
62508 @@ -558,6 +561,9 @@ err:
62509  
62510  static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
62511  {
62512 +       if (gr_is_taskstats_denied(current->pid))
62513 +               return -EACCES;
62514 +
62515         if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
62516                 return cmd_attr_register_cpumask(info);
62517         else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
62518 diff -urNp linux-3.0.4/kernel/time/alarmtimer.c linux-3.0.4/kernel/time/alarmtimer.c
62519 --- linux-3.0.4/kernel/time/alarmtimer.c        2011-07-21 22:17:23.000000000 -0400
62520 +++ linux-3.0.4/kernel/time/alarmtimer.c        2011-08-23 21:47:56.000000000 -0400
62521 @@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
62522  {
62523         int error = 0;
62524         int i;
62525 -       struct k_clock alarm_clock = {
62526 +       static struct k_clock alarm_clock = {
62527                 .clock_getres   = alarm_clock_getres,
62528                 .clock_get      = alarm_clock_get,
62529                 .timer_create   = alarm_timer_create,
62530 diff -urNp linux-3.0.4/kernel/time/tick-broadcast.c linux-3.0.4/kernel/time/tick-broadcast.c
62531 --- linux-3.0.4/kernel/time/tick-broadcast.c    2011-07-21 22:17:23.000000000 -0400
62532 +++ linux-3.0.4/kernel/time/tick-broadcast.c    2011-08-23 21:47:56.000000000 -0400
62533 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
62534                  * then clear the broadcast bit.
62535                  */
62536                 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
62537 -                       int cpu = smp_processor_id();
62538 +                       cpu = smp_processor_id();
62539  
62540                         cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
62541                         tick_broadcast_clear_oneshot(cpu);
62542 diff -urNp linux-3.0.4/kernel/time/timekeeping.c linux-3.0.4/kernel/time/timekeeping.c
62543 --- linux-3.0.4/kernel/time/timekeeping.c       2011-07-21 22:17:23.000000000 -0400
62544 +++ linux-3.0.4/kernel/time/timekeeping.c       2011-08-23 21:48:14.000000000 -0400
62545 @@ -14,6 +14,7 @@
62546  #include <linux/init.h>
62547  #include <linux/mm.h>
62548  #include <linux/sched.h>
62549 +#include <linux/grsecurity.h>
62550  #include <linux/syscore_ops.h>
62551  #include <linux/clocksource.h>
62552  #include <linux/jiffies.h>
62553 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
62554         if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
62555                 return -EINVAL;
62556  
62557 +       gr_log_timechange();
62558 +
62559         write_seqlock_irqsave(&xtime_lock, flags);
62560  
62561         timekeeping_forward_now();
62562 diff -urNp linux-3.0.4/kernel/time/timer_list.c linux-3.0.4/kernel/time/timer_list.c
62563 --- linux-3.0.4/kernel/time/timer_list.c        2011-07-21 22:17:23.000000000 -0400
62564 +++ linux-3.0.4/kernel/time/timer_list.c        2011-08-23 21:48:14.000000000 -0400
62565 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
62566  
62567  static void print_name_offset(struct seq_file *m, void *sym)
62568  {
62569 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62570 +       SEQ_printf(m, "<%p>", NULL);
62571 +#else
62572         char symname[KSYM_NAME_LEN];
62573  
62574         if (lookup_symbol_name((unsigned long)sym, symname) < 0)
62575                 SEQ_printf(m, "<%pK>", sym);
62576         else
62577                 SEQ_printf(m, "%s", symname);
62578 +#endif
62579  }
62580  
62581  static void
62582 @@ -112,7 +116,11 @@ next_one:
62583  static void
62584  print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
62585  {
62586 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62587 +       SEQ_printf(m, "  .base:       %p\n", NULL);
62588 +#else
62589         SEQ_printf(m, "  .base:       %pK\n", base);
62590 +#endif
62591         SEQ_printf(m, "  .index:      %d\n",
62592                         base->index);
62593         SEQ_printf(m, "  .resolution: %Lu nsecs\n",
62594 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
62595  {
62596         struct proc_dir_entry *pe;
62597  
62598 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62599 +       pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
62600 +#else
62601         pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
62602 +#endif
62603         if (!pe)
62604                 return -ENOMEM;
62605         return 0;
62606 diff -urNp linux-3.0.4/kernel/time/timer_stats.c linux-3.0.4/kernel/time/timer_stats.c
62607 --- linux-3.0.4/kernel/time/timer_stats.c       2011-07-21 22:17:23.000000000 -0400
62608 +++ linux-3.0.4/kernel/time/timer_stats.c       2011-08-23 21:48:14.000000000 -0400
62609 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
62610  static unsigned long nr_entries;
62611  static struct entry entries[MAX_ENTRIES];
62612  
62613 -static atomic_t overflow_count;
62614 +static atomic_unchecked_t overflow_count;
62615  
62616  /*
62617   * The entries are in a hash-table, for fast lookup:
62618 @@ -140,7 +140,7 @@ static void reset_entries(void)
62619         nr_entries = 0;
62620         memset(entries, 0, sizeof(entries));
62621         memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
62622 -       atomic_set(&overflow_count, 0);
62623 +       atomic_set_unchecked(&overflow_count, 0);
62624  }
62625  
62626  static struct entry *alloc_entry(void)
62627 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
62628         if (likely(entry))
62629                 entry->count++;
62630         else
62631 -               atomic_inc(&overflow_count);
62632 +               atomic_inc_unchecked(&overflow_count);
62633  
62634   out_unlock:
62635         raw_spin_unlock_irqrestore(lock, flags);
62636 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
62637  
62638  static void print_name_offset(struct seq_file *m, unsigned long addr)
62639  {
62640 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62641 +       seq_printf(m, "<%p>", NULL);
62642 +#else
62643         char symname[KSYM_NAME_LEN];
62644  
62645         if (lookup_symbol_name(addr, symname) < 0)
62646                 seq_printf(m, "<%p>", (void *)addr);
62647         else
62648                 seq_printf(m, "%s", symname);
62649 +#endif
62650  }
62651  
62652  static int tstats_show(struct seq_file *m, void *v)
62653 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
62654  
62655         seq_puts(m, "Timer Stats Version: v0.2\n");
62656         seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
62657 -       if (atomic_read(&overflow_count))
62658 +       if (atomic_read_unchecked(&overflow_count))
62659                 seq_printf(m, "Overflow: %d entries\n",
62660 -                       atomic_read(&overflow_count));
62661 +                       atomic_read_unchecked(&overflow_count));
62662  
62663         for (i = 0; i < nr_entries; i++) {
62664                 entry = entries + i;
62665 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
62666  {
62667         struct proc_dir_entry *pe;
62668  
62669 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62670 +       pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
62671 +#else
62672         pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
62673 +#endif
62674         if (!pe)
62675                 return -ENOMEM;
62676         return 0;
62677 diff -urNp linux-3.0.4/kernel/time.c linux-3.0.4/kernel/time.c
62678 --- linux-3.0.4/kernel/time.c   2011-07-21 22:17:23.000000000 -0400
62679 +++ linux-3.0.4/kernel/time.c   2011-08-23 21:48:14.000000000 -0400
62680 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
62681                 return error;
62682  
62683         if (tz) {
62684 +               /* we log in do_settimeofday called below, so don't log twice
62685 +               */
62686 +               if (!tv)
62687 +                       gr_log_timechange();
62688 +
62689                 /* SMP safe, global irq locking makes it work. */
62690                 sys_tz = *tz;
62691                 update_vsyscall_tz();
62692 diff -urNp linux-3.0.4/kernel/timer.c linux-3.0.4/kernel/timer.c
62693 --- linux-3.0.4/kernel/timer.c  2011-07-21 22:17:23.000000000 -0400
62694 +++ linux-3.0.4/kernel/timer.c  2011-08-23 21:47:56.000000000 -0400
62695 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
62696  /*
62697   * This function runs timers and the timer-tq in bottom half context.
62698   */
62699 -static void run_timer_softirq(struct softirq_action *h)
62700 +static void run_timer_softirq(void)
62701  {
62702         struct tvec_base *base = __this_cpu_read(tvec_bases);
62703  
62704 diff -urNp linux-3.0.4/kernel/trace/blktrace.c linux-3.0.4/kernel/trace/blktrace.c
62705 --- linux-3.0.4/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
62706 +++ linux-3.0.4/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
62707 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
62708         struct blk_trace *bt = filp->private_data;
62709         char buf[16];
62710  
62711 -       snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
62712 +       snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
62713  
62714         return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
62715  }
62716 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
62717                 return 1;
62718  
62719         bt = buf->chan->private_data;
62720 -       atomic_inc(&bt->dropped);
62721 +       atomic_inc_unchecked(&bt->dropped);
62722         return 0;
62723  }
62724  
62725 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
62726  
62727         bt->dir = dir;
62728         bt->dev = dev;
62729 -       atomic_set(&bt->dropped, 0);
62730 +       atomic_set_unchecked(&bt->dropped, 0);
62731  
62732         ret = -EIO;
62733         bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
62734 diff -urNp linux-3.0.4/kernel/trace/ftrace.c linux-3.0.4/kernel/trace/ftrace.c
62735 --- linux-3.0.4/kernel/trace/ftrace.c   2011-07-21 22:17:23.000000000 -0400
62736 +++ linux-3.0.4/kernel/trace/ftrace.c   2011-08-23 21:47:56.000000000 -0400
62737 @@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod, 
62738         if (unlikely(ftrace_disabled))
62739                 return 0;
62740  
62741 +       ret = ftrace_arch_code_modify_prepare();
62742 +       FTRACE_WARN_ON(ret);
62743 +       if (ret)
62744 +               return 0;
62745 +
62746         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
62747 +       FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
62748         if (ret) {
62749                 ftrace_bug(ret, ip);
62750 -               return 0;
62751         }
62752 -       return 1;
62753 +       return ret ? 0 : 1;
62754  }
62755  
62756  /*
62757 @@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
62758  
62759  int
62760  register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
62761 -                             void *data)
62762 +                               void *data)
62763  {
62764         struct ftrace_func_probe *entry;
62765         struct ftrace_page *pg;
62766 diff -urNp linux-3.0.4/kernel/trace/trace.c linux-3.0.4/kernel/trace/trace.c
62767 --- linux-3.0.4/kernel/trace/trace.c    2011-07-21 22:17:23.000000000 -0400
62768 +++ linux-3.0.4/kernel/trace/trace.c    2011-08-23 21:48:14.000000000 -0400
62769 @@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
62770         size_t rem;
62771         unsigned int i;
62772  
62773 +       pax_track_stack();
62774 +
62775         if (splice_grow_spd(pipe, &spd))
62776                 return -ENOMEM;
62777  
62778 @@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file 
62779         int entries, size, i;
62780         size_t ret;
62781  
62782 +       pax_track_stack();
62783 +
62784         if (splice_grow_spd(pipe, &spd))
62785                 return -ENOMEM;
62786  
62787 @@ -3990,10 +3994,9 @@ static const struct file_operations trac
62788  };
62789  #endif
62790  
62791 -static struct dentry *d_tracer;
62792 -
62793  struct dentry *tracing_init_dentry(void)
62794  {
62795 +       static struct dentry *d_tracer;
62796         static int once;
62797  
62798         if (d_tracer)
62799 @@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
62800         return d_tracer;
62801  }
62802  
62803 -static struct dentry *d_percpu;
62804 -
62805  struct dentry *tracing_dentry_percpu(void)
62806  {
62807 +       static struct dentry *d_percpu;
62808         static int once;
62809         struct dentry *d_tracer;
62810  
62811 diff -urNp linux-3.0.4/kernel/trace/trace_events.c linux-3.0.4/kernel/trace/trace_events.c
62812 --- linux-3.0.4/kernel/trace/trace_events.c     2011-08-23 21:44:40.000000000 -0400
62813 +++ linux-3.0.4/kernel/trace/trace_events.c     2011-08-23 21:47:56.000000000 -0400
62814 @@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
62815  struct ftrace_module_file_ops {
62816         struct list_head                list;
62817         struct module                   *mod;
62818 -       struct file_operations          id;
62819 -       struct file_operations          enable;
62820 -       struct file_operations          format;
62821 -       struct file_operations          filter;
62822  };
62823  
62824  static struct ftrace_module_file_ops *
62825 @@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
62826  
62827         file_ops->mod = mod;
62828  
62829 -       file_ops->id = ftrace_event_id_fops;
62830 -       file_ops->id.owner = mod;
62831 -
62832 -       file_ops->enable = ftrace_enable_fops;
62833 -       file_ops->enable.owner = mod;
62834 -
62835 -       file_ops->filter = ftrace_event_filter_fops;
62836 -       file_ops->filter.owner = mod;
62837 -
62838 -       file_ops->format = ftrace_event_format_fops;
62839 -       file_ops->format.owner = mod;
62840 +       pax_open_kernel();
62841 +       *(void **)&mod->trace_id.owner = mod;
62842 +       *(void **)&mod->trace_enable.owner = mod;
62843 +       *(void **)&mod->trace_filter.owner = mod;
62844 +       *(void **)&mod->trace_format.owner = mod;
62845 +       pax_close_kernel();
62846  
62847         list_add(&file_ops->list, &ftrace_module_file_list);
62848  
62849 @@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
62850  
62851         for_each_event(call, start, end) {
62852                 __trace_add_event_call(*call, mod,
62853 -                                      &file_ops->id, &file_ops->enable,
62854 -                                      &file_ops->filter, &file_ops->format);
62855 +                                      &mod->trace_id, &mod->trace_enable,
62856 +                                      &mod->trace_filter, &mod->trace_format);
62857         }
62858  }
62859  
62860 diff -urNp linux-3.0.4/kernel/trace/trace_mmiotrace.c linux-3.0.4/kernel/trace/trace_mmiotrace.c
62861 --- linux-3.0.4/kernel/trace/trace_mmiotrace.c  2011-07-21 22:17:23.000000000 -0400
62862 +++ linux-3.0.4/kernel/trace/trace_mmiotrace.c  2011-08-23 21:47:56.000000000 -0400
62863 @@ -24,7 +24,7 @@ struct header_iter {
62864  static struct trace_array *mmio_trace_array;
62865  static bool overrun_detected;
62866  static unsigned long prev_overruns;
62867 -static atomic_t dropped_count;
62868 +static atomic_unchecked_t dropped_count;
62869  
62870  static void mmio_reset_data(struct trace_array *tr)
62871  {
62872 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
62873  
62874  static unsigned long count_overruns(struct trace_iterator *iter)
62875  {
62876 -       unsigned long cnt = atomic_xchg(&dropped_count, 0);
62877 +       unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
62878         unsigned long over = ring_buffer_overruns(iter->tr->buffer);
62879  
62880         if (over > prev_overruns)
62881 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct 
62882         event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
62883                                           sizeof(*entry), 0, pc);
62884         if (!event) {
62885 -               atomic_inc(&dropped_count);
62886 +               atomic_inc_unchecked(&dropped_count);
62887                 return;
62888         }
62889         entry   = ring_buffer_event_data(event);
62890 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
62891         event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
62892                                           sizeof(*entry), 0, pc);
62893         if (!event) {
62894 -               atomic_inc(&dropped_count);
62895 +               atomic_inc_unchecked(&dropped_count);
62896                 return;
62897         }
62898         entry   = ring_buffer_event_data(event);
62899 diff -urNp linux-3.0.4/kernel/trace/trace_output.c linux-3.0.4/kernel/trace/trace_output.c
62900 --- linux-3.0.4/kernel/trace/trace_output.c     2011-07-21 22:17:23.000000000 -0400
62901 +++ linux-3.0.4/kernel/trace/trace_output.c     2011-08-23 21:47:56.000000000 -0400
62902 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, 
62903  
62904         p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
62905         if (!IS_ERR(p)) {
62906 -               p = mangle_path(s->buffer + s->len, p, "\n");
62907 +               p = mangle_path(s->buffer + s->len, p, "\n\\");
62908                 if (p) {
62909                         s->len = p - s->buffer;
62910                         return 1;
62911 diff -urNp linux-3.0.4/kernel/trace/trace_stack.c linux-3.0.4/kernel/trace/trace_stack.c
62912 --- linux-3.0.4/kernel/trace/trace_stack.c      2011-07-21 22:17:23.000000000 -0400
62913 +++ linux-3.0.4/kernel/trace/trace_stack.c      2011-08-23 21:47:56.000000000 -0400
62914 @@ -50,7 +50,7 @@ static inline void check_stack(void)
62915                 return;
62916  
62917         /* we do not handle interrupt stacks yet */
62918 -       if (!object_is_on_stack(&this_size))
62919 +       if (!object_starts_on_stack(&this_size))
62920                 return;
62921  
62922         local_irq_save(flags);
62923 diff -urNp linux-3.0.4/kernel/trace/trace_workqueue.c linux-3.0.4/kernel/trace/trace_workqueue.c
62924 --- linux-3.0.4/kernel/trace/trace_workqueue.c  2011-07-21 22:17:23.000000000 -0400
62925 +++ linux-3.0.4/kernel/trace/trace_workqueue.c  2011-08-23 21:47:56.000000000 -0400
62926 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
62927         int                         cpu;
62928         pid_t                       pid;
62929  /* Can be inserted from interrupt or user context, need to be atomic */
62930 -       atomic_t                    inserted;
62931 +       atomic_unchecked_t          inserted;
62932  /*
62933   *  Don't need to be atomic, works are serialized in a single workqueue thread
62934   *  on a single CPU.
62935 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
62936         spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
62937         list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
62938                 if (node->pid == wq_thread->pid) {
62939 -                       atomic_inc(&node->inserted);
62940 +                       atomic_inc_unchecked(&node->inserted);
62941                         goto found;
62942                 }
62943         }
62944 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
62945                 tsk = get_pid_task(pid, PIDTYPE_PID);
62946                 if (tsk) {
62947                         seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
62948 -                                  atomic_read(&cws->inserted), cws->executed,
62949 +                                  atomic_read_unchecked(&cws->inserted), cws->executed,
62950                                    tsk->comm);
62951                         put_task_struct(tsk);
62952                 }
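
Both tracers above use their counters purely for statistics: mmiotrace's dropped_count and the workqueue tracer's inserted count. Under PaX's REFCOUNT hardening, plain atomic_t arithmetic is overflow-checked as if it were a reference count, so counters whose wrap-around is harmless are converted to atomic_unchecked_t and exempted from that check. A minimal userspace sketch of the distinction follows; refcount_checked_t, counter_unchecked_t and the helper names are invented for illustration and are not the kernel API.

/* illustrative userspace sketch, not kernel code */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { atomic_int  v; } refcount_checked_t;   /* like atomic_t under REFCOUNT */
typedef struct { atomic_uint v; } counter_unchecked_t;  /* like atomic_unchecked_t */

static void refcount_inc_checked(refcount_checked_t *r)
{
        int old = atomic_fetch_add(&r->v, 1);
        if (old == INT_MAX) {                  /* wrapped: treat as a refcount bug */
                atomic_fetch_sub(&r->v, 1);    /* saturate instead of wrapping */
                fprintf(stderr, "refcount overflow detected\n");
                abort();
        }
}

static void counter_inc_unchecked(counter_unchecked_t *c)
{
        atomic_fetch_add(&c->v, 1);            /* wrap-around is fine for statistics */
}

int main(void)
{
        counter_unchecked_t dropped = { UINT_MAX };      /* about to wrap: harmless */
        counter_inc_unchecked(&dropped);
        printf("dropped wrapped to %u\n", atomic_load(&dropped.v));

        refcount_checked_t ref = { 1 };
        refcount_inc_checked(&ref);                      /* normal increment, no trap */
        printf("ref = %d\n", atomic_load(&ref.v));
        return 0;
}

The same reasoning explains the atomic_long_unchecked_t conversion of mce_bad_pages in mm/memory-failure.c further down.
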
62953 diff -urNp linux-3.0.4/lib/bug.c linux-3.0.4/lib/bug.c
62954 --- linux-3.0.4/lib/bug.c       2011-07-21 22:17:23.000000000 -0400
62955 +++ linux-3.0.4/lib/bug.c       2011-08-23 21:47:56.000000000 -0400
62956 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
62957                 return BUG_TRAP_TYPE_NONE;
62958  
62959         bug = find_bug(bugaddr);
62960 +       if (!bug)
62961 +               return BUG_TRAP_TYPE_NONE;
62962  
62963         file = NULL;
62964         line = 0;
62965 diff -urNp linux-3.0.4/lib/debugobjects.c linux-3.0.4/lib/debugobjects.c
62966 --- linux-3.0.4/lib/debugobjects.c      2011-07-21 22:17:23.000000000 -0400
62967 +++ linux-3.0.4/lib/debugobjects.c      2011-08-23 21:47:56.000000000 -0400
62968 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
62969         if (limit > 4)
62970                 return;
62971  
62972 -       is_on_stack = object_is_on_stack(addr);
62973 +       is_on_stack = object_starts_on_stack(addr);
62974         if (is_on_stack == onstack)
62975                 return;
62976  
62977 diff -urNp linux-3.0.4/lib/dma-debug.c linux-3.0.4/lib/dma-debug.c
62978 --- linux-3.0.4/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
62979 +++ linux-3.0.4/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
62980 @@ -870,7 +870,7 @@ out:
62981  
62982  static void check_for_stack(struct device *dev, void *addr)
62983  {
62984 -       if (object_is_on_stack(addr))
62985 +       if (object_starts_on_stack(addr))
62986                 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
62987                                 "stack [addr=%p]\n", addr);
62988  }
62989 diff -urNp linux-3.0.4/lib/extable.c linux-3.0.4/lib/extable.c
62990 --- linux-3.0.4/lib/extable.c   2011-07-21 22:17:23.000000000 -0400
62991 +++ linux-3.0.4/lib/extable.c   2011-08-23 21:47:56.000000000 -0400
62992 @@ -13,6 +13,7 @@
62993  #include <linux/init.h>
62994  #include <linux/sort.h>
62995  #include <asm/uaccess.h>
62996 +#include <asm/pgtable.h>
62997  
62998  #ifndef ARCH_HAS_SORT_EXTABLE
62999  /*
63000 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
63001  void sort_extable(struct exception_table_entry *start,
63002                   struct exception_table_entry *finish)
63003  {
63004 +       pax_open_kernel();
63005         sort(start, finish - start, sizeof(struct exception_table_entry),
63006              cmp_ex, NULL);
63007 +       pax_close_kernel();
63008  }
63009  
63010  #ifdef CONFIG_MODULES
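
sort_extable() is now bracketed by pax_open_kernel()/pax_close_kernel(): with KERNEXEC and constification the exception table lives in read-only kernel memory, so it has to be made temporarily writable to be sorted in place. A rough userspace analogy, assuming nothing about how those helpers are really implemented, is an mprotect()ed table that is briefly flipped writable around qsort():

/* illustrative userspace analogy, not the kernel mechanism */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static int cmp_ul(const void *a, const void *b)
{
        unsigned long x = *(const unsigned long *)a;
        unsigned long y = *(const unsigned long *)b;
        return (x > y) - (x < y);
}

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        unsigned long *tbl = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        unsigned long init[] = { 40, 10, 30, 20 };

        if (tbl == MAP_FAILED)
                return 1;
        memcpy(tbl, init, sizeof(init));
        mprotect(tbl, pagesz, PROT_READ);               /* "const" table, like __ex_table */

        mprotect(tbl, pagesz, PROT_READ | PROT_WRITE);  /* analogue of pax_open_kernel() */
        qsort(tbl, 4, sizeof(*tbl), cmp_ul);
        mprotect(tbl, pagesz, PROT_READ);               /* analogue of pax_close_kernel() */

        for (int i = 0; i < 4; i++)
                printf("%lu ", tbl[i]);
        printf("\n");
        munmap(tbl, pagesz);
        return 0;
}

The same open/close pairing appears around the pkmap page-table updates in mm/highmem.c later in the patch.
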
63011 diff -urNp linux-3.0.4/lib/inflate.c linux-3.0.4/lib/inflate.c
63012 --- linux-3.0.4/lib/inflate.c   2011-07-21 22:17:23.000000000 -0400
63013 +++ linux-3.0.4/lib/inflate.c   2011-08-23 21:47:56.000000000 -0400
63014 @@ -269,7 +269,7 @@ static void free(void *where)
63015                 malloc_ptr = free_mem_ptr;
63016  }
63017  #else
63018 -#define malloc(a) kmalloc(a, GFP_KERNEL)
63019 +#define malloc(a) kmalloc((a), GFP_KERNEL)
63020  #define free(a) kfree(a)
63021  #endif
63022  
63023 diff -urNp linux-3.0.4/lib/Kconfig.debug linux-3.0.4/lib/Kconfig.debug
63024 --- linux-3.0.4/lib/Kconfig.debug       2011-07-21 22:17:23.000000000 -0400
63025 +++ linux-3.0.4/lib/Kconfig.debug       2011-08-23 21:48:14.000000000 -0400
63026 @@ -1088,6 +1088,7 @@ config LATENCYTOP
63027         depends on DEBUG_KERNEL
63028         depends on STACKTRACE_SUPPORT
63029         depends on PROC_FS
63030 +       depends on !GRKERNSEC_HIDESYM
63031         select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
63032         select KALLSYMS
63033         select KALLSYMS_ALL
63034 diff -urNp linux-3.0.4/lib/kref.c linux-3.0.4/lib/kref.c
63035 --- linux-3.0.4/lib/kref.c      2011-07-21 22:17:23.000000000 -0400
63036 +++ linux-3.0.4/lib/kref.c      2011-08-23 21:47:56.000000000 -0400
63037 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
63038   */
63039  int kref_put(struct kref *kref, void (*release)(struct kref *kref))
63040  {
63041 -       WARN_ON(release == NULL);
63042 +       BUG_ON(release == NULL);
63043         WARN_ON(release == (void (*)(struct kref *))kfree);
63044  
63045         if (atomic_dec_and_test(&kref->refcount)) {
63046 diff -urNp linux-3.0.4/lib/radix-tree.c linux-3.0.4/lib/radix-tree.c
63047 --- linux-3.0.4/lib/radix-tree.c        2011-07-21 22:17:23.000000000 -0400
63048 +++ linux-3.0.4/lib/radix-tree.c        2011-08-23 21:47:56.000000000 -0400
63049 @@ -80,7 +80,7 @@ struct radix_tree_preload {
63050         int nr;
63051         struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
63052  };
63053 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
63054 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
63055  
63056  static inline void *ptr_to_indirect(void *ptr)
63057  {
63058 diff -urNp linux-3.0.4/lib/vsprintf.c linux-3.0.4/lib/vsprintf.c
63059 --- linux-3.0.4/lib/vsprintf.c  2011-07-21 22:17:23.000000000 -0400
63060 +++ linux-3.0.4/lib/vsprintf.c  2011-08-23 21:48:14.000000000 -0400
63061 @@ -16,6 +16,9 @@
63062   * - scnprintf and vscnprintf
63063   */
63064  
63065 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63066 +#define __INCLUDED_BY_HIDESYM 1
63067 +#endif
63068  #include <stdarg.h>
63069  #include <linux/module.h>
63070  #include <linux/types.h>
63071 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
63072         char sym[KSYM_SYMBOL_LEN];
63073         if (ext == 'B')
63074                 sprint_backtrace(sym, value);
63075 -       else if (ext != 'f' && ext != 's')
63076 +       else if (ext != 'f' && ext != 's' && ext != 'a')
63077                 sprint_symbol(sym, value);
63078         else
63079                 kallsyms_lookup(value, NULL, NULL, NULL, sym);
63080 @@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end, 
63081         return string(buf, end, uuid, spec);
63082  }
63083  
63084 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63085 +int kptr_restrict __read_mostly = 2;
63086 +#else
63087  int kptr_restrict __read_mostly;
63088 +#endif
63089  
63090  /*
63091   * Show a '%p' thing.  A kernel extension is that the '%p' is followed
63092 @@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
63093   * - 'S' For symbolic direct pointers with offset
63094   * - 's' For symbolic direct pointers without offset
63095   * - 'B' For backtraced symbolic direct pointers with offset
63096 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
63097 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
63098   * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
63099   * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
63100   * - 'M' For a 6-byte MAC address, it prints the address in the
63101 @@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
63102  {
63103         if (!ptr && *fmt != 'K') {
63104                 /*
63105 -                * Print (null) with the same width as a pointer so it makes
63106 +                * Print (nil) with the same width as a pointer so it makes
63107                  * tabular output look nice.
63108                  */
63109                 if (spec.field_width == -1)
63110                         spec.field_width = 2 * sizeof(void *);
63111 -               return string(buf, end, "(null)", spec);
63112 +               return string(buf, end, "(nil)", spec);
63113         }
63114  
63115         switch (*fmt) {
63116 @@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
63117                 /* Fallthrough */
63118         case 'S':
63119         case 's':
63120 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63121 +               break;
63122 +#else
63123 +               return symbol_string(buf, end, ptr, spec, *fmt);
63124 +#endif
63125 +       case 'A':
63126 +       case 'a':
63127         case 'B':
63128                 return symbol_string(buf, end, ptr, spec, *fmt);
63129         case 'R':
63130 @@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size, 
63131         typeof(type) value;                                             \
63132         if (sizeof(type) == 8) {                                        \
63133                 args = PTR_ALIGN(args, sizeof(u32));                    \
63134 -               *(u32 *)&value = *(u32 *)args;                          \
63135 -               *((u32 *)&value + 1) = *(u32 *)(args + 4);              \
63136 +               *(u32 *)&value = *(const u32 *)args;                    \
63137 +               *((u32 *)&value + 1) = *(const u32 *)(args + 4);        \
63138         } else {                                                        \
63139                 args = PTR_ALIGN(args, sizeof(type));                   \
63140 -               value = *(typeof(type) *)args;                          \
63141 +               value = *(const typeof(type) *)args;                    \
63142         }                                                               \
63143         args += sizeof(type);                                           \
63144         value;                                                          \
63145 @@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size, 
63146                 case FORMAT_TYPE_STR: {
63147                         const char *str_arg = args;
63148                         args += strlen(str_arg) + 1;
63149 -                       str = string(str, end, (char *)str_arg, spec);
63150 +                       str = string(str, end, str_arg, spec);
63151                         break;
63152                 }
63153  
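
Taken together, the vsprintf.c hunks implement the printing side of GRKERNSEC_HIDESYM: kptr_restrict now defaults to 2, a NULL pointer prints as "(nil)", plain %pS/%ps fall through to printing only the raw pointer value, and the new %pA/%pa specifiers mark call sites that are explicitly approved to keep resolving symbol names (kmemleak is switched to %pA later in this patch). A small standalone model of that dispatch follows; hidesym_enabled and fmt_symbol() are invented names, not the real pointer()/symbol_string() code.

/* illustrative userspace sketch, not the real pointer() / symbol_string() */
#include <stdio.h>
#include <stdbool.h>

static bool hidesym_enabled = true;        /* CONFIG_GRKERNSEC_HIDESYM analogue */

static void fmt_symbol(char ext, const char *sym, void *addr)
{
        bool approved = (ext == 'A' || ext == 'a');

        if (hidesym_enabled && !approved) {
                printf("%p\n", addr);      /* %pS/%ps: bare pointer, no symbol */
                return;
        }
        if (ext == 'S' || ext == 'A')
                printf("%s+0x0/0x10\n", sym);   /* with a made-up offset */
        else
                printf("%s\n", sym);            /* without offset */
}

int main(void)
{
        int dummy;

        fmt_symbol('S', "do_something", &dummy);  /* censored under hidesym */
        fmt_symbol('A', "do_something", &dummy);  /* approved caller, still symbolic */
        return 0;
}
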
63154 diff -urNp linux-3.0.4/localversion-grsec linux-3.0.4/localversion-grsec
63155 --- linux-3.0.4/localversion-grsec      1969-12-31 19:00:00.000000000 -0500
63156 +++ linux-3.0.4/localversion-grsec      2011-08-23 21:48:14.000000000 -0400
63157 @@ -0,0 +1 @@
63158 +-grsec
63159 diff -urNp linux-3.0.4/Makefile linux-3.0.4/Makefile
63160 --- linux-3.0.4/Makefile        2011-08-29 23:26:13.000000000 -0400
63161 +++ linux-3.0.4/Makefile        2011-08-29 23:26:21.000000000 -0400
63162 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63163  
63164  HOSTCC       = gcc
63165  HOSTCXX      = g++
63166 -HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63167 -HOSTCXXFLAGS = -O2
63168 +HOSTCFLAGS   = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63169 +HOSTCFLAGS  += $(call cc-option, -Wno-empty-body)
63170 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63171  
63172  # Decide whether to build built-in, modular, or both.
63173  # Normally, just do built-in.
63174 @@ -365,10 +366,12 @@ LINUXINCLUDE    := -I$(srctree)/arch/$(h
63175  KBUILD_CPPFLAGS := -D__KERNEL__
63176  
63177  KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63178 +                  -W -Wno-unused-parameter -Wno-missing-field-initializers \
63179                    -fno-strict-aliasing -fno-common \
63180                    -Werror-implicit-function-declaration \
63181                    -Wno-format-security \
63182                    -fno-delete-null-pointer-checks
63183 +KBUILD_CFLAGS   += $(call cc-option, -Wno-empty-body)
63184  KBUILD_AFLAGS_KERNEL :=
63185  KBUILD_CFLAGS_KERNEL :=
63186  KBUILD_AFLAGS   := -D__ASSEMBLY__
63187 @@ -564,6 +567,25 @@ else
63188  KBUILD_CFLAGS  += -O2
63189  endif
63190  
63191 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
63192 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
63193 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
63194 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
63195 +endif
63196 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
63197 +gcc-plugins0:
63198 +       $(Q)$(MAKE) $(build)=tools/gcc
63199 +gcc-plugins: scripts_basic gcc-plugins0
63200 +else
63201 +gcc-plugins:
63202 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
63203 +       $(error Your gcc installation does not support plugins.  If the necessary headers for plugin support are missing, they should be installed.  On Debian, apt-get install gcc-<ver>-plugin-dev.)
63204 +else
63205 +       $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
63206 +endif
63207 +       $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
63208 +endif
63209 +
63210  include $(srctree)/arch/$(SRCARCH)/Makefile
63211  
63212  ifneq ($(CONFIG_FRAME_WARN),0)
63213 @@ -708,7 +730,7 @@ export mod_strip_cmd
63214  
63215  
63216  ifeq ($(KBUILD_EXTMOD),)
63217 -core-y         += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63218 +core-y         += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63219  
63220  vmlinux-dirs   := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63221                      $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63222 @@ -907,6 +929,7 @@ define rule_vmlinux-modpost
63223  endef
63224  
63225  # vmlinux image - including updated kernel symbols
63226 +vmlinux: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
63227  vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
63228  ifdef CONFIG_HEADERS_CHECK
63229         $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
63230 @@ -973,7 +996,7 @@ ifneq ($(KBUILD_SRC),)
63231  endif
63232  
63233  # prepare2 creates a makefile if using a separate output directory
63234 -prepare2: prepare3 outputmakefile asm-generic
63235 +prepare2: prepare3 outputmakefile asm-generic gcc-plugins
63236  
63237  prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
63238                     include/config/auto.conf
63239 @@ -1087,6 +1110,7 @@ all: modules
63240  #      using awk while concatenating to the final file.
63241  
63242  PHONY += modules
63243 +modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
63244  modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
63245         $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
63246         @$(kecho) '  Building modules, stage 2.';
63247 @@ -1359,6 +1383,7 @@ PHONY += $(module-dirs) modules
63248  $(module-dirs): crmodverdir $(objtree)/Module.symvers
63249         $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
63250  
63251 +modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
63252  modules: $(module-dirs)
63253         @$(kecho) '  Building modules, stage 2.';
63254         $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
63255 @@ -1404,7 +1429,7 @@ clean: $(clean-dirs)
63256         $(call cmd,rmdirs)
63257         $(call cmd,rmfiles)
63258         @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
63259 -               \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
63260 +               \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
63261                 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
63262                 -o -name '*.symtypes' -o -name 'modules.order' \
63263                 -o -name modules.builtin -o -name '.tmp_*.o.*' \
63264 diff -urNp linux-3.0.4/mm/filemap.c linux-3.0.4/mm/filemap.c
63265 --- linux-3.0.4/mm/filemap.c    2011-07-21 22:17:23.000000000 -0400
63266 +++ linux-3.0.4/mm/filemap.c    2011-08-23 21:48:14.000000000 -0400
63267 @@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
63268         struct address_space *mapping = file->f_mapping;
63269  
63270         if (!mapping->a_ops->readpage)
63271 -               return -ENOEXEC;
63272 +               return -ENODEV;
63273         file_accessed(file);
63274         vma->vm_ops = &generic_file_vm_ops;
63275         vma->vm_flags |= VM_CAN_NONLINEAR;
63276 @@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
63277                          *pos = i_size_read(inode);
63278  
63279                 if (limit != RLIM_INFINITY) {
63280 +                       gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
63281                         if (*pos >= limit) {
63282                                 send_sig(SIGXFSZ, current, 0);
63283                                 return -EFBIG;
63284 diff -urNp linux-3.0.4/mm/fremap.c linux-3.0.4/mm/fremap.c
63285 --- linux-3.0.4/mm/fremap.c     2011-07-21 22:17:23.000000000 -0400
63286 +++ linux-3.0.4/mm/fremap.c     2011-08-23 21:47:56.000000000 -0400
63287 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63288   retry:
63289         vma = find_vma(mm, start);
63290  
63291 +#ifdef CONFIG_PAX_SEGMEXEC
63292 +       if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
63293 +               goto out;
63294 +#endif
63295 +
63296         /*
63297          * Make sure the vma is shared, that it supports prefaulting,
63298          * and that the remapped range is valid and fully within
63299 diff -urNp linux-3.0.4/mm/highmem.c linux-3.0.4/mm/highmem.c
63300 --- linux-3.0.4/mm/highmem.c    2011-07-21 22:17:23.000000000 -0400
63301 +++ linux-3.0.4/mm/highmem.c    2011-08-23 21:47:56.000000000 -0400
63302 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
63303                  * So no dangers, even with speculative execution.
63304                  */
63305                 page = pte_page(pkmap_page_table[i]);
63306 +               pax_open_kernel();
63307                 pte_clear(&init_mm, (unsigned long)page_address(page),
63308                           &pkmap_page_table[i]);
63309 -
63310 +               pax_close_kernel();
63311                 set_page_address(page, NULL);
63312                 need_flush = 1;
63313         }
63314 @@ -186,9 +187,11 @@ start:
63315                 }
63316         }
63317         vaddr = PKMAP_ADDR(last_pkmap_nr);
63318 +
63319 +       pax_open_kernel();
63320         set_pte_at(&init_mm, vaddr,
63321                    &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
63322 -
63323 +       pax_close_kernel();
63324         pkmap_count[last_pkmap_nr] = 1;
63325         set_page_address(page, (void *)vaddr);
63326  
63327 diff -urNp linux-3.0.4/mm/huge_memory.c linux-3.0.4/mm/huge_memory.c
63328 --- linux-3.0.4/mm/huge_memory.c        2011-07-21 22:17:23.000000000 -0400
63329 +++ linux-3.0.4/mm/huge_memory.c        2011-08-23 21:47:56.000000000 -0400
63330 @@ -702,7 +702,7 @@ out:
63331          * run pte_offset_map on the pmd, if an huge pmd could
63332          * materialize from under us from a different thread.
63333          */
63334 -       if (unlikely(__pte_alloc(mm, vma, pmd, address)))
63335 +       if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
63336                 return VM_FAULT_OOM;
63337         /* if an huge pmd materialized from under us just retry later */
63338         if (unlikely(pmd_trans_huge(*pmd)))
63339 diff -urNp linux-3.0.4/mm/hugetlb.c linux-3.0.4/mm/hugetlb.c
63340 --- linux-3.0.4/mm/hugetlb.c    2011-07-21 22:17:23.000000000 -0400
63341 +++ linux-3.0.4/mm/hugetlb.c    2011-08-23 21:47:56.000000000 -0400
63342 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
63343         return 1;
63344  }
63345  
63346 +#ifdef CONFIG_PAX_SEGMEXEC
63347 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
63348 +{
63349 +       struct mm_struct *mm = vma->vm_mm;
63350 +       struct vm_area_struct *vma_m;
63351 +       unsigned long address_m;
63352 +       pte_t *ptep_m;
63353 +
63354 +       vma_m = pax_find_mirror_vma(vma);
63355 +       if (!vma_m)
63356 +               return;
63357 +
63358 +       BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63359 +       address_m = address + SEGMEXEC_TASK_SIZE;
63360 +       ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
63361 +       get_page(page_m);
63362 +       hugepage_add_anon_rmap(page_m, vma_m, address_m);
63363 +       set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
63364 +}
63365 +#endif
63366 +
63367  /*
63368   * Hugetlb_cow() should be called with page lock of the original hugepage held.
63369   */
63370 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
63371                                 make_huge_pte(vma, new_page, 1));
63372                 page_remove_rmap(old_page);
63373                 hugepage_add_new_anon_rmap(new_page, vma, address);
63374 +
63375 +#ifdef CONFIG_PAX_SEGMEXEC
63376 +               pax_mirror_huge_pte(vma, address, new_page);
63377 +#endif
63378 +
63379                 /* Make the old page be freed below */
63380                 new_page = old_page;
63381                 mmu_notifier_invalidate_range_end(mm,
63382 @@ -2591,6 +2617,10 @@ retry:
63383                                 && (vma->vm_flags & VM_SHARED)));
63384         set_huge_pte_at(mm, address, ptep, new_pte);
63385  
63386 +#ifdef CONFIG_PAX_SEGMEXEC
63387 +       pax_mirror_huge_pte(vma, address, page);
63388 +#endif
63389 +
63390         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
63391                 /* Optimization, do the COW without a second fault */
63392                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
63393 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm, 
63394         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
63395         struct hstate *h = hstate_vma(vma);
63396  
63397 +#ifdef CONFIG_PAX_SEGMEXEC
63398 +       struct vm_area_struct *vma_m;
63399 +#endif
63400 +
63401         ptep = huge_pte_offset(mm, address);
63402         if (ptep) {
63403                 entry = huge_ptep_get(ptep);
63404 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm, 
63405                                VM_FAULT_SET_HINDEX(h - hstates);
63406         }
63407  
63408 +#ifdef CONFIG_PAX_SEGMEXEC
63409 +       vma_m = pax_find_mirror_vma(vma);
63410 +       if (vma_m) {
63411 +               unsigned long address_m;
63412 +
63413 +               if (vma->vm_start > vma_m->vm_start) {
63414 +                       address_m = address;
63415 +                       address -= SEGMEXEC_TASK_SIZE;
63416 +                       vma = vma_m;
63417 +                       h = hstate_vma(vma);
63418 +               } else
63419 +                       address_m = address + SEGMEXEC_TASK_SIZE;
63420 +
63421 +               if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
63422 +                       return VM_FAULT_OOM;
63423 +               address_m &= HPAGE_MASK;
63424 +               unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
63425 +       }
63426 +#endif
63427 +
63428         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
63429         if (!ptep)
63430                 return VM_FAULT_OOM;
63431 diff -urNp linux-3.0.4/mm/internal.h linux-3.0.4/mm/internal.h
63432 --- linux-3.0.4/mm/internal.h   2011-07-21 22:17:23.000000000 -0400
63433 +++ linux-3.0.4/mm/internal.h   2011-08-23 21:47:56.000000000 -0400
63434 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
63435   * in mm/page_alloc.c
63436   */
63437  extern void __free_pages_bootmem(struct page *page, unsigned int order);
63438 +extern void free_compound_page(struct page *page);
63439  extern void prep_compound_page(struct page *page, unsigned long order);
63440  #ifdef CONFIG_MEMORY_FAILURE
63441  extern bool is_free_buddy_page(struct page *page);
63442 diff -urNp linux-3.0.4/mm/Kconfig linux-3.0.4/mm/Kconfig
63443 --- linux-3.0.4/mm/Kconfig      2011-07-21 22:17:23.000000000 -0400
63444 +++ linux-3.0.4/mm/Kconfig      2011-08-23 21:48:14.000000000 -0400
63445 @@ -240,7 +240,7 @@ config KSM
63446  config DEFAULT_MMAP_MIN_ADDR
63447          int "Low address space to protect from user allocation"
63448         depends on MMU
63449 -        default 4096
63450 +        default 65536
63451          help
63452           This is the portion of low virtual memory which should be protected
63453           from userspace allocation.  Keeping a user from writing to low pages
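
Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 widens the range of low virtual addresses that userspace is not allowed to map, so a kernel NULL-or-small-offset dereference cannot land in attacker-controlled memory. The effect is directly observable from userspace; the probe below is only a sketch, and its outcome depends on the running kernel's vm.mmap_min_addr sysctl rather than on this Kconfig default alone.

/* illustrative probe; results depend on vm.mmap_min_addr on the running system */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

static void try_fixed_map(unsigned long addr)
{
        void *p = mmap((void *)addr, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

        if (p == MAP_FAILED) {
                printf("mmap at %#lx refused: %s\n", addr, strerror(errno));
        } else {
                printf("mmap at %#lx succeeded\n", addr);
                munmap(p, 4096);
        }
}

int main(void)
{
        try_fixed_map(0x1000);    /* below 64 KiB: refused when mmap_min_addr=65536 */
        try_fixed_map(0x10000);   /* at the 64 KiB boundary: normally allowed */
        return 0;
}
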
63454 diff -urNp linux-3.0.4/mm/kmemleak.c linux-3.0.4/mm/kmemleak.c
63455 --- linux-3.0.4/mm/kmemleak.c   2011-07-21 22:17:23.000000000 -0400
63456 +++ linux-3.0.4/mm/kmemleak.c   2011-08-23 21:48:14.000000000 -0400
63457 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
63458  
63459         for (i = 0; i < object->trace_len; i++) {
63460                 void *ptr = (void *)object->trace[i];
63461 -               seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
63462 +               seq_printf(seq, "    [<%p>] %pA\n", ptr, ptr);
63463         }
63464  }
63465  
63466 diff -urNp linux-3.0.4/mm/madvise.c linux-3.0.4/mm/madvise.c
63467 --- linux-3.0.4/mm/madvise.c    2011-07-21 22:17:23.000000000 -0400
63468 +++ linux-3.0.4/mm/madvise.c    2011-08-23 21:47:56.000000000 -0400
63469 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
63470         pgoff_t pgoff;
63471         unsigned long new_flags = vma->vm_flags;
63472  
63473 +#ifdef CONFIG_PAX_SEGMEXEC
63474 +       struct vm_area_struct *vma_m;
63475 +#endif
63476 +
63477         switch (behavior) {
63478         case MADV_NORMAL:
63479                 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
63480 @@ -110,6 +114,13 @@ success:
63481         /*
63482          * vm_flags is protected by the mmap_sem held in write mode.
63483          */
63484 +
63485 +#ifdef CONFIG_PAX_SEGMEXEC
63486 +       vma_m = pax_find_mirror_vma(vma);
63487 +       if (vma_m)
63488 +               vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
63489 +#endif
63490 +
63491         vma->vm_flags = new_flags;
63492  
63493  out:
63494 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
63495                              struct vm_area_struct ** prev,
63496                              unsigned long start, unsigned long end)
63497  {
63498 +
63499 +#ifdef CONFIG_PAX_SEGMEXEC
63500 +       struct vm_area_struct *vma_m;
63501 +#endif
63502 +
63503         *prev = vma;
63504         if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
63505                 return -EINVAL;
63506 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
63507                 zap_page_range(vma, start, end - start, &details);
63508         } else
63509                 zap_page_range(vma, start, end - start, NULL);
63510 +
63511 +#ifdef CONFIG_PAX_SEGMEXEC
63512 +       vma_m = pax_find_mirror_vma(vma);
63513 +       if (vma_m) {
63514 +               if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
63515 +                       struct zap_details details = {
63516 +                               .nonlinear_vma = vma_m,
63517 +                               .last_index = ULONG_MAX,
63518 +                       };
63519 +                       zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
63520 +               } else
63521 +                       zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
63522 +       }
63523 +#endif
63524 +
63525         return 0;
63526  }
63527  
63528 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, 
63529         if (end < start)
63530                 goto out;
63531  
63532 +#ifdef CONFIG_PAX_SEGMEXEC
63533 +       if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63534 +               if (end > SEGMEXEC_TASK_SIZE)
63535 +                       goto out;
63536 +       } else
63537 +#endif
63538 +
63539 +       if (end > TASK_SIZE)
63540 +               goto out;
63541 +
63542         error = 0;
63543         if (end == start)
63544                 goto out;
63545 diff -urNp linux-3.0.4/mm/memory.c linux-3.0.4/mm/memory.c
63546 --- linux-3.0.4/mm/memory.c     2011-08-23 21:44:40.000000000 -0400
63547 +++ linux-3.0.4/mm/memory.c     2011-08-23 21:47:56.000000000 -0400
63548 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
63549                 return;
63550  
63551         pmd = pmd_offset(pud, start);
63552 +
63553 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
63554         pud_clear(pud);
63555         pmd_free_tlb(tlb, pmd, start);
63556 +#endif
63557 +
63558  }
63559  
63560  static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
63561 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct
63562         if (end - 1 > ceiling - 1)
63563                 return;
63564  
63565 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
63566         pud = pud_offset(pgd, start);
63567         pgd_clear(pgd);
63568         pud_free_tlb(tlb, pud, start);
63569 +#endif
63570 +
63571  }
63572  
63573  /*
63574 @@ -1577,12 +1584,6 @@ no_page_table:
63575         return page;
63576  }
63577  
63578 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
63579 -{
63580 -       return stack_guard_page_start(vma, addr) ||
63581 -              stack_guard_page_end(vma, addr+PAGE_SIZE);
63582 -}
63583 -
63584  /**
63585   * __get_user_pages() - pin user pages in memory
63586   * @tsk:       task_struct of target task
63587 @@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct 
63588                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
63589         i = 0;
63590  
63591 -       do {
63592 +       while (nr_pages) {
63593                 struct vm_area_struct *vma;
63594  
63595 -               vma = find_extend_vma(mm, start);
63596 +               vma = find_vma(mm, start);
63597                 if (!vma && in_gate_area(mm, start)) {
63598                         unsigned long pg = start & PAGE_MASK;
63599                         pgd_t *pgd;
63600 @@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct 
63601                         goto next_page;
63602                 }
63603  
63604 -               if (!vma ||
63605 +               if (!vma || start < vma->vm_start ||
63606                     (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
63607                     !(vm_flags & vma->vm_flags))
63608                         return i ? : -EFAULT;
63609 @@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct 
63610                                 int ret;
63611                                 unsigned int fault_flags = 0;
63612  
63613 -                               /* For mlock, just skip the stack guard page. */
63614 -                               if (foll_flags & FOLL_MLOCK) {
63615 -                                       if (stack_guard_page(vma, start))
63616 -                                               goto next_page;
63617 -                               }
63618                                 if (foll_flags & FOLL_WRITE)
63619                                         fault_flags |= FAULT_FLAG_WRITE;
63620                                 if (nonblocking)
63621 @@ -1811,7 +1807,7 @@ next_page:
63622                         start += PAGE_SIZE;
63623                         nr_pages--;
63624                 } while (nr_pages && start < vma->vm_end);
63625 -       } while (nr_pages);
63626 +       }
63627         return i;
63628  }
63629  EXPORT_SYMBOL(__get_user_pages);
63630 @@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
63631         page_add_file_rmap(page);
63632         set_pte_at(mm, addr, pte, mk_pte(page, prot));
63633  
63634 +#ifdef CONFIG_PAX_SEGMEXEC
63635 +       pax_mirror_file_pte(vma, addr, page, ptl);
63636 +#endif
63637 +
63638         retval = 0;
63639         pte_unmap_unlock(pte, ptl);
63640         return retval;
63641 @@ -2052,10 +2052,22 @@ out:
63642  int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
63643                         struct page *page)
63644  {
63645 +
63646 +#ifdef CONFIG_PAX_SEGMEXEC
63647 +       struct vm_area_struct *vma_m;
63648 +#endif
63649 +
63650         if (addr < vma->vm_start || addr >= vma->vm_end)
63651                 return -EFAULT;
63652         if (!page_count(page))
63653                 return -EINVAL;
63654 +
63655 +#ifdef CONFIG_PAX_SEGMEXEC
63656 +       vma_m = pax_find_mirror_vma(vma);
63657 +       if (vma_m)
63658 +               vma_m->vm_flags |= VM_INSERTPAGE;
63659 +#endif
63660 +
63661         vma->vm_flags |= VM_INSERTPAGE;
63662         return insert_page(vma, addr, page, vma->vm_page_prot);
63663  }
63664 @@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
63665                         unsigned long pfn)
63666  {
63667         BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
63668 +       BUG_ON(vma->vm_mirror);
63669  
63670         if (addr < vma->vm_start || addr >= vma->vm_end)
63671                 return -EFAULT;
63672 @@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct 
63673                 copy_user_highpage(dst, src, va, vma);
63674  }
63675  
63676 +#ifdef CONFIG_PAX_SEGMEXEC
63677 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
63678 +{
63679 +       struct mm_struct *mm = vma->vm_mm;
63680 +       spinlock_t *ptl;
63681 +       pte_t *pte, entry;
63682 +
63683 +       pte = pte_offset_map_lock(mm, pmd, address, &ptl);
63684 +       entry = *pte;
63685 +       if (!pte_present(entry)) {
63686 +               if (!pte_none(entry)) {
63687 +                       BUG_ON(pte_file(entry));
63688 +                       free_swap_and_cache(pte_to_swp_entry(entry));
63689 +                       pte_clear_not_present_full(mm, address, pte, 0);
63690 +               }
63691 +       } else {
63692 +               struct page *page;
63693 +
63694 +               flush_cache_page(vma, address, pte_pfn(entry));
63695 +               entry = ptep_clear_flush(vma, address, pte);
63696 +               BUG_ON(pte_dirty(entry));
63697 +               page = vm_normal_page(vma, address, entry);
63698 +               if (page) {
63699 +                       update_hiwater_rss(mm);
63700 +                       if (PageAnon(page))
63701 +                               dec_mm_counter_fast(mm, MM_ANONPAGES);
63702 +                       else
63703 +                               dec_mm_counter_fast(mm, MM_FILEPAGES);
63704 +                       page_remove_rmap(page);
63705 +                       page_cache_release(page);
63706 +               }
63707 +       }
63708 +       pte_unmap_unlock(pte, ptl);
63709 +}
63710 +
63711 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
63712 + *
63713 + * the ptl of the lower mapped page is held on entry and is not released on exit
63714 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
63715 + */
63716 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63717 +{
63718 +       struct mm_struct *mm = vma->vm_mm;
63719 +       unsigned long address_m;
63720 +       spinlock_t *ptl_m;
63721 +       struct vm_area_struct *vma_m;
63722 +       pmd_t *pmd_m;
63723 +       pte_t *pte_m, entry_m;
63724 +
63725 +       BUG_ON(!page_m || !PageAnon(page_m));
63726 +
63727 +       vma_m = pax_find_mirror_vma(vma);
63728 +       if (!vma_m)
63729 +               return;
63730 +
63731 +       BUG_ON(!PageLocked(page_m));
63732 +       BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63733 +       address_m = address + SEGMEXEC_TASK_SIZE;
63734 +       pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63735 +       pte_m = pte_offset_map(pmd_m, address_m);
63736 +       ptl_m = pte_lockptr(mm, pmd_m);
63737 +       if (ptl != ptl_m) {
63738 +               spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63739 +               if (!pte_none(*pte_m))
63740 +                       goto out;
63741 +       }
63742 +
63743 +       entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63744 +       page_cache_get(page_m);
63745 +       page_add_anon_rmap(page_m, vma_m, address_m);
63746 +       inc_mm_counter_fast(mm, MM_ANONPAGES);
63747 +       set_pte_at(mm, address_m, pte_m, entry_m);
63748 +       update_mmu_cache(vma_m, address_m, entry_m);
63749 +out:
63750 +       if (ptl != ptl_m)
63751 +               spin_unlock(ptl_m);
63752 +       pte_unmap(pte_m);
63753 +       unlock_page(page_m);
63754 +}
63755 +
63756 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63757 +{
63758 +       struct mm_struct *mm = vma->vm_mm;
63759 +       unsigned long address_m;
63760 +       spinlock_t *ptl_m;
63761 +       struct vm_area_struct *vma_m;
63762 +       pmd_t *pmd_m;
63763 +       pte_t *pte_m, entry_m;
63764 +
63765 +       BUG_ON(!page_m || PageAnon(page_m));
63766 +
63767 +       vma_m = pax_find_mirror_vma(vma);
63768 +       if (!vma_m)
63769 +               return;
63770 +
63771 +       BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63772 +       address_m = address + SEGMEXEC_TASK_SIZE;
63773 +       pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63774 +       pte_m = pte_offset_map(pmd_m, address_m);
63775 +       ptl_m = pte_lockptr(mm, pmd_m);
63776 +       if (ptl != ptl_m) {
63777 +               spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63778 +               if (!pte_none(*pte_m))
63779 +                       goto out;
63780 +       }
63781 +
63782 +       entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63783 +       page_cache_get(page_m);
63784 +       page_add_file_rmap(page_m);
63785 +       inc_mm_counter_fast(mm, MM_FILEPAGES);
63786 +       set_pte_at(mm, address_m, pte_m, entry_m);
63787 +       update_mmu_cache(vma_m, address_m, entry_m);
63788 +out:
63789 +       if (ptl != ptl_m)
63790 +               spin_unlock(ptl_m);
63791 +       pte_unmap(pte_m);
63792 +}
63793 +
63794 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
63795 +{
63796 +       struct mm_struct *mm = vma->vm_mm;
63797 +       unsigned long address_m;
63798 +       spinlock_t *ptl_m;
63799 +       struct vm_area_struct *vma_m;
63800 +       pmd_t *pmd_m;
63801 +       pte_t *pte_m, entry_m;
63802 +
63803 +       vma_m = pax_find_mirror_vma(vma);
63804 +       if (!vma_m)
63805 +               return;
63806 +
63807 +       BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63808 +       address_m = address + SEGMEXEC_TASK_SIZE;
63809 +       pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63810 +       pte_m = pte_offset_map(pmd_m, address_m);
63811 +       ptl_m = pte_lockptr(mm, pmd_m);
63812 +       if (ptl != ptl_m) {
63813 +               spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63814 +               if (!pte_none(*pte_m))
63815 +                       goto out;
63816 +       }
63817 +
63818 +       entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
63819 +       set_pte_at(mm, address_m, pte_m, entry_m);
63820 +out:
63821 +       if (ptl != ptl_m)
63822 +               spin_unlock(ptl_m);
63823 +       pte_unmap(pte_m);
63824 +}
63825 +
63826 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
63827 +{
63828 +       struct page *page_m;
63829 +       pte_t entry;
63830 +
63831 +       if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
63832 +               goto out;
63833 +
63834 +       entry = *pte;
63835 +       page_m  = vm_normal_page(vma, address, entry);
63836 +       if (!page_m)
63837 +               pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
63838 +       else if (PageAnon(page_m)) {
63839 +               if (pax_find_mirror_vma(vma)) {
63840 +                       pte_unmap_unlock(pte, ptl);
63841 +                       lock_page(page_m);
63842 +                       pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
63843 +                       if (pte_same(entry, *pte))
63844 +                               pax_mirror_anon_pte(vma, address, page_m, ptl);
63845 +                       else
63846 +                               unlock_page(page_m);
63847 +               }
63848 +       } else
63849 +               pax_mirror_file_pte(vma, address, page_m, ptl);
63850 +
63851 +out:
63852 +       pte_unmap_unlock(pte, ptl);
63853 +}
63854 +#endif
63855 +
63856  /*
63857   * This routine handles present pages, when users try to write
63858   * to a shared page. It is done by copying the page to a new address
63859 @@ -2667,6 +2860,12 @@ gotten:
63860          */
63861         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63862         if (likely(pte_same(*page_table, orig_pte))) {
63863 +
63864 +#ifdef CONFIG_PAX_SEGMEXEC
63865 +               if (pax_find_mirror_vma(vma))
63866 +                       BUG_ON(!trylock_page(new_page));
63867 +#endif
63868 +
63869                 if (old_page) {
63870                         if (!PageAnon(old_page)) {
63871                                 dec_mm_counter_fast(mm, MM_FILEPAGES);
63872 @@ -2718,6 +2917,10 @@ gotten:
63873                         page_remove_rmap(old_page);
63874                 }
63875  
63876 +#ifdef CONFIG_PAX_SEGMEXEC
63877 +               pax_mirror_anon_pte(vma, address, new_page, ptl);
63878 +#endif
63879 +
63880                 /* Free the old page.. */
63881                 new_page = old_page;
63882                 ret |= VM_FAULT_WRITE;
63883 @@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
63884         swap_free(entry);
63885         if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
63886                 try_to_free_swap(page);
63887 +
63888 +#ifdef CONFIG_PAX_SEGMEXEC
63889 +       if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
63890 +#endif
63891 +
63892         unlock_page(page);
63893         if (swapcache) {
63894                 /*
63895 @@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
63896  
63897         /* No need to invalidate - it was non-present before */
63898         update_mmu_cache(vma, address, page_table);
63899 +
63900 +#ifdef CONFIG_PAX_SEGMEXEC
63901 +       pax_mirror_anon_pte(vma, address, page, ptl);
63902 +#endif
63903 +
63904  unlock:
63905         pte_unmap_unlock(page_table, ptl);
63906  out:
63907 @@ -3039,40 +3252,6 @@ out_release:
63908  }
63909  
63910  /*
63911 - * This is like a special single-page "expand_{down|up}wards()",
63912 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
63913 - * doesn't hit another vma.
63914 - */
63915 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
63916 -{
63917 -       address &= PAGE_MASK;
63918 -       if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
63919 -               struct vm_area_struct *prev = vma->vm_prev;
63920 -
63921 -               /*
63922 -                * Is there a mapping abutting this one below?
63923 -                *
63924 -                * That's only ok if it's the same stack mapping
63925 -                * that has gotten split..
63926 -                */
63927 -               if (prev && prev->vm_end == address)
63928 -                       return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
63929 -
63930 -               expand_downwards(vma, address - PAGE_SIZE);
63931 -       }
63932 -       if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
63933 -               struct vm_area_struct *next = vma->vm_next;
63934 -
63935 -               /* As VM_GROWSDOWN but s/below/above/ */
63936 -               if (next && next->vm_start == address + PAGE_SIZE)
63937 -                       return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
63938 -
63939 -               expand_upwards(vma, address + PAGE_SIZE);
63940 -       }
63941 -       return 0;
63942 -}
63943 -
63944 -/*
63945   * We enter with non-exclusive mmap_sem (to exclude vma changes,
63946   * but allow concurrent faults), and pte mapped but not yet locked.
63947   * We return with mmap_sem still held, but pte unmapped and unlocked.
63948 @@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
63949                 unsigned long address, pte_t *page_table, pmd_t *pmd,
63950                 unsigned int flags)
63951  {
63952 -       struct page *page;
63953 +       struct page *page = NULL;
63954         spinlock_t *ptl;
63955         pte_t entry;
63956  
63957 -       pte_unmap(page_table);
63958 -
63959 -       /* Check if we need to add a guard page to the stack */
63960 -       if (check_stack_guard_page(vma, address) < 0)
63961 -               return VM_FAULT_SIGBUS;
63962 -
63963 -       /* Use the zero-page for reads */
63964         if (!(flags & FAULT_FLAG_WRITE)) {
63965                 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
63966                                                 vma->vm_page_prot));
63967 -               page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63968 +               ptl = pte_lockptr(mm, pmd);
63969 +               spin_lock(ptl);
63970                 if (!pte_none(*page_table))
63971                         goto unlock;
63972                 goto setpte;
63973         }
63974  
63975         /* Allocate our own private page. */
63976 +       pte_unmap(page_table);
63977 +
63978         if (unlikely(anon_vma_prepare(vma)))
63979                 goto oom;
63980         page = alloc_zeroed_user_highpage_movable(vma, address);
63981 @@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
63982         if (!pte_none(*page_table))
63983                 goto release;
63984  
63985 +#ifdef CONFIG_PAX_SEGMEXEC
63986 +       if (pax_find_mirror_vma(vma))
63987 +               BUG_ON(!trylock_page(page));
63988 +#endif
63989 +
63990         inc_mm_counter_fast(mm, MM_ANONPAGES);
63991         page_add_new_anon_rmap(page, vma, address);
63992  setpte:
63993 @@ -3127,6 +3307,12 @@ setpte:
63994  
63995         /* No need to invalidate - it was non-present before */
63996         update_mmu_cache(vma, address, page_table);
63997 +
63998 +#ifdef CONFIG_PAX_SEGMEXEC
63999 +       if (page)
64000 +               pax_mirror_anon_pte(vma, address, page, ptl);
64001 +#endif
64002 +
64003  unlock:
64004         pte_unmap_unlock(page_table, ptl);
64005         return 0;
64006 @@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
64007          */
64008         /* Only go through if we didn't race with anybody else... */
64009         if (likely(pte_same(*page_table, orig_pte))) {
64010 +
64011 +#ifdef CONFIG_PAX_SEGMEXEC
64012 +               if (anon && pax_find_mirror_vma(vma))
64013 +                       BUG_ON(!trylock_page(page));
64014 +#endif
64015 +
64016                 flush_icache_page(vma, page);
64017                 entry = mk_pte(page, vma->vm_page_prot);
64018                 if (flags & FAULT_FLAG_WRITE)
64019 @@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
64020  
64021                 /* no need to invalidate: a not-present page won't be cached */
64022                 update_mmu_cache(vma, address, page_table);
64023 +
64024 +#ifdef CONFIG_PAX_SEGMEXEC
64025 +               if (anon)
64026 +                       pax_mirror_anon_pte(vma, address, page, ptl);
64027 +               else
64028 +                       pax_mirror_file_pte(vma, address, page, ptl);
64029 +#endif
64030 +
64031         } else {
64032                 if (charged)
64033                         mem_cgroup_uncharge_page(page);
64034 @@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
64035                 if (flags & FAULT_FLAG_WRITE)
64036                         flush_tlb_fix_spurious_fault(vma, address);
64037         }
64038 +
64039 +#ifdef CONFIG_PAX_SEGMEXEC
64040 +       pax_mirror_pte(vma, address, pte, pmd, ptl);
64041 +       return 0;
64042 +#endif
64043 +
64044  unlock:
64045         pte_unmap_unlock(pte, ptl);
64046         return 0;
64047 @@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
64048         pmd_t *pmd;
64049         pte_t *pte;
64050  
64051 +#ifdef CONFIG_PAX_SEGMEXEC
64052 +       struct vm_area_struct *vma_m;
64053 +#endif
64054 +
64055         __set_current_state(TASK_RUNNING);
64056  
64057         count_vm_event(PGFAULT);
64058 @@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
64059         if (unlikely(is_vm_hugetlb_page(vma)))
64060                 return hugetlb_fault(mm, vma, address, flags);
64061  
64062 +#ifdef CONFIG_PAX_SEGMEXEC
64063 +       vma_m = pax_find_mirror_vma(vma);
64064 +       if (vma_m) {
64065 +               unsigned long address_m;
64066 +               pgd_t *pgd_m;
64067 +               pud_t *pud_m;
64068 +               pmd_t *pmd_m;
64069 +
64070 +               if (vma->vm_start > vma_m->vm_start) {
64071 +                       address_m = address;
64072 +                       address -= SEGMEXEC_TASK_SIZE;
64073 +                       vma = vma_m;
64074 +               } else
64075 +                       address_m = address + SEGMEXEC_TASK_SIZE;
64076 +
64077 +               pgd_m = pgd_offset(mm, address_m);
64078 +               pud_m = pud_alloc(mm, pgd_m, address_m);
64079 +               if (!pud_m)
64080 +                       return VM_FAULT_OOM;
64081 +               pmd_m = pmd_alloc(mm, pud_m, address_m);
64082 +               if (!pmd_m)
64083 +                       return VM_FAULT_OOM;
64084 +               if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
64085 +                       return VM_FAULT_OOM;
64086 +               pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
64087 +       }
64088 +#endif
64089 +
64090         pgd = pgd_offset(mm, address);
64091         pud = pud_alloc(mm, pgd, address);
64092         if (!pud)
64093 @@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
64094          * run pte_offset_map on the pmd, if an huge pmd could
64095          * materialize from under us from a different thread.
64096          */
64097 -       if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
64098 +       if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
64099                 return VM_FAULT_OOM;
64100         /* if an huge pmd materialized from under us just retry later */
64101         if (unlikely(pmd_trans_huge(*pmd)))
64102 @@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
64103         gate_vma.vm_start = FIXADDR_USER_START;
64104         gate_vma.vm_end = FIXADDR_USER_END;
64105         gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
64106 -       gate_vma.vm_page_prot = __P101;
64107 +       gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
64108         /*
64109          * Make sure the vDSO gets into every core dump.
64110          * Dumping its contents makes post-mortem fully interpretable later
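
Most of the mm/memory.c additions implement the SEGMEXEC mirror machinery: the i386 user address space is split in half, every lower-half mapping gets a mirror at +SEGMEXEC_TASK_SIZE, and the fault paths (do_anonymous_page, __do_fault, do_swap_page, handle_pte_fault) keep the mirror PTE in sync through the pax_mirror_*_pte helpers, while handle_mm_fault first normalises a fault in either half into a (lower, upper) address pair. The sketch below models only that address arithmetic, using the usual i386 value of SEGMEXEC_TASK_SIZE as an assumption; it does not touch page tables.

/* illustrative sketch of the address split only, not the fault handling */
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL   /* assumed i386 value: lower 1.5 GB is the data half */

static void normalise(unsigned long fault_addr,
                      unsigned long *addr, unsigned long *addr_m)
{
        if (fault_addr >= SEGMEXEC_TASK_SIZE) {   /* fault came in via the mirror */
                *addr   = fault_addr - SEGMEXEC_TASK_SIZE;
                *addr_m = fault_addr;
        } else {                                  /* fault came in via the data half */
                *addr   = fault_addr;
                *addr_m = fault_addr + SEGMEXEC_TASK_SIZE;
        }
}

int main(void)
{
        unsigned long a, m;

        normalise(0x08048000UL, &a, &m);   /* classic i386 text address */
        printf("data %#lx  mirror %#lx\n", a, m);

        normalise(0x68048000UL, &a, &m);   /* same page, reached through the mirror */
        printf("data %#lx  mirror %#lx\n", a, m);
        return 0;
}

The madvise.c and hugetlb.c hunks earlier apply the same pairing to their own paths, and the range checks against SEGMEXEC_TASK_SIZE keep user-supplied ranges confined to the lower half.
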
64111 diff -urNp linux-3.0.4/mm/memory-failure.c linux-3.0.4/mm/memory-failure.c
64112 --- linux-3.0.4/mm/memory-failure.c     2011-07-21 22:17:23.000000000 -0400
64113 +++ linux-3.0.4/mm/memory-failure.c     2011-08-23 21:47:56.000000000 -0400
64114 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
64115  
64116  int sysctl_memory_failure_recovery __read_mostly = 1;
64117  
64118 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64119 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64120  
64121  #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
64122  
64123 @@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn, 
64124         }
64125  
64126         nr_pages = 1 << compound_trans_order(hpage);
64127 -       atomic_long_add(nr_pages, &mce_bad_pages);
64128 +       atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
64129  
64130         /*
64131          * We need/can do nothing about count=0 pages.
64132 @@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn, 
64133                         if (!PageHWPoison(hpage)
64134                             || (hwpoison_filter(p) && TestClearPageHWPoison(p))
64135                             || (p != hpage && TestSetPageHWPoison(hpage))) {
64136 -                               atomic_long_sub(nr_pages, &mce_bad_pages);
64137 +                               atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64138                                 return 0;
64139                         }
64140                         set_page_hwpoison_huge_page(hpage);
64141 @@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn, 
64142         }
64143         if (hwpoison_filter(p)) {
64144                 if (TestClearPageHWPoison(p))
64145 -                       atomic_long_sub(nr_pages, &mce_bad_pages);
64146 +                       atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64147                 unlock_page(hpage);
64148                 put_page(hpage);
64149                 return 0;
64150 @@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
64151                         return 0;
64152                 }
64153                 if (TestClearPageHWPoison(p))
64154 -                       atomic_long_sub(nr_pages, &mce_bad_pages);
64155 +                       atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64156                 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
64157                 return 0;
64158         }
64159 @@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
64160          */
64161         if (TestClearPageHWPoison(page)) {
64162                 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
64163 -               atomic_long_sub(nr_pages, &mce_bad_pages);
64164 +               atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64165                 freeit = 1;
64166                 if (PageHuge(page))
64167                         clear_page_hwpoison_huge_page(page);
64168 @@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
64169         }
64170  done:
64171         if (!PageHWPoison(hpage))
64172 -               atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
64173 +               atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
64174         set_page_hwpoison_huge_page(hpage);
64175         dequeue_hwpoisoned_huge_page(hpage);
64176         /* keep elevated page count for bad page */
64177 @@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
64178                 return ret;
64179  
64180  done:
64181 -       atomic_long_add(1, &mce_bad_pages);
64182 +       atomic_long_add_unchecked(1, &mce_bad_pages);
64183         SetPageHWPoison(page);
64184         /* keep elevated page count for bad page */
64185         return ret;
64186 diff -urNp linux-3.0.4/mm/mempolicy.c linux-3.0.4/mm/mempolicy.c
64187 --- linux-3.0.4/mm/mempolicy.c  2011-07-21 22:17:23.000000000 -0400
64188 +++ linux-3.0.4/mm/mempolicy.c  2011-08-23 21:48:14.000000000 -0400
64189 @@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct 
64190         unsigned long vmstart;
64191         unsigned long vmend;
64192  
64193 +#ifdef CONFIG_PAX_SEGMEXEC
64194 +       struct vm_area_struct *vma_m;
64195 +#endif
64196 +
64197         vma = find_vma_prev(mm, start, &prev);
64198         if (!vma || vma->vm_start > start)
64199                 return -EFAULT;
64200 @@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct 
64201                 err = policy_vma(vma, new_pol);
64202                 if (err)
64203                         goto out;
64204 +
64205 +#ifdef CONFIG_PAX_SEGMEXEC
64206 +               vma_m = pax_find_mirror_vma(vma);
64207 +               if (vma_m) {
64208 +                       err = policy_vma(vma_m, new_pol);
64209 +                       if (err)
64210 +                               goto out;
64211 +               }
64212 +#endif
64213 +
64214         }
64215  
64216   out:
64217 @@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
64218  
64219         if (end < start)
64220                 return -EINVAL;
64221 +
64222 +#ifdef CONFIG_PAX_SEGMEXEC
64223 +       if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64224 +               if (end > SEGMEXEC_TASK_SIZE)
64225 +                       return -EINVAL;
64226 +       } else
64227 +#endif
64228 +
64229 +       if (end > TASK_SIZE)
64230 +               return -EINVAL;
64231 +
64232         if (end == start)
64233                 return 0;
64234  
64235 @@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64236         if (!mm)
64237                 goto out;
64238  
64239 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64240 +       if (mm != current->mm &&
64241 +           (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64242 +               err = -EPERM;
64243 +               goto out;
64244 +       }
64245 +#endif
64246 +
64247         /*
64248          * Check if this process has the right to modify the specified
64249          * process. The right exists if the process has administrative
64250 @@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64251         rcu_read_lock();
64252         tcred = __task_cred(task);
64253         if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64254 -           cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
64255 -           !capable(CAP_SYS_NICE)) {
64256 +           cred->uid  != tcred->suid && !capable(CAP_SYS_NICE)) {
64257                 rcu_read_unlock();
64258                 err = -EPERM;
64259                 goto out;
64260 diff -urNp linux-3.0.4/mm/migrate.c linux-3.0.4/mm/migrate.c
64261 --- linux-3.0.4/mm/migrate.c    2011-07-21 22:17:23.000000000 -0400
64262 +++ linux-3.0.4/mm/migrate.c    2011-08-23 21:48:14.000000000 -0400
64263 @@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
64264         unsigned long chunk_start;
64265         int err;
64266  
64267 +       pax_track_stack();
64268 +
64269         task_nodes = cpuset_mems_allowed(task);
64270  
64271         err = -ENOMEM;
64272 @@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, 
64273         if (!mm)
64274                 return -EINVAL;
64275  
64276 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64277 +       if (mm != current->mm &&
64278 +           (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64279 +               err = -EPERM;
64280 +               goto out;
64281 +       }
64282 +#endif
64283 +
64284         /*
64285          * Check if this process has the right to modify the specified
64286          * process. The right exists if the process has administrative
64287 @@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, 
64288         rcu_read_lock();
64289         tcred = __task_cred(task);
64290         if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64291 -           cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
64292 -           !capable(CAP_SYS_NICE)) {
64293 +           cred->uid  != tcred->suid && !capable(CAP_SYS_NICE)) {
64294                 rcu_read_unlock();
64295                 err = -EPERM;
64296                 goto out;
64297 diff -urNp linux-3.0.4/mm/mlock.c linux-3.0.4/mm/mlock.c
64298 --- linux-3.0.4/mm/mlock.c      2011-07-21 22:17:23.000000000 -0400
64299 +++ linux-3.0.4/mm/mlock.c      2011-08-23 21:48:14.000000000 -0400
64300 @@ -13,6 +13,7 @@
64301  #include <linux/pagemap.h>
64302  #include <linux/mempolicy.h>
64303  #include <linux/syscalls.h>
64304 +#include <linux/security.h>
64305  #include <linux/sched.h>
64306  #include <linux/module.h>
64307  #include <linux/rmap.h>
64308 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
64309                 return -EINVAL;
64310         if (end == start)
64311                 return 0;
64312 +       if (end > TASK_SIZE)
64313 +               return -EINVAL;
64314 +
64315         vma = find_vma_prev(current->mm, start, &prev);
64316         if (!vma || vma->vm_start > start)
64317                 return -ENOMEM;
64318 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
64319         for (nstart = start ; ; ) {
64320                 vm_flags_t newflags;
64321  
64322 +#ifdef CONFIG_PAX_SEGMEXEC
64323 +               if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64324 +                       break;
64325 +#endif
64326 +
64327                 /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
64328  
64329                 newflags = vma->vm_flags | VM_LOCKED;
64330 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
64331         lock_limit >>= PAGE_SHIFT;
64332  
64333         /* check against resource limits */
64334 +       gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
64335         if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
64336                 error = do_mlock(start, len, 1);
64337         up_write(&current->mm->mmap_sem);
64338 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, 
64339  static int do_mlockall(int flags)
64340  {
64341         struct vm_area_struct * vma, * prev = NULL;
64342 -       unsigned int def_flags = 0;
64343  
64344         if (flags & MCL_FUTURE)
64345 -               def_flags = VM_LOCKED;
64346 -       current->mm->def_flags = def_flags;
64347 +               current->mm->def_flags |= VM_LOCKED;
64348 +       else
64349 +               current->mm->def_flags &= ~VM_LOCKED;
64350         if (flags == MCL_FUTURE)
64351                 goto out;
64352  
64353         for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
64354                 vm_flags_t newflags;
64355  
64356 +#ifdef CONFIG_PAX_SEGMEXEC
64357 +               if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64358 +                       break;
64359 +#endif
64360 +
64361 +               BUG_ON(vma->vm_end > TASK_SIZE);
64362                 newflags = vma->vm_flags | VM_LOCKED;
64363                 if (!(flags & MCL_CURRENT))
64364                         newflags &= ~VM_LOCKED;
64365 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
64366         lock_limit >>= PAGE_SHIFT;
64367  
64368         ret = -ENOMEM;
64369 +       gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
64370         if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
64371             capable(CAP_IPC_LOCK))
64372                 ret = do_mlockall(flags);
64373 diff -urNp linux-3.0.4/mm/mmap.c linux-3.0.4/mm/mmap.c
64374 --- linux-3.0.4/mm/mmap.c       2011-07-21 22:17:23.000000000 -0400
64375 +++ linux-3.0.4/mm/mmap.c       2011-08-23 21:48:14.000000000 -0400
64376 @@ -46,6 +46,16 @@
64377  #define arch_rebalance_pgtables(addr, len)             (addr)
64378  #endif
64379  
64380 +static inline void verify_mm_writelocked(struct mm_struct *mm)
64381 +{
64382 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
64383 +       if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64384 +               up_read(&mm->mmap_sem);
64385 +               BUG();
64386 +       }
64387 +#endif
64388 +}
64389 +
64390  static void unmap_region(struct mm_struct *mm,
64391                 struct vm_area_struct *vma, struct vm_area_struct *prev,
64392                 unsigned long start, unsigned long end);
64393 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
64394   *             x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
64395   *
64396   */
64397 -pgprot_t protection_map[16] = {
64398 +pgprot_t protection_map[16] __read_only = {
64399         __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
64400         __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
64401  };
64402  
64403 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
64404 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
64405  {
64406 -       return __pgprot(pgprot_val(protection_map[vm_flags &
64407 +       pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
64408                                 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
64409                         pgprot_val(arch_vm_get_page_prot(vm_flags)));
64410 +
64411 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64412 +       if (!(__supported_pte_mask & _PAGE_NX) &&
64413 +           (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
64414 +           (vm_flags & (VM_READ | VM_WRITE)))
64415 +               prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
64416 +#endif
64417 +
64418 +       return prot;
64419  }
64420  EXPORT_SYMBOL(vm_get_page_prot);
64421  
64422  int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
64423  int sysctl_overcommit_ratio __read_mostly = 50;        /* default is 50% */
64424  int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
64425 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
64426  /*
64427   * Make sure vm_committed_as in one cacheline and not cacheline shared with
64428   * other variables. It can be updated by several CPUs frequently.
64429 @@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
64430         struct vm_area_struct *next = vma->vm_next;
64431  
64432         might_sleep();
64433 +       BUG_ON(vma->vm_mirror);
64434         if (vma->vm_ops && vma->vm_ops->close)
64435                 vma->vm_ops->close(vma);
64436         if (vma->vm_file) {
64437 @@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
64438          * not page aligned -Ram Gupta
64439          */
64440         rlim = rlimit(RLIMIT_DATA);
64441 +       gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
64442         if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
64443                         (mm->end_data - mm->start_data) > rlim)
64444                 goto out;
64445 @@ -697,6 +719,12 @@ static int
64446  can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
64447         struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64448  {
64449 +
64450 +#ifdef CONFIG_PAX_SEGMEXEC
64451 +       if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
64452 +               return 0;
64453 +#endif
64454 +
64455         if (is_mergeable_vma(vma, file, vm_flags) &&
64456             is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
64457                 if (vma->vm_pgoff == vm_pgoff)
64458 @@ -716,6 +744,12 @@ static int
64459  can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
64460         struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64461  {
64462 +
64463 +#ifdef CONFIG_PAX_SEGMEXEC
64464 +       if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
64465 +               return 0;
64466 +#endif
64467 +
64468         if (is_mergeable_vma(vma, file, vm_flags) &&
64469             is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
64470                 pgoff_t vm_pglen;
64471 @@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
64472  struct vm_area_struct *vma_merge(struct mm_struct *mm,
64473                         struct vm_area_struct *prev, unsigned long addr,
64474                         unsigned long end, unsigned long vm_flags,
64475 -                       struct anon_vma *anon_vma, struct file *file,
64476 +                       struct anon_vma *anon_vma, struct file *file,
64477                         pgoff_t pgoff, struct mempolicy *policy)
64478  {
64479         pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
64480         struct vm_area_struct *area, *next;
64481         int err;
64482  
64483 +#ifdef CONFIG_PAX_SEGMEXEC
64484 +       unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
64485 +       struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
64486 +
64487 +       BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
64488 +#endif
64489 +
64490         /*
64491          * We later require that vma->vm_flags == vm_flags,
64492          * so this tests vma->vm_flags & VM_SPECIAL, too.
64493 @@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct 
64494         if (next && next->vm_end == end)                /* cases 6, 7, 8 */
64495                 next = next->vm_next;
64496  
64497 +#ifdef CONFIG_PAX_SEGMEXEC
64498 +       if (prev)
64499 +               prev_m = pax_find_mirror_vma(prev);
64500 +       if (area)
64501 +               area_m = pax_find_mirror_vma(area);
64502 +       if (next)
64503 +               next_m = pax_find_mirror_vma(next);
64504 +#endif
64505 +
64506         /*
64507          * Can it merge with the predecessor?
64508          */
64509 @@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct 
64510                                                         /* cases 1, 6 */
64511                         err = vma_adjust(prev, prev->vm_start,
64512                                 next->vm_end, prev->vm_pgoff, NULL);
64513 -               } else                                  /* cases 2, 5, 7 */
64514 +
64515 +#ifdef CONFIG_PAX_SEGMEXEC
64516 +                       if (!err && prev_m)
64517 +                               err = vma_adjust(prev_m, prev_m->vm_start,
64518 +                                       next_m->vm_end, prev_m->vm_pgoff, NULL);
64519 +#endif
64520 +
64521 +               } else {                                /* cases 2, 5, 7 */
64522                         err = vma_adjust(prev, prev->vm_start,
64523                                 end, prev->vm_pgoff, NULL);
64524 +
64525 +#ifdef CONFIG_PAX_SEGMEXEC
64526 +                       if (!err && prev_m)
64527 +                               err = vma_adjust(prev_m, prev_m->vm_start,
64528 +                                               end_m, prev_m->vm_pgoff, NULL);
64529 +#endif
64530 +
64531 +               }
64532                 if (err)
64533                         return NULL;
64534                 khugepaged_enter_vma_merge(prev);
64535 @@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct 
64536                         mpol_equal(policy, vma_policy(next)) &&
64537                         can_vma_merge_before(next, vm_flags,
64538                                         anon_vma, file, pgoff+pglen)) {
64539 -               if (prev && addr < prev->vm_end)        /* case 4 */
64540 +               if (prev && addr < prev->vm_end) {      /* case 4 */
64541                         err = vma_adjust(prev, prev->vm_start,
64542                                 addr, prev->vm_pgoff, NULL);
64543 -               else                                    /* cases 3, 8 */
64544 +
64545 +#ifdef CONFIG_PAX_SEGMEXEC
64546 +                       if (!err && prev_m)
64547 +                               err = vma_adjust(prev_m, prev_m->vm_start,
64548 +                                               addr_m, prev_m->vm_pgoff, NULL);
64549 +#endif
64550 +
64551 +               } else {                                /* cases 3, 8 */
64552                         err = vma_adjust(area, addr, next->vm_end,
64553                                 next->vm_pgoff - pglen, NULL);
64554 +
64555 +#ifdef CONFIG_PAX_SEGMEXEC
64556 +                       if (!err && area_m)
64557 +                               err = vma_adjust(area_m, addr_m, next_m->vm_end,
64558 +                                               next_m->vm_pgoff - pglen, NULL);
64559 +#endif
64560 +
64561 +               }
64562                 if (err)
64563                         return NULL;
64564                 khugepaged_enter_vma_merge(area);
64565 @@ -929,14 +1009,11 @@ none:
64566  void vm_stat_account(struct mm_struct *mm, unsigned long flags,
64567                                                 struct file *file, long pages)
64568  {
64569 -       const unsigned long stack_flags
64570 -               = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
64571 -
64572         if (file) {
64573                 mm->shared_vm += pages;
64574                 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
64575                         mm->exec_vm += pages;
64576 -       } else if (flags & stack_flags)
64577 +       } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
64578                 mm->stack_vm += pages;
64579         if (flags & (VM_RESERVED|VM_IO))
64580                 mm->reserved_vm += pages;
64581 @@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file 
64582          * (the exception is when the underlying filesystem is noexec
64583          *  mounted, in which case we dont add PROT_EXEC.)
64584          */
64585 -       if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
64586 +       if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
64587                 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
64588                         prot |= PROT_EXEC;
64589  
64590 @@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file 
64591         /* Obtain the address to map to. we verify (or select) it and ensure
64592          * that it represents a valid section of the address space.
64593          */
64594 -       addr = get_unmapped_area(file, addr, len, pgoff, flags);
64595 +       addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
64596         if (addr & ~PAGE_MASK)
64597                 return addr;
64598  
64599 @@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file 
64600         vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
64601                         mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
64602  
64603 +#ifdef CONFIG_PAX_MPROTECT
64604 +       if (mm->pax_flags & MF_PAX_MPROTECT) {
64605 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
64606 +               if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
64607 +                       gr_log_rwxmmap(file);
64608 +
64609 +#ifdef CONFIG_PAX_EMUPLT
64610 +                       vm_flags &= ~VM_EXEC;
64611 +#else
64612 +                       return -EPERM;
64613 +#endif
64614 +
64615 +               }
64616 +
64617 +               if (!(vm_flags & VM_EXEC))
64618 +                       vm_flags &= ~VM_MAYEXEC;
64619 +#else
64620 +               if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
64621 +                       vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
64622 +#endif
64623 +               else
64624 +                       vm_flags &= ~VM_MAYWRITE;
64625 +       }
64626 +#endif
64627 +
64628 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64629 +       if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
64630 +               vm_flags &= ~VM_PAGEEXEC;
64631 +#endif
64632 +
64633         if (flags & MAP_LOCKED)
64634                 if (!can_do_mlock())
64635                         return -EPERM;
64636 @@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file 
64637                 locked += mm->locked_vm;
64638                 lock_limit = rlimit(RLIMIT_MEMLOCK);
64639                 lock_limit >>= PAGE_SHIFT;
64640 +               gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64641                 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
64642                         return -EAGAIN;
64643         }
64644 @@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file 
64645         if (error)
64646                 return error;
64647  
64648 +       if (!gr_acl_handle_mmap(file, prot))
64649 +               return -EACCES;
64650 +
64651         return mmap_region(file, addr, len, flags, vm_flags, pgoff);
64652  }
64653  EXPORT_SYMBOL(do_mmap_pgoff);
64654 @@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
64655         vm_flags_t vm_flags = vma->vm_flags;
64656  
64657         /* If it was private or non-writable, the write bit is already clear */
64658 -       if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
64659 +       if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
64660                 return 0;
64661  
64662         /* The backer wishes to know when pages are first written to? */
64663 @@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
64664         unsigned long charged = 0;
64665         struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
64666  
64667 +#ifdef CONFIG_PAX_SEGMEXEC
64668 +       struct vm_area_struct *vma_m = NULL;
64669 +#endif
64670 +
64671 +       /*
64672 +        * mm->mmap_sem is required to protect against another thread
64673 +        * changing the mappings in case we sleep.
64674 +        */
64675 +       verify_mm_writelocked(mm);
64676 +
64677         /* Clear old maps */
64678         error = -ENOMEM;
64679 -munmap_back:
64680         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64681         if (vma && vma->vm_start < addr + len) {
64682                 if (do_munmap(mm, addr, len))
64683                         return -ENOMEM;
64684 -               goto munmap_back;
64685 +               vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64686 +               BUG_ON(vma && vma->vm_start < addr + len);
64687         }
64688  
64689         /* Check against address space limit. */
64690 @@ -1266,6 +1387,16 @@ munmap_back:
64691                 goto unacct_error;
64692         }
64693  
64694 +#ifdef CONFIG_PAX_SEGMEXEC
64695 +       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
64696 +               vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
64697 +               if (!vma_m) {
64698 +                       error = -ENOMEM;
64699 +                       goto free_vma;
64700 +               }
64701 +       }
64702 +#endif
64703 +
64704         vma->vm_mm = mm;
64705         vma->vm_start = addr;
64706         vma->vm_end = addr + len;
64707 @@ -1289,6 +1420,19 @@ munmap_back:
64708                 error = file->f_op->mmap(file, vma);
64709                 if (error)
64710                         goto unmap_and_free_vma;
64711 +
64712 +#ifdef CONFIG_PAX_SEGMEXEC
64713 +               if (vma_m && (vm_flags & VM_EXECUTABLE))
64714 +                       added_exe_file_vma(mm);
64715 +#endif
64716 +
64717 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64718 +               if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
64719 +                       vma->vm_flags |= VM_PAGEEXEC;
64720 +                       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
64721 +               }
64722 +#endif
64723 +
64724                 if (vm_flags & VM_EXECUTABLE)
64725                         added_exe_file_vma(mm);
64726  
64727 @@ -1324,6 +1468,11 @@ munmap_back:
64728         vma_link(mm, vma, prev, rb_link, rb_parent);
64729         file = vma->vm_file;
64730  
64731 +#ifdef CONFIG_PAX_SEGMEXEC
64732 +       if (vma_m)
64733 +               BUG_ON(pax_mirror_vma(vma_m, vma));
64734 +#endif
64735 +
64736         /* Once vma denies write, undo our temporary denial count */
64737         if (correct_wcount)
64738                 atomic_inc(&inode->i_writecount);
64739 @@ -1332,6 +1481,7 @@ out:
64740  
64741         mm->total_vm += len >> PAGE_SHIFT;
64742         vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
64743 +       track_exec_limit(mm, addr, addr + len, vm_flags);
64744         if (vm_flags & VM_LOCKED) {
64745                 if (!mlock_vma_pages_range(vma, addr, addr + len))
64746                         mm->locked_vm += (len >> PAGE_SHIFT);
64747 @@ -1349,6 +1499,12 @@ unmap_and_free_vma:
64748         unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
64749         charged = 0;
64750  free_vma:
64751 +
64752 +#ifdef CONFIG_PAX_SEGMEXEC
64753 +       if (vma_m)
64754 +               kmem_cache_free(vm_area_cachep, vma_m);
64755 +#endif
64756 +
64757         kmem_cache_free(vm_area_cachep, vma);
64758  unacct_error:
64759         if (charged)
64760 @@ -1356,6 +1512,44 @@ unacct_error:
64761         return error;
64762  }
64763  
64764 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
64765 +{
64766 +       if (!vma) {
64767 +#ifdef CONFIG_STACK_GROWSUP
64768 +               if (addr > sysctl_heap_stack_gap)
64769 +                       vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
64770 +               else
64771 +                       vma = find_vma(current->mm, 0);
64772 +               if (vma && (vma->vm_flags & VM_GROWSUP))
64773 +                       return false;
64774 +#endif
64775 +               return true;
64776 +       }
64777 +
64778 +       if (addr + len > vma->vm_start)
64779 +               return false;
64780 +
64781 +       if (vma->vm_flags & VM_GROWSDOWN)
64782 +               return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
64783 +#ifdef CONFIG_STACK_GROWSUP
64784 +       else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
64785 +               return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
64786 +#endif
64787 +
64788 +       return true;
64789 +}
64790 +
64791 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
64792 +{
64793 +       if (vma->vm_start < len)
64794 +               return -ENOMEM;
64795 +       if (!(vma->vm_flags & VM_GROWSDOWN))
64796 +               return vma->vm_start - len;
64797 +       if (sysctl_heap_stack_gap <= vma->vm_start - len)
64798 +               return vma->vm_start - len - sysctl_heap_stack_gap;
64799 +       return -ENOMEM;
64800 +}
64801 +
64802  /* Get an address range which is currently unmapped.
64803   * For shmat() with addr=0.
64804   *
64805 @@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
64806         if (flags & MAP_FIXED)
64807                 return addr;
64808  
64809 +#ifdef CONFIG_PAX_RANDMMAP
64810 +       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64811 +#endif
64812 +
64813         if (addr) {
64814                 addr = PAGE_ALIGN(addr);
64815 -               vma = find_vma(mm, addr);
64816 -               if (TASK_SIZE - len >= addr &&
64817 -                   (!vma || addr + len <= vma->vm_start))
64818 -                       return addr;
64819 +               if (TASK_SIZE - len >= addr) {
64820 +                       vma = find_vma(mm, addr);
64821 +                       if (check_heap_stack_gap(vma, addr, len))
64822 +                               return addr;
64823 +               }
64824         }
64825         if (len > mm->cached_hole_size) {
64826 -               start_addr = addr = mm->free_area_cache;
64827 +               start_addr = addr = mm->free_area_cache;
64828         } else {
64829 -               start_addr = addr = TASK_UNMAPPED_BASE;
64830 -               mm->cached_hole_size = 0;
64831 +               start_addr = addr = mm->mmap_base;
64832 +               mm->cached_hole_size = 0;
64833         }
64834  
64835  full_search:
64836 @@ -1404,34 +1603,40 @@ full_search:
64837                          * Start a new search - just in case we missed
64838                          * some holes.
64839                          */
64840 -                       if (start_addr != TASK_UNMAPPED_BASE) {
64841 -                               addr = TASK_UNMAPPED_BASE;
64842 -                               start_addr = addr;
64843 +                       if (start_addr != mm->mmap_base) {
64844 +                               start_addr = addr = mm->mmap_base;
64845                                 mm->cached_hole_size = 0;
64846                                 goto full_search;
64847                         }
64848                         return -ENOMEM;
64849                 }
64850 -               if (!vma || addr + len <= vma->vm_start) {
64851 -                       /*
64852 -                        * Remember the place where we stopped the search:
64853 -                        */
64854 -                       mm->free_area_cache = addr + len;
64855 -                       return addr;
64856 -               }
64857 +               if (check_heap_stack_gap(vma, addr, len))
64858 +                       break;
64859                 if (addr + mm->cached_hole_size < vma->vm_start)
64860                         mm->cached_hole_size = vma->vm_start - addr;
64861                 addr = vma->vm_end;
64862         }
64863 +
64864 +       /*
64865 +        * Remember the place where we stopped the search:
64866 +        */
64867 +       mm->free_area_cache = addr + len;
64868 +       return addr;
64869  }
64870  #endif 
64871  
64872  void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
64873  {
64874 +
64875 +#ifdef CONFIG_PAX_SEGMEXEC
64876 +       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64877 +               return;
64878 +#endif
64879 +
64880         /*
64881          * Is this a new hole at the lowest possible address?
64882          */
64883 -       if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
64884 +       if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
64885                 mm->free_area_cache = addr;
64886                 mm->cached_hole_size = ~0UL;
64887         }
64888 @@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
64889  {
64890         struct vm_area_struct *vma;
64891         struct mm_struct *mm = current->mm;
64892 -       unsigned long addr = addr0;
64893 +       unsigned long base = mm->mmap_base, addr = addr0;
64894  
64895         /* requested length too big for entire address space */
64896         if (len > TASK_SIZE)
64897 @@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
64898         if (flags & MAP_FIXED)
64899                 return addr;
64900  
64901 +#ifdef CONFIG_PAX_RANDMMAP
64902 +       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64903 +#endif
64904 +
64905         /* requesting a specific address */
64906         if (addr) {
64907                 addr = PAGE_ALIGN(addr);
64908 -               vma = find_vma(mm, addr);
64909 -               if (TASK_SIZE - len >= addr &&
64910 -                               (!vma || addr + len <= vma->vm_start))
64911 -                       return addr;
64912 +               if (TASK_SIZE - len >= addr) {
64913 +                       vma = find_vma(mm, addr);
64914 +                       if (check_heap_stack_gap(vma, addr, len))
64915 +                               return addr;
64916 +               }
64917         }
64918  
64919         /* check if free_area_cache is useful for us */
64920 @@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
64921         /* make sure it can fit in the remaining address space */
64922         if (addr > len) {
64923                 vma = find_vma(mm, addr-len);
64924 -               if (!vma || addr <= vma->vm_start)
64925 +               if (check_heap_stack_gap(vma, addr - len, len))
64926                         /* remember the address as a hint for next time */
64927                         return (mm->free_area_cache = addr-len);
64928         }
64929 @@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
64930                  * return with success:
64931                  */
64932                 vma = find_vma(mm, addr);
64933 -               if (!vma || addr+len <= vma->vm_start)
64934 +               if (check_heap_stack_gap(vma, addr, len))
64935                         /* remember the address as a hint for next time */
64936                         return (mm->free_area_cache = addr);
64937  
64938 @@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
64939                         mm->cached_hole_size = vma->vm_start - addr;
64940  
64941                 /* try just below the current vma->vm_start */
64942 -               addr = vma->vm_start-len;
64943 -       } while (len < vma->vm_start);
64944 +               addr = skip_heap_stack_gap(vma, len);
64945 +       } while (!IS_ERR_VALUE(addr));
64946  
64947  bottomup:
64948         /*
64949 @@ -1515,13 +1725,21 @@ bottomup:
64950          * can happen with large stack limits and large mmap()
64951          * allocations.
64952          */
64953 +       mm->mmap_base = TASK_UNMAPPED_BASE;
64954 +
64955 +#ifdef CONFIG_PAX_RANDMMAP
64956 +       if (mm->pax_flags & MF_PAX_RANDMMAP)
64957 +               mm->mmap_base += mm->delta_mmap;
64958 +#endif
64959 +
64960 +       mm->free_area_cache = mm->mmap_base;
64961         mm->cached_hole_size = ~0UL;
64962 -       mm->free_area_cache = TASK_UNMAPPED_BASE;
64963         addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
64964         /*
64965          * Restore the topdown base:
64966          */
64967 -       mm->free_area_cache = mm->mmap_base;
64968 +       mm->mmap_base = base;
64969 +       mm->free_area_cache = base;
64970         mm->cached_hole_size = ~0UL;
64971  
64972         return addr;
64973 @@ -1530,6 +1748,12 @@ bottomup:
64974  
64975  void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
64976  {
64977 +
64978 +#ifdef CONFIG_PAX_SEGMEXEC
64979 +       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64980 +               return;
64981 +#endif
64982 +
64983         /*
64984          * Is this a new hole at the highest possible address?
64985          */
64986 @@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
64987                 mm->free_area_cache = addr;
64988  
64989         /* dont allow allocations above current base */
64990 -       if (mm->free_area_cache > mm->mmap_base)
64991 +       if (mm->free_area_cache > mm->mmap_base) {
64992                 mm->free_area_cache = mm->mmap_base;
64993 +               mm->cached_hole_size = ~0UL;
64994 +       }
64995  }
64996  
64997  unsigned long
64998 @@ -1646,6 +1872,28 @@ out:
64999         return prev ? prev->vm_next : vma;
65000  }
65001  
65002 +#ifdef CONFIG_PAX_SEGMEXEC
65003 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
65004 +{
65005 +       struct vm_area_struct *vma_m;
65006 +
65007 +       BUG_ON(!vma || vma->vm_start >= vma->vm_end);
65008 +       if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
65009 +               BUG_ON(vma->vm_mirror);
65010 +               return NULL;
65011 +       }
65012 +       BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
65013 +       vma_m = vma->vm_mirror;
65014 +       BUG_ON(!vma_m || vma_m->vm_mirror != vma);
65015 +       BUG_ON(vma->vm_file != vma_m->vm_file);
65016 +       BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
65017 +       BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
65018 +       BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
65019 +       BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
65020 +       return vma_m;
65021 +}
65022 +#endif
65023 +
65024  /*
65025   * Verify that the stack growth is acceptable and
65026   * update accounting. This is shared with both the
65027 @@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
65028                 return -ENOMEM;
65029  
65030         /* Stack limit test */
65031 +       gr_learn_resource(current, RLIMIT_STACK, size, 1);
65032         if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
65033                 return -ENOMEM;
65034  
65035 @@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
65036                 locked = mm->locked_vm + grow;
65037                 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
65038                 limit >>= PAGE_SHIFT;
65039 +               gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65040                 if (locked > limit && !capable(CAP_IPC_LOCK))
65041                         return -ENOMEM;
65042         }
65043 @@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
65044   * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
65045   * vma is the last one with address > vma->vm_end.  Have to extend vma.
65046   */
65047 +#ifndef CONFIG_IA64
65048 +static
65049 +#endif
65050  int expand_upwards(struct vm_area_struct *vma, unsigned long address)
65051  {
65052         int error;
65053 +       bool locknext;
65054  
65055         if (!(vma->vm_flags & VM_GROWSUP))
65056                 return -EFAULT;
65057  
65058 +       /* Also guard against wrapping around to address 0. */
65059 +       if (address < PAGE_ALIGN(address+1))
65060 +               address = PAGE_ALIGN(address+1);
65061 +       else
65062 +               return -ENOMEM;
65063 +
65064         /*
65065          * We must make sure the anon_vma is allocated
65066          * so that the anon_vma locking is not a noop.
65067          */
65068         if (unlikely(anon_vma_prepare(vma)))
65069                 return -ENOMEM;
65070 +       locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
65071 +       if (locknext && anon_vma_prepare(vma->vm_next))
65072 +               return -ENOMEM;
65073         vma_lock_anon_vma(vma);
65074 +       if (locknext)
65075 +               vma_lock_anon_vma(vma->vm_next);
65076  
65077         /*
65078          * vma->vm_start/vm_end cannot change under us because the caller
65079          * is required to hold the mmap_sem in read mode.  We need the
65080 -        * anon_vma lock to serialize against concurrent expand_stacks.
65081 -        * Also guard against wrapping around to address 0.
65082 +        * anon_vma locks to serialize against concurrent expand_stacks
65083 +        * and expand_upwards.
65084          */
65085 -       if (address < PAGE_ALIGN(address+4))
65086 -               address = PAGE_ALIGN(address+4);
65087 -       else {
65088 -               vma_unlock_anon_vma(vma);
65089 -               return -ENOMEM;
65090 -       }
65091         error = 0;
65092  
65093         /* Somebody else might have raced and expanded it already */
65094 -       if (address > vma->vm_end) {
65095 +       if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
65096 +               error = -ENOMEM;
65097 +       else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
65098                 unsigned long size, grow;
65099  
65100                 size = address - vma->vm_start;
65101 @@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
65102                         }
65103                 }
65104         }
65105 +       if (locknext)
65106 +               vma_unlock_anon_vma(vma->vm_next);
65107         vma_unlock_anon_vma(vma);
65108         khugepaged_enter_vma_merge(vma);
65109         return error;
65110 @@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
65111                                    unsigned long address)
65112  {
65113         int error;
65114 +       bool lockprev = false;
65115 +       struct vm_area_struct *prev;
65116  
65117         /*
65118          * We must make sure the anon_vma is allocated
65119 @@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
65120         if (error)
65121                 return error;
65122  
65123 +       prev = vma->vm_prev;
65124 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65125 +       lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65126 +#endif
65127 +       if (lockprev && anon_vma_prepare(prev))
65128 +               return -ENOMEM;
65129 +       if (lockprev)
65130 +               vma_lock_anon_vma(prev);
65131 +
65132         vma_lock_anon_vma(vma);
65133  
65134         /*
65135 @@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
65136          */
65137  
65138         /* Somebody else might have raced and expanded it already */
65139 -       if (address < vma->vm_start) {
65140 +       if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65141 +               error = -ENOMEM;
65142 +       else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65143                 unsigned long size, grow;
65144  
65145 +#ifdef CONFIG_PAX_SEGMEXEC
65146 +               struct vm_area_struct *vma_m;
65147 +
65148 +               vma_m = pax_find_mirror_vma(vma);
65149 +#endif
65150 +
65151                 size = vma->vm_end - address;
65152                 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65153  
65154 @@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
65155                         if (!error) {
65156                                 vma->vm_start = address;
65157                                 vma->vm_pgoff -= grow;
65158 +                               track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65159 +
65160 +#ifdef CONFIG_PAX_SEGMEXEC
65161 +                               if (vma_m) {
65162 +                                       vma_m->vm_start -= grow << PAGE_SHIFT;
65163 +                                       vma_m->vm_pgoff -= grow;
65164 +                               }
65165 +#endif
65166 +
65167                                 perf_event_mmap(vma);
65168                         }
65169                 }
65170         }
65171         vma_unlock_anon_vma(vma);
65172 +       if (lockprev)
65173 +               vma_unlock_anon_vma(prev);
65174         khugepaged_enter_vma_merge(vma);
65175         return error;
65176  }
65177 @@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
65178         do {
65179                 long nrpages = vma_pages(vma);
65180  
65181 +#ifdef CONFIG_PAX_SEGMEXEC
65182 +               if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65183 +                       vma = remove_vma(vma);
65184 +                       continue;
65185 +               }
65186 +#endif
65187 +
65188                 mm->total_vm -= nrpages;
65189                 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65190                 vma = remove_vma(vma);
65191 @@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65192         insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65193         vma->vm_prev = NULL;
65194         do {
65195 +
65196 +#ifdef CONFIG_PAX_SEGMEXEC
65197 +               if (vma->vm_mirror) {
65198 +                       BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65199 +                       vma->vm_mirror->vm_mirror = NULL;
65200 +                       vma->vm_mirror->vm_flags &= ~VM_EXEC;
65201 +                       vma->vm_mirror = NULL;
65202 +               }
65203 +#endif
65204 +
65205                 rb_erase(&vma->vm_rb, &mm->mm_rb);
65206                 mm->map_count--;
65207                 tail_vma = vma;
65208 @@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct 
65209         struct vm_area_struct *new;
65210         int err = -ENOMEM;
65211  
65212 +#ifdef CONFIG_PAX_SEGMEXEC
65213 +       struct vm_area_struct *vma_m, *new_m = NULL;
65214 +       unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65215 +#endif
65216 +
65217         if (is_vm_hugetlb_page(vma) && (addr &
65218                                         ~(huge_page_mask(hstate_vma(vma)))))
65219                 return -EINVAL;
65220  
65221 +#ifdef CONFIG_PAX_SEGMEXEC
65222 +       vma_m = pax_find_mirror_vma(vma);
65223 +#endif
65224 +
65225         new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65226         if (!new)
65227                 goto out_err;
65228  
65229 +#ifdef CONFIG_PAX_SEGMEXEC
65230 +       if (vma_m) {
65231 +               new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65232 +               if (!new_m) {
65233 +                       kmem_cache_free(vm_area_cachep, new);
65234 +                       goto out_err;
65235 +               }
65236 +       }
65237 +#endif
65238 +
65239         /* most fields are the same, copy all, and then fixup */
65240         *new = *vma;
65241  
65242 @@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct 
65243                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65244         }
65245  
65246 +#ifdef CONFIG_PAX_SEGMEXEC
65247 +       if (vma_m) {
65248 +               *new_m = *vma_m;
65249 +               INIT_LIST_HEAD(&new_m->anon_vma_chain);
65250 +               new_m->vm_mirror = new;
65251 +               new->vm_mirror = new_m;
65252 +
65253 +               if (new_below)
65254 +                       new_m->vm_end = addr_m;
65255 +               else {
65256 +                       new_m->vm_start = addr_m;
65257 +                       new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65258 +               }
65259 +       }
65260 +#endif
65261 +
65262         pol = mpol_dup(vma_policy(vma));
65263         if (IS_ERR(pol)) {
65264                 err = PTR_ERR(pol);
65265 @@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct 
65266         else
65267                 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65268  
65269 +#ifdef CONFIG_PAX_SEGMEXEC
65270 +       if (!err && vma_m) {
65271 +               if (anon_vma_clone(new_m, vma_m))
65272 +                       goto out_free_mpol;
65273 +
65274 +               mpol_get(pol);
65275 +               vma_set_policy(new_m, pol);
65276 +
65277 +               if (new_m->vm_file) {
65278 +                       get_file(new_m->vm_file);
65279 +                       if (vma_m->vm_flags & VM_EXECUTABLE)
65280 +                               added_exe_file_vma(mm);
65281 +               }
65282 +
65283 +               if (new_m->vm_ops && new_m->vm_ops->open)
65284 +                       new_m->vm_ops->open(new_m);
65285 +
65286 +               if (new_below)
65287 +                       err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65288 +                               ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65289 +               else
65290 +                       err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
65291 +
65292 +               if (err) {
65293 +                       if (new_m->vm_ops && new_m->vm_ops->close)
65294 +                               new_m->vm_ops->close(new_m);
65295 +                       if (new_m->vm_file) {
65296 +                               if (vma_m->vm_flags & VM_EXECUTABLE)
65297 +                                       removed_exe_file_vma(mm);
65298 +                               fput(new_m->vm_file);
65299 +                       }
65300 +                       mpol_put(pol);
65301 +               }
65302 +       }
65303 +#endif
65304 +
65305         /* Success. */
65306         if (!err)
65307                 return 0;
65308 @@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct 
65309                         removed_exe_file_vma(mm);
65310                 fput(new->vm_file);
65311         }
65312 -       unlink_anon_vmas(new);
65313   out_free_mpol:
65314         mpol_put(pol);
65315   out_free_vma:
65316 +
65317 +#ifdef CONFIG_PAX_SEGMEXEC
65318 +       if (new_m) {
65319 +               unlink_anon_vmas(new_m);
65320 +               kmem_cache_free(vm_area_cachep, new_m);
65321 +       }
65322 +#endif
65323 +
65324 +       unlink_anon_vmas(new);
65325         kmem_cache_free(vm_area_cachep, new);
65326   out_err:
65327         return err;
65328 @@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct 
65329  int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
65330               unsigned long addr, int new_below)
65331  {
65332 +
65333 +#ifdef CONFIG_PAX_SEGMEXEC
65334 +       if (mm->pax_flags & MF_PAX_SEGMEXEC) {
65335 +               BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
65336 +               if (mm->map_count >= sysctl_max_map_count-1)
65337 +                       return -ENOMEM;
65338 +       } else
65339 +#endif
65340 +
65341         if (mm->map_count >= sysctl_max_map_count)
65342                 return -ENOMEM;
65343  
65344 @@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
65345   * work.  This now handles partial unmappings.
65346   * Jeremy Fitzhardinge <jeremy@goop.org>
65347   */
65348 +#ifdef CONFIG_PAX_SEGMEXEC
65349  int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65350  {
65351 +       int ret = __do_munmap(mm, start, len);
65352 +       if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
65353 +               return ret;
65354 +
65355 +       return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
65356 +}
65357 +
65358 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65359 +#else
65360 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65361 +#endif
65362 +{
65363         unsigned long end;
65364         struct vm_area_struct *vma, *prev, *last;
65365  
65366 +       /*
65367 +        * mm->mmap_sem is required to protect against another thread
65368 +        * changing the mappings in case we sleep.
65369 +        */
65370 +       verify_mm_writelocked(mm);
65371 +
65372         if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
65373                 return -EINVAL;
65374  
65375 @@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
65376         /* Fix up all other VM information */
65377         remove_vma_list(mm, vma);
65378  
65379 +       track_exec_limit(mm, start, end, 0UL);
65380 +
65381         return 0;
65382  }
65383  
65384 @@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
65385  
65386         profile_munmap(addr);
65387  
65388 +#ifdef CONFIG_PAX_SEGMEXEC
65389 +       if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
65390 +           (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
65391 +               return -EINVAL;
65392 +#endif
65393 +
65394         down_write(&mm->mmap_sem);
65395         ret = do_munmap(mm, addr, len);
65396         up_write(&mm->mmap_sem);
65397         return ret;
65398  }
65399  
65400 -static inline void verify_mm_writelocked(struct mm_struct *mm)
65401 -{
65402 -#ifdef CONFIG_DEBUG_VM
65403 -       if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65404 -               WARN_ON(1);
65405 -               up_read(&mm->mmap_sem);
65406 -       }
65407 -#endif
65408 -}
65409 -
65410  /*
65411   *  this is really a simplified "do_mmap".  it only handles
65412   *  anonymous maps.  eventually we may be able to do some
65413 @@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
65414         struct rb_node ** rb_link, * rb_parent;
65415         pgoff_t pgoff = addr >> PAGE_SHIFT;
65416         int error;
65417 +       unsigned long charged;
65418  
65419         len = PAGE_ALIGN(len);
65420         if (!len)
65421 @@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
65422  
65423         flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
65424  
65425 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65426 +       if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65427 +               flags &= ~VM_EXEC;
65428 +
65429 +#ifdef CONFIG_PAX_MPROTECT
65430 +               if (mm->pax_flags & MF_PAX_MPROTECT)
65431 +                       flags &= ~VM_MAYEXEC;
65432 +#endif
65433 +
65434 +       }
65435 +#endif
65436 +
65437         error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
65438         if (error & ~PAGE_MASK)
65439                 return error;
65440  
65441 +       charged = len >> PAGE_SHIFT;
65442 +
65443         /*
65444          * mlock MCL_FUTURE?
65445          */
65446         if (mm->def_flags & VM_LOCKED) {
65447                 unsigned long locked, lock_limit;
65448 -               locked = len >> PAGE_SHIFT;
65449 +               locked = charged;
65450                 locked += mm->locked_vm;
65451                 lock_limit = rlimit(RLIMIT_MEMLOCK);
65452                 lock_limit >>= PAGE_SHIFT;
65453 @@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
65454         /*
65455          * Clear old maps.  this also does some error checking for us
65456          */
65457 - munmap_back:
65458         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65459         if (vma && vma->vm_start < addr + len) {
65460                 if (do_munmap(mm, addr, len))
65461                         return -ENOMEM;
65462 -               goto munmap_back;
65463 +               vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65464 +               BUG_ON(vma && vma->vm_start < addr + len);
65465         }
65466  
65467         /* Check against address space limits *after* clearing old maps... */
65468 -       if (!may_expand_vm(mm, len >> PAGE_SHIFT))
65469 +       if (!may_expand_vm(mm, charged))
65470                 return -ENOMEM;
65471  
65472         if (mm->map_count > sysctl_max_map_count)
65473                 return -ENOMEM;
65474  
65475 -       if (security_vm_enough_memory(len >> PAGE_SHIFT))
65476 +       if (security_vm_enough_memory(charged))
65477                 return -ENOMEM;
65478  
65479         /* Can we just expand an old private anonymous mapping? */
65480 @@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
65481          */
65482         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65483         if (!vma) {
65484 -               vm_unacct_memory(len >> PAGE_SHIFT);
65485 +               vm_unacct_memory(charged);
65486                 return -ENOMEM;
65487         }
65488  
65489 @@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
65490         vma_link(mm, vma, prev, rb_link, rb_parent);
65491  out:
65492         perf_event_mmap(vma);
65493 -       mm->total_vm += len >> PAGE_SHIFT;
65494 +       mm->total_vm += charged;
65495         if (flags & VM_LOCKED) {
65496                 if (!mlock_vma_pages_range(vma, addr, addr + len))
65497 -                       mm->locked_vm += (len >> PAGE_SHIFT);
65498 +                       mm->locked_vm += charged;
65499         }
65500 +       track_exec_limit(mm, addr, addr + len, flags);
65501         return addr;
65502  }
65503  
65504 @@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
65505          * Walk the list again, actually closing and freeing it,
65506          * with preemption enabled, without holding any MM locks.
65507          */
65508 -       while (vma)
65509 +       while (vma) {
65510 +               vma->vm_mirror = NULL;
65511                 vma = remove_vma(vma);
65512 +       }
65513  
65514         BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
65515  }
65516 @@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct * 
65517         struct vm_area_struct * __vma, * prev;
65518         struct rb_node ** rb_link, * rb_parent;
65519  
65520 +#ifdef CONFIG_PAX_SEGMEXEC
65521 +       struct vm_area_struct *vma_m = NULL;
65522 +#endif
65523 +
65524 +       if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
65525 +               return -EPERM;
65526 +
65527         /*
65528          * The vm_pgoff of a purely anonymous vma should be irrelevant
65529          * until its first write fault, when page's anon_vma and index
65530 @@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct * 
65531         if ((vma->vm_flags & VM_ACCOUNT) &&
65532              security_vm_enough_memory_mm(mm, vma_pages(vma)))
65533                 return -ENOMEM;
65534 +
65535 +#ifdef CONFIG_PAX_SEGMEXEC
65536 +       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
65537 +               vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65538 +               if (!vma_m)
65539 +                       return -ENOMEM;
65540 +       }
65541 +#endif
65542 +
65543         vma_link(mm, vma, prev, rb_link, rb_parent);
65544 +
65545 +#ifdef CONFIG_PAX_SEGMEXEC
65546 +       if (vma_m)
65547 +               BUG_ON(pax_mirror_vma(vma_m, vma));
65548 +#endif
65549 +
65550         return 0;
65551  }
65552  
65553 @@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
65554         struct rb_node **rb_link, *rb_parent;
65555         struct mempolicy *pol;
65556  
65557 +       BUG_ON(vma->vm_mirror);
65558 +
65559         /*
65560          * If anonymous vma has not yet been faulted, update new pgoff
65561          * to match new location, to increase its chance of merging.
65562 @@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
65563         return NULL;
65564  }
65565  
65566 +#ifdef CONFIG_PAX_SEGMEXEC
65567 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
65568 +{
65569 +       struct vm_area_struct *prev_m;
65570 +       struct rb_node **rb_link_m, *rb_parent_m;
65571 +       struct mempolicy *pol_m;
65572 +
65573 +       BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
65574 +       BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
65575 +       BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
65576 +       *vma_m = *vma;
65577 +       INIT_LIST_HEAD(&vma_m->anon_vma_chain);
65578 +       if (anon_vma_clone(vma_m, vma))
65579 +               return -ENOMEM;
65580 +       pol_m = vma_policy(vma_m);
65581 +       mpol_get(pol_m);
65582 +       vma_set_policy(vma_m, pol_m);
65583 +       vma_m->vm_start += SEGMEXEC_TASK_SIZE;
65584 +       vma_m->vm_end += SEGMEXEC_TASK_SIZE;
65585 +       vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
65586 +       vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
65587 +       if (vma_m->vm_file)
65588 +               get_file(vma_m->vm_file);
65589 +       if (vma_m->vm_ops && vma_m->vm_ops->open)
65590 +               vma_m->vm_ops->open(vma_m);
65591 +       find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
65592 +       vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
65593 +       vma_m->vm_mirror = vma;
65594 +       vma->vm_mirror = vma_m;
65595 +       return 0;
65596 +}
65597 +#endif
65598 +
65599  /*
65600   * Return true if the calling process may expand its vm space by the passed
65601   * number of pages
65602 @@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm, 
65603         unsigned long lim;
65604  
65605         lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
65606 -
65607 +       gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
65608         if (cur + npages > lim)
65609                 return 0;
65610         return 1;
65611 @@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
65612         vma->vm_start = addr;
65613         vma->vm_end = addr + len;
65614  
65615 +#ifdef CONFIG_PAX_MPROTECT
65616 +       if (mm->pax_flags & MF_PAX_MPROTECT) {
65617 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
65618 +               if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
65619 +                       return -EPERM;
65620 +               if (!(vm_flags & VM_EXEC))
65621 +                       vm_flags &= ~VM_MAYEXEC;
65622 +#else
65623 +               if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65624 +                       vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65625 +#endif
65626 +               else
65627 +                       vm_flags &= ~VM_MAYWRITE;
65628 +       }
65629 +#endif
65630 +
65631         vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
65632         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65633  
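
The do_brk() hunks above drop VM_EXEC (and, with MPROTECT, VM_MAYEXEC) from brk()-backed anonymous mappings for PAGEEXEC/SEGMEXEC tasks and record the result via track_exec_limit(). A minimal userspace sketch, assuming a Linux host with procfs mounted, that grows the heap and prints the [heap] line from /proc/self/maps so the permission bits can be inspected; on a PaX kernel the line is expected to show rw-p, elsewhere behaviour may differ:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char line[256];
	FILE *maps;

	/* force a brk()-backed extension so a [heap] segment exists */
	if (sbrk(16 * 4096) == (void *)-1) {
		perror("sbrk");
		return 1;
	}

	maps = fopen("/proc/self/maps", "r");
	if (!maps) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), maps))
		if (strstr(line, "[heap]"))
			fputs(line, stdout);
	fclose(maps);
	return 0;
}

With MPROTECT active the cleared VM_MAYEXEC additionally prevents a later mprotect(PROT_EXEC) on the same region, which is not visible in the maps output.
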
65634 diff -urNp linux-3.0.4/mm/mprotect.c linux-3.0.4/mm/mprotect.c
65635 --- linux-3.0.4/mm/mprotect.c   2011-07-21 22:17:23.000000000 -0400
65636 +++ linux-3.0.4/mm/mprotect.c   2011-08-23 21:48:14.000000000 -0400
65637 @@ -23,10 +23,16 @@
65638  #include <linux/mmu_notifier.h>
65639  #include <linux/migrate.h>
65640  #include <linux/perf_event.h>
65641 +
65642 +#ifdef CONFIG_PAX_MPROTECT
65643 +#include <linux/elf.h>
65644 +#endif
65645 +
65646  #include <asm/uaccess.h>
65647  #include <asm/pgtable.h>
65648  #include <asm/cacheflush.h>
65649  #include <asm/tlbflush.h>
65650 +#include <asm/mmu_context.h>
65651  
65652  #ifndef pgprot_modify
65653  static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
65654 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
65655         flush_tlb_range(vma, start, end);
65656  }
65657  
65658 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65659 +/* called while holding the mmap semaphore for writing, except during stack expansion */
65660 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
65661 +{
65662 +       unsigned long oldlimit, newlimit = 0UL;
65663 +
65664 +       if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
65665 +               return;
65666 +
65667 +       spin_lock(&mm->page_table_lock);
65668 +       oldlimit = mm->context.user_cs_limit;
65669 +       if ((prot & VM_EXEC) && oldlimit < end)
65670 +               /* USER_CS limit moved up */
65671 +               newlimit = end;
65672 +       else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
65673 +               /* USER_CS limit moved down */
65674 +               newlimit = start;
65675 +
65676 +       if (newlimit) {
65677 +               mm->context.user_cs_limit = newlimit;
65678 +
65679 +#ifdef CONFIG_SMP
65680 +               wmb();
65681 +               cpus_clear(mm->context.cpu_user_cs_mask);
65682 +               cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
65683 +#endif
65684 +
65685 +               set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
65686 +       }
65687 +       spin_unlock(&mm->page_table_lock);
65688 +       if (newlimit == end) {
65689 +               struct vm_area_struct *vma = find_vma(mm, oldlimit);
65690 +
65691 +               for (; vma && vma->vm_start < end; vma = vma->vm_next)
65692 +                       if (is_vm_hugetlb_page(vma))
65693 +                               hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
65694 +                       else
65695 +                               change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
65696 +       }
65697 +}
65698 +#endif
65699 +
65700  int
65701  mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
65702         unsigned long start, unsigned long end, unsigned long newflags)
65703 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
65704         int error;
65705         int dirty_accountable = 0;
65706  
65707 +#ifdef CONFIG_PAX_SEGMEXEC
65708 +       struct vm_area_struct *vma_m = NULL;
65709 +       unsigned long start_m, end_m;
65710 +
65711 +       start_m = start + SEGMEXEC_TASK_SIZE;
65712 +       end_m = end + SEGMEXEC_TASK_SIZE;
65713 +#endif
65714 +
65715         if (newflags == oldflags) {
65716                 *pprev = vma;
65717                 return 0;
65718         }
65719  
65720 +       if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
65721 +               struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
65722 +
65723 +               if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
65724 +                       return -ENOMEM;
65725 +
65726 +               if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
65727 +                       return -ENOMEM;
65728 +       }
65729 +
65730         /*
65731          * If we make a private mapping writable we increase our commit;
65732          * but (without finer accounting) cannot reduce our commit if we
65733 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
65734                 }
65735         }
65736  
65737 +#ifdef CONFIG_PAX_SEGMEXEC
65738 +       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
65739 +               if (start != vma->vm_start) {
65740 +                       error = split_vma(mm, vma, start, 1);
65741 +                       if (error)
65742 +                               goto fail;
65743 +                       BUG_ON(!*pprev || (*pprev)->vm_next == vma);
65744 +                       *pprev = (*pprev)->vm_next;
65745 +               }
65746 +
65747 +               if (end != vma->vm_end) {
65748 +                       error = split_vma(mm, vma, end, 0);
65749 +                       if (error)
65750 +                               goto fail;
65751 +               }
65752 +
65753 +               if (pax_find_mirror_vma(vma)) {
65754 +                       error = __do_munmap(mm, start_m, end_m - start_m);
65755 +                       if (error)
65756 +                               goto fail;
65757 +               } else {
65758 +                       vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65759 +                       if (!vma_m) {
65760 +                               error = -ENOMEM;
65761 +                               goto fail;
65762 +                       }
65763 +                       vma->vm_flags = newflags;
65764 +                       error = pax_mirror_vma(vma_m, vma);
65765 +                       if (error) {
65766 +                               vma->vm_flags = oldflags;
65767 +                               goto fail;
65768 +                       }
65769 +               }
65770 +       }
65771 +#endif
65772 +
65773         /*
65774          * First try to merge with previous and/or next vma.
65775          */
65776 @@ -204,9 +306,21 @@ success:
65777          * vm_flags and vm_page_prot are protected by the mmap_sem
65778          * held in write mode.
65779          */
65780 +
65781 +#ifdef CONFIG_PAX_SEGMEXEC
65782 +       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
65783 +               pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
65784 +#endif
65785 +
65786         vma->vm_flags = newflags;
65787 +
65788 +#ifdef CONFIG_PAX_MPROTECT
65789 +       if (mm->binfmt && mm->binfmt->handle_mprotect)
65790 +               mm->binfmt->handle_mprotect(vma, newflags);
65791 +#endif
65792 +
65793         vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
65794 -                                         vm_get_page_prot(newflags));
65795 +                                         vm_get_page_prot(vma->vm_flags));
65796  
65797         if (vma_wants_writenotify(vma)) {
65798                 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
65799 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65800         end = start + len;
65801         if (end <= start)
65802                 return -ENOMEM;
65803 +
65804 +#ifdef CONFIG_PAX_SEGMEXEC
65805 +       if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65806 +               if (end > SEGMEXEC_TASK_SIZE)
65807 +                       return -EINVAL;
65808 +       } else
65809 +#endif
65810 +
65811 +       if (end > TASK_SIZE)
65812 +               return -EINVAL;
65813 +
65814         if (!arch_validate_prot(prot))
65815                 return -EINVAL;
65816  
65817 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65818         /*
65819          * Does the application expect PROT_READ to imply PROT_EXEC:
65820          */
65821 -       if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65822 +       if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65823                 prot |= PROT_EXEC;
65824  
65825         vm_flags = calc_vm_prot_bits(prot);
65826 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65827         if (start > vma->vm_start)
65828                 prev = vma;
65829  
65830 +#ifdef CONFIG_PAX_MPROTECT
65831 +       if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
65832 +               current->mm->binfmt->handle_mprotect(vma, vm_flags);
65833 +#endif
65834 +
65835         for (nstart = start ; ; ) {
65836                 unsigned long newflags;
65837  
65838 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65839  
65840                 /* newflags >> 4 shift VM_MAY% in place of VM_% */
65841                 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
65842 +                       if (prot & (PROT_WRITE | PROT_EXEC))
65843 +                               gr_log_rwxmprotect(vma->vm_file);
65844 +
65845 +                       error = -EACCES;
65846 +                       goto out;
65847 +               }
65848 +
65849 +               if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
65850                         error = -EACCES;
65851                         goto out;
65852                 }
65853 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65854                 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
65855                 if (error)
65856                         goto out;
65857 +
65858 +               track_exec_limit(current->mm, nstart, tmp, vm_flags);
65859 +
65860                 nstart = tmp;
65861  
65862                 if (nstart < prev->vm_end)
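
The mprotect() hunks above reject requests whose new flags exceed the VM_MAY* mask (returning -EACCES under MPROTECT) and consult the binfmt handle_mprotect hook. A small userspace sketch, using only standard libc calls, that creates a writable anonymous mapping and then asks for read+write+exec; under PAX_MPROTECT the second step is expected to fail, while on an unpatched kernel it normally succeeds:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xc3, len);	/* fill with x86 "ret" just to have content */

	if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_EXEC) == 0)
		puts("mprotect(RWX) succeeded (no MPROTECT restriction active)");
	else
		printf("mprotect(RWX) failed: %s\n", strerror(errno));

	munmap(p, len);
	return 0;
}
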
65863 diff -urNp linux-3.0.4/mm/mremap.c linux-3.0.4/mm/mremap.c
65864 --- linux-3.0.4/mm/mremap.c     2011-07-21 22:17:23.000000000 -0400
65865 +++ linux-3.0.4/mm/mremap.c     2011-08-23 21:47:56.000000000 -0400
65866 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
65867                         continue;
65868                 pte = ptep_clear_flush(vma, old_addr, old_pte);
65869                 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
65870 +
65871 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65872 +               if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
65873 +                       pte = pte_exprotect(pte);
65874 +#endif
65875 +
65876                 set_pte_at(mm, new_addr, new_pte, pte);
65877         }
65878  
65879 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
65880         if (is_vm_hugetlb_page(vma))
65881                 goto Einval;
65882  
65883 +#ifdef CONFIG_PAX_SEGMEXEC
65884 +       if (pax_find_mirror_vma(vma))
65885 +               goto Einval;
65886 +#endif
65887 +
65888         /* We can't remap across vm area boundaries */
65889         if (old_len > vma->vm_end - addr)
65890                 goto Efault;
65891 @@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned 
65892         unsigned long ret = -EINVAL;
65893         unsigned long charged = 0;
65894         unsigned long map_flags;
65895 +       unsigned long pax_task_size = TASK_SIZE;
65896  
65897         if (new_addr & ~PAGE_MASK)
65898                 goto out;
65899  
65900 -       if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
65901 +#ifdef CONFIG_PAX_SEGMEXEC
65902 +       if (mm->pax_flags & MF_PAX_SEGMEXEC)
65903 +               pax_task_size = SEGMEXEC_TASK_SIZE;
65904 +#endif
65905 +
65906 +       pax_task_size -= PAGE_SIZE;
65907 +
65908 +       if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
65909                 goto out;
65910  
65911         /* Check if the location we're moving into overlaps the
65912          * old location at all, and fail if it does.
65913          */
65914 -       if ((new_addr <= addr) && (new_addr+new_len) > addr)
65915 -               goto out;
65916 -
65917 -       if ((addr <= new_addr) && (addr+old_len) > new_addr)
65918 +       if (addr + old_len > new_addr && new_addr + new_len > addr)
65919                 goto out;
65920  
65921         ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65922 @@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
65923         struct vm_area_struct *vma;
65924         unsigned long ret = -EINVAL;
65925         unsigned long charged = 0;
65926 +       unsigned long pax_task_size = TASK_SIZE;
65927  
65928         if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
65929                 goto out;
65930 @@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
65931         if (!new_len)
65932                 goto out;
65933  
65934 +#ifdef CONFIG_PAX_SEGMEXEC
65935 +       if (mm->pax_flags & MF_PAX_SEGMEXEC)
65936 +               pax_task_size = SEGMEXEC_TASK_SIZE;
65937 +#endif
65938 +
65939 +       pax_task_size -= PAGE_SIZE;
65940 +
65941 +       if (new_len > pax_task_size || addr > pax_task_size-new_len ||
65942 +           old_len > pax_task_size || addr > pax_task_size-old_len)
65943 +               goto out;
65944 +
65945         if (flags & MREMAP_FIXED) {
65946                 if (flags & MREMAP_MAYMOVE)
65947                         ret = mremap_to(addr, old_len, new_addr, new_len);
65948 @@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
65949                                                    addr + new_len);
65950                         }
65951                         ret = addr;
65952 +                       track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
65953                         goto out;
65954                 }
65955         }
65956 @@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
65957                 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65958                 if (ret)
65959                         goto out;
65960 +
65961 +               map_flags = vma->vm_flags;
65962                 ret = move_vma(vma, addr, old_len, new_len, new_addr);
65963 +               if (!(ret & ~PAGE_MASK)) {
65964 +                       track_exec_limit(current->mm, addr, addr + old_len, 0UL);
65965 +                       track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
65966 +               }
65967         }
65968  out:
65969         if (ret & ~PAGE_MASK)
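
The mremap_to() hunk above replaces the two-branch overlap test with the canonical check for intersecting half-open ranges: [addr, addr+old_len) and [new_addr, new_addr+new_len) overlap exactly when addr + old_len > new_addr and new_addr + new_len > addr; the single condition folds the case split on which range starts first. A standalone self-test of that condition (illustrative code, not part of the patch):

#include <assert.h>
#include <stdio.h>

static int ranges_overlap(unsigned long a, unsigned long alen,
			  unsigned long b, unsigned long blen)
{
	return a + alen > b && b + blen > a;
}

int main(void)
{
	assert( ranges_overlap(0x1000, 0x2000, 0x2000, 0x1000));  /* tail overlaps head */
	assert(!ranges_overlap(0x1000, 0x1000, 0x2000, 0x1000));  /* adjacent, no overlap */
	assert( ranges_overlap(0x2000, 0x1000, 0x1000, 0x3000));  /* old range inside new */
	puts("overlap checks behave as expected");
	return 0;
}
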
65970 diff -urNp linux-3.0.4/mm/nobootmem.c linux-3.0.4/mm/nobootmem.c
65971 --- linux-3.0.4/mm/nobootmem.c  2011-07-21 22:17:23.000000000 -0400
65972 +++ linux-3.0.4/mm/nobootmem.c  2011-08-23 21:47:56.000000000 -0400
65973 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
65974  unsigned long __init free_all_memory_core_early(int nodeid)
65975  {
65976         int i;
65977 -       u64 start, end;
65978 +       u64 start, end, startrange, endrange;
65979         unsigned long count = 0;
65980 -       struct range *range = NULL;
65981 +       struct range *range = NULL, rangerange = { 0, 0 };
65982         int nr_range;
65983  
65984         nr_range = get_free_all_memory_range(&range, nodeid);
65985 +       startrange = __pa(range) >> PAGE_SHIFT;
65986 +       endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
65987  
65988         for (i = 0; i < nr_range; i++) {
65989                 start = range[i].start;
65990                 end = range[i].end;
65991 +               if (start <= endrange && startrange < end) {
65992 +                       BUG_ON(rangerange.start | rangerange.end);
65993 +                       rangerange = range[i];
65994 +                       continue;
65995 +               }
65996                 count += end - start;
65997                 __free_pages_memory(start, end);
65998         }
65999 +       start = rangerange.start;
66000 +       end = rangerange.end;
66001 +       count += end - start;
66002 +       __free_pages_memory(start, end);
66003  
66004         return count;
66005  }
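
The free_all_memory_core_early() hunk above defers the range that physically contains the range[] array itself, releasing it only after the walk over the array has finished. A simplified userland model of that ordering (names are illustrative, not kernel API):

#include <stdio.h>

struct range { unsigned long start, end; };	/* page frame numbers */

static unsigned long release_all(struct range *range, int nr,
				 unsigned long self_start, unsigned long self_end)
{
	struct range deferred = { 0, 0 };
	unsigned long count = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (range[i].start <= self_end && self_start < range[i].end) {
			deferred = range[i];	/* this range holds the array itself */
			continue;
		}
		count += range[i].end - range[i].start;
		/* __free_pages_memory(range[i].start, range[i].end) would go here */
	}
	count += deferred.end - deferred.start;	/* array no longer needed, safe to release */
	return count;
}

int main(void)
{
	struct range r[] = { { 0, 16 }, { 32, 64 }, { 100, 108 } };

	/* pretend the range[] array sits in pfns 33..34 */
	printf("%lu pages released\n", release_all(r, 3, 33, 34));
	return 0;
}
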
66006 diff -urNp linux-3.0.4/mm/nommu.c linux-3.0.4/mm/nommu.c
66007 --- linux-3.0.4/mm/nommu.c      2011-07-21 22:17:23.000000000 -0400
66008 +++ linux-3.0.4/mm/nommu.c      2011-08-23 21:47:56.000000000 -0400
66009 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
66010  int sysctl_overcommit_ratio = 50; /* default is 50% */
66011  int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
66012  int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
66013 -int heap_stack_gap = 0;
66014  
66015  atomic_long_t mmap_pages_allocated;
66016  
66017 @@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
66018  EXPORT_SYMBOL(find_vma);
66019  
66020  /*
66021 - * find a VMA
66022 - * - we don't extend stack VMAs under NOMMU conditions
66023 - */
66024 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
66025 -{
66026 -       return find_vma(mm, addr);
66027 -}
66028 -
66029 -/*
66030   * expand a stack to a given address
66031   * - not supported under NOMMU conditions
66032   */
66033 @@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
66034  
66035         /* most fields are the same, copy all, and then fixup */
66036         *new = *vma;
66037 +       INIT_LIST_HEAD(&new->anon_vma_chain);
66038         *region = *vma->vm_region;
66039         new->vm_region = region;
66040  
66041 diff -urNp linux-3.0.4/mm/page_alloc.c linux-3.0.4/mm/page_alloc.c
66042 --- linux-3.0.4/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
66043 +++ linux-3.0.4/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
66044 @@ -340,7 +340,7 @@ out:
66045   * This usage means that zero-order pages may not be compound.
66046   */
66047  
66048 -static void free_compound_page(struct page *page)
66049 +void free_compound_page(struct page *page)
66050  {
66051         __free_pages_ok(page, compound_order(page));
66052  }
66053 @@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
66054         int i;
66055         int bad = 0;
66056  
66057 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66058 +       unsigned long index = 1UL << order;
66059 +#endif
66060 +
66061         trace_mm_page_free_direct(page, order);
66062         kmemcheck_free_shadow(page, order);
66063  
66064 @@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
66065                 debug_check_no_obj_freed(page_address(page),
66066                                            PAGE_SIZE << order);
66067         }
66068 +
66069 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66070 +       for (; index; --index)
66071 +               sanitize_highpage(page + index - 1);
66072 +#endif
66073 +
66074         arch_free_page(page, order);
66075         kernel_map_pages(page, 1 << order, 0);
66076  
66077 @@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
66078         arch_alloc_page(page, order);
66079         kernel_map_pages(page, 1 << order, 1);
66080  
66081 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
66082         if (gfp_flags & __GFP_ZERO)
66083                 prep_zero_page(page, order, gfp_flags);
66084 +#endif
66085  
66086         if (order && (gfp_flags & __GFP_COMP))
66087                 prep_compound_page(page, order);
66088 @@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
66089         int cpu;
66090         struct zone *zone;
66091  
66092 +       pax_track_stack();
66093 +
66094         for_each_populated_zone(zone) {
66095                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
66096                         continue;
66097 diff -urNp linux-3.0.4/mm/percpu.c linux-3.0.4/mm/percpu.c
66098 --- linux-3.0.4/mm/percpu.c     2011-07-21 22:17:23.000000000 -0400
66099 +++ linux-3.0.4/mm/percpu.c     2011-08-23 21:47:56.000000000 -0400
66100 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu 
66101  static unsigned int pcpu_last_unit_cpu __read_mostly;
66102  
66103  /* the address of the first chunk which starts with the kernel static area */
66104 -void *pcpu_base_addr __read_mostly;
66105 +void *pcpu_base_addr __read_only;
66106  EXPORT_SYMBOL_GPL(pcpu_base_addr);
66107  
66108  static const int *pcpu_unit_map __read_mostly;         /* cpu -> unit */
66109 diff -urNp linux-3.0.4/mm/rmap.c linux-3.0.4/mm/rmap.c
66110 --- linux-3.0.4/mm/rmap.c       2011-07-21 22:17:23.000000000 -0400
66111 +++ linux-3.0.4/mm/rmap.c       2011-08-23 21:47:56.000000000 -0400
66112 @@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
66113         struct anon_vma *anon_vma = vma->anon_vma;
66114         struct anon_vma_chain *avc;
66115  
66116 +#ifdef CONFIG_PAX_SEGMEXEC
66117 +       struct anon_vma_chain *avc_m = NULL;
66118 +#endif
66119 +
66120         might_sleep();
66121         if (unlikely(!anon_vma)) {
66122                 struct mm_struct *mm = vma->vm_mm;
66123 @@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
66124                 if (!avc)
66125                         goto out_enomem;
66126  
66127 +#ifdef CONFIG_PAX_SEGMEXEC
66128 +               avc_m = anon_vma_chain_alloc(GFP_KERNEL);
66129 +               if (!avc_m)
66130 +                       goto out_enomem_free_avc;
66131 +#endif
66132 +
66133                 anon_vma = find_mergeable_anon_vma(vma);
66134                 allocated = NULL;
66135                 if (!anon_vma) {
66136 @@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
66137                 /* page_table_lock to protect against threads */
66138                 spin_lock(&mm->page_table_lock);
66139                 if (likely(!vma->anon_vma)) {
66140 +
66141 +#ifdef CONFIG_PAX_SEGMEXEC
66142 +                       struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
66143 +
66144 +                       if (vma_m) {
66145 +                               BUG_ON(vma_m->anon_vma);
66146 +                               vma_m->anon_vma = anon_vma;
66147 +                               avc_m->anon_vma = anon_vma;
66148 +                               avc_m->vma = vma;
66149 +                               list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
66150 +                               list_add(&avc_m->same_anon_vma, &anon_vma->head);
66151 +                               avc_m = NULL;
66152 +                       }
66153 +#endif
66154 +
66155                         vma->anon_vma = anon_vma;
66156                         avc->anon_vma = anon_vma;
66157                         avc->vma = vma;
66158 @@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
66159  
66160                 if (unlikely(allocated))
66161                         put_anon_vma(allocated);
66162 +
66163 +#ifdef CONFIG_PAX_SEGMEXEC
66164 +               if (unlikely(avc_m))
66165 +                       anon_vma_chain_free(avc_m);
66166 +#endif
66167 +
66168                 if (unlikely(avc))
66169                         anon_vma_chain_free(avc);
66170         }
66171         return 0;
66172  
66173   out_enomem_free_avc:
66174 +
66175 +#ifdef CONFIG_PAX_SEGMEXEC
66176 +       if (avc_m)
66177 +               anon_vma_chain_free(avc_m);
66178 +#endif
66179 +
66180         anon_vma_chain_free(avc);
66181   out_enomem:
66182         return -ENOMEM;
66183 @@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
66184   * Attach the anon_vmas from src to dst.
66185   * Returns 0 on success, -ENOMEM on failure.
66186   */
66187 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
66188 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
66189  {
66190         struct anon_vma_chain *avc, *pavc;
66191         struct anon_vma *root = NULL;
66192 @@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
66193   * the corresponding VMA in the parent process is attached to.
66194   * Returns 0 on success, non-zero on failure.
66195   */
66196 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
66197 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
66198  {
66199         struct anon_vma_chain *avc;
66200         struct anon_vma *anon_vma;
66201 diff -urNp linux-3.0.4/mm/shmem.c linux-3.0.4/mm/shmem.c
66202 --- linux-3.0.4/mm/shmem.c      2011-07-21 22:17:23.000000000 -0400
66203 +++ linux-3.0.4/mm/shmem.c      2011-08-23 21:48:14.000000000 -0400
66204 @@ -31,7 +31,7 @@
66205  #include <linux/percpu_counter.h>
66206  #include <linux/swap.h>
66207  
66208 -static struct vfsmount *shm_mnt;
66209 +struct vfsmount *shm_mnt;
66210  
66211  #ifdef CONFIG_SHMEM
66212  /*
66213 @@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
66214                 goto unlock;
66215         }
66216         entry = shmem_swp_entry(info, index, NULL);
66217 +       if (!entry)
66218 +               goto unlock;
66219         if (entry->val) {
66220                 /*
66221                  * The more uptodate page coming down from a stacked
66222 @@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
66223         struct vm_area_struct pvma;
66224         struct page *page;
66225  
66226 +       pax_track_stack();
66227 +
66228         spol = mpol_cond_copy(&mpol,
66229                                 mpol_shared_policy_lookup(&info->policy, idx));
66230  
66231 @@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block 
66232         int err = -ENOMEM;
66233  
66234         /* Round up to L1_CACHE_BYTES to resist false sharing */
66235 -       sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
66236 -                               L1_CACHE_BYTES), GFP_KERNEL);
66237 +       sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
66238         if (!sbinfo)
66239                 return -ENOMEM;
66240  
66241 diff -urNp linux-3.0.4/mm/slab.c linux-3.0.4/mm/slab.c
66242 --- linux-3.0.4/mm/slab.c       2011-07-21 22:17:23.000000000 -0400
66243 +++ linux-3.0.4/mm/slab.c       2011-08-23 21:48:14.000000000 -0400
66244 @@ -151,7 +151,7 @@
66245  
66246  /* Legal flag mask for kmem_cache_create(). */
66247  #if DEBUG
66248 -# define CREATE_MASK   (SLAB_RED_ZONE | \
66249 +# define CREATE_MASK   (SLAB_USERCOPY | SLAB_RED_ZONE | \
66250                          SLAB_POISON | SLAB_HWCACHE_ALIGN | \
66251                          SLAB_CACHE_DMA | \
66252                          SLAB_STORE_USER | \
66253 @@ -159,7 +159,7 @@
66254                          SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66255                          SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
66256  #else
66257 -# define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
66258 +# define CREATE_MASK   (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
66259                          SLAB_CACHE_DMA | \
66260                          SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
66261                          SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66262 @@ -288,7 +288,7 @@ struct kmem_list3 {
66263   * Need this for bootstrapping a per node allocator.
66264   */
66265  #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
66266 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
66267 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
66268  #define        CACHE_CACHE 0
66269  #define        SIZE_AC MAX_NUMNODES
66270  #define        SIZE_L3 (2 * MAX_NUMNODES)
66271 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
66272                 if ((x)->max_freeable < i)                              \
66273                         (x)->max_freeable = i;                          \
66274         } while (0)
66275 -#define STATS_INC_ALLOCHIT(x)  atomic_inc(&(x)->allochit)
66276 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
66277 -#define STATS_INC_FREEHIT(x)   atomic_inc(&(x)->freehit)
66278 -#define STATS_INC_FREEMISS(x)  atomic_inc(&(x)->freemiss)
66279 +#define STATS_INC_ALLOCHIT(x)  atomic_inc_unchecked(&(x)->allochit)
66280 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
66281 +#define STATS_INC_FREEHIT(x)   atomic_inc_unchecked(&(x)->freehit)
66282 +#define STATS_INC_FREEMISS(x)  atomic_inc_unchecked(&(x)->freemiss)
66283  #else
66284  #define        STATS_INC_ACTIVE(x)     do { } while (0)
66285  #define        STATS_DEC_ACTIVE(x)     do { } while (0)
66286 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct 
66287   *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
66288   */
66289  static inline unsigned int obj_to_index(const struct kmem_cache *cache,
66290 -                                       const struct slab *slab, void *obj)
66291 +                                       const struct slab *slab, const void *obj)
66292  {
66293         u32 offset = (obj - slab->s_mem);
66294         return reciprocal_divide(offset, cache->reciprocal_buffer_size);
66295 @@ -564,7 +564,7 @@ struct cache_names {
66296  static struct cache_names __initdata cache_names[] = {
66297  #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
66298  #include <linux/kmalloc_sizes.h>
66299 -       {NULL,}
66300 +       {NULL}
66301  #undef CACHE
66302  };
66303  
66304 @@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
66305         sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
66306                                         sizes[INDEX_AC].cs_size,
66307                                         ARCH_KMALLOC_MINALIGN,
66308 -                                       ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66309 +                                       ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66310                                         NULL);
66311  
66312         if (INDEX_AC != INDEX_L3) {
66313 @@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
66314                         kmem_cache_create(names[INDEX_L3].name,
66315                                 sizes[INDEX_L3].cs_size,
66316                                 ARCH_KMALLOC_MINALIGN,
66317 -                               ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66318 +                               ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66319                                 NULL);
66320         }
66321  
66322 @@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
66323                         sizes->cs_cachep = kmem_cache_create(names->name,
66324                                         sizes->cs_size,
66325                                         ARCH_KMALLOC_MINALIGN,
66326 -                                       ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66327 +                                       ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66328                                         NULL);
66329                 }
66330  #ifdef CONFIG_ZONE_DMA
66331 @@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
66332         }
66333         /* cpu stats */
66334         {
66335 -               unsigned long allochit = atomic_read(&cachep->allochit);
66336 -               unsigned long allocmiss = atomic_read(&cachep->allocmiss);
66337 -               unsigned long freehit = atomic_read(&cachep->freehit);
66338 -               unsigned long freemiss = atomic_read(&cachep->freemiss);
66339 +               unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
66340 +               unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
66341 +               unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
66342 +               unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
66343  
66344                 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
66345                            allochit, allocmiss, freehit, freemiss);
66346 @@ -4532,15 +4532,66 @@ static const struct file_operations proc
66347  
66348  static int __init slab_proc_init(void)
66349  {
66350 -       proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
66351 +       mode_t gr_mode = S_IRUGO;
66352 +
66353 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66354 +       gr_mode = S_IRUSR;
66355 +#endif
66356 +
66357 +       proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
66358  #ifdef CONFIG_DEBUG_SLAB_LEAK
66359 -       proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
66360 +       proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
66361  #endif
66362         return 0;
66363  }
66364  module_init(slab_proc_init);
66365  #endif
66366  
66367 +void check_object_size(const void *ptr, unsigned long n, bool to)
66368 +{
66369 +
66370 +#ifdef CONFIG_PAX_USERCOPY
66371 +       struct page *page;
66372 +       struct kmem_cache *cachep = NULL;
66373 +       struct slab *slabp;
66374 +       unsigned int objnr;
66375 +       unsigned long offset;
66376 +
66377 +       if (!n)
66378 +               return;
66379 +
66380 +       if (ZERO_OR_NULL_PTR(ptr))
66381 +               goto report;
66382 +
66383 +       if (!virt_addr_valid(ptr))
66384 +               return;
66385 +
66386 +       page = virt_to_head_page(ptr);
66387 +
66388 +       if (!PageSlab(page)) {
66389 +               if (object_is_on_stack(ptr, n) == -1)
66390 +                       goto report;
66391 +               return;
66392 +       }
66393 +
66394 +       cachep = page_get_cache(page);
66395 +       if (!(cachep->flags & SLAB_USERCOPY))
66396 +               goto report;
66397 +
66398 +       slabp = page_get_slab(page);
66399 +       objnr = obj_to_index(cachep, slabp, ptr);
66400 +       BUG_ON(objnr >= cachep->num);
66401 +       offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
66402 +       if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
66403 +               return;
66404 +
66405 +report:
66406 +       pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
66407 +#endif
66408 +
66409 +}
66410 +EXPORT_SYMBOL(check_object_size);
66411 +
66412  /**
66413   * ksize - get the actual amount of memory allocated for a given object
66414   * @objp: Pointer to the object
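
check_object_size() above locates the slab object containing ptr and allows the copy only if the offset and length stay inside a single SLAB_USERCOPY object. A simplified userland model of that bounds check (function and variable names are illustrative, and the obj_offset() adjustment is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool copy_within_object(uintptr_t obj_start, size_t obj_size,
			       uintptr_t ptr, size_t n)
{
	size_t offset;

	if (ptr < obj_start)
		return false;
	offset = ptr - obj_start;
	/* mirrors: offset <= obj_size(cachep) && n <= obj_size(cachep) - offset */
	return offset <= obj_size && n <= obj_size - offset;
}

int main(void)
{
	uintptr_t obj = 0x1000;		/* pretend object of 128 bytes */

	printf("%d\n", copy_within_object(obj, 128, obj + 16, 64));	/* 1: fits */
	printf("%d\n", copy_within_object(obj, 128, obj + 96, 64));	/* 0: overflows into the next object */
	printf("%d\n", copy_within_object(obj, 128, obj - 8, 8));	/* 0: starts before the object */
	return 0;
}
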
66415 diff -urNp linux-3.0.4/mm/slob.c linux-3.0.4/mm/slob.c
66416 --- linux-3.0.4/mm/slob.c       2011-07-21 22:17:23.000000000 -0400
66417 +++ linux-3.0.4/mm/slob.c       2011-08-23 21:47:56.000000000 -0400
66418 @@ -29,7 +29,7 @@
66419   * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
66420   * alloc_pages() directly, allocating compound pages so the page order
66421   * does not have to be separately tracked, and also stores the exact
66422 - * allocation size in page->private so that it can be used to accurately
66423 + * allocation size in slob_page->size so that it can be used to accurately
66424   * provide ksize(). These objects are detected in kfree() because slob_page()
66425   * is false for them.
66426   *
66427 @@ -58,6 +58,7 @@
66428   */
66429  
66430  #include <linux/kernel.h>
66431 +#include <linux/sched.h>
66432  #include <linux/slab.h>
66433  #include <linux/mm.h>
66434  #include <linux/swap.h> /* struct reclaim_state */
66435 @@ -102,7 +103,8 @@ struct slob_page {
66436                         unsigned long flags;    /* mandatory */
66437                         atomic_t _count;        /* mandatory */
66438                         slobidx_t units;        /* free units left in page */
66439 -                       unsigned long pad[2];
66440 +                       unsigned long pad[1];
66441 +                       unsigned long size;     /* size when >=PAGE_SIZE */
66442                         slob_t *free;           /* first free slob_t in page */
66443                         struct list_head list;  /* linked list of free pages */
66444                 };
66445 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
66446   */
66447  static inline int is_slob_page(struct slob_page *sp)
66448  {
66449 -       return PageSlab((struct page *)sp);
66450 +       return PageSlab((struct page *)sp) && !sp->size;
66451  }
66452  
66453  static inline void set_slob_page(struct slob_page *sp)
66454 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
66455  
66456  static inline struct slob_page *slob_page(const void *addr)
66457  {
66458 -       return (struct slob_page *)virt_to_page(addr);
66459 +       return (struct slob_page *)virt_to_head_page(addr);
66460  }
66461  
66462  /*
66463 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
66464  /*
66465   * Return the size of a slob block.
66466   */
66467 -static slobidx_t slob_units(slob_t *s)
66468 +static slobidx_t slob_units(const slob_t *s)
66469  {
66470         if (s->units > 0)
66471                 return s->units;
66472 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
66473  /*
66474   * Return the next free slob block pointer after this one.
66475   */
66476 -static slob_t *slob_next(slob_t *s)
66477 +static slob_t *slob_next(const slob_t *s)
66478  {
66479         slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
66480         slobidx_t next;
66481 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
66482  /*
66483   * Returns true if s is the last free block in its page.
66484   */
66485 -static int slob_last(slob_t *s)
66486 +static int slob_last(const slob_t *s)
66487  {
66488         return !((unsigned long)slob_next(s) & ~PAGE_MASK);
66489  }
66490 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
66491         if (!page)
66492                 return NULL;
66493  
66494 +       set_slob_page(page);
66495         return page_address(page);
66496  }
66497  
66498 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
66499                 if (!b)
66500                         return NULL;
66501                 sp = slob_page(b);
66502 -               set_slob_page(sp);
66503  
66504                 spin_lock_irqsave(&slob_lock, flags);
66505                 sp->units = SLOB_UNITS(PAGE_SIZE);
66506                 sp->free = b;
66507 +               sp->size = 0;
66508                 INIT_LIST_HEAD(&sp->list);
66509                 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
66510                 set_slob_page_free(sp, slob_list);
66511 @@ -476,10 +479,9 @@ out:
66512   * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
66513   */
66514  
66515 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66516 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
66517  {
66518 -       unsigned int *m;
66519 -       int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66520 +       slob_t *m;
66521         void *ret;
66522  
66523         lockdep_trace_alloc(gfp);
66524 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t 
66525  
66526                 if (!m)
66527                         return NULL;
66528 -               *m = size;
66529 +               BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
66530 +               BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
66531 +               m[0].units = size;
66532 +               m[1].units = align;
66533                 ret = (void *)m + align;
66534  
66535                 trace_kmalloc_node(_RET_IP_, ret,
66536 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t 
66537                         gfp |= __GFP_COMP;
66538                 ret = slob_new_pages(gfp, order, node);
66539                 if (ret) {
66540 -                       struct page *page;
66541 -                       page = virt_to_page(ret);
66542 -                       page->private = size;
66543 +                       struct slob_page *sp;
66544 +                       sp = slob_page(ret);
66545 +                       sp->size = size;
66546                 }
66547  
66548                 trace_kmalloc_node(_RET_IP_, ret,
66549                                    size, PAGE_SIZE << order, gfp, node);
66550         }
66551  
66552 -       kmemleak_alloc(ret, size, 1, gfp);
66553 +       return ret;
66554 +}
66555 +
66556 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66557 +{
66558 +       int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66559 +       void *ret = __kmalloc_node_align(size, gfp, node, align);
66560 +
66561 +       if (!ZERO_OR_NULL_PTR(ret))
66562 +               kmemleak_alloc(ret, size, 1, gfp);
66563         return ret;
66564  }
66565  EXPORT_SYMBOL(__kmalloc_node);
66566 @@ -531,13 +545,88 @@ void kfree(const void *block)
66567         sp = slob_page(block);
66568         if (is_slob_page(sp)) {
66569                 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66570 -               unsigned int *m = (unsigned int *)(block - align);
66571 -               slob_free(m, *m + align);
66572 -       } else
66573 +               slob_t *m = (slob_t *)(block - align);
66574 +               slob_free(m, m[0].units + align);
66575 +       } else {
66576 +               clear_slob_page(sp);
66577 +               free_slob_page(sp);
66578 +               sp->size = 0;
66579                 put_page(&sp->page);
66580 +       }
66581  }
66582  EXPORT_SYMBOL(kfree);
66583  
66584 +void check_object_size(const void *ptr, unsigned long n, bool to)
66585 +{
66586 +
66587 +#ifdef CONFIG_PAX_USERCOPY
66588 +       struct slob_page *sp;
66589 +       const slob_t *free;
66590 +       const void *base;
66591 +       unsigned long flags;
66592 +
66593 +       if (!n)
66594 +               return;
66595 +
66596 +       if (ZERO_OR_NULL_PTR(ptr))
66597 +               goto report;
66598 +
66599 +       if (!virt_addr_valid(ptr))
66600 +               return;
66601 +
66602 +       sp = slob_page(ptr);
66603 +       if (!PageSlab((struct page*)sp)) {
66604 +               if (object_is_on_stack(ptr, n) == -1)
66605 +                       goto report;
66606 +               return;
66607 +       }
66608 +
66609 +       if (sp->size) {
66610 +               base = page_address(&sp->page);
66611 +               if (base <= ptr && n <= sp->size - (ptr - base))
66612 +                       return;
66613 +               goto report;
66614 +       }
66615 +
66616 +       /* some tricky double walking to find the chunk */
66617 +       spin_lock_irqsave(&slob_lock, flags);
66618 +       base = (void *)((unsigned long)ptr & PAGE_MASK);
66619 +       free = sp->free;
66620 +
66621 +       while (!slob_last(free) && (void *)free <= ptr) {
66622 +               base = free + slob_units(free);
66623 +               free = slob_next(free);
66624 +       }
66625 +
66626 +       while (base < (void *)free) {
66627 +               slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
66628 +               int size = SLOB_UNIT * SLOB_UNITS(m + align);
66629 +               int offset;
66630 +
66631 +               if (ptr < base + align)
66632 +                       break;
66633 +
66634 +               offset = ptr - base - align;
66635 +               if (offset >= m) {
66636 +                       base += size;
66637 +                       continue;
66638 +               }
66639 +
66640 +               if (n > m - offset)
66641 +                       break;
66642 +
66643 +               spin_unlock_irqrestore(&slob_lock, flags);
66644 +               return;
66645 +       }
66646 +
66647 +       spin_unlock_irqrestore(&slob_lock, flags);
66648 +report:
66649 +       pax_report_usercopy(ptr, n, to, NULL);
66650 +#endif
66651 +
66652 +}
66653 +EXPORT_SYMBOL(check_object_size);
66654 +
66655  /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
66656  size_t ksize(const void *block)
66657  {
66658 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
66659         sp = slob_page(block);
66660         if (is_slob_page(sp)) {
66661                 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66662 -               unsigned int *m = (unsigned int *)(block - align);
66663 -               return SLOB_UNITS(*m) * SLOB_UNIT;
66664 +               slob_t *m = (slob_t *)(block - align);
66665 +               return SLOB_UNITS(m[0].units) * SLOB_UNIT;
66666         } else
66667 -               return sp->page.private;
66668 +               return sp->size;
66669  }
66670  EXPORT_SYMBOL(ksize);
66671  
66672 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
66673  {
66674         struct kmem_cache *c;
66675  
66676 +#ifdef CONFIG_PAX_USERCOPY
66677 +       c = __kmalloc_node_align(sizeof(struct kmem_cache),
66678 +               GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
66679 +#else
66680         c = slob_alloc(sizeof(struct kmem_cache),
66681                 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
66682 +#endif
66683  
66684         if (c) {
66685                 c->name = name;
66686 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
66687  {
66688         void *b;
66689  
66690 +#ifdef CONFIG_PAX_USERCOPY
66691 +       b = __kmalloc_node_align(c->size, flags, node, c->align);
66692 +#else
66693         if (c->size < PAGE_SIZE) {
66694                 b = slob_alloc(c->size, flags, c->align, node);
66695                 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66696                                             SLOB_UNITS(c->size) * SLOB_UNIT,
66697                                             flags, node);
66698         } else {
66699 +               struct slob_page *sp;
66700 +
66701                 b = slob_new_pages(flags, get_order(c->size), node);
66702 +               sp = slob_page(b);
66703 +               sp->size = c->size;
66704                 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66705                                             PAGE_SIZE << get_order(c->size),
66706                                             flags, node);
66707         }
66708 +#endif
66709  
66710         if (c->ctor)
66711                 c->ctor(b);
66712 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
66713  
66714  static void __kmem_cache_free(void *b, int size)
66715  {
66716 -       if (size < PAGE_SIZE)
66717 +       struct slob_page *sp = slob_page(b);
66718 +
66719 +       if (is_slob_page(sp))
66720                 slob_free(b, size);
66721 -       else
66722 +       else {
66723 +               clear_slob_page(sp);
66724 +               free_slob_page(sp);
66725 +               sp->size = 0;
66726                 slob_free_pages(b, get_order(size));
66727 +       }
66728  }
66729  
66730  static void kmem_rcu_free(struct rcu_head *head)
66731 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
66732  
66733  void kmem_cache_free(struct kmem_cache *c, void *b)
66734  {
66735 +       int size = c->size;
66736 +
66737 +#ifdef CONFIG_PAX_USERCOPY
66738 +       if (size + c->align < PAGE_SIZE) {
66739 +               size += c->align;
66740 +               b -= c->align;
66741 +       }
66742 +#endif
66743 +
66744         kmemleak_free_recursive(b, c->flags);
66745         if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
66746                 struct slob_rcu *slob_rcu;
66747 -               slob_rcu = b + (c->size - sizeof(struct slob_rcu));
66748 -               slob_rcu->size = c->size;
66749 +               slob_rcu = b + (size - sizeof(struct slob_rcu));
66750 +               slob_rcu->size = size;
66751                 call_rcu(&slob_rcu->head, kmem_rcu_free);
66752         } else {
66753 -               __kmem_cache_free(b, c->size);
66754 +               __kmem_cache_free(b, size);
66755         }
66756  
66757 +#ifdef CONFIG_PAX_USERCOPY
66758 +       trace_kfree(_RET_IP_, b);
66759 +#else
66760         trace_kmem_cache_free(_RET_IP_, b);
66761 +#endif
66762 +
66763  }
66764  EXPORT_SYMBOL(kmem_cache_free);
66765  
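
The slob hunks above stop stashing the large-allocation size in page->private and keep it in slob_page->size instead, while small kmalloc blocks record their size and alignment in the two slob_t units placed in front of the returned pointer, so kfree()/ksize() can recover them later. A malloc-based userland sketch of that header bookkeeping (names such as sized_alloc() are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HDR_ALIGN 16	/* stand-in for ARCH_KMALLOC_MINALIGN */

struct hdr {
	size_t size;	/* counterpart of m[0].units in the patch */
	size_t align;	/* counterpart of m[1].units */
};

static void *sized_alloc(size_t size)
{
	struct hdr *h = malloc(HDR_ALIGN + size);

	if (!h)
		return NULL;
	h->size = size;
	h->align = HDR_ALIGN;
	return (char *)h + HDR_ALIGN;		/* like "ret = (void *)m + align" */
}

static size_t sized_ksize(const void *p)
{
	const struct hdr *h = (const struct hdr *)((const char *)p - HDR_ALIGN);
	return h->size;
}

static void sized_free(void *p)
{
	free((char *)p - HDR_ALIGN);
}

int main(void)
{
	void *p = sized_alloc(100);

	if (!p)
		return 1;
	memset(p, 0, 100);
	printf("recorded size: %zu\n", sized_ksize(p));	/* prints 100 */
	sized_free(p);
	return 0;
}
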
66766 diff -urNp linux-3.0.4/mm/slub.c linux-3.0.4/mm/slub.c
66767 --- linux-3.0.4/mm/slub.c       2011-07-21 22:17:23.000000000 -0400
66768 +++ linux-3.0.4/mm/slub.c       2011-08-23 21:48:14.000000000 -0400
66769 @@ -442,7 +442,7 @@ static void print_track(const char *s, s
66770         if (!t->addr)
66771                 return;
66772  
66773 -       printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
66774 +       printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
66775                 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
66776  }
66777  
66778 @@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
66779  
66780         page = virt_to_head_page(x);
66781  
66782 +       BUG_ON(!PageSlab(page));
66783 +
66784         slab_free(s, page, x, _RET_IP_);
66785  
66786         trace_kmem_cache_free(_RET_IP_, x);
66787 @@ -2170,7 +2172,7 @@ static int slub_min_objects;
66788   * Merge control. If this is set then no merging of slab caches will occur.
66789   * (Could be removed. This was introduced to pacify the merge skeptics.)
66790   */
66791 -static int slub_nomerge;
66792 +static int slub_nomerge = 1;
66793  
66794  /*
66795   * Calculate the order of allocation given an slab object size.
66796 @@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
66797          * list to avoid pounding the page allocator excessively.
66798          */
66799         set_min_partial(s, ilog2(s->size));
66800 -       s->refcount = 1;
66801 +       atomic_set(&s->refcount, 1);
66802  #ifdef CONFIG_NUMA
66803         s->remote_node_defrag_ratio = 1000;
66804  #endif
66805 @@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
66806  void kmem_cache_destroy(struct kmem_cache *s)
66807  {
66808         down_write(&slub_lock);
66809 -       s->refcount--;
66810 -       if (!s->refcount) {
66811 +       if (atomic_dec_and_test(&s->refcount)) {
66812                 list_del(&s->list);
66813                 if (kmem_cache_close(s)) {
66814                         printk(KERN_ERR "SLUB %s: %s called for cache that "
66815 @@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t 
66816  EXPORT_SYMBOL(__kmalloc_node);
66817  #endif
66818  
66819 +void check_object_size(const void *ptr, unsigned long n, bool to)
66820 +{
66821 +
66822 +#ifdef CONFIG_PAX_USERCOPY
66823 +       struct page *page;
66824 +       struct kmem_cache *s = NULL;
66825 +       unsigned long offset;
66826 +
66827 +       if (!n)
66828 +               return;
66829 +
66830 +       if (ZERO_OR_NULL_PTR(ptr))
66831 +               goto report;
66832 +
66833 +       if (!virt_addr_valid(ptr))
66834 +               return;
66835 +
66836 +       page = virt_to_head_page(ptr);
66837 +
66838 +       if (!PageSlab(page)) {
66839 +               if (object_is_on_stack(ptr, n) == -1)
66840 +                       goto report;
66841 +               return;
66842 +       }
66843 +
66844 +       s = page->slab;
66845 +       if (!(s->flags & SLAB_USERCOPY))
66846 +               goto report;
66847 +
66848 +       offset = (ptr - page_address(page)) % s->size;
66849 +       if (offset <= s->objsize && n <= s->objsize - offset)
66850 +               return;
66851 +
66852 +report:
66853 +       pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
66854 +#endif
66855 +
66856 +}
66857 +EXPORT_SYMBOL(check_object_size);
66858 +
66859  size_t ksize(const void *object)
66860  {
66861         struct page *page;
66862 @@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
66863         int node;
66864  
66865         list_add(&s->list, &slab_caches);
66866 -       s->refcount = -1;
66867 +       atomic_set(&s->refcount, -1);
66868  
66869         for_each_node_state(node, N_NORMAL_MEMORY) {
66870                 struct kmem_cache_node *n = get_node(s, node);
66871 @@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
66872  
66873         /* Caches that are not of the two-to-the-power-of size */
66874         if (KMALLOC_MIN_SIZE <= 32) {
66875 -               kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
66876 +               kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
66877                 caches++;
66878         }
66879  
66880         if (KMALLOC_MIN_SIZE <= 64) {
66881 -               kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
66882 +               kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
66883                 caches++;
66884         }
66885  
66886         for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
66887 -               kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
66888 +               kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
66889                 caches++;
66890         }
66891  
66892 @@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
66893         /*
66894          * We may have set a slab to be unmergeable during bootstrap.
66895          */
66896 -       if (s->refcount < 0)
66897 +       if (atomic_read(&s->refcount) < 0)
66898                 return 1;
66899  
66900         return 0;
66901 @@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
66902         down_write(&slub_lock);
66903         s = find_mergeable(size, align, flags, name, ctor);
66904         if (s) {
66905 -               s->refcount++;
66906 +               atomic_inc(&s->refcount);
66907                 /*
66908                  * Adjust the object sizes so that we clear
66909                  * the complete object on kzalloc.
66910 @@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
66911                 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
66912  
66913                 if (sysfs_slab_alias(s, name)) {
66914 -                       s->refcount--;
66915 +                       atomic_dec(&s->refcount);
66916                         goto err;
66917                 }
66918                 up_write(&slub_lock);
66919 @@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
66920  
66921  static ssize_t aliases_show(struct kmem_cache *s, char *buf)
66922  {
66923 -       return sprintf(buf, "%d\n", s->refcount - 1);
66924 +       return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
66925  }
66926  SLAB_ATTR_RO(aliases);
66927  
66928 @@ -4894,7 +4935,13 @@ static const struct file_operations proc
66929  
66930  static int __init slab_proc_init(void)
66931  {
66932 -       proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
66933 +       mode_t gr_mode = S_IRUGO;
66934 +
66935 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66936 +       gr_mode = S_IRUSR;
66937 +#endif
66938 +
66939 +       proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
66940         return 0;
66941  }
66942  module_init(slab_proc_init);
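
The check_object_size() routine added to mm/slub.c above accepts a user copy into or out of a SLAB_USERCOPY-whitelisted cache only if it stays within a single object: offset = (ptr - page_address(page)) % s->size, then offset <= s->objsize and n <= s->objsize - offset. A minimal standalone restatement of just that arithmetic, with made-up layout values (not part of the patch):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Restatement of the bounds test in check_object_size() above:
     * stride plays the role of s->size (object plus metadata), obj_size
     * of s->objsize (usable part), base of page_address(page).  The
     * numbers in main() are invented for the example.
     */
    static bool usercopy_in_bounds(unsigned long base, unsigned long stride,
                                   unsigned long obj_size,
                                   unsigned long ptr, unsigned long n)
    {
            unsigned long offset = (ptr - base) % stride;

            return offset <= obj_size && n <= obj_size - offset;
    }

    int main(void)
    {
            unsigned long base = 0x1000, stride = 256, obj_size = 192;

            /* 100 bytes starting 64 bytes into the third object: allowed */
            printf("%d\n", usercopy_in_bounds(base, stride, obj_size,
                                              base + 2 * stride + 64, 100));
            /* 160 bytes from the same offset would leave the object: rejected */
            printf("%d\n", usercopy_in_bounds(base, stride, obj_size,
                                              base + 2 * stride + 64, 160));
            return 0;
    }
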
66943 diff -urNp linux-3.0.4/mm/swap.c linux-3.0.4/mm/swap.c
66944 --- linux-3.0.4/mm/swap.c       2011-07-21 22:17:23.000000000 -0400
66945 +++ linux-3.0.4/mm/swap.c       2011-08-23 21:47:56.000000000 -0400
66946 @@ -31,6 +31,7 @@
66947  #include <linux/backing-dev.h>
66948  #include <linux/memcontrol.h>
66949  #include <linux/gfp.h>
66950 +#include <linux/hugetlb.h>
66951  
66952  #include "internal.h"
66953  
66954 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
66955  
66956         __page_cache_release(page);
66957         dtor = get_compound_page_dtor(page);
66958 +       if (!PageHuge(page))
66959 +               BUG_ON(dtor != free_compound_page);
66960         (*dtor)(page);
66961  }
66962  
66963 diff -urNp linux-3.0.4/mm/swapfile.c linux-3.0.4/mm/swapfile.c
66964 --- linux-3.0.4/mm/swapfile.c   2011-07-21 22:17:23.000000000 -0400
66965 +++ linux-3.0.4/mm/swapfile.c   2011-08-23 21:47:56.000000000 -0400
66966 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
66967  
66968  static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
66969  /* Activity counter to indicate that a swapon or swapoff has occurred */
66970 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
66971 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
66972  
66973  static inline unsigned char swap_count(unsigned char ent)
66974  {
66975 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
66976         }
66977         filp_close(swap_file, NULL);
66978         err = 0;
66979 -       atomic_inc(&proc_poll_event);
66980 +       atomic_inc_unchecked(&proc_poll_event);
66981         wake_up_interruptible(&proc_poll_wait);
66982  
66983  out_dput:
66984 @@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
66985  
66986         poll_wait(file, &proc_poll_wait, wait);
66987  
66988 -       if (s->event != atomic_read(&proc_poll_event)) {
66989 -               s->event = atomic_read(&proc_poll_event);
66990 +       if (s->event != atomic_read_unchecked(&proc_poll_event)) {
66991 +               s->event = atomic_read_unchecked(&proc_poll_event);
66992                 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
66993         }
66994  
66995 @@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
66996         }
66997  
66998         s->seq.private = s;
66999 -       s->event = atomic_read(&proc_poll_event);
67000 +       s->event = atomic_read_unchecked(&proc_poll_event);
67001         return ret;
67002  }
67003  
67004 @@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
67005                 (p->flags & SWP_DISCARDABLE) ? "D" : "");
67006  
67007         mutex_unlock(&swapon_mutex);
67008 -       atomic_inc(&proc_poll_event);
67009 +       atomic_inc_unchecked(&proc_poll_event);
67010         wake_up_interruptible(&proc_poll_wait);
67011  
67012         if (S_ISREG(inode->i_mode))
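
The mm/swapfile.c hunk above is one instance of a conversion repeated throughout this patch: counters whose wraparound is harmless -- here a /proc poll event counter, elsewhere sequence numbers and statistics -- are moved from atomic_t to atomic_unchecked_t so that the overflow checking PaX applies to ordinary atomics under its REFCOUNT feature does not trip on them. The fragment below is only a schematic userspace stand-in for that checked/unchecked split, not the kernel implementation:

    #include <assert.h>
    #include <limits.h>
    #include <stdio.h>

    /*
     * Schematic only: a "checked" counter refuses to wrap (standing in
     * for the REFCOUNT overflow trap), an "unchecked" one is a plain
     * counter where wraparound does not matter, e.g. an event or
     * sequence number like proc_poll_event above.
     */
    typedef struct { int v; }          checked_t;
    typedef struct { unsigned int v; } unchecked_t;

    static void checked_inc(checked_t *c)
    {
            assert(c->v != INT_MAX);   /* stand-in for the overflow trap */
            c->v++;
    }

    static void unchecked_inc(unchecked_t *c)
    {
            c->v++;                    /* free to wrap, by design */
    }

    int main(void)
    {
            checked_t refs = { 1 };
            unchecked_t events = { UINT_MAX };

            checked_inc(&refs);
            unchecked_inc(&events);    /* wraps to 0, which is fine here */
            printf("refs=%d events=%u\n", refs.v, events.v);
            return 0;
    }
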
67013 diff -urNp linux-3.0.4/mm/util.c linux-3.0.4/mm/util.c
67014 --- linux-3.0.4/mm/util.c       2011-07-21 22:17:23.000000000 -0400
67015 +++ linux-3.0.4/mm/util.c       2011-08-23 21:47:56.000000000 -0400
67016 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
67017   * allocated buffer. Use this if you don't want to free the buffer immediately
67018   * like, for example, with RCU.
67019   */
67020 +#undef __krealloc
67021  void *__krealloc(const void *p, size_t new_size, gfp_t flags)
67022  {
67023         void *ret;
67024 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
67025   * behaves exactly like kmalloc().  If @size is 0 and @p is not a
67026   * %NULL pointer, the object pointed to is freed.
67027   */
67028 +#undef krealloc
67029  void *krealloc(const void *p, size_t new_size, gfp_t flags)
67030  {
67031         void *ret;
67032 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
67033  void arch_pick_mmap_layout(struct mm_struct *mm)
67034  {
67035         mm->mmap_base = TASK_UNMAPPED_BASE;
67036 +
67037 +#ifdef CONFIG_PAX_RANDMMAP
67038 +       if (mm->pax_flags & MF_PAX_RANDMMAP)
67039 +               mm->mmap_base += mm->delta_mmap;
67040 +#endif
67041 +
67042         mm->get_unmapped_area = arch_get_unmapped_area;
67043         mm->unmap_area = arch_unmap_area;
67044  }
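
With the mm/util.c change above, arch_pick_mmap_layout() no longer leaves the mmap base at the fixed TASK_UNMAPPED_BASE when PAX_RANDMMAP is active, but shifts it by the per-process mm->delta_mmap set up elsewhere in the patch. How that delta is derived is not shown in this excerpt; the sketch below simply assumes a page-aligned random offset of bounded width and shows the resulting base computation (illustration only):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT          12
    #define TASK_UNMAPPED_BASE  0x40000000UL   /* illustrative value */
    #define DELTA_MMAP_BITS     16             /* assumed entropy width */

    /*
     * Assumption: delta_mmap is a page-aligned random offset covering at
     * most DELTA_MMAP_BITS pages, added to the fixed base.  The real
     * kernel draws the randomness from its own RNG at exec time; rand()
     * here is only for the standalone example.
     */
    static unsigned long pick_mmap_base(void)
    {
            unsigned long pages = (unsigned long)rand() &
                                  ((1UL << DELTA_MMAP_BITS) - 1);

            return TASK_UNMAPPED_BASE + (pages << PAGE_SHIFT);
    }

    int main(void)
    {
            printf("mmap base: %#lx\n", pick_mmap_base());
            return 0;
    }
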
67045 diff -urNp linux-3.0.4/mm/vmalloc.c linux-3.0.4/mm/vmalloc.c
67046 --- linux-3.0.4/mm/vmalloc.c    2011-08-23 21:44:40.000000000 -0400
67047 +++ linux-3.0.4/mm/vmalloc.c    2011-08-23 21:47:56.000000000 -0400
67048 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
67049  
67050         pte = pte_offset_kernel(pmd, addr);
67051         do {
67052 -               pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67053 -               WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67054 +
67055 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67056 +               if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
67057 +                       BUG_ON(!pte_exec(*pte));
67058 +                       set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
67059 +                       continue;
67060 +               }
67061 +#endif
67062 +
67063 +               {
67064 +                       pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67065 +                       WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67066 +               }
67067         } while (pte++, addr += PAGE_SIZE, addr != end);
67068  }
67069  
67070 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
67071                 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
67072  {
67073         pte_t *pte;
67074 +       int ret = -ENOMEM;
67075  
67076         /*
67077          * nr is a running index into the array which helps higher level
67078 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
67079         pte = pte_alloc_kernel(pmd, addr);
67080         if (!pte)
67081                 return -ENOMEM;
67082 +
67083 +       pax_open_kernel();
67084         do {
67085                 struct page *page = pages[*nr];
67086  
67087 -               if (WARN_ON(!pte_none(*pte)))
67088 -                       return -EBUSY;
67089 -               if (WARN_ON(!page))
67090 -                       return -ENOMEM;
67091 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67092 +               if (pgprot_val(prot) & _PAGE_NX)
67093 +#endif
67094 +
67095 +               if (WARN_ON(!pte_none(*pte))) {
67096 +                       ret = -EBUSY;
67097 +                       goto out;
67098 +               }
67099 +               if (WARN_ON(!page)) {
67100 +                       ret = -ENOMEM;
67101 +                       goto out;
67102 +               }
67103                 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
67104                 (*nr)++;
67105         } while (pte++, addr += PAGE_SIZE, addr != end);
67106 -       return 0;
67107 +       ret = 0;
67108 +out:
67109 +       pax_close_kernel();
67110 +       return ret;
67111  }
67112  
67113  static int vmap_pmd_range(pud_t *pud, unsigned long addr,
67114 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
67115          * and fall back on vmalloc() if that fails. Others
67116          * just put it in the vmalloc space.
67117          */
67118 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
67119 +#ifdef CONFIG_MODULES
67120 +#ifdef MODULES_VADDR
67121         unsigned long addr = (unsigned long)x;
67122         if (addr >= MODULES_VADDR && addr < MODULES_END)
67123                 return 1;
67124  #endif
67125 +
67126 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67127 +       if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
67128 +               return 1;
67129 +#endif
67130 +
67131 +#endif
67132 +
67133         return is_vmalloc_addr(x);
67134  }
67135  
67136 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void 
67137  
67138         if (!pgd_none(*pgd)) {
67139                 pud_t *pud = pud_offset(pgd, addr);
67140 +#ifdef CONFIG_X86
67141 +               if (!pud_large(*pud))
67142 +#endif
67143                 if (!pud_none(*pud)) {
67144                         pmd_t *pmd = pmd_offset(pud, addr);
67145 +#ifdef CONFIG_X86
67146 +                       if (!pmd_large(*pmd))
67147 +#endif
67148                         if (!pmd_none(*pmd)) {
67149                                 pte_t *ptep, pte;
67150  
67151 @@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
67152         struct vm_struct *area;
67153  
67154         BUG_ON(in_interrupt());
67155 +
67156 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67157 +       if (flags & VM_KERNEXEC) {
67158 +               if (start != VMALLOC_START || end != VMALLOC_END)
67159 +                       return NULL;
67160 +               start = (unsigned long)MODULES_EXEC_VADDR;
67161 +               end = (unsigned long)MODULES_EXEC_END;
67162 +       }
67163 +#endif
67164 +
67165         if (flags & VM_IOREMAP) {
67166                 int bit = fls(size);
67167  
67168 @@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
67169         if (count > totalram_pages)
67170                 return NULL;
67171  
67172 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67173 +       if (!(pgprot_val(prot) & _PAGE_NX))
67174 +               flags |= VM_KERNEXEC;
67175 +#endif
67176 +
67177         area = get_vm_area_caller((count << PAGE_SHIFT), flags,
67178                                         __builtin_return_address(0));
67179         if (!area)
67180 @@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
67181         if (!size || (size >> PAGE_SHIFT) > totalram_pages)
67182                 return NULL;
67183  
67184 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67185 +       if (!(pgprot_val(prot) & _PAGE_NX))
67186 +               area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
67187 +                                               node, gfp_mask, caller);
67188 +       else
67189 +#endif
67190 +
67191         area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
67192                                   gfp_mask, caller);
67193  
67194 @@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
67195                                 gfp_mask, prot, node, caller);
67196  }
67197  
67198 +#undef __vmalloc
67199  void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
67200  {
67201         return __vmalloc_node(size, 1, gfp_mask, prot, -1,
67202 @@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
67203   *     For tight control over page level allocator and protection flags
67204   *     use __vmalloc() instead.
67205   */
67206 +#undef vmalloc
67207  void *vmalloc(unsigned long size)
67208  {
67209         return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
67210 @@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
67211   *     For tight control over page level allocator and protection flags
67212   *     use __vmalloc() instead.
67213   */
67214 +#undef vzalloc
67215  void *vzalloc(unsigned long size)
67216  {
67217         return __vmalloc_node_flags(size, -1,
67218 @@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
67219   * The resulting memory area is zeroed so it can be mapped to userspace
67220   * without leaking data.
67221   */
67222 +#undef vmalloc_user
67223  void *vmalloc_user(unsigned long size)
67224  {
67225         struct vm_struct *area;
67226 @@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
67227   *     For tight control over page level allocator and protection flags
67228   *     use __vmalloc() instead.
67229   */
67230 +#undef vmalloc_node
67231  void *vmalloc_node(unsigned long size, int node)
67232  {
67233         return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67234 @@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
67235   * For tight control over page level allocator and protection flags
67236   * use __vmalloc_node() instead.
67237   */
67238 +#undef vzalloc_node
67239  void *vzalloc_node(unsigned long size, int node)
67240  {
67241         return __vmalloc_node_flags(size, node,
67242 @@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
67243   *     For tight control over page level allocator and protection flags
67244   *     use __vmalloc() instead.
67245   */
67246 -
67247 +#undef vmalloc_exec
67248  void *vmalloc_exec(unsigned long size)
67249  {
67250 -       return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
67251 +       return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
67252                               -1, __builtin_return_address(0));
67253  }
67254  
67255 @@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
67256   *     Allocate enough 32bit PA addressable pages to cover @size from the
67257   *     page level allocator and map them into contiguous kernel virtual space.
67258   */
67259 +#undef vmalloc_32
67260  void *vmalloc_32(unsigned long size)
67261  {
67262         return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
67263 @@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
67264   * The resulting memory area is 32bit addressable and zeroed so it can be
67265   * mapped to userspace without leaking data.
67266   */
67267 +#undef vmalloc_32_user
67268  void *vmalloc_32_user(unsigned long size)
67269  {
67270         struct vm_struct *area;
67271 @@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
67272         unsigned long uaddr = vma->vm_start;
67273         unsigned long usize = vma->vm_end - vma->vm_start;
67274  
67275 +       BUG_ON(vma->vm_mirror);
67276 +
67277         if ((PAGE_SIZE-1) & (unsigned long)addr)
67278                 return -EINVAL;
67279  
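
In the mm/vmalloc.c hunks above, PAX_KERNEXEC routes executable mappings away from the general vmalloc space: allocations whose protection lacks _PAGE_NX are tagged VM_KERNEXEC, __get_vm_area_node() then places them in the MODULES_EXEC_VADDR..MODULES_EXEC_END window, and pax_open_kernel()/pax_close_kernel() bracket the page-table writes. The range-selection decision on its own, with invented addresses rather than the real, architecture-specific values:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Illustrative address windows only -- the real constants are
     * architecture-specific and defined elsewhere in the patch.
     */
    #define VMALLOC_START       0xc0000000UL
    #define VMALLOC_END         0xf0000000UL
    #define MODULES_EXEC_VADDR  0xf0000000UL
    #define MODULES_EXEC_END    0xff000000UL

    struct va_range { unsigned long start, end; };

    /*
     * Mirrors the VM_KERNEXEC routing above: executable mappings are
     * confined to the module window, everything else stays in the
     * ordinary vmalloc range.
     */
    static struct va_range pick_range(bool executable)
    {
            if (executable)
                    return (struct va_range){ MODULES_EXEC_VADDR,
                                              MODULES_EXEC_END };
            return (struct va_range){ VMALLOC_START, VMALLOC_END };
    }

    int main(void)
    {
            struct va_range r = pick_range(true);

            printf("exec range: %#lx-%#lx\n", r.start, r.end);
            return 0;
    }
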
67280 diff -urNp linux-3.0.4/mm/vmstat.c linux-3.0.4/mm/vmstat.c
67281 --- linux-3.0.4/mm/vmstat.c     2011-07-21 22:17:23.000000000 -0400
67282 +++ linux-3.0.4/mm/vmstat.c     2011-08-23 21:48:14.000000000 -0400
67283 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
67284   *
67285   * vm_stat contains the global counters
67286   */
67287 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67288 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67289  EXPORT_SYMBOL(vm_stat);
67290  
67291  #ifdef CONFIG_SMP
67292 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
67293                                 v = p->vm_stat_diff[i];
67294                                 p->vm_stat_diff[i] = 0;
67295                                 local_irq_restore(flags);
67296 -                               atomic_long_add(v, &zone->vm_stat[i]);
67297 +                               atomic_long_add_unchecked(v, &zone->vm_stat[i]);
67298                                 global_diff[i] += v;
67299  #ifdef CONFIG_NUMA
67300                                 /* 3 seconds idle till flush */
67301 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
67302  
67303         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
67304                 if (global_diff[i])
67305 -                       atomic_long_add(global_diff[i], &vm_stat[i]);
67306 +                       atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
67307  }
67308  
67309  #endif
67310 @@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
67311                 start_cpu_timer(cpu);
67312  #endif
67313  #ifdef CONFIG_PROC_FS
67314 -       proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
67315 -       proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
67316 -       proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
67317 -       proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
67318 +       {
67319 +               mode_t gr_mode = S_IRUGO;
67320 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67321 +               gr_mode = S_IRUSR;
67322 +#endif
67323 +               proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
67324 +               proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
67325 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67326 +               proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
67327 +#else
67328 +               proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
67329 +#endif
67330 +               proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
67331 +       }
67332  #endif
67333         return 0;
67334  }
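
The mm/vmstat.c hunk above follows the same pattern as the slabinfo change earlier in this excerpt: /proc entries that are world-readable in mainline become owner-only under GRKERNSEC_PROC_ADD, and GRKERNSEC_PROC_USERGROUP selectively restores group read for files such as vmstat. A compact userspace sketch of just that mode selection, with the Kconfig symbols mocked as plain macros:

    #include <stdio.h>
    #include <sys/stat.h>

    #define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)

    /* Stand-ins for the Kconfig switches referenced above. */
    #define GRKERNSEC_PROC_ADD        1
    #define GRKERNSEC_PROC_USERGROUP  1

    /*
     * Mirrors the gr_mode selection in setup_vmstat()/slab_proc_init():
     * world-readable by default, owner-only under PROC_ADD, group read
     * restored for selected files (vmstat) under USERGROUP.
     */
    static mode_t proc_mode(int group_readable)
    {
            mode_t mode = S_IRUGO;

    #if GRKERNSEC_PROC_ADD
            mode = S_IRUSR;
    #endif
    #if GRKERNSEC_PROC_USERGROUP
            if (group_readable)
                    mode |= S_IRGRP;
    #endif
            return mode;
    }

    int main(void)
    {
            printf("slabinfo: %04o  vmstat: %04o\n",
                   (unsigned int)proc_mode(0), (unsigned int)proc_mode(1));
            return 0;
    }
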
67335 diff -urNp linux-3.0.4/net/8021q/vlan.c linux-3.0.4/net/8021q/vlan.c
67336 --- linux-3.0.4/net/8021q/vlan.c        2011-07-21 22:17:23.000000000 -0400
67337 +++ linux-3.0.4/net/8021q/vlan.c        2011-08-23 21:47:56.000000000 -0400
67338 @@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
67339                 err = -EPERM;
67340                 if (!capable(CAP_NET_ADMIN))
67341                         break;
67342 -               if ((args.u.name_type >= 0) &&
67343 -                   (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
67344 +               if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
67345                         struct vlan_net *vn;
67346  
67347                         vn = net_generic(net, vlan_net_id);
67348 diff -urNp linux-3.0.4/net/atm/atm_misc.c linux-3.0.4/net/atm/atm_misc.c
67349 --- linux-3.0.4/net/atm/atm_misc.c      2011-07-21 22:17:23.000000000 -0400
67350 +++ linux-3.0.4/net/atm/atm_misc.c      2011-08-23 21:47:56.000000000 -0400
67351 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int 
67352         if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
67353                 return 1;
67354         atm_return(vcc, truesize);
67355 -       atomic_inc(&vcc->stats->rx_drop);
67356 +       atomic_inc_unchecked(&vcc->stats->rx_drop);
67357         return 0;
67358  }
67359  EXPORT_SYMBOL(atm_charge);
67360 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct 
67361                 }
67362         }
67363         atm_return(vcc, guess);
67364 -       atomic_inc(&vcc->stats->rx_drop);
67365 +       atomic_inc_unchecked(&vcc->stats->rx_drop);
67366         return NULL;
67367  }
67368  EXPORT_SYMBOL(atm_alloc_charge);
67369 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
67370  
67371  void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
67372  {
67373 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67374 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67375         __SONET_ITEMS
67376  #undef __HANDLE_ITEM
67377  }
67378 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
67379  
67380  void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
67381  {
67382 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67383 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
67384         __SONET_ITEMS
67385  #undef __HANDLE_ITEM
67386  }
67387 diff -urNp linux-3.0.4/net/atm/lec.h linux-3.0.4/net/atm/lec.h
67388 --- linux-3.0.4/net/atm/lec.h   2011-07-21 22:17:23.000000000 -0400
67389 +++ linux-3.0.4/net/atm/lec.h   2011-08-23 21:47:56.000000000 -0400
67390 @@ -48,7 +48,7 @@ struct lane2_ops {
67391                               const u8 *tlvs, u32 sizeoftlvs);
67392         void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
67393                                      const u8 *tlvs, u32 sizeoftlvs);
67394 -};
67395 +} __no_const;
67396  
67397  /*
67398   * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
67399 diff -urNp linux-3.0.4/net/atm/mpc.h linux-3.0.4/net/atm/mpc.h
67400 --- linux-3.0.4/net/atm/mpc.h   2011-07-21 22:17:23.000000000 -0400
67401 +++ linux-3.0.4/net/atm/mpc.h   2011-08-23 21:47:56.000000000 -0400
67402 @@ -33,7 +33,7 @@ struct mpoa_client {
67403         struct mpc_parameters parameters;  /* parameters for this client    */
67404  
67405         const struct net_device_ops *old_ops;
67406 -       struct net_device_ops new_ops;
67407 +       net_device_ops_no_const new_ops;
67408  };
67409  
67410  
67411 diff -urNp linux-3.0.4/net/atm/mpoa_caches.c linux-3.0.4/net/atm/mpoa_caches.c
67412 --- linux-3.0.4/net/atm/mpoa_caches.c   2011-07-21 22:17:23.000000000 -0400
67413 +++ linux-3.0.4/net/atm/mpoa_caches.c   2011-08-23 21:48:14.000000000 -0400
67414 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
67415         struct timeval now;
67416         struct k_message msg;
67417  
67418 +       pax_track_stack();
67419 +
67420         do_gettimeofday(&now);
67421  
67422         read_lock_bh(&client->ingress_lock);
67423 diff -urNp linux-3.0.4/net/atm/proc.c linux-3.0.4/net/atm/proc.c
67424 --- linux-3.0.4/net/atm/proc.c  2011-07-21 22:17:23.000000000 -0400
67425 +++ linux-3.0.4/net/atm/proc.c  2011-08-23 21:47:56.000000000 -0400
67426 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
67427    const struct k_atm_aal_stats *stats)
67428  {
67429         seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
67430 -                  atomic_read(&stats->tx), atomic_read(&stats->tx_err),
67431 -                  atomic_read(&stats->rx), atomic_read(&stats->rx_err),
67432 -                  atomic_read(&stats->rx_drop));
67433 +                  atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
67434 +                  atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
67435 +                  atomic_read_unchecked(&stats->rx_drop));
67436  }
67437  
67438  static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
67439 diff -urNp linux-3.0.4/net/atm/resources.c linux-3.0.4/net/atm/resources.c
67440 --- linux-3.0.4/net/atm/resources.c     2011-07-21 22:17:23.000000000 -0400
67441 +++ linux-3.0.4/net/atm/resources.c     2011-08-23 21:47:56.000000000 -0400
67442 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
67443  static void copy_aal_stats(struct k_atm_aal_stats *from,
67444      struct atm_aal_stats *to)
67445  {
67446 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67447 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67448         __AAL_STAT_ITEMS
67449  #undef __HANDLE_ITEM
67450  }
67451 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
67452  static void subtract_aal_stats(struct k_atm_aal_stats *from,
67453      struct atm_aal_stats *to)
67454  {
67455 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67456 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
67457         __AAL_STAT_ITEMS
67458  #undef __HANDLE_ITEM
67459  }
67460 diff -urNp linux-3.0.4/net/batman-adv/hard-interface.c linux-3.0.4/net/batman-adv/hard-interface.c
67461 --- linux-3.0.4/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
67462 +++ linux-3.0.4/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
67463 @@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
67464         hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
67465         dev_add_pack(&hard_iface->batman_adv_ptype);
67466  
67467 -       atomic_set(&hard_iface->seqno, 1);
67468 -       atomic_set(&hard_iface->frag_seqno, 1);
67469 +       atomic_set_unchecked(&hard_iface->seqno, 1);
67470 +       atomic_set_unchecked(&hard_iface->frag_seqno, 1);
67471         bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
67472                  hard_iface->net_dev->name);
67473  
67474 diff -urNp linux-3.0.4/net/batman-adv/routing.c linux-3.0.4/net/batman-adv/routing.c
67475 --- linux-3.0.4/net/batman-adv/routing.c        2011-07-21 22:17:23.000000000 -0400
67476 +++ linux-3.0.4/net/batman-adv/routing.c        2011-08-23 21:47:56.000000000 -0400
67477 @@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
67478                 return;
67479  
67480         /* could be changed by schedule_own_packet() */
67481 -       if_incoming_seqno = atomic_read(&if_incoming->seqno);
67482 +       if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
67483  
67484         has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
67485  
67486 diff -urNp linux-3.0.4/net/batman-adv/send.c linux-3.0.4/net/batman-adv/send.c
67487 --- linux-3.0.4/net/batman-adv/send.c   2011-07-21 22:17:23.000000000 -0400
67488 +++ linux-3.0.4/net/batman-adv/send.c   2011-08-23 21:47:56.000000000 -0400
67489 @@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
67490  
67491         /* change sequence number to network order */
67492         batman_packet->seqno =
67493 -               htonl((uint32_t)atomic_read(&hard_iface->seqno));
67494 +               htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
67495  
67496         if (vis_server == VIS_TYPE_SERVER_SYNC)
67497                 batman_packet->flags |= VIS_SERVER;
67498 @@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
67499         else
67500                 batman_packet->gw_flags = 0;
67501  
67502 -       atomic_inc(&hard_iface->seqno);
67503 +       atomic_inc_unchecked(&hard_iface->seqno);
67504  
67505         slide_own_bcast_window(hard_iface);
67506         send_time = own_send_time(bat_priv);
67507 diff -urNp linux-3.0.4/net/batman-adv/soft-interface.c linux-3.0.4/net/batman-adv/soft-interface.c
67508 --- linux-3.0.4/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
67509 +++ linux-3.0.4/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
67510 @@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
67511  
67512                 /* set broadcast sequence number */
67513                 bcast_packet->seqno =
67514 -                       htonl(atomic_inc_return(&bat_priv->bcast_seqno));
67515 +                       htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
67516  
67517                 add_bcast_packet_to_list(bat_priv, skb);
67518  
67519 @@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
67520         atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
67521  
67522         atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
67523 -       atomic_set(&bat_priv->bcast_seqno, 1);
67524 +       atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
67525         atomic_set(&bat_priv->tt_local_changed, 0);
67526  
67527         bat_priv->primary_if = NULL;
67528 diff -urNp linux-3.0.4/net/batman-adv/types.h linux-3.0.4/net/batman-adv/types.h
67529 --- linux-3.0.4/net/batman-adv/types.h  2011-07-21 22:17:23.000000000 -0400
67530 +++ linux-3.0.4/net/batman-adv/types.h  2011-08-23 21:47:56.000000000 -0400
67531 @@ -38,8 +38,8 @@ struct hard_iface {
67532         int16_t if_num;
67533         char if_status;
67534         struct net_device *net_dev;
67535 -       atomic_t seqno;
67536 -       atomic_t frag_seqno;
67537 +       atomic_unchecked_t seqno;
67538 +       atomic_unchecked_t frag_seqno;
67539         unsigned char *packet_buff;
67540         int packet_len;
67541         struct kobject *hardif_obj;
67542 @@ -142,7 +142,7 @@ struct bat_priv {
67543         atomic_t orig_interval;         /* uint */
67544         atomic_t hop_penalty;           /* uint */
67545         atomic_t log_level;             /* uint */
67546 -       atomic_t bcast_seqno;
67547 +       atomic_unchecked_t bcast_seqno;
67548         atomic_t bcast_queue_left;
67549         atomic_t batman_queue_left;
67550         char num_ifaces;
67551 diff -urNp linux-3.0.4/net/batman-adv/unicast.c linux-3.0.4/net/batman-adv/unicast.c
67552 --- linux-3.0.4/net/batman-adv/unicast.c        2011-07-21 22:17:23.000000000 -0400
67553 +++ linux-3.0.4/net/batman-adv/unicast.c        2011-08-23 21:47:56.000000000 -0400
67554 @@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
67555         frag1->flags = UNI_FRAG_HEAD | large_tail;
67556         frag2->flags = large_tail;
67557  
67558 -       seqno = atomic_add_return(2, &hard_iface->frag_seqno);
67559 +       seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
67560         frag1->seqno = htons(seqno - 1);
67561         frag2->seqno = htons(seqno);
67562  
67563 diff -urNp linux-3.0.4/net/bridge/br_multicast.c linux-3.0.4/net/bridge/br_multicast.c
67564 --- linux-3.0.4/net/bridge/br_multicast.c       2011-07-21 22:17:23.000000000 -0400
67565 +++ linux-3.0.4/net/bridge/br_multicast.c       2011-08-23 21:47:56.000000000 -0400
67566 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct 
67567         nexthdr = ip6h->nexthdr;
67568         offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
67569  
67570 -       if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
67571 +       if (nexthdr != IPPROTO_ICMPV6)
67572                 return 0;
67573  
67574         /* Okay, we found ICMPv6 header */
67575 diff -urNp linux-3.0.4/net/bridge/netfilter/ebtables.c linux-3.0.4/net/bridge/netfilter/ebtables.c
67576 --- linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
67577 +++ linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
67578 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
67579                         tmp.valid_hooks = t->table->valid_hooks;
67580                 }
67581                 mutex_unlock(&ebt_mutex);
67582 -               if (copy_to_user(user, &tmp, *len) != 0){
67583 +               if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
67584                         BUGPRINT("c2u Didn't work\n");
67585                         ret = -EFAULT;
67586                         break;
67587 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
67588         int ret;
67589         void __user *pos;
67590  
67591 +       pax_track_stack();
67592 +
67593         memset(&tinfo, 0, sizeof(tinfo));
67594  
67595         if (cmd == EBT_SO_GET_ENTRIES) {
67596 diff -urNp linux-3.0.4/net/caif/caif_socket.c linux-3.0.4/net/caif/caif_socket.c
67597 --- linux-3.0.4/net/caif/caif_socket.c  2011-07-21 22:17:23.000000000 -0400
67598 +++ linux-3.0.4/net/caif/caif_socket.c  2011-08-23 21:47:56.000000000 -0400
67599 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
67600  #ifdef CONFIG_DEBUG_FS
67601  struct debug_fs_counter {
67602         atomic_t caif_nr_socks;
67603 -       atomic_t caif_sock_create;
67604 -       atomic_t num_connect_req;
67605 -       atomic_t num_connect_resp;
67606 -       atomic_t num_connect_fail_resp;
67607 -       atomic_t num_disconnect;
67608 -       atomic_t num_remote_shutdown_ind;
67609 -       atomic_t num_tx_flow_off_ind;
67610 -       atomic_t num_tx_flow_on_ind;
67611 -       atomic_t num_rx_flow_off;
67612 -       atomic_t num_rx_flow_on;
67613 +       atomic_unchecked_t caif_sock_create;
67614 +       atomic_unchecked_t num_connect_req;
67615 +       atomic_unchecked_t num_connect_resp;
67616 +       atomic_unchecked_t num_connect_fail_resp;
67617 +       atomic_unchecked_t num_disconnect;
67618 +       atomic_unchecked_t num_remote_shutdown_ind;
67619 +       atomic_unchecked_t num_tx_flow_off_ind;
67620 +       atomic_unchecked_t num_tx_flow_on_ind;
67621 +       atomic_unchecked_t num_rx_flow_off;
67622 +       atomic_unchecked_t num_rx_flow_on;
67623  };
67624  static struct debug_fs_counter cnt;
67625  #define        dbfs_atomic_inc(v) atomic_inc_return(v)
67626 +#define        dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
67627  #define        dbfs_atomic_dec(v) atomic_dec_return(v)
67628  #else
67629  #define        dbfs_atomic_inc(v) 0
67630 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
67631                                         atomic_read(&cf_sk->sk.sk_rmem_alloc),
67632                                         sk_rcvbuf_lowwater(cf_sk));
67633                 set_rx_flow_off(cf_sk);
67634 -               dbfs_atomic_inc(&cnt.num_rx_flow_off);
67635 +               dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
67636                 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
67637         }
67638  
67639 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
67640                 set_rx_flow_off(cf_sk);
67641                 if (net_ratelimit())
67642                         pr_debug("sending flow OFF due to rmem_schedule\n");
67643 -               dbfs_atomic_inc(&cnt.num_rx_flow_off);
67644 +               dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
67645                 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
67646         }
67647         skb->dev = NULL;
67648 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer 
67649         switch (flow) {
67650         case CAIF_CTRLCMD_FLOW_ON_IND:
67651                 /* OK from modem to start sending again */
67652 -               dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
67653 +               dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
67654                 set_tx_flow_on(cf_sk);
67655                 cf_sk->sk.sk_state_change(&cf_sk->sk);
67656                 break;
67657  
67658         case CAIF_CTRLCMD_FLOW_OFF_IND:
67659                 /* Modem asks us to shut up */
67660 -               dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
67661 +               dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
67662                 set_tx_flow_off(cf_sk);
67663                 cf_sk->sk.sk_state_change(&cf_sk->sk);
67664                 break;
67665 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer 
67666                 /* We're now connected */
67667                 caif_client_register_refcnt(&cf_sk->layer,
67668                                                 cfsk_hold, cfsk_put);
67669 -               dbfs_atomic_inc(&cnt.num_connect_resp);
67670 +               dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
67671                 cf_sk->sk.sk_state = CAIF_CONNECTED;
67672                 set_tx_flow_on(cf_sk);
67673                 cf_sk->sk.sk_state_change(&cf_sk->sk);
67674 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer 
67675  
67676         case CAIF_CTRLCMD_INIT_FAIL_RSP:
67677                 /* Connect request failed */
67678 -               dbfs_atomic_inc(&cnt.num_connect_fail_resp);
67679 +               dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
67680                 cf_sk->sk.sk_err = ECONNREFUSED;
67681                 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
67682                 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
67683 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer 
67684  
67685         case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
67686                 /* Modem has closed this connection, or device is down. */
67687 -               dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
67688 +               dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
67689                 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
67690                 cf_sk->sk.sk_err = ECONNRESET;
67691                 set_rx_flow_on(cf_sk);
67692 @@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
67693                 return;
67694  
67695         if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
67696 -                       dbfs_atomic_inc(&cnt.num_rx_flow_on);
67697 +                       dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
67698                         set_rx_flow_on(cf_sk);
67699                         caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
67700         }
67701 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
67702         /*ifindex = id of the interface.*/
67703         cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
67704  
67705 -       dbfs_atomic_inc(&cnt.num_connect_req);
67706 +       dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
67707         cf_sk->layer.receive = caif_sktrecv_cb;
67708  
67709         err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
67710 @@ -943,7 +944,7 @@ static int caif_release(struct socket *s
67711         spin_unlock_bh(&sk->sk_receive_queue.lock);
67712         sock->sk = NULL;
67713  
67714 -       dbfs_atomic_inc(&cnt.num_disconnect);
67715 +       dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
67716  
67717         WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
67718         if (cf_sk->debugfs_socket_dir != NULL)
67719 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, 
67720         cf_sk->conn_req.protocol = protocol;
67721         /* Increase the number of sockets created. */
67722         dbfs_atomic_inc(&cnt.caif_nr_socks);
67723 -       num = dbfs_atomic_inc(&cnt.caif_sock_create);
67724 +       num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
67725  #ifdef CONFIG_DEBUG_FS
67726         if (!IS_ERR(debugfsdir)) {
67727  
67728 diff -urNp linux-3.0.4/net/caif/cfctrl.c linux-3.0.4/net/caif/cfctrl.c
67729 --- linux-3.0.4/net/caif/cfctrl.c       2011-07-21 22:17:23.000000000 -0400
67730 +++ linux-3.0.4/net/caif/cfctrl.c       2011-08-23 21:48:14.000000000 -0400
67731 @@ -9,6 +9,7 @@
67732  #include <linux/stddef.h>
67733  #include <linux/spinlock.h>
67734  #include <linux/slab.h>
67735 +#include <linux/sched.h>
67736  #include <net/caif/caif_layer.h>
67737  #include <net/caif/cfpkt.h>
67738  #include <net/caif/cfctrl.h>
67739 @@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
67740         dev_info.id = 0xff;
67741         memset(this, 0, sizeof(*this));
67742         cfsrvl_init(&this->serv, 0, &dev_info, false);
67743 -       atomic_set(&this->req_seq_no, 1);
67744 -       atomic_set(&this->rsp_seq_no, 1);
67745 +       atomic_set_unchecked(&this->req_seq_no, 1);
67746 +       atomic_set_unchecked(&this->rsp_seq_no, 1);
67747         this->serv.layer.receive = cfctrl_recv;
67748         sprintf(this->serv.layer.name, "ctrl");
67749         this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
67750 @@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
67751                               struct cfctrl_request_info *req)
67752  {
67753         spin_lock_bh(&ctrl->info_list_lock);
67754 -       atomic_inc(&ctrl->req_seq_no);
67755 -       req->sequence_no = atomic_read(&ctrl->req_seq_no);
67756 +       atomic_inc_unchecked(&ctrl->req_seq_no);
67757 +       req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
67758         list_add_tail(&req->list, &ctrl->list);
67759         spin_unlock_bh(&ctrl->info_list_lock);
67760  }
67761 @@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
67762                         if (p != first)
67763                                 pr_warn("Requests are not received in order\n");
67764  
67765 -                       atomic_set(&ctrl->rsp_seq_no,
67766 +                       atomic_set_unchecked(&ctrl->rsp_seq_no,
67767                                          p->sequence_no);
67768                         list_del(&p->list);
67769                         goto out;
67770 @@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
67771         struct cfctrl *cfctrl = container_obj(layer);
67772         struct cfctrl_request_info rsp, *req;
67773  
67774 +       pax_track_stack();
67775  
67776         cfpkt_extr_head(pkt, &cmdrsp, 1);
67777         cmd = cmdrsp & CFCTRL_CMD_MASK;
67778 diff -urNp linux-3.0.4/net/core/datagram.c linux-3.0.4/net/core/datagram.c
67779 --- linux-3.0.4/net/core/datagram.c     2011-07-21 22:17:23.000000000 -0400
67780 +++ linux-3.0.4/net/core/datagram.c     2011-08-23 21:47:56.000000000 -0400
67781 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
67782         }
67783  
67784         kfree_skb(skb);
67785 -       atomic_inc(&sk->sk_drops);
67786 +       atomic_inc_unchecked(&sk->sk_drops);
67787         sk_mem_reclaim_partial(sk);
67788  
67789         return err;
67790 diff -urNp linux-3.0.4/net/core/dev.c linux-3.0.4/net/core/dev.c
67791 --- linux-3.0.4/net/core/dev.c  2011-07-21 22:17:23.000000000 -0400
67792 +++ linux-3.0.4/net/core/dev.c  2011-08-23 21:48:14.000000000 -0400
67793 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
67794         if (no_module && capable(CAP_NET_ADMIN))
67795                 no_module = request_module("netdev-%s", name);
67796         if (no_module && capable(CAP_SYS_MODULE)) {
67797 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67798 +               ___request_module(true, "grsec_modharden_netdev", "%s", name);
67799 +#else
67800                 if (!request_module("%s", name))
67801                         pr_err("Loading kernel module for a network device "
67802  "with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s "
67803  "instead\n", name);
67804 +#endif
67805         }
67806  }
67807  EXPORT_SYMBOL(dev_load);
67808 @@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
67809  
67810  struct dev_gso_cb {
67811         void (*destructor)(struct sk_buff *skb);
67812 -};
67813 +} __no_const;
67814  
67815  #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
67816  
67817 @@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
67818  }
67819  EXPORT_SYMBOL(netif_rx_ni);
67820  
67821 -static void net_tx_action(struct softirq_action *h)
67822 +static void net_tx_action(void)
67823  {
67824         struct softnet_data *sd = &__get_cpu_var(softnet_data);
67825  
67826 @@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
67827  }
67828  EXPORT_SYMBOL(netif_napi_del);
67829  
67830 -static void net_rx_action(struct softirq_action *h)
67831 +static void net_rx_action(void)
67832  {
67833         struct softnet_data *sd = &__get_cpu_var(softnet_data);
67834         unsigned long time_limit = jiffies + 2;
67835 diff -urNp linux-3.0.4/net/core/flow.c linux-3.0.4/net/core/flow.c
67836 --- linux-3.0.4/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
67837 +++ linux-3.0.4/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
67838 @@ -60,7 +60,7 @@ struct flow_cache {
67839         struct timer_list               rnd_timer;
67840  };
67841  
67842 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
67843 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
67844  EXPORT_SYMBOL(flow_cache_genid);
67845  static struct flow_cache flow_cache_global;
67846  static struct kmem_cache *flow_cachep __read_mostly;
67847 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
67848  
67849  static int flow_entry_valid(struct flow_cache_entry *fle)
67850  {
67851 -       if (atomic_read(&flow_cache_genid) != fle->genid)
67852 +       if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
67853                 return 0;
67854         if (fle->object && !fle->object->ops->check(fle->object))
67855                 return 0;
67856 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
67857                         hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
67858                         fcp->hash_count++;
67859                 }
67860 -       } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
67861 +       } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
67862                 flo = fle->object;
67863                 if (!flo)
67864                         goto ret_object;
67865 @@ -274,7 +274,7 @@ nocache:
67866         }
67867         flo = resolver(net, key, family, dir, flo, ctx);
67868         if (fle) {
67869 -               fle->genid = atomic_read(&flow_cache_genid);
67870 +               fle->genid = atomic_read_unchecked(&flow_cache_genid);
67871                 if (!IS_ERR(flo))
67872                         fle->object = flo;
67873                 else
67874 diff -urNp linux-3.0.4/net/core/rtnetlink.c linux-3.0.4/net/core/rtnetlink.c
67875 --- linux-3.0.4/net/core/rtnetlink.c    2011-07-21 22:17:23.000000000 -0400
67876 +++ linux-3.0.4/net/core/rtnetlink.c    2011-08-23 21:47:56.000000000 -0400
67877 @@ -56,7 +56,7 @@
67878  struct rtnl_link {
67879         rtnl_doit_func          doit;
67880         rtnl_dumpit_func        dumpit;
67881 -};
67882 +} __no_const;
67883  
67884  static DEFINE_MUTEX(rtnl_mutex);
67885  
67886 diff -urNp linux-3.0.4/net/core/skbuff.c linux-3.0.4/net/core/skbuff.c
67887 --- linux-3.0.4/net/core/skbuff.c       2011-07-21 22:17:23.000000000 -0400
67888 +++ linux-3.0.4/net/core/skbuff.c       2011-08-23 21:48:14.000000000 -0400
67889 @@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
67890         struct sock *sk = skb->sk;
67891         int ret = 0;
67892  
67893 +       pax_track_stack();
67894 +
67895         if (splice_grow_spd(pipe, &spd))
67896                 return -ENOMEM;
67897  
67898 diff -urNp linux-3.0.4/net/core/sock.c linux-3.0.4/net/core/sock.c
67899 --- linux-3.0.4/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
67900 +++ linux-3.0.4/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
67901 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk, 
67902          */
67903         if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
67904             (unsigned)sk->sk_rcvbuf) {
67905 -               atomic_inc(&sk->sk_drops);
67906 +               atomic_inc_unchecked(&sk->sk_drops);
67907                 return -ENOMEM;
67908         }
67909  
67910 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk, 
67911                 return err;
67912  
67913         if (!sk_rmem_schedule(sk, skb->truesize)) {
67914 -               atomic_inc(&sk->sk_drops);
67915 +               atomic_inc_unchecked(&sk->sk_drops);
67916                 return -ENOBUFS;
67917         }
67918  
67919 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk, 
67920         skb_dst_force(skb);
67921  
67922         spin_lock_irqsave(&list->lock, flags);
67923 -       skb->dropcount = atomic_read(&sk->sk_drops);
67924 +       skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
67925         __skb_queue_tail(list, skb);
67926         spin_unlock_irqrestore(&list->lock, flags);
67927  
67928 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
67929         skb->dev = NULL;
67930  
67931         if (sk_rcvqueues_full(sk, skb)) {
67932 -               atomic_inc(&sk->sk_drops);
67933 +               atomic_inc_unchecked(&sk->sk_drops);
67934                 goto discard_and_relse;
67935         }
67936         if (nested)
67937 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
67938                 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
67939         } else if (sk_add_backlog(sk, skb)) {
67940                 bh_unlock_sock(sk);
67941 -               atomic_inc(&sk->sk_drops);
67942 +               atomic_inc_unchecked(&sk->sk_drops);
67943                 goto discard_and_relse;
67944         }
67945  
67946 @@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
67947                 if (len > sizeof(peercred))
67948                         len = sizeof(peercred);
67949                 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
67950 -               if (copy_to_user(optval, &peercred, len))
67951 +               if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
67952                         return -EFAULT;
67953                 goto lenout;
67954         }
67955 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
67956                         return -ENOTCONN;
67957                 if (lv < len)
67958                         return -EINVAL;
67959 -               if (copy_to_user(optval, address, len))
67960 +               if (len > sizeof(address) || copy_to_user(optval, address, len))
67961                         return -EFAULT;
67962                 goto lenout;
67963         }
67964 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
67965  
67966         if (len > lv)
67967                 len = lv;
67968 -       if (copy_to_user(optval, &v, len))
67969 +       if (len > sizeof(v) || copy_to_user(optval, &v, len))
67970                 return -EFAULT;
67971  lenout:
67972         if (put_user(len, optlen))
67973 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
67974          */
67975         smp_wmb();
67976         atomic_set(&sk->sk_refcnt, 1);
67977 -       atomic_set(&sk->sk_drops, 0);
67978 +       atomic_set_unchecked(&sk->sk_drops, 0);
67979  }
67980  EXPORT_SYMBOL(sock_init_data);
67981  
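
The net/core/sock.c getsockopt changes above add the same guard that appears in the ebtables hunk earlier and in the DECnet sysctl handlers below: before copying a kernel object out with a caller-supplied length, lengths larger than the object are rejected, so an oversized optlen cannot leak adjacent kernel memory. A userspace analogue of that guarded copy, with the structure and values invented for the example:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct peercred { int pid, uid, gid; };     /* stand-in structure */

    /*
     * Mirrors the "len > sizeof(obj) || copy_to_user(...)" pattern above:
     * the caller's length is honoured only up to the size of the kernel
     * object being exported; anything larger fails outright.
     */
    static int copy_out(void *dst, size_t len, const struct peercred *src)
    {
            if (len > sizeof(*src))
                    return -EFAULT;             /* would read past *src */
            memcpy(dst, src, len);
            return 0;
    }

    int main(void)
    {
            struct peercred cred = { 1234, 1000, 1000 };
            char buf[64];

            printf("exact: %d  oversized: %d\n",
                   copy_out(buf, sizeof(cred), &cred),
                   copy_out(buf, sizeof(buf), &cred));
            return 0;
    }
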
67982 diff -urNp linux-3.0.4/net/decnet/sysctl_net_decnet.c linux-3.0.4/net/decnet/sysctl_net_decnet.c
67983 --- linux-3.0.4/net/decnet/sysctl_net_decnet.c  2011-07-21 22:17:23.000000000 -0400
67984 +++ linux-3.0.4/net/decnet/sysctl_net_decnet.c  2011-08-23 21:47:56.000000000 -0400
67985 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
67986  
67987         if (len > *lenp) len = *lenp;
67988  
67989 -       if (copy_to_user(buffer, addr, len))
67990 +       if (len > sizeof addr || copy_to_user(buffer, addr, len))
67991                 return -EFAULT;
67992  
67993         *lenp = len;
67994 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table 
67995  
67996         if (len > *lenp) len = *lenp;
67997  
67998 -       if (copy_to_user(buffer, devname, len))
67999 +       if (len > sizeof devname || copy_to_user(buffer, devname, len))
68000                 return -EFAULT;
68001  
68002         *lenp = len;
68003 diff -urNp linux-3.0.4/net/econet/Kconfig linux-3.0.4/net/econet/Kconfig
68004 --- linux-3.0.4/net/econet/Kconfig      2011-07-21 22:17:23.000000000 -0400
68005 +++ linux-3.0.4/net/econet/Kconfig      2011-08-23 21:48:14.000000000 -0400
68006 @@ -4,7 +4,7 @@
68007  
68008  config ECONET
68009         tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
68010 -       depends on EXPERIMENTAL && INET
68011 +       depends on EXPERIMENTAL && INET && BROKEN
68012         ---help---
68013           Econet is a fairly old and slow networking protocol mainly used by
68014           Acorn computers to access file and print servers. It uses native
68015 diff -urNp linux-3.0.4/net/ipv4/fib_frontend.c linux-3.0.4/net/ipv4/fib_frontend.c
68016 --- linux-3.0.4/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
68017 +++ linux-3.0.4/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
68018 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
68019  #ifdef CONFIG_IP_ROUTE_MULTIPATH
68020                 fib_sync_up(dev);
68021  #endif
68022 -               atomic_inc(&net->ipv4.dev_addr_genid);
68023 +               atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68024                 rt_cache_flush(dev_net(dev), -1);
68025                 break;
68026         case NETDEV_DOWN:
68027                 fib_del_ifaddr(ifa, NULL);
68028 -               atomic_inc(&net->ipv4.dev_addr_genid);
68029 +               atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68030                 if (ifa->ifa_dev->ifa_list == NULL) {
68031                         /* Last address was deleted from this interface.
68032                          * Disable IP.
68033 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
68034  #ifdef CONFIG_IP_ROUTE_MULTIPATH
68035                 fib_sync_up(dev);
68036  #endif
68037 -               atomic_inc(&net->ipv4.dev_addr_genid);
68038 +               atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68039                 rt_cache_flush(dev_net(dev), -1);
68040                 break;
68041         case NETDEV_DOWN:
68042 diff -urNp linux-3.0.4/net/ipv4/fib_semantics.c linux-3.0.4/net/ipv4/fib_semantics.c
68043 --- linux-3.0.4/net/ipv4/fib_semantics.c        2011-07-21 22:17:23.000000000 -0400
68044 +++ linux-3.0.4/net/ipv4/fib_semantics.c        2011-08-23 21:47:56.000000000 -0400
68045 @@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
68046         nh->nh_saddr = inet_select_addr(nh->nh_dev,
68047                                         nh->nh_gw,
68048                                         nh->nh_parent->fib_scope);
68049 -       nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
68050 +       nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
68051  
68052         return nh->nh_saddr;
68053  }
68054 diff -urNp linux-3.0.4/net/ipv4/inet_diag.c linux-3.0.4/net/ipv4/inet_diag.c
68055 --- linux-3.0.4/net/ipv4/inet_diag.c    2011-07-21 22:17:23.000000000 -0400
68056 +++ linux-3.0.4/net/ipv4/inet_diag.c    2011-08-23 21:48:14.000000000 -0400
68057 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
68058         r->idiag_retrans = 0;
68059  
68060         r->id.idiag_if = sk->sk_bound_dev_if;
68061 +
68062 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68063 +       r->id.idiag_cookie[0] = 0;
68064 +       r->id.idiag_cookie[1] = 0;
68065 +#else
68066         r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
68067         r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
68068 +#endif
68069  
68070         r->id.idiag_sport = inet->inet_sport;
68071         r->id.idiag_dport = inet->inet_dport;
68072 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
68073         r->idiag_family       = tw->tw_family;
68074         r->idiag_retrans      = 0;
68075         r->id.idiag_if        = tw->tw_bound_dev_if;
68076 +
68077 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68078 +       r->id.idiag_cookie[0] = 0;
68079 +       r->id.idiag_cookie[1] = 0;
68080 +#else
68081         r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
68082         r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
68083 +#endif
68084 +
68085         r->id.idiag_sport     = tw->tw_sport;
68086         r->id.idiag_dport     = tw->tw_dport;
68087         r->id.idiag_src[0]    = tw->tw_rcv_saddr;
68088 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
68089         if (sk == NULL)
68090                 goto unlock;
68091  
68092 +#ifndef CONFIG_GRKERNSEC_HIDESYM
68093         err = -ESTALE;
68094         if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
68095              req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
68096             ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
68097              (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
68098                 goto out;
68099 +#endif
68100  
68101         err = -ENOMEM;
68102         rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
68103 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
68104         r->idiag_retrans = req->retrans;
68105  
68106         r->id.idiag_if = sk->sk_bound_dev_if;
68107 +
68108 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68109 +       r->id.idiag_cookie[0] = 0;
68110 +       r->id.idiag_cookie[1] = 0;
68111 +#else
68112         r->id.idiag_cookie[0] = (u32)(unsigned long)req;
68113         r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
68114 +#endif
68115  
68116         tmo = req->expires - jiffies;
68117         if (tmo < 0)
68118 diff -urNp linux-3.0.4/net/ipv4/inet_hashtables.c linux-3.0.4/net/ipv4/inet_hashtables.c
68119 --- linux-3.0.4/net/ipv4/inet_hashtables.c      2011-08-23 21:44:40.000000000 -0400
68120 +++ linux-3.0.4/net/ipv4/inet_hashtables.c      2011-08-23 21:55:24.000000000 -0400
68121 @@ -18,12 +18,15 @@
68122  #include <linux/sched.h>
68123  #include <linux/slab.h>
68124  #include <linux/wait.h>
68125 +#include <linux/security.h>
68126  
68127  #include <net/inet_connection_sock.h>
68128  #include <net/inet_hashtables.h>
68129  #include <net/secure_seq.h>
68130  #include <net/route.h>
68131  #include <net/ip.h>
68132  
68133 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
68134 +
68135  /*
68136   * Allocate and initialize a new local port bind bucket.
68137 @@ -530,6 +533,8 @@ ok:
68138                         twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
68139                 spin_unlock(&head->lock);
68140  
68141 +               gr_update_task_in_ip_table(current, inet_sk(sk));
68142 +
68143                 if (tw) {
68144                         inet_twsk_deschedule(tw, death_row);
68145                         while (twrefcnt) {
68146 diff -urNp linux-3.0.4/net/ipv4/inetpeer.c linux-3.0.4/net/ipv4/inetpeer.c
68147 --- linux-3.0.4/net/ipv4/inetpeer.c     2011-08-23 21:44:40.000000000 -0400
68148 +++ linux-3.0.4/net/ipv4/inetpeer.c     2011-08-23 21:48:14.000000000 -0400
68149 @@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
68150         unsigned int sequence;
68151         int invalidated, newrefcnt = 0;
68152  
68153 +       pax_track_stack();
68154 +
68155         /* Look up for the address quickly, lockless.
68156          * Because of a concurrent writer, we might not find an existing entry.
68157          */
68158 @@ -517,8 +519,8 @@ found:              /* The existing node has been fo
68159         if (p) {
68160                 p->daddr = *daddr;
68161                 atomic_set(&p->refcnt, 1);
68162 -               atomic_set(&p->rid, 0);
68163 -               atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
68164 +               atomic_set_unchecked(&p->rid, 0);
68165 +               atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
68166                 p->tcp_ts_stamp = 0;
68167                 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
68168                 p->rate_tokens = 0;
68169 diff -urNp linux-3.0.4/net/ipv4/ip_fragment.c linux-3.0.4/net/ipv4/ip_fragment.c
68170 --- linux-3.0.4/net/ipv4/ip_fragment.c  2011-07-21 22:17:23.000000000 -0400
68171 +++ linux-3.0.4/net/ipv4/ip_fragment.c  2011-08-23 21:47:56.000000000 -0400
68172 @@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
68173                 return 0;
68174  
68175         start = qp->rid;
68176 -       end = atomic_inc_return(&peer->rid);
68177 +       end = atomic_inc_return_unchecked(&peer->rid);
68178         qp->rid = end;
68179  
68180         rc = qp->q.fragments && (end - start) > max;
68181 diff -urNp linux-3.0.4/net/ipv4/ip_sockglue.c linux-3.0.4/net/ipv4/ip_sockglue.c
68182 --- linux-3.0.4/net/ipv4/ip_sockglue.c  2011-07-21 22:17:23.000000000 -0400
68183 +++ linux-3.0.4/net/ipv4/ip_sockglue.c  2011-08-23 21:48:14.000000000 -0400
68184 @@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock 
68185         int val;
68186         int len;
68187  
68188 +       pax_track_stack();
68189 +
68190         if (level != SOL_IP)
68191                 return -EOPNOTSUPP;
68192  
68193 @@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock 
68194                 len = min_t(unsigned int, len, opt->optlen);
68195                 if (put_user(len, optlen))
68196                         return -EFAULT;
68197 -               if (copy_to_user(optval, opt->__data, len))
68198 +               if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
68199 +                   copy_to_user(optval, opt->__data, len))
68200                         return -EFAULT;
68201                 return 0;
68202         }
68203 diff -urNp linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
68204 --- linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c  2011-07-21 22:17:23.000000000 -0400
68205 +++ linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c  2011-08-23 21:47:56.000000000 -0400
68206 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
68207  
68208         *len = 0;
68209  
68210 -       *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
68211 +       *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
68212         if (*octets == NULL) {
68213                 if (net_ratelimit())
68214                         pr_notice("OOM in bsalg (%d)\n", __LINE__);
68215 diff -urNp linux-3.0.4/net/ipv4/ping.c linux-3.0.4/net/ipv4/ping.c
68216 --- linux-3.0.4/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
68217 +++ linux-3.0.4/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
68218 @@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
68219                 sk_rmem_alloc_get(sp),
68220                 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68221                 atomic_read(&sp->sk_refcnt), sp,
68222 -               atomic_read(&sp->sk_drops), len);
68223 +               atomic_read_unchecked(&sp->sk_drops), len);
68224  }
68225  
68226  static int ping_seq_show(struct seq_file *seq, void *v)
68227 diff -urNp linux-3.0.4/net/ipv4/raw.c linux-3.0.4/net/ipv4/raw.c
68228 --- linux-3.0.4/net/ipv4/raw.c  2011-07-21 22:17:23.000000000 -0400
68229 +++ linux-3.0.4/net/ipv4/raw.c  2011-08-23 21:48:14.000000000 -0400
68230 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
68231  int raw_rcv(struct sock *sk, struct sk_buff *skb)
68232  {
68233         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
68234 -               atomic_inc(&sk->sk_drops);
68235 +               atomic_inc_unchecked(&sk->sk_drops);
68236                 kfree_skb(skb);
68237                 return NET_RX_DROP;
68238         }
68239 @@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
68240  
68241  static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
68242  {
68243 +       struct icmp_filter filter;
68244 +
68245         if (optlen > sizeof(struct icmp_filter))
68246                 optlen = sizeof(struct icmp_filter);
68247 -       if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
68248 +       if (copy_from_user(&filter, optval, optlen))
68249                 return -EFAULT;
68250 +       raw_sk(sk)->filter = filter;
68251         return 0;
68252  }
68253  
68254  static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
68255  {
68256         int len, ret = -EFAULT;
68257 +       struct icmp_filter filter;
68258  
68259         if (get_user(len, optlen))
68260                 goto out;
68261 @@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
68262         if (len > sizeof(struct icmp_filter))
68263                 len = sizeof(struct icmp_filter);
68264         ret = -EFAULT;
68265 -       if (put_user(len, optlen) ||
68266 -           copy_to_user(optval, &raw_sk(sk)->filter, len))
68267 +       filter = raw_sk(sk)->filter;
68268 +       if (put_user(len, optlen) || len > sizeof filter ||
68269 +           copy_to_user(optval, &filter, len))
68270                 goto out;
68271         ret = 0;
68272  out:   return ret;
68273 @@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
68274                 sk_wmem_alloc_get(sp),
68275                 sk_rmem_alloc_get(sp),
68276                 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68277 -               atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68278 +               atomic_read(&sp->sk_refcnt),
68279 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68280 +               NULL,
68281 +#else
68282 +               sp,
68283 +#endif
68284 +               atomic_read_unchecked(&sp->sk_drops));
68285  }
68286  
68287  static int raw_seq_show(struct seq_file *seq, void *v)
68288 diff -urNp linux-3.0.4/net/ipv4/route.c linux-3.0.4/net/ipv4/route.c
68289 --- linux-3.0.4/net/ipv4/route.c        2011-08-23 21:44:40.000000000 -0400
68290 +++ linux-3.0.4/net/ipv4/route.c        2011-08-23 21:47:56.000000000 -0400
68291 @@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
68292  
68293  static inline int rt_genid(struct net *net)
68294  {
68295 -       return atomic_read(&net->ipv4.rt_genid);
68296 +       return atomic_read_unchecked(&net->ipv4.rt_genid);
68297  }
68298  
68299  #ifdef CONFIG_PROC_FS
68300 @@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
68301         unsigned char shuffle;
68302  
68303         get_random_bytes(&shuffle, sizeof(shuffle));
68304 -       atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
68305 +       atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
68306  }
68307  
68308  /*
68309 @@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
68310         error = rt->dst.error;
68311         if (peer) {
68312                 inet_peer_refcheck(rt->peer);
68313 -               id = atomic_read(&peer->ip_id_count) & 0xffff;
68314 +               id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
68315                 if (peer->tcp_ts_stamp) {
68316                         ts = peer->tcp_ts;
68317                         tsage = get_seconds() - peer->tcp_ts_stamp;
68318 diff -urNp linux-3.0.4/net/ipv4/tcp.c linux-3.0.4/net/ipv4/tcp.c
68319 --- linux-3.0.4/net/ipv4/tcp.c  2011-07-21 22:17:23.000000000 -0400
68320 +++ linux-3.0.4/net/ipv4/tcp.c  2011-08-23 21:48:14.000000000 -0400
68321 @@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
68322         int val;
68323         int err = 0;
68324  
68325 +       pax_track_stack();
68326 +
68327         /* These are data/string values, all the others are ints */
68328         switch (optname) {
68329         case TCP_CONGESTION: {
68330 @@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
68331         struct tcp_sock *tp = tcp_sk(sk);
68332         int val, len;
68333  
68334 +       pax_track_stack();
68335 +
68336         if (get_user(len, optlen))
68337                 return -EFAULT;
68338  
68339 diff -urNp linux-3.0.4/net/ipv4/tcp_ipv4.c linux-3.0.4/net/ipv4/tcp_ipv4.c
68340 --- linux-3.0.4/net/ipv4/tcp_ipv4.c     2011-08-23 21:44:40.000000000 -0400
68341 +++ linux-3.0.4/net/ipv4/tcp_ipv4.c     2011-08-23 21:48:14.000000000 -0400
68342 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
68343  int sysctl_tcp_low_latency __read_mostly;
68344  EXPORT_SYMBOL(sysctl_tcp_low_latency);
68345  
68346 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68347 +extern int grsec_enable_blackhole;
68348 +#endif
68349  
68350  #ifdef CONFIG_TCP_MD5SIG
68351  static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
68352 @@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
68353         return 0;
68354  
68355  reset:
68356 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68357 +       if (!grsec_enable_blackhole)
68358 +#endif
68359         tcp_v4_send_reset(rsk, skb);
68360  discard:
68361         kfree_skb(skb);
68362 @@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
68363         TCP_SKB_CB(skb)->sacked  = 0;
68364  
68365         sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68366 -       if (!sk)
68367 +       if (!sk) {
68368 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68369 +               ret = 1;
68370 +#endif
68371                 goto no_tcp_socket;
68372 -
68373 +       }
68374  process:
68375 -       if (sk->sk_state == TCP_TIME_WAIT)
68376 +       if (sk->sk_state == TCP_TIME_WAIT) {
68377 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68378 +               ret = 2;
68379 +#endif
68380                 goto do_time_wait;
68381 +       }
68382  
68383         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
68384                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
68385 @@ -1724,6 +1737,10 @@ no_tcp_socket:
68386  bad_packet:
68387                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68388         } else {
68389 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68390 +               if (!grsec_enable_blackhole || (ret == 1 &&
68391 +                   (skb->dev->flags & IFF_LOOPBACK)))
68392 +#endif
68393                 tcp_v4_send_reset(NULL, skb);
68394         }
68395  
68396 @@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
68397                 0,  /* non standard timer */
68398                 0, /* open_requests have no inode */
68399                 atomic_read(&sk->sk_refcnt),
68400 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68401 +               NULL,
68402 +#else
68403                 req,
68404 +#endif
68405                 len);
68406  }
68407  
68408 @@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
68409                 sock_i_uid(sk),
68410                 icsk->icsk_probes_out,
68411                 sock_i_ino(sk),
68412 -               atomic_read(&sk->sk_refcnt), sk,
68413 +               atomic_read(&sk->sk_refcnt),
68414 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68415 +               NULL,
68416 +#else
68417 +               sk,
68418 +#endif
68419                 jiffies_to_clock_t(icsk->icsk_rto),
68420                 jiffies_to_clock_t(icsk->icsk_ack.ato),
68421                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
68422 @@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
68423                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
68424                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
68425                 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68426 -               atomic_read(&tw->tw_refcnt), tw, len);
68427 +               atomic_read(&tw->tw_refcnt),
68428 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68429 +               NULL,
68430 +#else
68431 +               tw,
68432 +#endif
68433 +               len);
68434  }
68435  
68436  #define TMPSZ 150
68437 diff -urNp linux-3.0.4/net/ipv4/tcp_minisocks.c linux-3.0.4/net/ipv4/tcp_minisocks.c
68438 --- linux-3.0.4/net/ipv4/tcp_minisocks.c        2011-07-21 22:17:23.000000000 -0400
68439 +++ linux-3.0.4/net/ipv4/tcp_minisocks.c        2011-08-23 21:48:14.000000000 -0400
68440 @@ -27,6 +27,10 @@
68441  #include <net/inet_common.h>
68442  #include <net/xfrm.h>
68443  
68444 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68445 +extern int grsec_enable_blackhole;
68446 +#endif
68447 +
68448  int sysctl_tcp_syncookies __read_mostly = 1;
68449  EXPORT_SYMBOL(sysctl_tcp_syncookies);
68450  
68451 @@ -745,6 +749,10 @@ listen_overflow:
68452  
68453  embryonic_reset:
68454         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
68455 +
68456 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68457 +       if (!grsec_enable_blackhole)
68458 +#endif
68459         if (!(flg & TCP_FLAG_RST))
68460                 req->rsk_ops->send_reset(sk, skb);
68461  
68462 diff -urNp linux-3.0.4/net/ipv4/tcp_output.c linux-3.0.4/net/ipv4/tcp_output.c
68463 --- linux-3.0.4/net/ipv4/tcp_output.c   2011-07-21 22:17:23.000000000 -0400
68464 +++ linux-3.0.4/net/ipv4/tcp_output.c   2011-08-23 21:48:14.000000000 -0400
68465 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
68466         int mss;
68467         int s_data_desired = 0;
68468  
68469 +       pax_track_stack();
68470 +
68471         if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
68472                 s_data_desired = cvp->s_data_desired;
68473         skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
68474 diff -urNp linux-3.0.4/net/ipv4/tcp_probe.c linux-3.0.4/net/ipv4/tcp_probe.c
68475 --- linux-3.0.4/net/ipv4/tcp_probe.c    2011-07-21 22:17:23.000000000 -0400
68476 +++ linux-3.0.4/net/ipv4/tcp_probe.c    2011-08-23 21:47:56.000000000 -0400
68477 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
68478                 if (cnt + width >= len)
68479                         break;
68480  
68481 -               if (copy_to_user(buf + cnt, tbuf, width))
68482 +               if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
68483                         return -EFAULT;
68484                 cnt += width;
68485         }
68486 diff -urNp linux-3.0.4/net/ipv4/tcp_timer.c linux-3.0.4/net/ipv4/tcp_timer.c
68487 --- linux-3.0.4/net/ipv4/tcp_timer.c    2011-07-21 22:17:23.000000000 -0400
68488 +++ linux-3.0.4/net/ipv4/tcp_timer.c    2011-08-23 21:48:14.000000000 -0400
68489 @@ -22,6 +22,10 @@
68490  #include <linux/gfp.h>
68491  #include <net/tcp.h>
68492  
68493 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68494 +extern int grsec_lastack_retries;
68495 +#endif
68496 +
68497  int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
68498  int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
68499  int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
68500 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
68501                 }
68502         }
68503  
68504 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68505 +       if ((sk->sk_state == TCP_LAST_ACK) &&
68506 +           (grsec_lastack_retries > 0) &&
68507 +           (grsec_lastack_retries < retry_until))
68508 +               retry_until = grsec_lastack_retries;
68509 +#endif
68510 +
68511         if (retransmits_timed_out(sk, retry_until,
68512                                   syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
68513                 /* Has it gone just too far? */
68514 diff -urNp linux-3.0.4/net/ipv4/udp.c linux-3.0.4/net/ipv4/udp.c
68515 --- linux-3.0.4/net/ipv4/udp.c  2011-07-21 22:17:23.000000000 -0400
68516 +++ linux-3.0.4/net/ipv4/udp.c  2011-08-23 21:48:14.000000000 -0400
68517 @@ -86,6 +86,7 @@
68518  #include <linux/types.h>
68519  #include <linux/fcntl.h>
68520  #include <linux/module.h>
68521 +#include <linux/security.h>
68522  #include <linux/socket.h>
68523  #include <linux/sockios.h>
68524  #include <linux/igmp.h>
68525 @@ -107,6 +108,10 @@
68526  #include <net/xfrm.h>
68527  #include "udp_impl.h"
68528  
68529 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68530 +extern int grsec_enable_blackhole;
68531 +#endif
68532 +
68533  struct udp_table udp_table __read_mostly;
68534  EXPORT_SYMBOL(udp_table);
68535  
68536 @@ -564,6 +569,9 @@ found:
68537         return s;
68538  }
68539  
68540 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
68541 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
68542 +
68543  /*
68544   * This routine is called by the ICMP module when it gets some
68545   * sort of error condition.  If err < 0 then the socket should
68546 @@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
68547                 dport = usin->sin_port;
68548                 if (dport == 0)
68549                         return -EINVAL;
68550 +
68551 +               err = gr_search_udp_sendmsg(sk, usin);
68552 +               if (err)
68553 +                       return err;
68554         } else {
68555                 if (sk->sk_state != TCP_ESTABLISHED)
68556                         return -EDESTADDRREQ;
68557 +
68558 +               err = gr_search_udp_sendmsg(sk, NULL);
68559 +               if (err)
68560 +                       return err;
68561 +
68562                 daddr = inet->inet_daddr;
68563                 dport = inet->inet_dport;
68564                 /* Open fast path for connected socket.
68565 @@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
68566                 udp_lib_checksum_complete(skb)) {
68567                 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
68568                                  IS_UDPLITE(sk));
68569 -               atomic_inc(&sk->sk_drops);
68570 +               atomic_inc_unchecked(&sk->sk_drops);
68571                 __skb_unlink(skb, rcvq);
68572                 __skb_queue_tail(&list_kill, skb);
68573         }
68574 @@ -1184,6 +1201,10 @@ try_again:
68575         if (!skb)
68576                 goto out;
68577  
68578 +       err = gr_search_udp_recvmsg(sk, skb);
68579 +       if (err)
68580 +               goto out_free;
68581 +
68582         ulen = skb->len - sizeof(struct udphdr);
68583         if (len > ulen)
68584                 len = ulen;
68585 @@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
68586  
68587  drop:
68588         UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
68589 -       atomic_inc(&sk->sk_drops);
68590 +       atomic_inc_unchecked(&sk->sk_drops);
68591         kfree_skb(skb);
68592         return -1;
68593  }
68594 @@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
68595                         skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
68596  
68597                 if (!skb1) {
68598 -                       atomic_inc(&sk->sk_drops);
68599 +                       atomic_inc_unchecked(&sk->sk_drops);
68600                         UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
68601                                          IS_UDPLITE(sk));
68602                         UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
68603 @@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, 
68604                 goto csum_error;
68605  
68606         UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
68607 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68608 +       if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68609 +#endif
68610         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
68611  
68612         /*
68613 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
68614                 sk_wmem_alloc_get(sp),
68615                 sk_rmem_alloc_get(sp),
68616                 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68617 -               atomic_read(&sp->sk_refcnt), sp,
68618 -               atomic_read(&sp->sk_drops), len);
68619 +               atomic_read(&sp->sk_refcnt),
68620 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68621 +               NULL,
68622 +#else
68623 +               sp,
68624 +#endif
68625 +               atomic_read_unchecked(&sp->sk_drops), len);
68626  }
68627  
68628  int udp4_seq_show(struct seq_file *seq, void *v)
68629 diff -urNp linux-3.0.4/net/ipv6/inet6_connection_sock.c linux-3.0.4/net/ipv6/inet6_connection_sock.c
68630 --- linux-3.0.4/net/ipv6/inet6_connection_sock.c        2011-07-21 22:17:23.000000000 -0400
68631 +++ linux-3.0.4/net/ipv6/inet6_connection_sock.c        2011-08-23 21:47:56.000000000 -0400
68632 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
68633  #ifdef CONFIG_XFRM
68634         {
68635                 struct rt6_info *rt = (struct rt6_info  *)dst;
68636 -               rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
68637 +               rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
68638         }
68639  #endif
68640  }
68641 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
68642  #ifdef CONFIG_XFRM
68643         if (dst) {
68644                 struct rt6_info *rt = (struct rt6_info *)dst;
68645 -               if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
68646 +               if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
68647                         __sk_dst_reset(sk);
68648                         dst = NULL;
68649                 }
68650 diff -urNp linux-3.0.4/net/ipv6/ipv6_sockglue.c linux-3.0.4/net/ipv6/ipv6_sockglue.c
68651 --- linux-3.0.4/net/ipv6/ipv6_sockglue.c        2011-07-21 22:17:23.000000000 -0400
68652 +++ linux-3.0.4/net/ipv6/ipv6_sockglue.c        2011-08-23 21:48:14.000000000 -0400
68653 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
68654         int val, valbool;
68655         int retv = -ENOPROTOOPT;
68656  
68657 +       pax_track_stack();
68658 +
68659         if (optval == NULL)
68660                 val=0;
68661         else {
68662 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
68663         int len;
68664         int val;
68665  
68666 +       pax_track_stack();
68667 +
68668         if (ip6_mroute_opt(optname))
68669                 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
68670  
68671 diff -urNp linux-3.0.4/net/ipv6/raw.c linux-3.0.4/net/ipv6/raw.c
68672 --- linux-3.0.4/net/ipv6/raw.c  2011-07-21 22:17:23.000000000 -0400
68673 +++ linux-3.0.4/net/ipv6/raw.c  2011-08-23 21:48:14.000000000 -0400
68674 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
68675  {
68676         if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
68677             skb_checksum_complete(skb)) {
68678 -               atomic_inc(&sk->sk_drops);
68679 +               atomic_inc_unchecked(&sk->sk_drops);
68680                 kfree_skb(skb);
68681                 return NET_RX_DROP;
68682         }
68683 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68684         struct raw6_sock *rp = raw6_sk(sk);
68685  
68686         if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
68687 -               atomic_inc(&sk->sk_drops);
68688 +               atomic_inc_unchecked(&sk->sk_drops);
68689                 kfree_skb(skb);
68690                 return NET_RX_DROP;
68691         }
68692 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68693  
68694         if (inet->hdrincl) {
68695                 if (skb_checksum_complete(skb)) {
68696 -                       atomic_inc(&sk->sk_drops);
68697 +                       atomic_inc_unchecked(&sk->sk_drops);
68698                         kfree_skb(skb);
68699                         return NET_RX_DROP;
68700                 }
68701 @@ -601,7 +601,7 @@ out:
68702         return err;
68703  }
68704  
68705 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
68706 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
68707                         struct flowi6 *fl6, struct dst_entry **dstp,
68708                         unsigned int flags)
68709  {
68710 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
68711         u16 proto;
68712         int err;
68713  
68714 +       pax_track_stack();
68715 +
68716         /* Rough check on arithmetic overflow,
68717            better check is made in ip6_append_data().
68718          */
68719 @@ -909,12 +911,15 @@ do_confirm:
68720  static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
68721                                char __user *optval, int optlen)
68722  {
68723 +       struct icmp6_filter filter;
68724 +
68725         switch (optname) {
68726         case ICMPV6_FILTER:
68727                 if (optlen > sizeof(struct icmp6_filter))
68728                         optlen = sizeof(struct icmp6_filter);
68729 -               if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
68730 +               if (copy_from_user(&filter, optval, optlen))
68731                         return -EFAULT;
68732 +               raw6_sk(sk)->filter = filter;
68733                 return 0;
68734         default:
68735                 return -ENOPROTOOPT;
68736 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
68737                                char __user *optval, int __user *optlen)
68738  {
68739         int len;
68740 +       struct icmp6_filter filter;
68741  
68742         switch (optname) {
68743         case ICMPV6_FILTER:
68744 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
68745                         len = sizeof(struct icmp6_filter);
68746                 if (put_user(len, optlen))
68747                         return -EFAULT;
68748 -               if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
68749 +               filter = raw6_sk(sk)->filter;
68750 +               if (len > sizeof filter || copy_to_user(optval, &filter, len))
68751                         return -EFAULT;
68752                 return 0;
68753         default:
68754 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
68755                    0, 0L, 0,
68756                    sock_i_uid(sp), 0,
68757                    sock_i_ino(sp),
68758 -                  atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68759 +                  atomic_read(&sp->sk_refcnt),
68760 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68761 +                  NULL,
68762 +#else
68763 +                  sp,
68764 +#endif
68765 +                  atomic_read_unchecked(&sp->sk_drops));
68766  }
68767  
68768  static int raw6_seq_show(struct seq_file *seq, void *v)
68769 diff -urNp linux-3.0.4/net/ipv6/tcp_ipv6.c linux-3.0.4/net/ipv6/tcp_ipv6.c
68770 --- linux-3.0.4/net/ipv6/tcp_ipv6.c     2011-08-23 21:44:40.000000000 -0400
68771 +++ linux-3.0.4/net/ipv6/tcp_ipv6.c     2011-08-23 21:48:14.000000000 -0400
68772 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
68773  }
68774  #endif
68775  
68776 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68777 +extern int grsec_enable_blackhole;
68778 +#endif
68779 +
68780  static void tcp_v6_hash(struct sock *sk)
68781  {
68782         if (sk->sk_state != TCP_CLOSE) {
68783 @@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
68784         return 0;
68785  
68786  reset:
68787 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68788 +       if (!grsec_enable_blackhole)
68789 +#endif
68790         tcp_v6_send_reset(sk, skb);
68791  discard:
68792         if (opt_skb)
68793 @@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
68794         TCP_SKB_CB(skb)->sacked = 0;
68795  
68796         sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68797 -       if (!sk)
68798 +       if (!sk) {
68799 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68800 +               ret = 1;
68801 +#endif
68802                 goto no_tcp_socket;
68803 +       }
68804  
68805  process:
68806 -       if (sk->sk_state == TCP_TIME_WAIT)
68807 +       if (sk->sk_state == TCP_TIME_WAIT) {
68808 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68809 +               ret = 2;
68810 +#endif
68811                 goto do_time_wait;
68812 +       }
68813  
68814         if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
68815                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
68816 @@ -1794,6 +1809,10 @@ no_tcp_socket:
68817  bad_packet:
68818                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68819         } else {
68820 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68821 +               if (!grsec_enable_blackhole || (ret == 1 &&
68822 +                   (skb->dev->flags & IFF_LOOPBACK)))
68823 +#endif
68824                 tcp_v6_send_reset(NULL, skb);
68825         }
68826  
68827 @@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
68828                    uid,
68829                    0,  /* non standard timer */
68830                    0, /* open_requests have no inode */
68831 -                  0, req);
68832 +                  0,
68833 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68834 +                  NULL
68835 +#else
68836 +                  req
68837 +#endif
68838 +                  );
68839  }
68840  
68841  static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
68842 @@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
68843                    sock_i_uid(sp),
68844                    icsk->icsk_probes_out,
68845                    sock_i_ino(sp),
68846 -                  atomic_read(&sp->sk_refcnt), sp,
68847 +                  atomic_read(&sp->sk_refcnt),
68848 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68849 +                  NULL,
68850 +#else
68851 +                  sp,
68852 +#endif
68853                    jiffies_to_clock_t(icsk->icsk_rto),
68854                    jiffies_to_clock_t(icsk->icsk_ack.ato),
68855                    (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
68856 @@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
68857                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
68858                    tw->tw_substate, 0, 0,
68859                    3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68860 -                  atomic_read(&tw->tw_refcnt), tw);
68861 +                  atomic_read(&tw->tw_refcnt),
68862 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68863 +                  NULL
68864 +#else
68865 +                  tw
68866 +#endif
68867 +                  );
68868  }
68869  
68870  static int tcp6_seq_show(struct seq_file *seq, void *v)
68871 diff -urNp linux-3.0.4/net/ipv6/udp.c linux-3.0.4/net/ipv6/udp.c
68872 --- linux-3.0.4/net/ipv6/udp.c  2011-08-23 21:44:40.000000000 -0400
68873 +++ linux-3.0.4/net/ipv6/udp.c  2011-08-23 21:48:14.000000000 -0400
68874 @@ -50,6 +50,10 @@
68875  #include <linux/seq_file.h>
68876  #include "udp_impl.h"
68877  
68878 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68879 +extern int grsec_enable_blackhole;
68880 +#endif
68881 +
68882  int ipv6_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
68883  {
68884         const struct in6_addr *sk1_rcv_saddr6 = &inet6_sk(sk1)->rcv_saddr;
68885 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
68886  
68887         return 0;
68888  drop:
68889 -       atomic_inc(&sk->sk_drops);
68890 +       atomic_inc_unchecked(&sk->sk_drops);
68891  drop_no_sk_drops_inc:
68892         UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
68893         kfree_skb(skb);
68894 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
68895                         continue;
68896                 }
68897  drop:
68898 -               atomic_inc(&sk->sk_drops);
68899 +               atomic_inc_unchecked(&sk->sk_drops);
68900                 UDP6_INC_STATS_BH(sock_net(sk),
68901                                 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
68902                 UDP6_INC_STATS_BH(sock_net(sk),
68903 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, 
68904                 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
68905                                 proto == IPPROTO_UDPLITE);
68906  
68907 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68908 +               if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68909 +#endif
68910                 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
68911  
68912                 kfree_skb(skb);
68913 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, 
68914         if (!sock_owned_by_user(sk))
68915                 udpv6_queue_rcv_skb(sk, skb);
68916         else if (sk_add_backlog(sk, skb)) {
68917 -               atomic_inc(&sk->sk_drops);
68918 +               atomic_inc_unchecked(&sk->sk_drops);
68919                 bh_unlock_sock(sk);
68920                 sock_put(sk);
68921                 goto discard;
68922 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
68923                    0, 0L, 0,
68924                    sock_i_uid(sp), 0,
68925                    sock_i_ino(sp),
68926 -                  atomic_read(&sp->sk_refcnt), sp,
68927 -                  atomic_read(&sp->sk_drops));
68928 +                  atomic_read(&sp->sk_refcnt),
68929 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68930 +                  NULL,
68931 +#else
68932 +                  sp,
68933 +#endif
68934 +                  atomic_read_unchecked(&sp->sk_drops));
68935  }
68936  
68937  int udp6_seq_show(struct seq_file *seq, void *v)
68938 diff -urNp linux-3.0.4/net/irda/ircomm/ircomm_tty.c linux-3.0.4/net/irda/ircomm/ircomm_tty.c
68939 --- linux-3.0.4/net/irda/ircomm/ircomm_tty.c    2011-07-21 22:17:23.000000000 -0400
68940 +++ linux-3.0.4/net/irda/ircomm/ircomm_tty.c    2011-08-23 21:47:56.000000000 -0400
68941 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
68942         add_wait_queue(&self->open_wait, &wait);
68943  
68944         IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
68945 -             __FILE__,__LINE__, tty->driver->name, self->open_count );
68946 +             __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68947  
68948         /* As far as I can see, we protect open_count - Jean II */
68949         spin_lock_irqsave(&self->spinlock, flags);
68950         if (!tty_hung_up_p(filp)) {
68951                 extra_count = 1;
68952 -               self->open_count--;
68953 +               local_dec(&self->open_count);
68954         }
68955         spin_unlock_irqrestore(&self->spinlock, flags);
68956 -       self->blocked_open++;
68957 +       local_inc(&self->blocked_open);
68958  
68959         while (1) {
68960                 if (tty->termios->c_cflag & CBAUD) {
68961 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
68962                 }
68963  
68964                 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
68965 -                     __FILE__,__LINE__, tty->driver->name, self->open_count );
68966 +                     __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68967  
68968                 schedule();
68969         }
68970 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
68971         if (extra_count) {
68972                 /* ++ is not atomic, so this should be protected - Jean II */
68973                 spin_lock_irqsave(&self->spinlock, flags);
68974 -               self->open_count++;
68975 +               local_inc(&self->open_count);
68976                 spin_unlock_irqrestore(&self->spinlock, flags);
68977         }
68978 -       self->blocked_open--;
68979 +       local_dec(&self->blocked_open);
68980  
68981         IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
68982 -             __FILE__,__LINE__, tty->driver->name, self->open_count);
68983 +             __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
68984  
68985         if (!retval)
68986                 self->flags |= ASYNC_NORMAL_ACTIVE;
68987 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
68988         }
68989         /* ++ is not atomic, so this should be protected - Jean II */
68990         spin_lock_irqsave(&self->spinlock, flags);
68991 -       self->open_count++;
68992 +       local_inc(&self->open_count);
68993  
68994         tty->driver_data = self;
68995         self->tty = tty;
68996         spin_unlock_irqrestore(&self->spinlock, flags);
68997  
68998         IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
68999 -                  self->line, self->open_count);
69000 +                  self->line, local_read(&self->open_count));
69001  
69002         /* Not really used by us, but lets do it anyway */
69003         self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
69004 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
69005                 return;
69006         }
69007  
69008 -       if ((tty->count == 1) && (self->open_count != 1)) {
69009 +       if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
69010                 /*
69011                  * Uh, oh.  tty->count is 1, which means that the tty
69012                  * structure will be freed.  state->count should always
69013 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
69014                  */
69015                 IRDA_DEBUG(0, "%s(), bad serial port count; "
69016                            "tty->count is 1, state->count is %d\n", __func__ ,
69017 -                          self->open_count);
69018 -               self->open_count = 1;
69019 +                          local_read(&self->open_count));
69020 +               local_set(&self->open_count, 1);
69021         }
69022  
69023 -       if (--self->open_count < 0) {
69024 +       if (local_dec_return(&self->open_count) < 0) {
69025                 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
69026 -                          __func__, self->line, self->open_count);
69027 -               self->open_count = 0;
69028 +                          __func__, self->line, local_read(&self->open_count));
69029 +               local_set(&self->open_count, 0);
69030         }
69031 -       if (self->open_count) {
69032 +       if (local_read(&self->open_count)) {
69033                 spin_unlock_irqrestore(&self->spinlock, flags);
69034  
69035                 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
69036 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
69037         tty->closing = 0;
69038         self->tty = NULL;
69039  
69040 -       if (self->blocked_open) {
69041 +       if (local_read(&self->blocked_open)) {
69042                 if (self->close_delay)
69043                         schedule_timeout_interruptible(self->close_delay);
69044                 wake_up_interruptible(&self->open_wait);
69045 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
69046         spin_lock_irqsave(&self->spinlock, flags);
69047         self->flags &= ~ASYNC_NORMAL_ACTIVE;
69048         self->tty = NULL;
69049 -       self->open_count = 0;
69050 +       local_set(&self->open_count, 0);
69051         spin_unlock_irqrestore(&self->spinlock, flags);
69052  
69053         wake_up_interruptible(&self->open_wait);
69054 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct 
69055         seq_putc(m, '\n');
69056  
69057         seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
69058 -       seq_printf(m, "Open count: %d\n", self->open_count);
69059 +       seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
69060         seq_printf(m, "Max data size: %d\n", self->max_data_size);
69061         seq_printf(m, "Max header size: %d\n", self->max_header_size);
69062  
69063 diff -urNp linux-3.0.4/net/iucv/af_iucv.c linux-3.0.4/net/iucv/af_iucv.c
69064 --- linux-3.0.4/net/iucv/af_iucv.c      2011-07-21 22:17:23.000000000 -0400
69065 +++ linux-3.0.4/net/iucv/af_iucv.c      2011-08-23 21:47:56.000000000 -0400
69066 @@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
69067  
69068         write_lock_bh(&iucv_sk_list.lock);
69069  
69070 -       sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
69071 +       sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69072         while (__iucv_get_sock_by_name(name)) {
69073                 sprintf(name, "%08x",
69074 -                       atomic_inc_return(&iucv_sk_list.autobind_name));
69075 +                       atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69076         }
69077  
69078         write_unlock_bh(&iucv_sk_list.lock);
69079 diff -urNp linux-3.0.4/net/key/af_key.c linux-3.0.4/net/key/af_key.c
69080 --- linux-3.0.4/net/key/af_key.c        2011-07-21 22:17:23.000000000 -0400
69081 +++ linux-3.0.4/net/key/af_key.c        2011-08-23 21:48:14.000000000 -0400
69082 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
69083         struct xfrm_migrate m[XFRM_MAX_DEPTH];
69084         struct xfrm_kmaddress k;
69085  
69086 +       pax_track_stack();
69087 +
69088         if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
69089                                      ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
69090             !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
69091 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
69092  static u32 get_acqseq(void)
69093  {
69094         u32 res;
69095 -       static atomic_t acqseq;
69096 +       static atomic_unchecked_t acqseq;
69097  
69098         do {
69099 -               res = atomic_inc_return(&acqseq);
69100 +               res = atomic_inc_return_unchecked(&acqseq);
69101         } while (!res);
69102         return res;
69103  }
69104 diff -urNp linux-3.0.4/net/lapb/lapb_iface.c linux-3.0.4/net/lapb/lapb_iface.c
69105 --- linux-3.0.4/net/lapb/lapb_iface.c   2011-07-21 22:17:23.000000000 -0400
69106 +++ linux-3.0.4/net/lapb/lapb_iface.c   2011-08-23 21:47:56.000000000 -0400
69107 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
69108                 goto out;
69109  
69110         lapb->dev       = dev;
69111 -       lapb->callbacks = *callbacks;
69112 +       lapb->callbacks = callbacks;
69113  
69114         __lapb_insert_cb(lapb);
69115  
69116 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
69117  
69118  void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
69119  {
69120 -       if (lapb->callbacks.connect_confirmation)
69121 -               lapb->callbacks.connect_confirmation(lapb->dev, reason);
69122 +       if (lapb->callbacks->connect_confirmation)
69123 +               lapb->callbacks->connect_confirmation(lapb->dev, reason);
69124  }
69125  
69126  void lapb_connect_indication(struct lapb_cb *lapb, int reason)
69127  {
69128 -       if (lapb->callbacks.connect_indication)
69129 -               lapb->callbacks.connect_indication(lapb->dev, reason);
69130 +       if (lapb->callbacks->connect_indication)
69131 +               lapb->callbacks->connect_indication(lapb->dev, reason);
69132  }
69133  
69134  void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
69135  {
69136 -       if (lapb->callbacks.disconnect_confirmation)
69137 -               lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
69138 +       if (lapb->callbacks->disconnect_confirmation)
69139 +               lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
69140  }
69141  
69142  void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
69143  {
69144 -       if (lapb->callbacks.disconnect_indication)
69145 -               lapb->callbacks.disconnect_indication(lapb->dev, reason);
69146 +       if (lapb->callbacks->disconnect_indication)
69147 +               lapb->callbacks->disconnect_indication(lapb->dev, reason);
69148  }
69149  
69150  int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
69151  {
69152 -       if (lapb->callbacks.data_indication)
69153 -               return lapb->callbacks.data_indication(lapb->dev, skb);
69154 +       if (lapb->callbacks->data_indication)
69155 +               return lapb->callbacks->data_indication(lapb->dev, skb);
69156  
69157         kfree_skb(skb);
69158         return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
69159 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
69160  {
69161         int used = 0;
69162  
69163 -       if (lapb->callbacks.data_transmit) {
69164 -               lapb->callbacks.data_transmit(lapb->dev, skb);
69165 +       if (lapb->callbacks->data_transmit) {
69166 +               lapb->callbacks->data_transmit(lapb->dev, skb);
69167                 used = 1;
69168         }
69169  
69170 diff -urNp linux-3.0.4/net/mac80211/debugfs_sta.c linux-3.0.4/net/mac80211/debugfs_sta.c
69171 --- linux-3.0.4/net/mac80211/debugfs_sta.c      2011-07-21 22:17:23.000000000 -0400
69172 +++ linux-3.0.4/net/mac80211/debugfs_sta.c      2011-08-23 21:48:14.000000000 -0400
69173 @@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
69174         struct tid_ampdu_rx *tid_rx;
69175         struct tid_ampdu_tx *tid_tx;
69176  
69177 +       pax_track_stack();
69178 +
69179         rcu_read_lock();
69180  
69181         p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
69182 @@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
69183         struct sta_info *sta = file->private_data;
69184         struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
69185  
69186 +       pax_track_stack();
69187 +
69188         p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
69189                         htc->ht_supported ? "" : "not ");
69190         if (htc->ht_supported) {
69191 diff -urNp linux-3.0.4/net/mac80211/ieee80211_i.h linux-3.0.4/net/mac80211/ieee80211_i.h
69192 --- linux-3.0.4/net/mac80211/ieee80211_i.h      2011-07-21 22:17:23.000000000 -0400
69193 +++ linux-3.0.4/net/mac80211/ieee80211_i.h      2011-08-23 21:47:56.000000000 -0400
69194 @@ -27,6 +27,7 @@
69195  #include <net/ieee80211_radiotap.h>
69196  #include <net/cfg80211.h>
69197  #include <net/mac80211.h>
69198 +#include <asm/local.h>
69199  #include "key.h"
69200  #include "sta_info.h"
69201  
69202 @@ -721,7 +722,7 @@ struct ieee80211_local {
69203         /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
69204         spinlock_t queue_stop_reason_lock;
69205  
69206 -       int open_count;
69207 +       local_t open_count;
69208         int monitors, cooked_mntrs;
69209         /* number of interfaces with corresponding FIF_ flags */
69210         int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
69211 diff -urNp linux-3.0.4/net/mac80211/iface.c linux-3.0.4/net/mac80211/iface.c
69212 --- linux-3.0.4/net/mac80211/iface.c    2011-08-23 21:44:40.000000000 -0400
69213 +++ linux-3.0.4/net/mac80211/iface.c    2011-08-23 21:47:56.000000000 -0400
69214 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
69215                 break;
69216         }
69217  
69218 -       if (local->open_count == 0) {
69219 +       if (local_read(&local->open_count) == 0) {
69220                 res = drv_start(local);
69221                 if (res)
69222                         goto err_del_bss;
69223 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
69224                 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
69225  
69226                 if (!is_valid_ether_addr(dev->dev_addr)) {
69227 -                       if (!local->open_count)
69228 +                       if (!local_read(&local->open_count))
69229                                 drv_stop(local);
69230                         return -EADDRNOTAVAIL;
69231                 }
69232 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
69233         mutex_unlock(&local->mtx);
69234  
69235         if (coming_up)
69236 -               local->open_count++;
69237 +               local_inc(&local->open_count);
69238  
69239         if (hw_reconf_flags) {
69240                 ieee80211_hw_config(local, hw_reconf_flags);
69241 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
69242   err_del_interface:
69243         drv_remove_interface(local, &sdata->vif);
69244   err_stop:
69245 -       if (!local->open_count)
69246 +       if (!local_read(&local->open_count))
69247                 drv_stop(local);
69248   err_del_bss:
69249         sdata->bss = NULL;
69250 @@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
69251         }
69252  
69253         if (going_down)
69254 -               local->open_count--;
69255 +               local_dec(&local->open_count);
69256  
69257         switch (sdata->vif.type) {
69258         case NL80211_IFTYPE_AP_VLAN:
69259 @@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
69260  
69261         ieee80211_recalc_ps(local, -1);
69262  
69263 -       if (local->open_count == 0) {
69264 +       if (local_read(&local->open_count) == 0) {
69265                 if (local->ops->napi_poll)
69266                         napi_disable(&local->napi);
69267                 ieee80211_clear_tx_pending(local);
69268 diff -urNp linux-3.0.4/net/mac80211/main.c linux-3.0.4/net/mac80211/main.c
69269 --- linux-3.0.4/net/mac80211/main.c     2011-07-21 22:17:23.000000000 -0400
69270 +++ linux-3.0.4/net/mac80211/main.c     2011-08-23 21:47:56.000000000 -0400
69271 @@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
69272                 local->hw.conf.power_level = power;
69273         }
69274  
69275 -       if (changed && local->open_count) {
69276 +       if (changed && local_read(&local->open_count)) {
69277                 ret = drv_config(local, changed);
69278                 /*
69279                  * Goal:
69280 diff -urNp linux-3.0.4/net/mac80211/mlme.c linux-3.0.4/net/mac80211/mlme.c
69281 --- linux-3.0.4/net/mac80211/mlme.c     2011-08-23 21:44:40.000000000 -0400
69282 +++ linux-3.0.4/net/mac80211/mlme.c     2011-08-23 21:48:14.000000000 -0400
69283 @@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
69284         bool have_higher_than_11mbit = false;
69285         u16 ap_ht_cap_flags;
69286  
69287 +       pax_track_stack();
69288 +
69289         /* AssocResp and ReassocResp have identical structure */
69290  
69291         aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
69292 diff -urNp linux-3.0.4/net/mac80211/pm.c linux-3.0.4/net/mac80211/pm.c
69293 --- linux-3.0.4/net/mac80211/pm.c       2011-07-21 22:17:23.000000000 -0400
69294 +++ linux-3.0.4/net/mac80211/pm.c       2011-08-23 21:47:56.000000000 -0400
69295 @@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
69296         cancel_work_sync(&local->dynamic_ps_enable_work);
69297         del_timer_sync(&local->dynamic_ps_timer);
69298  
69299 -       local->wowlan = wowlan && local->open_count;
69300 +       local->wowlan = wowlan && local_read(&local->open_count);
69301         if (local->wowlan) {
69302                 int err = drv_suspend(local, wowlan);
69303                 if (err) {
69304 @@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
69305         }
69306  
69307         /* stop hardware - this must stop RX */
69308 -       if (local->open_count)
69309 +       if (local_read(&local->open_count))
69310                 ieee80211_stop_device(local);
69311  
69312   suspend:
69313 diff -urNp linux-3.0.4/net/mac80211/rate.c linux-3.0.4/net/mac80211/rate.c
69314 --- linux-3.0.4/net/mac80211/rate.c     2011-07-21 22:17:23.000000000 -0400
69315 +++ linux-3.0.4/net/mac80211/rate.c     2011-08-23 21:47:56.000000000 -0400
69316 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct 
69317  
69318         ASSERT_RTNL();
69319  
69320 -       if (local->open_count)
69321 +       if (local_read(&local->open_count))
69322                 return -EBUSY;
69323  
69324         if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
69325 diff -urNp linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c
69326 --- linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c      2011-07-21 22:17:23.000000000 -0400
69327 +++ linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c      2011-08-23 21:47:56.000000000 -0400
69328 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
69329  
69330         spin_unlock_irqrestore(&events->lock, status);
69331  
69332 -       if (copy_to_user(buf, pb, p))
69333 +       if (p > sizeof(pb) || copy_to_user(buf, pb, p))
69334                 return -EFAULT;
69335  
69336         return p;
69337 diff -urNp linux-3.0.4/net/mac80211/util.c linux-3.0.4/net/mac80211/util.c
69338 --- linux-3.0.4/net/mac80211/util.c     2011-07-21 22:17:23.000000000 -0400
69339 +++ linux-3.0.4/net/mac80211/util.c     2011-08-23 21:47:56.000000000 -0400
69340 @@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
69341  #endif
69342  
69343         /* restart hardware */
69344 -       if (local->open_count) {
69345 +       if (local_read(&local->open_count)) {
69346                 /*
69347                  * Upon resume hardware can sometimes be goofy due to
69348                  * various platform / driver / bus issues, so restarting
69349 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c
69350 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
69351 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
69352 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
69353         /* Increase the refcnt counter of the dest */
69354         atomic_inc(&dest->refcnt);
69355  
69356 -       conn_flags = atomic_read(&dest->conn_flags);
69357 +       conn_flags = atomic_read_unchecked(&dest->conn_flags);
69358         if (cp->protocol != IPPROTO_UDP)
69359                 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
69360         /* Bind with the destination and its corresponding transmitter */
69361 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
69362         atomic_set(&cp->refcnt, 1);
69363  
69364         atomic_set(&cp->n_control, 0);
69365 -       atomic_set(&cp->in_pkts, 0);
69366 +       atomic_set_unchecked(&cp->in_pkts, 0);
69367  
69368         atomic_inc(&ipvs->conn_count);
69369         if (flags & IP_VS_CONN_F_NO_CPORT)
69370 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
69371  
69372         /* Don't drop the entry if its number of incoming packets is not
69373            located in [0, 8] */
69374 -       i = atomic_read(&cp->in_pkts);
69375 +       i = atomic_read_unchecked(&cp->in_pkts);
69376         if (i > 8 || i < 0) return 0;
69377  
69378         if (!todrop_rate[i]) return 0;
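
[editor's note] The in_pkts and conn_flags conversions above, like the many similar ones later in this section (nfnetlink, rds, rxrpc, sunrpc), move statistics-style counters to the atomic_unchecked_t type introduced by the PaX REFCOUNT part of this patch. The *_unchecked accessors mirror the ordinary atomic API but are exempt from overflow detection, which is the point: these counters may wrap legitimately and are not reference counts. A minimal sketch of the shape, assuming the PaX definitions from elsewhere in this patch are present:

#include <asm/atomic.h>

/* atomic_unchecked_t and the *_unchecked() helpers come from the PaX
 * REFCOUNT changes elsewhere in this patch; shown here for shape only. */
static atomic_unchecked_t example_pkts = ATOMIC_INIT(0);

static void example_account_packet(void)
{
        atomic_inc_unchecked(&example_pkts);    /* may wrap, by design */
}

static int example_packet_count(void)
{
        return atomic_read_unchecked(&example_pkts);
}
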
69379 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c
69380 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
69381 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
69382 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
69383                 ret = cp->packet_xmit(skb, cp, pd->pp);
69384                 /* do not touch skb anymore */
69385  
69386 -               atomic_inc(&cp->in_pkts);
69387 +               atomic_inc_unchecked(&cp->in_pkts);
69388                 ip_vs_conn_put(cp);
69389                 return ret;
69390         }
69391 @@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
69392         if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
69393                 pkts = sysctl_sync_threshold(ipvs);
69394         else
69395 -               pkts = atomic_add_return(1, &cp->in_pkts);
69396 +               pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
69397  
69398         if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
69399             cp->protocol == IPPROTO_SCTP) {
69400 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c
69401 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c  2011-08-23 21:44:40.000000000 -0400
69402 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c  2011-08-23 21:48:14.000000000 -0400
69403 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
69404                 ip_vs_rs_hash(ipvs, dest);
69405                 write_unlock_bh(&ipvs->rs_lock);
69406         }
69407 -       atomic_set(&dest->conn_flags, conn_flags);
69408 +       atomic_set_unchecked(&dest->conn_flags, conn_flags);
69409  
69410         /* bind the service */
69411         if (!dest->svc) {
69412 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
69413                                            "      %-7s %-6d %-10d %-10d\n",
69414                                            &dest->addr.in6,
69415                                            ntohs(dest->port),
69416 -                                          ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69417 +                                          ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69418                                            atomic_read(&dest->weight),
69419                                            atomic_read(&dest->activeconns),
69420                                            atomic_read(&dest->inactconns));
69421 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
69422                                            "%-7s %-6d %-10d %-10d\n",
69423                                            ntohl(dest->addr.ip),
69424                                            ntohs(dest->port),
69425 -                                          ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69426 +                                          ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69427                                            atomic_read(&dest->weight),
69428                                            atomic_read(&dest->activeconns),
69429                                            atomic_read(&dest->inactconns));
69430 @@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
69431         struct ip_vs_dest_user *udest_compat;
69432         struct ip_vs_dest_user_kern udest;
69433  
69434 +       pax_track_stack();
69435 +
69436         if (!capable(CAP_NET_ADMIN))
69437                 return -EPERM;
69438  
69439 @@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
69440  
69441                         entry.addr = dest->addr.ip;
69442                         entry.port = dest->port;
69443 -                       entry.conn_flags = atomic_read(&dest->conn_flags);
69444 +                       entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
69445                         entry.weight = atomic_read(&dest->weight);
69446                         entry.u_threshold = dest->u_threshold;
69447                         entry.l_threshold = dest->l_threshold;
69448 @@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
69449         NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
69450  
69451         NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
69452 -                   atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69453 +                   atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69454         NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
69455         NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
69456         NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
69457 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c
69458 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
69459 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
69460 @@ -648,7 +648,7 @@ control:
69461          * i.e only increment in_pkts for Templates.
69462          */
69463         if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
69464 -               int pkts = atomic_add_return(1, &cp->in_pkts);
69465 +               int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
69466  
69467                 if (pkts % sysctl_sync_period(ipvs) != 1)
69468                         return;
69469 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
69470  
69471         if (opt)
69472                 memcpy(&cp->in_seq, opt, sizeof(*opt));
69473 -       atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
69474 +       atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
69475         cp->state = state;
69476         cp->old_state = cp->state;
69477         /*
69478 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c
69479 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
69480 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
69481 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
69482                 else
69483                         rc = NF_ACCEPT;
69484                 /* do not touch skb anymore */
69485 -               atomic_inc(&cp->in_pkts);
69486 +               atomic_inc_unchecked(&cp->in_pkts);
69487                 goto out;
69488         }
69489  
69490 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, 
69491                 else
69492                         rc = NF_ACCEPT;
69493                 /* do not touch skb anymore */
69494 -               atomic_inc(&cp->in_pkts);
69495 +               atomic_inc_unchecked(&cp->in_pkts);
69496                 goto out;
69497         }
69498  
69499 diff -urNp linux-3.0.4/net/netfilter/Kconfig linux-3.0.4/net/netfilter/Kconfig
69500 --- linux-3.0.4/net/netfilter/Kconfig   2011-07-21 22:17:23.000000000 -0400
69501 +++ linux-3.0.4/net/netfilter/Kconfig   2011-08-23 21:48:14.000000000 -0400
69502 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
69503  
69504           To compile it as a module, choose M here.  If unsure, say N.
69505  
69506 +config NETFILTER_XT_MATCH_GRADM
69507 +       tristate '"gradm" match support'
69508 +       depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
69509 +       depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
69510 +       ---help---
69511 +         The gradm match allows matching on whether the grsecurity RBAC system is enabled.
69512 +         It is useful when iptables rules are applied early on bootup to
69513 +         prevent connections to the machine (except from a trusted host)
69514 +         while the RBAC system is disabled.
69515 +
69516  config NETFILTER_XT_MATCH_HASHLIMIT
69517         tristate '"hashlimit" match support'
69518         depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
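
[editor's note] The help text above describes the intended deployment: load restrictive rules at boot, let them match while RBAC is still off, and have them stop matching once gradm enables the policy. As a hedged illustration only (the option name below comes from the userspace libxt_gradm extension shipped with gradm, not from this kernel patch, so treat it as an assumption), such a boot-time rule might look like: iptables -A INPUT ! -s <trusted-host> -m gradm --disabled -j DROP
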
69519 diff -urNp linux-3.0.4/net/netfilter/Makefile linux-3.0.4/net/netfilter/Makefile
69520 --- linux-3.0.4/net/netfilter/Makefile  2011-07-21 22:17:23.000000000 -0400
69521 +++ linux-3.0.4/net/netfilter/Makefile  2011-08-23 21:48:14.000000000 -0400
69522 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
69523  obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
69524  obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
69525  obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
69526 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
69527  obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
69528  obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
69529  obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
69530 diff -urNp linux-3.0.4/net/netfilter/nfnetlink_log.c linux-3.0.4/net/netfilter/nfnetlink_log.c
69531 --- linux-3.0.4/net/netfilter/nfnetlink_log.c   2011-07-21 22:17:23.000000000 -0400
69532 +++ linux-3.0.4/net/netfilter/nfnetlink_log.c   2011-08-23 21:47:56.000000000 -0400
69533 @@ -70,7 +70,7 @@ struct nfulnl_instance {
69534  };
69535  
69536  static DEFINE_SPINLOCK(instances_lock);
69537 -static atomic_t global_seq;
69538 +static atomic_unchecked_t global_seq;
69539  
69540  #define INSTANCE_BUCKETS       16
69541  static struct hlist_head instance_table[INSTANCE_BUCKETS];
69542 @@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
69543         /* global sequence number */
69544         if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
69545                 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
69546 -                            htonl(atomic_inc_return(&global_seq)));
69547 +                            htonl(atomic_inc_return_unchecked(&global_seq)));
69548  
69549         if (data_len) {
69550                 struct nlattr *nla;
69551 diff -urNp linux-3.0.4/net/netfilter/nfnetlink_queue.c linux-3.0.4/net/netfilter/nfnetlink_queue.c
69552 --- linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
69553 +++ linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
69554 @@ -58,7 +58,7 @@ struct nfqnl_instance {
69555   */
69556         spinlock_t      lock;
69557         unsigned int    queue_total;
69558 -       atomic_t        id_sequence;            /* 'sequence' of pkt ids */
69559 +       atomic_unchecked_t      id_sequence;    /* 'sequence' of pkt ids */
69560         struct list_head queue_list;            /* packets in queue */
69561  };
69562  
69563 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
69564         nfmsg->version = NFNETLINK_V0;
69565         nfmsg->res_id = htons(queue->queue_num);
69566  
69567 -       entry->id = atomic_inc_return(&queue->id_sequence);
69568 +       entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
69569         pmsg.packet_id          = htonl(entry->id);
69570         pmsg.hw_protocol        = entskb->protocol;
69571         pmsg.hook               = entry->hook;
69572 @@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s, 
69573                           inst->peer_pid, inst->queue_total,
69574                           inst->copy_mode, inst->copy_range,
69575                           inst->queue_dropped, inst->queue_user_dropped,
69576 -                         atomic_read(&inst->id_sequence), 1);
69577 +                         atomic_read_unchecked(&inst->id_sequence), 1);
69578  }
69579  
69580  static const struct seq_operations nfqnl_seq_ops = {
69581 diff -urNp linux-3.0.4/net/netfilter/xt_gradm.c linux-3.0.4/net/netfilter/xt_gradm.c
69582 --- linux-3.0.4/net/netfilter/xt_gradm.c        1969-12-31 19:00:00.000000000 -0500
69583 +++ linux-3.0.4/net/netfilter/xt_gradm.c        2011-08-23 21:48:14.000000000 -0400
69584 @@ -0,0 +1,51 @@
69585 +/*
69586 + *     gradm match for netfilter
69587 + *     Copyright © Zbigniew Krzystolik, 2010
69588 + *
69589 + *     This program is free software; you can redistribute it and/or modify
69590 + *     it under the terms of the GNU General Public License; either version
69591 + *     2 or 3 as published by the Free Software Foundation.
69592 + */
69593 +#include <linux/module.h>
69594 +#include <linux/moduleparam.h>
69595 +#include <linux/skbuff.h>
69596 +#include <linux/netfilter/x_tables.h>
69597 +#include <linux/grsecurity.h>
69598 +#include <linux/netfilter/xt_gradm.h>
69599 +
69600 +static bool
69601 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
69602 +{
69603 +       const struct xt_gradm_mtinfo *info = par->matchinfo;
69604 +       bool retval = false;
69605 +       if (gr_acl_is_enabled())
69606 +               retval = true;
69607 +       return retval ^ info->invflags;
69608 +}
69609 +
69610 +static struct xt_match gradm_mt_reg __read_mostly = {
69611 +               .name       = "gradm",
69612 +               .revision   = 0,
69613 +               .family     = NFPROTO_UNSPEC,
69614 +               .match      = gradm_mt,
69615 +               .matchsize  = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
69616 +               .me         = THIS_MODULE,
69617 +};
69618 +
69619 +static int __init gradm_mt_init(void)
69620 +{       
69621 +               return xt_register_match(&gradm_mt_reg);
69622 +}
69623 +
69624 +static void __exit gradm_mt_exit(void)
69625 +{       
69626 +               xt_unregister_match(&gradm_mt_reg);
69627 +}
69628 +
69629 +module_init(gradm_mt_init);
69630 +module_exit(gradm_mt_exit);
69631 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
69632 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
69633 +MODULE_LICENSE("GPL");
69634 +MODULE_ALIAS("ipt_gradm");
69635 +MODULE_ALIAS("ip6t_gradm");
69636 diff -urNp linux-3.0.4/net/netfilter/xt_statistic.c linux-3.0.4/net/netfilter/xt_statistic.c
69637 --- linux-3.0.4/net/netfilter/xt_statistic.c    2011-07-21 22:17:23.000000000 -0400
69638 +++ linux-3.0.4/net/netfilter/xt_statistic.c    2011-08-23 21:47:56.000000000 -0400
69639 @@ -18,7 +18,7 @@
69640  #include <linux/netfilter/x_tables.h>
69641  
69642  struct xt_statistic_priv {
69643 -       atomic_t count;
69644 +       atomic_unchecked_t count;
69645  } ____cacheline_aligned_in_smp;
69646  
69647  MODULE_LICENSE("GPL");
69648 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb, 
69649                 break;
69650         case XT_STATISTIC_MODE_NTH:
69651                 do {
69652 -                       oval = atomic_read(&info->master->count);
69653 +                       oval = atomic_read_unchecked(&info->master->count);
69654                         nval = (oval == info->u.nth.every) ? 0 : oval + 1;
69655 -               } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
69656 +               } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
69657                 if (nval == 0)
69658                         ret = !ret;
69659                 break;
69660 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
69661         info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
69662         if (info->master == NULL)
69663                 return -ENOMEM;
69664 -       atomic_set(&info->master->count, info->u.nth.count);
69665 +       atomic_set_unchecked(&info->master->count, info->u.nth.count);
69666  
69667         return 0;
69668  }
69669 diff -urNp linux-3.0.4/net/netlink/af_netlink.c linux-3.0.4/net/netlink/af_netlink.c
69670 --- linux-3.0.4/net/netlink/af_netlink.c        2011-07-21 22:17:23.000000000 -0400
69671 +++ linux-3.0.4/net/netlink/af_netlink.c        2011-08-23 21:47:56.000000000 -0400
69672 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock 
69673                         sk->sk_error_report(sk);
69674                 }
69675         }
69676 -       atomic_inc(&sk->sk_drops);
69677 +       atomic_inc_unchecked(&sk->sk_drops);
69678  }
69679  
69680  static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
69681 @@ -1994,7 +1994,7 @@ static int netlink_seq_show(struct seq_f
69682                            sk_wmem_alloc_get(s),
69683                            nlk->cb,
69684                            atomic_read(&s->sk_refcnt),
69685 -                          atomic_read(&s->sk_drops),
69686 +                          atomic_read_unchecked(&s->sk_drops),
69687                            sock_i_ino(s)
69688                         );
69689  
69690 diff -urNp linux-3.0.4/net/netrom/af_netrom.c linux-3.0.4/net/netrom/af_netrom.c
69691 --- linux-3.0.4/net/netrom/af_netrom.c  2011-07-21 22:17:23.000000000 -0400
69692 +++ linux-3.0.4/net/netrom/af_netrom.c  2011-08-23 21:48:14.000000000 -0400
69693 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *soc
69694         struct sock *sk = sock->sk;
69695         struct nr_sock *nr = nr_sk(sk);
69696  
69697 +       memset(sax, 0, sizeof(*sax));
69698         lock_sock(sk);
69699         if (peer != 0) {
69700                 if (sk->sk_state != TCP_ESTABLISHED) {
69701 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *soc
69702                 *uaddr_len = sizeof(struct full_sockaddr_ax25);
69703         } else {
69704                 sax->fsa_ax25.sax25_family = AF_NETROM;
69705 -               sax->fsa_ax25.sax25_ndigis = 0;
69706                 sax->fsa_ax25.sax25_call   = nr->source_addr;
69707                 *uaddr_len = sizeof(struct sockaddr_ax25);
69708         }
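
[editor's note] The nr_getname change above is a kernel-stack infoleak fix: the sockaddr is zeroed once up front instead of relying on every branch to initialise every field, so padding and untouched members can no longer reach userspace. A minimal sketch of the pattern, with simplified types and a hypothetical function name:

#include <linux/socket.h>
#include <linux/string.h>

static void example_fill_name(struct sockaddr_storage *uaddr, int *uaddr_len, int peer)
{
        memset(uaddr, 0, sizeof(*uaddr));       /* clear padding and unset fields first */

        if (peer) {
                /* fill in the peer address fields here */
        } else {
                /* fill in the local address fields here */
        }
        *uaddr_len = sizeof(*uaddr);
}
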
69709 diff -urNp linux-3.0.4/net/packet/af_packet.c linux-3.0.4/net/packet/af_packet.c
69710 --- linux-3.0.4/net/packet/af_packet.c  2011-07-21 22:17:23.000000000 -0400
69711 +++ linux-3.0.4/net/packet/af_packet.c  2011-08-23 21:47:56.000000000 -0400
69712 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
69713  
69714         spin_lock(&sk->sk_receive_queue.lock);
69715         po->stats.tp_packets++;
69716 -       skb->dropcount = atomic_read(&sk->sk_drops);
69717 +       skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
69718         __skb_queue_tail(&sk->sk_receive_queue, skb);
69719         spin_unlock(&sk->sk_receive_queue.lock);
69720         sk->sk_data_ready(sk, skb->len);
69721         return 0;
69722  
69723  drop_n_acct:
69724 -       po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
69725 +       po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
69726  
69727  drop_n_restore:
69728         if (skb_head != skb->data && skb_shared(skb)) {
69729 @@ -2168,7 +2168,7 @@ static int packet_getsockopt(struct sock
69730         case PACKET_HDRLEN:
69731                 if (len > sizeof(int))
69732                         len = sizeof(int);
69733 -               if (copy_from_user(&val, optval, len))
69734 +               if (len > sizeof(val) || copy_from_user(&val, optval, len))
69735                         return -EFAULT;
69736                 switch (val) {
69737                 case TPACKET_V1:
69738 @@ -2206,7 +2206,7 @@ static int packet_getsockopt(struct sock
69739  
69740         if (put_user(len, optlen))
69741                 return -EFAULT;
69742 -       if (copy_to_user(optval, data, len))
69743 +       if (len > sizeof(st) || copy_to_user(optval, data, len))
69744                 return -EFAULT;
69745         return 0;
69746  }
69747 diff -urNp linux-3.0.4/net/phonet/af_phonet.c linux-3.0.4/net/phonet/af_phonet.c
69748 --- linux-3.0.4/net/phonet/af_phonet.c  2011-07-21 22:17:23.000000000 -0400
69749 +++ linux-3.0.4/net/phonet/af_phonet.c  2011-08-23 21:48:14.000000000 -0400
69750 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
69751  {
69752         struct phonet_protocol *pp;
69753  
69754 -       if (protocol >= PHONET_NPROTO)
69755 +       if (protocol < 0 || protocol >= PHONET_NPROTO)
69756                 return NULL;
69757  
69758         rcu_read_lock();
69759 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
69760  {
69761         int err = 0;
69762  
69763 -       if (protocol >= PHONET_NPROTO)
69764 +       if (protocol < 0 || protocol >= PHONET_NPROTO)
69765                 return -EINVAL;
69766  
69767         err = proto_register(pp->prot, 1);
69768 diff -urNp linux-3.0.4/net/phonet/pep.c linux-3.0.4/net/phonet/pep.c
69769 --- linux-3.0.4/net/phonet/pep.c        2011-07-21 22:17:23.000000000 -0400
69770 +++ linux-3.0.4/net/phonet/pep.c        2011-08-23 21:47:56.000000000 -0400
69771 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk, 
69772  
69773         case PNS_PEP_CTRL_REQ:
69774                 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
69775 -                       atomic_inc(&sk->sk_drops);
69776 +                       atomic_inc_unchecked(&sk->sk_drops);
69777                         break;
69778                 }
69779                 __skb_pull(skb, 4);
69780 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk, 
69781                 }
69782  
69783                 if (pn->rx_credits == 0) {
69784 -                       atomic_inc(&sk->sk_drops);
69785 +                       atomic_inc_unchecked(&sk->sk_drops);
69786                         err = -ENOBUFS;
69787                         break;
69788                 }
69789 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
69790                 }
69791  
69792                 if (pn->rx_credits == 0) {
69793 -                       atomic_inc(&sk->sk_drops);
69794 +                       atomic_inc_unchecked(&sk->sk_drops);
69795                         err = NET_RX_DROP;
69796                         break;
69797                 }
69798 diff -urNp linux-3.0.4/net/phonet/socket.c linux-3.0.4/net/phonet/socket.c
69799 --- linux-3.0.4/net/phonet/socket.c     2011-07-21 22:17:23.000000000 -0400
69800 +++ linux-3.0.4/net/phonet/socket.c     2011-08-23 21:48:14.000000000 -0400
69801 @@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_f
69802                         pn->resource, sk->sk_state,
69803                         sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
69804                         sock_i_uid(sk), sock_i_ino(sk),
69805 -                       atomic_read(&sk->sk_refcnt), sk,
69806 -                       atomic_read(&sk->sk_drops), &len);
69807 +                       atomic_read(&sk->sk_refcnt),
69808 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69809 +                       NULL,
69810 +#else
69811 +                       sk,
69812 +#endif
69813 +                       atomic_read_unchecked(&sk->sk_drops), &len);
69814         }
69815         seq_printf(seq, "%*s\n", 127 - len, "");
69816         return 0;
69817 diff -urNp linux-3.0.4/net/rds/cong.c linux-3.0.4/net/rds/cong.c
69818 --- linux-3.0.4/net/rds/cong.c  2011-07-21 22:17:23.000000000 -0400
69819 +++ linux-3.0.4/net/rds/cong.c  2011-08-23 21:47:56.000000000 -0400
69820 @@ -77,7 +77,7 @@
69821   * finds that the saved generation number is smaller than the global generation
69822   * number, it wakes up the process.
69823   */
69824 -static atomic_t                rds_cong_generation = ATOMIC_INIT(0);
69825 +static atomic_unchecked_t              rds_cong_generation = ATOMIC_INIT(0);
69826  
69827  /*
69828   * Congestion monitoring
69829 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
69830         rdsdebug("waking map %p for %pI4\n",
69831           map, &map->m_addr);
69832         rds_stats_inc(s_cong_update_received);
69833 -       atomic_inc(&rds_cong_generation);
69834 +       atomic_inc_unchecked(&rds_cong_generation);
69835         if (waitqueue_active(&map->m_waitq))
69836                 wake_up(&map->m_waitq);
69837         if (waitqueue_active(&rds_poll_waitq))
69838 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
69839  
69840  int rds_cong_updated_since(unsigned long *recent)
69841  {
69842 -       unsigned long gen = atomic_read(&rds_cong_generation);
69843 +       unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
69844  
69845         if (likely(*recent == gen))
69846                 return 0;
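
[editor's note] The comment at the top of this file describes the wakeup scheme the hunks above implement: a waiter records the generation number it last saw, and a later comparison against the global generation tells it whether anything changed while it slept. A minimal, simplified restatement of that compare-and-refresh step, using the unchecked counter exactly as the hunks do (names are illustrative; atomic_unchecked_t is assumed from the PaX changes elsewhere in this patch):

#include <asm/atomic.h>

static atomic_unchecked_t example_generation = ATOMIC_INIT(0);

static int example_updated_since(unsigned long *recent)
{
        unsigned long gen = atomic_read_unchecked(&example_generation);

        if (*recent == gen)
                return 0;       /* nothing happened since the caller last looked */
        *recent = gen;          /* remember the new generation */
        return 1;
}

static void example_publish_update(void)
{
        atomic_inc_unchecked(&example_generation);
        /* waking any waiters would follow here, as in rds_cong_map_updated() */
}
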
69847 diff -urNp linux-3.0.4/net/rds/ib_cm.c linux-3.0.4/net/rds/ib_cm.c
69848 --- linux-3.0.4/net/rds/ib_cm.c 2011-07-21 22:17:23.000000000 -0400
69849 +++ linux-3.0.4/net/rds/ib_cm.c 2011-08-23 21:47:56.000000000 -0400
69850 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
69851         /* Clear the ACK state */
69852         clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
69853  #ifdef KERNEL_HAS_ATOMIC64
69854 -       atomic64_set(&ic->i_ack_next, 0);
69855 +       atomic64_set_unchecked(&ic->i_ack_next, 0);
69856  #else
69857         ic->i_ack_next = 0;
69858  #endif
69859 diff -urNp linux-3.0.4/net/rds/ib.h linux-3.0.4/net/rds/ib.h
69860 --- linux-3.0.4/net/rds/ib.h    2011-07-21 22:17:23.000000000 -0400
69861 +++ linux-3.0.4/net/rds/ib.h    2011-08-23 21:47:56.000000000 -0400
69862 @@ -127,7 +127,7 @@ struct rds_ib_connection {
69863         /* sending acks */
69864         unsigned long           i_ack_flags;
69865  #ifdef KERNEL_HAS_ATOMIC64
69866 -       atomic64_t              i_ack_next;     /* next ACK to send */
69867 +       atomic64_unchecked_t    i_ack_next;     /* next ACK to send */
69868  #else
69869         spinlock_t              i_ack_lock;     /* protect i_ack_next */
69870         u64                     i_ack_next;     /* next ACK to send */
69871 diff -urNp linux-3.0.4/net/rds/ib_recv.c linux-3.0.4/net/rds/ib_recv.c
69872 --- linux-3.0.4/net/rds/ib_recv.c       2011-07-21 22:17:23.000000000 -0400
69873 +++ linux-3.0.4/net/rds/ib_recv.c       2011-08-23 21:47:56.000000000 -0400
69874 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
69875  static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
69876                                 int ack_required)
69877  {
69878 -       atomic64_set(&ic->i_ack_next, seq);
69879 +       atomic64_set_unchecked(&ic->i_ack_next, seq);
69880         if (ack_required) {
69881                 smp_mb__before_clear_bit();
69882                 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
69883 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
69884         clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
69885         smp_mb__after_clear_bit();
69886  
69887 -       return atomic64_read(&ic->i_ack_next);
69888 +       return atomic64_read_unchecked(&ic->i_ack_next);
69889  }
69890  #endif
69891  
69892 diff -urNp linux-3.0.4/net/rds/iw_cm.c linux-3.0.4/net/rds/iw_cm.c
69893 --- linux-3.0.4/net/rds/iw_cm.c 2011-07-21 22:17:23.000000000 -0400
69894 +++ linux-3.0.4/net/rds/iw_cm.c 2011-08-23 21:47:56.000000000 -0400
69895 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
69896         /* Clear the ACK state */
69897         clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
69898  #ifdef KERNEL_HAS_ATOMIC64
69899 -       atomic64_set(&ic->i_ack_next, 0);
69900 +       atomic64_set_unchecked(&ic->i_ack_next, 0);
69901  #else
69902         ic->i_ack_next = 0;
69903  #endif
69904 diff -urNp linux-3.0.4/net/rds/iw.h linux-3.0.4/net/rds/iw.h
69905 --- linux-3.0.4/net/rds/iw.h    2011-07-21 22:17:23.000000000 -0400
69906 +++ linux-3.0.4/net/rds/iw.h    2011-08-23 21:47:56.000000000 -0400
69907 @@ -133,7 +133,7 @@ struct rds_iw_connection {
69908         /* sending acks */
69909         unsigned long           i_ack_flags;
69910  #ifdef KERNEL_HAS_ATOMIC64
69911 -       atomic64_t              i_ack_next;     /* next ACK to send */
69912 +       atomic64_unchecked_t    i_ack_next;     /* next ACK to send */
69913  #else
69914         spinlock_t              i_ack_lock;     /* protect i_ack_next */
69915         u64                     i_ack_next;     /* next ACK to send */
69916 diff -urNp linux-3.0.4/net/rds/iw_rdma.c linux-3.0.4/net/rds/iw_rdma.c
69917 --- linux-3.0.4/net/rds/iw_rdma.c       2011-07-21 22:17:23.000000000 -0400
69918 +++ linux-3.0.4/net/rds/iw_rdma.c       2011-08-23 21:48:14.000000000 -0400
69919 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
69920         struct rdma_cm_id *pcm_id;
69921         int rc;
69922  
69923 +       pax_track_stack();
69924 +
69925         src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
69926         dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
69927  
69928 diff -urNp linux-3.0.4/net/rds/iw_recv.c linux-3.0.4/net/rds/iw_recv.c
69929 --- linux-3.0.4/net/rds/iw_recv.c       2011-07-21 22:17:23.000000000 -0400
69930 +++ linux-3.0.4/net/rds/iw_recv.c       2011-08-23 21:47:56.000000000 -0400
69931 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
69932  static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
69933                                 int ack_required)
69934  {
69935 -       atomic64_set(&ic->i_ack_next, seq);
69936 +       atomic64_set_unchecked(&ic->i_ack_next, seq);
69937         if (ack_required) {
69938                 smp_mb__before_clear_bit();
69939                 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
69940 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
69941         clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
69942         smp_mb__after_clear_bit();
69943  
69944 -       return atomic64_read(&ic->i_ack_next);
69945 +       return atomic64_read_unchecked(&ic->i_ack_next);
69946  }
69947  #endif
69948  
69949 diff -urNp linux-3.0.4/net/rxrpc/af_rxrpc.c linux-3.0.4/net/rxrpc/af_rxrpc.c
69950 --- linux-3.0.4/net/rxrpc/af_rxrpc.c    2011-07-21 22:17:23.000000000 -0400
69951 +++ linux-3.0.4/net/rxrpc/af_rxrpc.c    2011-08-23 21:47:56.000000000 -0400
69952 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
69953  __be32 rxrpc_epoch;
69954  
69955  /* current debugging ID */
69956 -atomic_t rxrpc_debug_id;
69957 +atomic_unchecked_t rxrpc_debug_id;
69958  
69959  /* count of skbs currently in use */
69960  atomic_t rxrpc_n_skbs;
69961 diff -urNp linux-3.0.4/net/rxrpc/ar-ack.c linux-3.0.4/net/rxrpc/ar-ack.c
69962 --- linux-3.0.4/net/rxrpc/ar-ack.c      2011-07-21 22:17:23.000000000 -0400
69963 +++ linux-3.0.4/net/rxrpc/ar-ack.c      2011-08-23 21:48:14.000000000 -0400
69964 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
69965  
69966         _enter("{%d,%d,%d,%d},",
69967                call->acks_hard, call->acks_unacked,
69968 -              atomic_read(&call->sequence),
69969 +              atomic_read_unchecked(&call->sequence),
69970                CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
69971  
69972         stop = 0;
69973 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
69974  
69975                         /* each Tx packet has a new serial number */
69976                         sp->hdr.serial =
69977 -                               htonl(atomic_inc_return(&call->conn->serial));
69978 +                               htonl(atomic_inc_return_unchecked(&call->conn->serial));
69979  
69980                         hdr = (struct rxrpc_header *) txb->head;
69981                         hdr->serial = sp->hdr.serial;
69982 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struc
69983   */
69984  static void rxrpc_clear_tx_window(struct rxrpc_call *call)
69985  {
69986 -       rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
69987 +       rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
69988  }
69989  
69990  /*
69991 @@ -629,7 +629,7 @@ process_further:
69992  
69993                 latest = ntohl(sp->hdr.serial);
69994                 hard = ntohl(ack.firstPacket);
69995 -               tx = atomic_read(&call->sequence);
69996 +               tx = atomic_read_unchecked(&call->sequence);
69997  
69998                 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69999                        latest,
70000 @@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_stru
70001         u32 abort_code = RX_PROTOCOL_ERROR;
70002         u8 *acks = NULL;
70003  
70004 +       pax_track_stack();
70005 +
70006         //printk("\n--------------------\n");
70007         _enter("{%d,%s,%lx} [%lu]",
70008                call->debug_id, rxrpc_call_states[call->state], call->events,
70009 @@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_stru
70010         goto maybe_reschedule;
70011  
70012  send_ACK_with_skew:
70013 -       ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
70014 +       ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
70015                             ntohl(ack.serial));
70016  send_ACK:
70017         mtu = call->conn->trans->peer->if_mtu;
70018 @@ -1173,7 +1175,7 @@ send_ACK:
70019         ackinfo.rxMTU   = htonl(5692);
70020         ackinfo.jumbo_max = htonl(4);
70021  
70022 -       hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
70023 +       hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
70024         _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
70025                ntohl(hdr.serial),
70026                ntohs(ack.maxSkew),
70027 @@ -1191,7 +1193,7 @@ send_ACK:
70028  send_message:
70029         _debug("send message");
70030  
70031 -       hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
70032 +       hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
70033         _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
70034  send_message_2:
70035  
70036 diff -urNp linux-3.0.4/net/rxrpc/ar-call.c linux-3.0.4/net/rxrpc/ar-call.c
70037 --- linux-3.0.4/net/rxrpc/ar-call.c     2011-07-21 22:17:23.000000000 -0400
70038 +++ linux-3.0.4/net/rxrpc/ar-call.c     2011-08-23 21:47:56.000000000 -0400
70039 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
70040         spin_lock_init(&call->lock);
70041         rwlock_init(&call->state_lock);
70042         atomic_set(&call->usage, 1);
70043 -       call->debug_id = atomic_inc_return(&rxrpc_debug_id);
70044 +       call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70045         call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
70046  
70047         memset(&call->sock_node, 0xed, sizeof(call->sock_node));
70048 diff -urNp linux-3.0.4/net/rxrpc/ar-connection.c linux-3.0.4/net/rxrpc/ar-connection.c
70049 --- linux-3.0.4/net/rxrpc/ar-connection.c       2011-07-21 22:17:23.000000000 -0400
70050 +++ linux-3.0.4/net/rxrpc/ar-connection.c       2011-08-23 21:47:56.000000000 -0400
70051 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
70052                 rwlock_init(&conn->lock);
70053                 spin_lock_init(&conn->state_lock);
70054                 atomic_set(&conn->usage, 1);
70055 -               conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
70056 +               conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70057                 conn->avail_calls = RXRPC_MAXCALLS;
70058                 conn->size_align = 4;
70059                 conn->header_size = sizeof(struct rxrpc_header);
70060 diff -urNp linux-3.0.4/net/rxrpc/ar-connevent.c linux-3.0.4/net/rxrpc/ar-connevent.c
70061 --- linux-3.0.4/net/rxrpc/ar-connevent.c        2011-07-21 22:17:23.000000000 -0400
70062 +++ linux-3.0.4/net/rxrpc/ar-connevent.c        2011-08-23 21:47:56.000000000 -0400
70063 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
70064  
70065         len = iov[0].iov_len + iov[1].iov_len;
70066  
70067 -       hdr.serial = htonl(atomic_inc_return(&conn->serial));
70068 +       hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
70069         _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
70070  
70071         ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
70072 diff -urNp linux-3.0.4/net/rxrpc/ar-input.c linux-3.0.4/net/rxrpc/ar-input.c
70073 --- linux-3.0.4/net/rxrpc/ar-input.c    2011-07-21 22:17:23.000000000 -0400
70074 +++ linux-3.0.4/net/rxrpc/ar-input.c    2011-08-23 21:47:56.000000000 -0400
70075 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
70076         /* track the latest serial number on this connection for ACK packet
70077          * information */
70078         serial = ntohl(sp->hdr.serial);
70079 -       hi_serial = atomic_read(&call->conn->hi_serial);
70080 +       hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
70081         while (serial > hi_serial)
70082 -               hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
70083 +               hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
70084                                            serial);
70085  
70086         /* request ACK generation for any ACK or DATA packet that requests
70087 diff -urNp linux-3.0.4/net/rxrpc/ar-internal.h linux-3.0.4/net/rxrpc/ar-internal.h
70088 --- linux-3.0.4/net/rxrpc/ar-internal.h 2011-07-21 22:17:23.000000000 -0400
70089 +++ linux-3.0.4/net/rxrpc/ar-internal.h 2011-08-23 21:47:56.000000000 -0400
70090 @@ -272,8 +272,8 @@ struct rxrpc_connection {
70091         int                     error;          /* error code for local abort */
70092         int                     debug_id;       /* debug ID for printks */
70093         unsigned                call_counter;   /* call ID counter */
70094 -       atomic_t                serial;         /* packet serial number counter */
70095 -       atomic_t                hi_serial;      /* highest serial number received */
70096 +       atomic_unchecked_t      serial;         /* packet serial number counter */
70097 +       atomic_unchecked_t      hi_serial;      /* highest serial number received */
70098         u8                      avail_calls;    /* number of calls available */
70099         u8                      size_align;     /* data size alignment (for security) */
70100         u8                      header_size;    /* rxrpc + security header size */
70101 @@ -346,7 +346,7 @@ struct rxrpc_call {
70102         spinlock_t              lock;
70103         rwlock_t                state_lock;     /* lock for state transition */
70104         atomic_t                usage;
70105 -       atomic_t                sequence;       /* Tx data packet sequence counter */
70106 +       atomic_unchecked_t      sequence;       /* Tx data packet sequence counter */
70107         u32                     abort_code;     /* local/remote abort code */
70108         enum {                                  /* current state of call */
70109                 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
70110 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
70111   */
70112  extern atomic_t rxrpc_n_skbs;
70113  extern __be32 rxrpc_epoch;
70114 -extern atomic_t rxrpc_debug_id;
70115 +extern atomic_unchecked_t rxrpc_debug_id;
70116  extern struct workqueue_struct *rxrpc_workqueue;
70117  
70118  /*
70119 diff -urNp linux-3.0.4/net/rxrpc/ar-local.c linux-3.0.4/net/rxrpc/ar-local.c
70120 --- linux-3.0.4/net/rxrpc/ar-local.c    2011-07-21 22:17:23.000000000 -0400
70121 +++ linux-3.0.4/net/rxrpc/ar-local.c    2011-08-23 21:47:56.000000000 -0400
70122 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
70123                 spin_lock_init(&local->lock);
70124                 rwlock_init(&local->services_lock);
70125                 atomic_set(&local->usage, 1);
70126 -               local->debug_id = atomic_inc_return(&rxrpc_debug_id);
70127 +               local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70128                 memcpy(&local->srx, srx, sizeof(*srx));
70129         }
70130  
70131 diff -urNp linux-3.0.4/net/rxrpc/ar-output.c linux-3.0.4/net/rxrpc/ar-output.c
70132 --- linux-3.0.4/net/rxrpc/ar-output.c   2011-07-21 22:17:23.000000000 -0400
70133 +++ linux-3.0.4/net/rxrpc/ar-output.c   2011-08-23 21:47:56.000000000 -0400
70134 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb 
70135                         sp->hdr.cid = call->cid;
70136                         sp->hdr.callNumber = call->call_id;
70137                         sp->hdr.seq =
70138 -                               htonl(atomic_inc_return(&call->sequence));
70139 +                               htonl(atomic_inc_return_unchecked(&call->sequence));
70140                         sp->hdr.serial =
70141 -                               htonl(atomic_inc_return(&conn->serial));
70142 +                               htonl(atomic_inc_return_unchecked(&conn->serial));
70143                         sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
70144                         sp->hdr.userStatus = 0;
70145                         sp->hdr.securityIndex = conn->security_ix;
70146 diff -urNp linux-3.0.4/net/rxrpc/ar-peer.c linux-3.0.4/net/rxrpc/ar-peer.c
70147 --- linux-3.0.4/net/rxrpc/ar-peer.c     2011-07-21 22:17:23.000000000 -0400
70148 +++ linux-3.0.4/net/rxrpc/ar-peer.c     2011-08-23 21:47:56.000000000 -0400
70149 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
70150                 INIT_LIST_HEAD(&peer->error_targets);
70151                 spin_lock_init(&peer->lock);
70152                 atomic_set(&peer->usage, 1);
70153 -               peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
70154 +               peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70155                 memcpy(&peer->srx, srx, sizeof(*srx));
70156  
70157                 rxrpc_assess_MTU_size(peer);
70158 diff -urNp linux-3.0.4/net/rxrpc/ar-proc.c linux-3.0.4/net/rxrpc/ar-proc.c
70159 --- linux-3.0.4/net/rxrpc/ar-proc.c     2011-07-21 22:17:23.000000000 -0400
70160 +++ linux-3.0.4/net/rxrpc/ar-proc.c     2011-08-23 21:47:56.000000000 -0400
70161 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
70162                    atomic_read(&conn->usage),
70163                    rxrpc_conn_states[conn->state],
70164                    key_serial(conn->key),
70165 -                  atomic_read(&conn->serial),
70166 -                  atomic_read(&conn->hi_serial));
70167 +                  atomic_read_unchecked(&conn->serial),
70168 +                  atomic_read_unchecked(&conn->hi_serial));
70169  
70170         return 0;
70171  }
70172 diff -urNp linux-3.0.4/net/rxrpc/ar-transport.c linux-3.0.4/net/rxrpc/ar-transport.c
70173 --- linux-3.0.4/net/rxrpc/ar-transport.c        2011-07-21 22:17:23.000000000 -0400
70174 +++ linux-3.0.4/net/rxrpc/ar-transport.c        2011-08-23 21:47:56.000000000 -0400
70175 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
70176                 spin_lock_init(&trans->client_lock);
70177                 rwlock_init(&trans->conn_lock);
70178                 atomic_set(&trans->usage, 1);
70179 -               trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
70180 +               trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70181  
70182                 if (peer->srx.transport.family == AF_INET) {
70183                         switch (peer->srx.transport_type) {
70184 diff -urNp linux-3.0.4/net/rxrpc/rxkad.c linux-3.0.4/net/rxrpc/rxkad.c
70185 --- linux-3.0.4/net/rxrpc/rxkad.c       2011-07-21 22:17:23.000000000 -0400
70186 +++ linux-3.0.4/net/rxrpc/rxkad.c       2011-08-23 21:48:14.000000000 -0400
70187 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
70188         u16 check;
70189         int nsg;
70190  
70191 +       pax_track_stack();
70192 +
70193         sp = rxrpc_skb(skb);
70194  
70195         _enter("");
70196 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
70197         u16 check;
70198         int nsg;
70199  
70200 +       pax_track_stack();
70201 +
70202         _enter("");
70203  
70204         sp = rxrpc_skb(skb);
70205 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct 
70206  
70207         len = iov[0].iov_len + iov[1].iov_len;
70208  
70209 -       hdr.serial = htonl(atomic_inc_return(&conn->serial));
70210 +       hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
70211         _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
70212  
70213         ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
70214 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
70215  
70216         len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
70217  
70218 -       hdr->serial = htonl(atomic_inc_return(&conn->serial));
70219 +       hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
70220         _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
70221  
70222         ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
70223 diff -urNp linux-3.0.4/net/sctp/proc.c linux-3.0.4/net/sctp/proc.c
70224 --- linux-3.0.4/net/sctp/proc.c 2011-07-21 22:17:23.000000000 -0400
70225 +++ linux-3.0.4/net/sctp/proc.c 2011-08-23 21:48:14.000000000 -0400
70226 @@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct s
70227                 seq_printf(seq,
70228                            "%8pK %8pK %-3d %-3d %-2d %-4d "
70229                            "%4d %8d %8d %7d %5lu %-5d %5d ",
70230 -                          assoc, sk, sctp_sk(sk)->type, sk->sk_state,
70231 +                          assoc, sk,
70232 +                          sctp_sk(sk)->type, sk->sk_state,
70233                            assoc->state, hash,
70234                            assoc->assoc_id,
70235                            assoc->sndbuf_used,
70236 diff -urNp linux-3.0.4/net/sctp/socket.c linux-3.0.4/net/sctp/socket.c
70237 --- linux-3.0.4/net/sctp/socket.c       2011-07-21 22:17:23.000000000 -0400
70238 +++ linux-3.0.4/net/sctp/socket.c       2011-08-23 21:47:56.000000000 -0400
70239 @@ -4452,7 +4452,7 @@ static int sctp_getsockopt_peer_addrs(st
70240                 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
70241                 if (space_left < addrlen)
70242                         return -ENOMEM;
70243 -               if (copy_to_user(to, &temp, addrlen))
70244 +               if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
70245                         return -EFAULT;
70246                 to += addrlen;
70247                 cnt++;
70248 diff -urNp linux-3.0.4/net/socket.c linux-3.0.4/net/socket.c
70249 --- linux-3.0.4/net/socket.c    2011-08-23 21:44:40.000000000 -0400
70250 +++ linux-3.0.4/net/socket.c    2011-08-23 21:48:14.000000000 -0400
70251 @@ -88,6 +88,7 @@
70252  #include <linux/nsproxy.h>
70253  #include <linux/magic.h>
70254  #include <linux/slab.h>
70255 +#include <linux/in.h>
70256  
70257  #include <asm/uaccess.h>
70258  #include <asm/unistd.h>
70259 @@ -105,6 +106,8 @@
70260  #include <linux/sockios.h>
70261  #include <linux/atalk.h>
70262  
70263 +#include <linux/grsock.h>
70264 +
70265  static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
70266  static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
70267                          unsigned long nr_segs, loff_t pos);
70268 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struc
70269                 &sockfs_dentry_operations, SOCKFS_MAGIC);
70270  }
70271  
70272 -static struct vfsmount *sock_mnt __read_mostly;
70273 +struct vfsmount *sock_mnt __read_mostly;
70274  
70275  static struct file_system_type sock_fs_type = {
70276         .name =         "sockfs",
70277 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int f
70278                 return -EAFNOSUPPORT;
70279         if (type < 0 || type >= SOCK_MAX)
70280                 return -EINVAL;
70281 +       if (protocol < 0)
70282 +               return -EINVAL;
70283  
70284         /* Compatibility.
70285  
70286 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int
70287         if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
70288                 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
70289  
70290 +       if(!gr_search_socket(family, type, protocol)) {
70291 +               retval = -EACCES;
70292 +               goto out;
70293 +       }
70294 +
70295 +       if (gr_handle_sock_all(family, type, protocol)) {
70296 +               retval = -EACCES;
70297 +               goto out;
70298 +       }
70299 +
70300         retval = sock_create(family, type, protocol, &sock);
70301         if (retval < 0)
70302                 goto out;
70303 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
70304         if (sock) {
70305                 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
70306                 if (err >= 0) {
70307 +                       if (gr_handle_sock_server((struct sockaddr *)&address)) {
70308 +                               err = -EACCES;
70309 +                               goto error;
70310 +                       }
70311 +                       err = gr_search_bind(sock, (struct sockaddr_in *)&address);
70312 +                       if (err)
70313 +                               goto error;
70314 +
70315                         err = security_socket_bind(sock,
70316                                                    (struct sockaddr *)&address,
70317                                                    addrlen);
70318 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
70319                                                       (struct sockaddr *)
70320                                                       &address, addrlen);
70321                 }
70322 +error:
70323                 fput_light(sock->file, fput_needed);
70324         }
70325         return err;
70326 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
70327                 if ((unsigned)backlog > somaxconn)
70328                         backlog = somaxconn;
70329  
70330 +               if (gr_handle_sock_server_other(sock->sk)) {
70331 +                       err = -EPERM;
70332 +                       goto error;
70333 +               }
70334 +
70335 +               err = gr_search_listen(sock);
70336 +               if (err)
70337 +                       goto error;
70338 +
70339                 err = security_socket_listen(sock, backlog);
70340                 if (!err)
70341                         err = sock->ops->listen(sock, backlog);
70342  
70343 +error:
70344                 fput_light(sock->file, fput_needed);
70345         }
70346         return err;
70347 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
70348         newsock->type = sock->type;
70349         newsock->ops = sock->ops;
70350  
70351 +       if (gr_handle_sock_server_other(sock->sk)) {
70352 +               err = -EPERM;
70353 +               sock_release(newsock);
70354 +               goto out_put;
70355 +       }
70356 +
70357 +       err = gr_search_accept(sock);
70358 +       if (err) {
70359 +               sock_release(newsock);
70360 +               goto out_put;
70361 +       }
70362 +
70363         /*
70364          * We don't need try_module_get here, as the listening socket (sock)
70365          * has the protocol module (sock->ops->owner) held.
70366 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
70367         fd_install(newfd, newfile);
70368         err = newfd;
70369  
70370 +       gr_attach_curr_ip(newsock->sk);
70371 +
70372  out_put:
70373         fput_light(sock->file, fput_needed);
70374  out:
70375 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
70376                 int, addrlen)
70377  {
70378         struct socket *sock;
70379 +       struct sockaddr *sck;
70380         struct sockaddr_storage address;
70381         int err, fput_needed;
70382  
70383 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
70384         if (err < 0)
70385                 goto out_put;
70386  
70387 +       sck = (struct sockaddr *)&address;
70388 +
70389 +       if (gr_handle_sock_client(sck)) {
70390 +               err = -EACCES;
70391 +               goto out_put;
70392 +       }
70393 +
70394 +       err = gr_search_connect(sock, (struct sockaddr_in *)sck);
70395 +       if (err)
70396 +               goto out_put;
70397 +
70398         err =
70399             security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
70400         if (err)
70401 @@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *
70402         unsigned char *ctl_buf = ctl;
70403         int err, ctl_len, iov_size, total_len;
70404  
70405 +       pax_track_stack();
70406 +
70407         err = -EFAULT;
70408         if (MSG_CMSG_COMPAT & flags) {
70409                 if (get_compat_msghdr(msg_sys, msg_compat))
70410 diff -urNp linux-3.0.4/net/sunrpc/sched.c linux-3.0.4/net/sunrpc/sched.c
70411 --- linux-3.0.4/net/sunrpc/sched.c      2011-07-21 22:17:23.000000000 -0400
70412 +++ linux-3.0.4/net/sunrpc/sched.c      2011-08-23 21:47:56.000000000 -0400
70413 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
70414  #ifdef RPC_DEBUG
70415  static void rpc_task_set_debuginfo(struct rpc_task *task)
70416  {
70417 -       static atomic_t rpc_pid;
70418 +       static atomic_unchecked_t rpc_pid;
70419  
70420 -       task->tk_pid = atomic_inc_return(&rpc_pid);
70421 +       task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
70422  }
70423  #else
70424  static inline void rpc_task_set_debuginfo(struct rpc_task *task)
70425 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c
70426 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c  2011-07-21 22:17:23.000000000 -0400
70427 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c  2011-08-23 21:47:56.000000000 -0400
70428 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
70429  static unsigned int min_max_inline = 4096;
70430  static unsigned int max_max_inline = 65536;
70431  
70432 -atomic_t rdma_stat_recv;
70433 -atomic_t rdma_stat_read;
70434 -atomic_t rdma_stat_write;
70435 -atomic_t rdma_stat_sq_starve;
70436 -atomic_t rdma_stat_rq_starve;
70437 -atomic_t rdma_stat_rq_poll;
70438 -atomic_t rdma_stat_rq_prod;
70439 -atomic_t rdma_stat_sq_poll;
70440 -atomic_t rdma_stat_sq_prod;
70441 +atomic_unchecked_t rdma_stat_recv;
70442 +atomic_unchecked_t rdma_stat_read;
70443 +atomic_unchecked_t rdma_stat_write;
70444 +atomic_unchecked_t rdma_stat_sq_starve;
70445 +atomic_unchecked_t rdma_stat_rq_starve;
70446 +atomic_unchecked_t rdma_stat_rq_poll;
70447 +atomic_unchecked_t rdma_stat_rq_prod;
70448 +atomic_unchecked_t rdma_stat_sq_poll;
70449 +atomic_unchecked_t rdma_stat_sq_prod;
70450  
70451  /* Temporary NFS request map and context caches */
70452  struct kmem_cache *svc_rdma_map_cachep;
70453 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
70454                 len -= *ppos;
70455                 if (len > *lenp)
70456                         len = *lenp;
70457 -               if (len && copy_to_user(buffer, str_buf, len))
70458 +               if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
70459                         return -EFAULT;
70460                 *lenp = len;
70461                 *ppos += len;
70462 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = 
70463         {
70464                 .procname       = "rdma_stat_read",
70465                 .data           = &rdma_stat_read,
70466 -               .maxlen         = sizeof(atomic_t),
70467 +               .maxlen         = sizeof(atomic_unchecked_t),
70468                 .mode           = 0644,
70469                 .proc_handler   = read_reset_stat,
70470         },
70471         {
70472                 .procname       = "rdma_stat_recv",
70473                 .data           = &rdma_stat_recv,
70474 -               .maxlen         = sizeof(atomic_t),
70475 +               .maxlen         = sizeof(atomic_unchecked_t),
70476                 .mode           = 0644,
70477                 .proc_handler   = read_reset_stat,
70478         },
70479         {
70480                 .procname       = "rdma_stat_write",
70481                 .data           = &rdma_stat_write,
70482 -               .maxlen         = sizeof(atomic_t),
70483 +               .maxlen         = sizeof(atomic_unchecked_t),
70484                 .mode           = 0644,
70485                 .proc_handler   = read_reset_stat,
70486         },
70487         {
70488                 .procname       = "rdma_stat_sq_starve",
70489                 .data           = &rdma_stat_sq_starve,
70490 -               .maxlen         = sizeof(atomic_t),
70491 +               .maxlen         = sizeof(atomic_unchecked_t),
70492                 .mode           = 0644,
70493                 .proc_handler   = read_reset_stat,
70494         },
70495         {
70496                 .procname       = "rdma_stat_rq_starve",
70497                 .data           = &rdma_stat_rq_starve,
70498 -               .maxlen         = sizeof(atomic_t),
70499 +               .maxlen         = sizeof(atomic_unchecked_t),
70500                 .mode           = 0644,
70501                 .proc_handler   = read_reset_stat,
70502         },
70503         {
70504                 .procname       = "rdma_stat_rq_poll",
70505                 .data           = &rdma_stat_rq_poll,
70506 -               .maxlen         = sizeof(atomic_t),
70507 +               .maxlen         = sizeof(atomic_unchecked_t),
70508                 .mode           = 0644,
70509                 .proc_handler   = read_reset_stat,
70510         },
70511         {
70512                 .procname       = "rdma_stat_rq_prod",
70513                 .data           = &rdma_stat_rq_prod,
70514 -               .maxlen         = sizeof(atomic_t),
70515 +               .maxlen         = sizeof(atomic_unchecked_t),
70516                 .mode           = 0644,
70517                 .proc_handler   = read_reset_stat,
70518         },
70519         {
70520                 .procname       = "rdma_stat_sq_poll",
70521                 .data           = &rdma_stat_sq_poll,
70522 -               .maxlen         = sizeof(atomic_t),
70523 +               .maxlen         = sizeof(atomic_unchecked_t),
70524                 .mode           = 0644,
70525                 .proc_handler   = read_reset_stat,
70526         },
70527         {
70528                 .procname       = "rdma_stat_sq_prod",
70529                 .data           = &rdma_stat_sq_prod,
70530 -               .maxlen         = sizeof(atomic_t),
70531 +               .maxlen         = sizeof(atomic_unchecked_t),
70532                 .mode           = 0644,
70533                 .proc_handler   = read_reset_stat,
70534         },
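
Besides the statistics retyping, the svc_rdma section above tightens
read_reset_stat(): the requested length is rejected if it exceeds the on-stack
str_buf before copy_to_user() runs.  The standalone snippet below illustrates
the same guard with made-up names and sizes, using memcpy() in place of
copy_to_user().

#include <stdio.h>
#include <string.h>

/* Reject any request larger than the scratch buffer before copying out. */
static int copy_out(char *dst, size_t len)
{
        char str_buf[32];
        size_t have = (size_t)snprintf(str_buf, sizeof(str_buf), "%d\n", 42);

        if (len > sizeof(str_buf))      /* the added bound check */
                return -1;              /* -EFAULT in the kernel version */
        if (len > have)
                len = have;
        memcpy(dst, str_buf, len);      /* copy_to_user() in the kernel */
        return (int)len;
}

int main(void)
{
        char out[64];
        int n = copy_out(out, 4);       /* sane request: copies "42\n" */

        if (n > 0)
                fwrite(out, 1, (size_t)n, stdout);
        printf("oversized request -> %d\n", copy_out(out, sizeof(out)));
        return 0;
}
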
70535 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
70536 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-07-21 22:17:23.000000000 -0400
70537 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-23 21:47:56.000000000 -0400
70538 @@ -499,7 +499,7 @@ next_sge:
70539                         svc_rdma_put_context(ctxt, 0);
70540                         goto out;
70541                 }
70542 -               atomic_inc(&rdma_stat_read);
70543 +               atomic_inc_unchecked(&rdma_stat_read);
70544  
70545                 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
70546                         chl_map->ch[ch_no].count -= read_wr.num_sge;
70547 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
70548                                   dto_q);
70549                 list_del_init(&ctxt->dto_q);
70550         } else {
70551 -               atomic_inc(&rdma_stat_rq_starve);
70552 +               atomic_inc_unchecked(&rdma_stat_rq_starve);
70553                 clear_bit(XPT_DATA, &xprt->xpt_flags);
70554                 ctxt = NULL;
70555         }
70556 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
70557         dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
70558                 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
70559         BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
70560 -       atomic_inc(&rdma_stat_recv);
70561 +       atomic_inc_unchecked(&rdma_stat_recv);
70562  
70563         /* Build up the XDR from the receive buffers. */
70564         rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
70565 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c
70566 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c   2011-07-21 22:17:23.000000000 -0400
70567 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c   2011-08-23 21:47:56.000000000 -0400
70568 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
70569         write_wr.wr.rdma.remote_addr = to;
70570  
70571         /* Post It */
70572 -       atomic_inc(&rdma_stat_write);
70573 +       atomic_inc_unchecked(&rdma_stat_write);
70574         if (svc_rdma_send(xprt, &write_wr))
70575                 goto err;
70576         return 0;
70577 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c
70578 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c        2011-07-21 22:17:23.000000000 -0400
70579 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c        2011-08-23 21:47:56.000000000 -0400
70580 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
70581                 return;
70582  
70583         ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
70584 -       atomic_inc(&rdma_stat_rq_poll);
70585 +       atomic_inc_unchecked(&rdma_stat_rq_poll);
70586  
70587         while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
70588                 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
70589 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
70590         }
70591  
70592         if (ctxt)
70593 -               atomic_inc(&rdma_stat_rq_prod);
70594 +               atomic_inc_unchecked(&rdma_stat_rq_prod);
70595  
70596         set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
70597         /*
70598 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
70599                 return;
70600  
70601         ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
70602 -       atomic_inc(&rdma_stat_sq_poll);
70603 +       atomic_inc_unchecked(&rdma_stat_sq_poll);
70604         while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
70605                 if (wc.status != IB_WC_SUCCESS)
70606                         /* Close the transport */
70607 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
70608         }
70609  
70610         if (ctxt)
70611 -               atomic_inc(&rdma_stat_sq_prod);
70612 +               atomic_inc_unchecked(&rdma_stat_sq_prod);
70613  }
70614  
70615  static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
70616 @@ -1272,7 +1272,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
70617                 spin_lock_bh(&xprt->sc_lock);
70618                 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
70619                         spin_unlock_bh(&xprt->sc_lock);
70620 -                       atomic_inc(&rdma_stat_sq_starve);
70621 +                       atomic_inc_unchecked(&rdma_stat_sq_starve);
70622  
70623                         /* See if we can opportunistically reap SQ WR to make room */
70624                         sq_cq_reap(xprt);
70625 diff -urNp linux-3.0.4/net/sysctl_net.c linux-3.0.4/net/sysctl_net.c
70626 --- linux-3.0.4/net/sysctl_net.c        2011-07-21 22:17:23.000000000 -0400
70627 +++ linux-3.0.4/net/sysctl_net.c        2011-08-23 21:48:14.000000000 -0400
70628 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
70629                                struct ctl_table *table)
70630  {
70631         /* Allow network administrator to have same access as root. */
70632 -       if (capable(CAP_NET_ADMIN)) {
70633 +       if (capable_nolog(CAP_NET_ADMIN)) {
70634                 int mode = (table->mode >> 6) & 7;
70635                 return (mode << 6) | (mode << 3) | mode;
70636         }
70637 diff -urNp linux-3.0.4/net/unix/af_unix.c linux-3.0.4/net/unix/af_unix.c
70638 --- linux-3.0.4/net/unix/af_unix.c      2011-07-21 22:17:23.000000000 -0400
70639 +++ linux-3.0.4/net/unix/af_unix.c      2011-08-23 21:48:14.000000000 -0400
70640 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
70641                 err = -ECONNREFUSED;
70642                 if (!S_ISSOCK(inode->i_mode))
70643                         goto put_fail;
70644 +
70645 +               if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
70646 +                       err = -EACCES;
70647 +                       goto put_fail;
70648 +               }
70649 +
70650                 u = unix_find_socket_byinode(inode);
70651                 if (!u)
70652                         goto put_fail;
70653 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
70654                 if (u) {
70655                         struct dentry *dentry;
70656                         dentry = unix_sk(u)->dentry;
70657 +
70658 +                       if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
70659 +                               err = -EPERM;
70660 +                               sock_put(u);
70661 +                               goto fail;
70662 +                       }
70663 +
70664                         if (dentry)
70665                                 touch_atime(unix_sk(u)->mnt, dentry);
70666                 } else
70667 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
70668                 err = security_path_mknod(&nd.path, dentry, mode, 0);
70669                 if (err)
70670                         goto out_mknod_drop_write;
70671 +               if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
70672 +                       err = -EACCES;
70673 +                       goto out_mknod_drop_write;
70674 +               }
70675                 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
70676  out_mknod_drop_write:
70677                 mnt_drop_write(nd.path.mnt);
70678                 if (err)
70679                         goto out_mknod_dput;
70680 +
70681 +               gr_handle_create(dentry, nd.path.mnt);
70682 +
70683                 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
70684                 dput(nd.path.dentry);
70685                 nd.path.dentry = dentry;
70686 diff -urNp linux-3.0.4/net/wireless/core.h linux-3.0.4/net/wireless/core.h
70687 --- linux-3.0.4/net/wireless/core.h     2011-07-21 22:17:23.000000000 -0400
70688 +++ linux-3.0.4/net/wireless/core.h     2011-08-23 21:47:56.000000000 -0400
70689 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
70690         struct mutex mtx;
70691  
70692         /* rfkill support */
70693 -       struct rfkill_ops rfkill_ops;
70694 +       rfkill_ops_no_const rfkill_ops;
70695         struct rfkill *rfkill;
70696         struct work_struct rfkill_sync;
70697  
70698 diff -urNp linux-3.0.4/net/wireless/wext-core.c linux-3.0.4/net/wireless/wext-core.c
70699 --- linux-3.0.4/net/wireless/wext-core.c        2011-07-21 22:17:23.000000000 -0400
70700 +++ linux-3.0.4/net/wireless/wext-core.c        2011-08-23 21:47:56.000000000 -0400
70701 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
70702                  */
70703  
70704                 /* Support for very large requests */
70705 -               if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
70706 -                   (user_length > descr->max_tokens)) {
70707 +               if (user_length > descr->max_tokens) {
70708                         /* Allow userspace to GET more than max so
70709                          * we can support any size GET requests.
70710                          * There is still a limit : -ENOMEM.
70711 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
70712                 }
70713         }
70714  
70715 -       if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
70716 -               /*
70717 -                * If this is a GET, but not NOMAX, it means that the extra
70718 -                * data is not bounded by userspace, but by max_tokens. Thus
70719 -                * set the length to max_tokens. This matches the extra data
70720 -                * allocation.
70721 -                * The driver should fill it with the number of tokens it
70722 -                * provided, and it may check iwp->length rather than having
70723 -                * knowledge of max_tokens. If the driver doesn't change the
70724 -                * iwp->length, this ioctl just copies back max_token tokens
70725 -                * filled with zeroes. Hopefully the driver isn't claiming
70726 -                * them to be valid data.
70727 -                */
70728 -               iwp->length = descr->max_tokens;
70729 -       }
70730 -
70731         err = handler(dev, info, (union iwreq_data *) iwp, extra);
70732  
70733         iwp->length += essid_compat;
70734 diff -urNp linux-3.0.4/net/xfrm/xfrm_policy.c linux-3.0.4/net/xfrm/xfrm_policy.c
70735 --- linux-3.0.4/net/xfrm/xfrm_policy.c  2011-07-21 22:17:23.000000000 -0400
70736 +++ linux-3.0.4/net/xfrm/xfrm_policy.c  2011-08-23 21:47:56.000000000 -0400
70737 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
70738  {
70739         policy->walk.dead = 1;
70740  
70741 -       atomic_inc(&policy->genid);
70742 +       atomic_inc_unchecked(&policy->genid);
70743  
70744         if (del_timer(&policy->timer))
70745                 xfrm_pol_put(policy);
70746 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
70747                 hlist_add_head(&policy->bydst, chain);
70748         xfrm_pol_hold(policy);
70749         net->xfrm.policy_count[dir]++;
70750 -       atomic_inc(&flow_cache_genid);
70751 +       atomic_inc_unchecked(&flow_cache_genid);
70752         if (delpol)
70753                 __xfrm_policy_unlink(delpol, dir);
70754         policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
70755 @@ -1528,7 +1528,7 @@ free_dst:
70756         goto out;
70757  }
70758  
70759 -static int inline
70760 +static inline int
70761  xfrm_dst_alloc_copy(void **target, const void *src, int size)
70762  {
70763         if (!*target) {
70764 @@ -1540,7 +1540,7 @@ xfrm_dst_alloc_copy(void **target, const
70765         return 0;
70766  }
70767  
70768 -static int inline
70769 +static inline int
70770  xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
70771  {
70772  #ifdef CONFIG_XFRM_SUB_POLICY
70773 @@ -1552,7 +1552,7 @@ xfrm_dst_update_parent(struct dst_entry 
70774  #endif
70775  }
70776  
70777 -static int inline
70778 +static inline int
70779  xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
70780  {
70781  #ifdef CONFIG_XFRM_SUB_POLICY
70782 @@ -1646,7 +1646,7 @@ xfrm_resolve_and_create_bundle(struct xf
70783  
70784         xdst->num_pols = num_pols;
70785         memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
70786 -       xdst->policy_genid = atomic_read(&pols[0]->genid);
70787 +       xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
70788  
70789         return xdst;
70790  }
70791 @@ -2333,7 +2333,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
70792                 if (xdst->xfrm_genid != dst->xfrm->genid)
70793                         return 0;
70794                 if (xdst->num_pols > 0 &&
70795 -                   xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
70796 +                   xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
70797                         return 0;
70798  
70799                 mtu = dst_mtu(dst->child);
70800 @@ -2861,7 +2861,7 @@ static int xfrm_policy_migrate(struct xf
70801                                sizeof(pol->xfrm_vec[i].saddr));
70802                         pol->xfrm_vec[i].encap_family = mp->new_family;
70803                         /* flush bundles */
70804 -                       atomic_inc(&pol->genid);
70805 +                       atomic_inc_unchecked(&pol->genid);
70806                 }
70807         }
70808  
70809 diff -urNp linux-3.0.4/net/xfrm/xfrm_user.c linux-3.0.4/net/xfrm/xfrm_user.c
70810 --- linux-3.0.4/net/xfrm/xfrm_user.c    2011-07-21 22:17:23.000000000 -0400
70811 +++ linux-3.0.4/net/xfrm/xfrm_user.c    2011-08-23 21:48:14.000000000 -0400
70812 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
70813         struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
70814         int i;
70815  
70816 +       pax_track_stack();
70817 +
70818         if (xp->xfrm_nr == 0)
70819                 return 0;
70820  
70821 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
70822         int err;
70823         int n = 0;
70824  
70825 +       pax_track_stack();
70826 +
70827         if (attrs[XFRMA_MIGRATE] == NULL)
70828                 return -EINVAL;
70829  
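
copy_to_user_tmpl() and xfrm_do_migrate() above both place an XFRM_MAX_DEPTH
array of templates on the kernel stack, which is presumably why the patch adds
pax_track_stack() calls to them: under PAX_MEMORY_STACKLEAK (the option that
appears at the end of the security/Kconfig hunk further down) such markers let
the kernel track how deep the stack was actually used so it can be sanitized
later.  The fragment below is a rough illustration with invented names and
sizes, not kernel code.

#include <string.h>

#define TMPL_MAX 31                     /* stand-in for an XFRM_MAX_DEPTH-like limit */

struct tmpl { unsigned int id; unsigned char addr[16]; };

static void pax_track_stack(void) { }   /* stub; the real helper is elsewhere in the patch */

size_t build_tmpl_vec(struct tmpl *out, size_t n)
{
        struct tmpl vec[TMPL_MAX];      /* the large frame worth tracking */
        size_t i;

        pax_track_stack();              /* placed right after the declarations */

        memset(vec, 0, sizeof(vec));
        for (i = 0; i < n && i < TMPL_MAX; i++)
                vec[i].id = (unsigned int)i;
        memcpy(out, vec, i * sizeof(*out));
        return i;
}
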
70830 diff -urNp linux-3.0.4/scripts/basic/fixdep.c linux-3.0.4/scripts/basic/fixdep.c
70831 --- linux-3.0.4/scripts/basic/fixdep.c  2011-07-21 22:17:23.000000000 -0400
70832 +++ linux-3.0.4/scripts/basic/fixdep.c  2011-08-23 21:47:56.000000000 -0400
70833 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
70834  
70835  static void parse_config_file(const char *map, size_t len)
70836  {
70837 -       const int *end = (const int *) (map + len);
70838 +       const unsigned int *end = (const unsigned int *) (map + len);
70839         /* start at +1, so that p can never be < map */
70840 -       const int *m   = (const int *) map + 1;
70841 +       const unsigned int *m   = (const unsigned int *) map + 1;
70842         const char *p, *q;
70843  
70844         for (; m < end; m++) {
70845 @@ -405,7 +405,7 @@ static void print_deps(void)
70846  static void traps(void)
70847  {
70848         static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
70849 -       int *p = (int *)test;
70850 +       unsigned int *p = (unsigned int *)test;
70851  
70852         if (*p != INT_CONF) {
70853                 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
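
The fixdep changes above switch the word-scanning pointers from int to
unsigned int.  The patch does not state the motive, but a plausible one is
that the values are raw 4-byte patterns, not signed quantities; the small
standalone demo below shows the usual behavioural difference once the top bit
of such a pattern is set (with signed int the right shift is
implementation-defined and typically sign-extends).

#include <stdio.h>

int main(void)
{
        unsigned int word = 0xF14F4E46u;        /* arbitrary pattern with the top bit set */
        int as_signed = (int)word;              /* implementation-defined conversion */

        printf("unsigned >> 24 = 0x%x\n", word >> 24);                      /* 0xf1 */
        printf("signed   >> 24 = 0x%x\n", (unsigned int)(as_signed >> 24)); /* commonly 0xfffffff1 */
        return 0;
}
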
70854 diff -urNp linux-3.0.4/scripts/gcc-plugin.sh linux-3.0.4/scripts/gcc-plugin.sh
70855 --- linux-3.0.4/scripts/gcc-plugin.sh   1969-12-31 19:00:00.000000000 -0500
70856 +++ linux-3.0.4/scripts/gcc-plugin.sh   2011-08-23 21:47:56.000000000 -0400
70857 @@ -0,0 +1,2 @@
70858 +#!/bin/sh
70859 +echo "#include \"gcc-plugin.h\"" | $* -x c -shared - -o /dev/null -I`$* -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
70860 diff -urNp linux-3.0.4/scripts/Makefile.build linux-3.0.4/scripts/Makefile.build
70861 --- linux-3.0.4/scripts/Makefile.build  2011-07-21 22:17:23.000000000 -0400
70862 +++ linux-3.0.4/scripts/Makefile.build  2011-08-23 21:47:56.000000000 -0400
70863 @@ -109,7 +109,7 @@ endif
70864  endif
70865  
70866  # Do not include host rules unless needed
70867 -ifneq ($(hostprogs-y)$(hostprogs-m),)
70868 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
70869  include scripts/Makefile.host
70870  endif
70871  
70872 diff -urNp linux-3.0.4/scripts/Makefile.clean linux-3.0.4/scripts/Makefile.clean
70873 --- linux-3.0.4/scripts/Makefile.clean  2011-07-21 22:17:23.000000000 -0400
70874 +++ linux-3.0.4/scripts/Makefile.clean  2011-08-23 21:47:56.000000000 -0400
70875 @@ -43,7 +43,8 @@ subdir-ymn    := $(addprefix $(obj)/,$(subd
70876  __clean-files  := $(extra-y) $(always)                  \
70877                    $(targets) $(clean-files)             \
70878                    $(host-progs)                         \
70879 -                  $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
70880 +                  $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
70881 +                  $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
70882  
70883  __clean-files   := $(filter-out $(no-clean-files), $(__clean-files))
70884  
70885 diff -urNp linux-3.0.4/scripts/Makefile.host linux-3.0.4/scripts/Makefile.host
70886 --- linux-3.0.4/scripts/Makefile.host   2011-07-21 22:17:23.000000000 -0400
70887 +++ linux-3.0.4/scripts/Makefile.host   2011-08-23 21:47:56.000000000 -0400
70888 @@ -31,6 +31,7 @@
70889  # Note: Shared libraries consisting of C++ files are not supported
70890  
70891  __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
70892 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
70893  
70894  # C code
70895  # Executables compiled from a single .c file
70896 @@ -54,6 +55,7 @@ host-cxxobjs  := $(sort $(foreach m,$(hos
70897  # Shared libaries (only .c supported)
70898  # Shared libraries (.so) - all .so files referenced in "xxx-objs"
70899  host-cshlib    := $(sort $(filter %.so, $(host-cobjs)))
70900 +host-cshlib    += $(sort $(filter %.so, $(__hostlibs)))
70901  # Remove .so files from "xxx-objs"
70902  host-cobjs     := $(filter-out %.so,$(host-cobjs))
70903  
70904 diff -urNp linux-3.0.4/scripts/mod/file2alias.c linux-3.0.4/scripts/mod/file2alias.c
70905 --- linux-3.0.4/scripts/mod/file2alias.c        2011-07-21 22:17:23.000000000 -0400
70906 +++ linux-3.0.4/scripts/mod/file2alias.c        2011-08-23 21:47:56.000000000 -0400
70907 @@ -72,7 +72,7 @@ static void device_id_check(const char *
70908                             unsigned long size, unsigned long id_size,
70909                             void *symval)
70910  {
70911 -       int i;
70912 +       unsigned int i;
70913  
70914         if (size % id_size || size < id_size) {
70915                 if (cross_build != 0)
70916 @@ -102,7 +102,7 @@ static void device_id_check(const char *
70917  /* USB is special because the bcdDevice can be matched against a numeric range */
70918  /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
70919  static void do_usb_entry(struct usb_device_id *id,
70920 -                        unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
70921 +                        unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
70922                          unsigned char range_lo, unsigned char range_hi,
70923                          unsigned char max, struct module *mod)
70924  {
70925 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
70926         for (i = 0; i < count; i++) {
70927                 const char *id = (char *)devs[i].id;
70928                 char acpi_id[sizeof(devs[0].id)];
70929 -               int j;
70930 +               unsigned int j;
70931  
70932                 buf_printf(&mod->dev_table_buf,
70933                            "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70934 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
70935  
70936                 for (j = 0; j < PNP_MAX_DEVICES; j++) {
70937                         const char *id = (char *)card->devs[j].id;
70938 -                       int i2, j2;
70939 +                       unsigned int i2, j2;
70940                         int dup = 0;
70941  
70942                         if (!id[0])
70943 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
70944                         /* add an individual alias for every device entry */
70945                         if (!dup) {
70946                                 char acpi_id[sizeof(card->devs[0].id)];
70947 -                               int k;
70948 +                               unsigned int k;
70949  
70950                                 buf_printf(&mod->dev_table_buf,
70951                                            "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70952 @@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, co
70953  static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
70954                         char *alias)
70955  {
70956 -       int i, j;
70957 +       unsigned int i, j;
70958  
70959         sprintf(alias, "dmi*");
70960  
70961 diff -urNp linux-3.0.4/scripts/mod/modpost.c linux-3.0.4/scripts/mod/modpost.c
70962 --- linux-3.0.4/scripts/mod/modpost.c   2011-07-21 22:17:23.000000000 -0400
70963 +++ linux-3.0.4/scripts/mod/modpost.c   2011-08-23 21:47:56.000000000 -0400
70964 @@ -892,6 +892,7 @@ enum mismatch {
70965         ANY_INIT_TO_ANY_EXIT,
70966         ANY_EXIT_TO_ANY_INIT,
70967         EXPORT_TO_INIT_EXIT,
70968 +       DATA_TO_TEXT
70969  };
70970  
70971  struct sectioncheck {
70972 @@ -1000,6 +1001,12 @@ const struct sectioncheck sectioncheck[]
70973         .tosec   = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
70974         .mismatch = EXPORT_TO_INIT_EXIT,
70975         .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
70976 +},
70977 +/* Do not reference code from writable data */
70978 +{
70979 +       .fromsec = { DATA_SECTIONS, NULL },
70980 +       .tosec   = { TEXT_SECTIONS, NULL },
70981 +       .mismatch = DATA_TO_TEXT
70982  }
70983  };
70984  
70985 @@ -1122,10 +1129,10 @@ static Elf_Sym *find_elf_symbol(struct e
70986                         continue;
70987                 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
70988                         continue;
70989 -               if (sym->st_value == addr)
70990 -                       return sym;
70991                 /* Find a symbol nearby - addr are maybe negative */
70992                 d = sym->st_value - addr;
70993 +               if (d == 0)
70994 +                       return sym;
70995                 if (d < 0)
70996                         d = addr - sym->st_value;
70997                 if (d < distance) {
70998 @@ -1404,6 +1411,14 @@ static void report_sec_mismatch(const ch
70999                 tosym, prl_to, prl_to, tosym);
71000                 free(prl_to);
71001                 break;
71002 +       case DATA_TO_TEXT:
71003 +/*
71004 +               fprintf(stderr,
71005 +               "The variable %s references\n"
71006 +               "the %s %s%s%s\n",
71007 +               fromsym, to, sec2annotation(tosec), tosym, to_p);
71008 +*/
71009 +               break;
71010         }
71011         fprintf(stderr, "\n");
71012  }
71013 @@ -1629,7 +1644,7 @@ static void section_rel(const char *modn
71014  static void check_sec_ref(struct module *mod, const char *modname,
71015                            struct elf_info *elf)
71016  {
71017 -       int i;
71018 +       unsigned int i;
71019         Elf_Shdr *sechdrs = elf->sechdrs;
71020  
71021         /* Walk through all sections */
71022 @@ -1727,7 +1742,7 @@ void __attribute__((format(printf, 2, 3)
71023         va_end(ap);
71024  }
71025  
71026 -void buf_write(struct buffer *buf, const char *s, int len)
71027 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
71028  {
71029         if (buf->size - buf->pos < len) {
71030                 buf->size += len + SZ;
71031 @@ -1939,7 +1954,7 @@ static void write_if_changed(struct buff
71032         if (fstat(fileno(file), &st) < 0)
71033                 goto close_write;
71034  
71035 -       if (st.st_size != b->pos)
71036 +       if (st.st_size != (off_t)b->pos)
71037                 goto close_write;
71038  
71039         tmp = NOFAIL(malloc(b->pos));
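
The modpost hunks above add a DATA_TO_TEXT mismatch class (references from
writable data sections into text), with the actual report in
report_sec_mismatch() left commented out, and make find_elf_symbol() return
the exact match only after computing the distance (d == 0).  The snippet below
is an invented illustration of the kind of reference a data-to-text rule
matches, and of the constified form that lands in .rodata instead.

static void handler_fn(void) { }

/* writable .data object pointing into .text -- the pattern such a rule flags */
static void (*writable_hook)(void) = handler_fn;

/* const-qualified alternative: placed in .rodata rather than .data */
static void (* const readonly_hook)(void) = handler_fn;
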
71040 diff -urNp linux-3.0.4/scripts/mod/modpost.h linux-3.0.4/scripts/mod/modpost.h
71041 --- linux-3.0.4/scripts/mod/modpost.h   2011-07-21 22:17:23.000000000 -0400
71042 +++ linux-3.0.4/scripts/mod/modpost.h   2011-08-23 21:47:56.000000000 -0400
71043 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
71044  
71045  struct buffer {
71046         char *p;
71047 -       int pos;
71048 -       int size;
71049 +       unsigned int pos;
71050 +       unsigned int size;
71051  };
71052  
71053  void __attribute__((format(printf, 2, 3)))
71054  buf_printf(struct buffer *buf, const char *fmt, ...);
71055  
71056  void
71057 -buf_write(struct buffer *buf, const char *s, int len);
71058 +buf_write(struct buffer *buf, const char *s, unsigned int len);
71059  
71060  struct module {
71061         struct module *next;
71062 diff -urNp linux-3.0.4/scripts/mod/sumversion.c linux-3.0.4/scripts/mod/sumversion.c
71063 --- linux-3.0.4/scripts/mod/sumversion.c        2011-07-21 22:17:23.000000000 -0400
71064 +++ linux-3.0.4/scripts/mod/sumversion.c        2011-08-23 21:47:56.000000000 -0400
71065 @@ -470,7 +470,7 @@ static void write_version(const char *fi
71066                 goto out;
71067         }
71068  
71069 -       if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
71070 +       if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
71071                 warn("writing sum in %s failed: %s\n",
71072                         filename, strerror(errno));
71073                 goto out;
71074 diff -urNp linux-3.0.4/scripts/pnmtologo.c linux-3.0.4/scripts/pnmtologo.c
71075 --- linux-3.0.4/scripts/pnmtologo.c     2011-07-21 22:17:23.000000000 -0400
71076 +++ linux-3.0.4/scripts/pnmtologo.c     2011-08-23 21:47:56.000000000 -0400
71077 @@ -237,14 +237,14 @@ static void write_header(void)
71078      fprintf(out, " *  Linux logo %s\n", logoname);
71079      fputs(" */\n\n", out);
71080      fputs("#include <linux/linux_logo.h>\n\n", out);
71081 -    fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
71082 +    fprintf(out, "static unsigned char %s_data[] = {\n",
71083             logoname);
71084  }
71085  
71086  static void write_footer(void)
71087  {
71088      fputs("\n};\n\n", out);
71089 -    fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
71090 +    fprintf(out, "const struct linux_logo %s = {\n", logoname);
71091      fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
71092      fprintf(out, "\t.width\t\t= %d,\n", logo_width);
71093      fprintf(out, "\t.height\t\t= %d,\n", logo_height);
71094 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
71095      fputs("\n};\n\n", out);
71096  
71097      /* write logo clut */
71098 -    fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
71099 +    fprintf(out, "static unsigned char %s_clut[] = {\n",
71100             logoname);
71101      write_hex_cnt = 0;
71102      for (i = 0; i < logo_clutsize; i++) {
71103 diff -urNp linux-3.0.4/security/apparmor/lsm.c linux-3.0.4/security/apparmor/lsm.c
71104 --- linux-3.0.4/security/apparmor/lsm.c 2011-08-23 21:44:40.000000000 -0400
71105 +++ linux-3.0.4/security/apparmor/lsm.c 2011-08-23 21:48:14.000000000 -0400
71106 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
71107         return error;
71108  }
71109  
71110 -static struct security_operations apparmor_ops = {
71111 +static struct security_operations apparmor_ops __read_only = {
71112         .name =                         "apparmor",
71113  
71114         .ptrace_access_check =          apparmor_ptrace_access_check,
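
Marking apparmor_ops as __read_only continues the constification theme seen
with rfkill_ops_no_const earlier in this section: hook tables full of function
pointers are a favourite target for runtime tampering, so they are placed
where the kernel can write-protect them after boot.  The definition of
__read_only is not part of this excerpt; the sketch below shows one way such a
marker might be implemented and is an assumption, not the patch's code.

/* Assumed implementation: put boot-initialised objects into a dedicated
 * section that the architecture code maps read-only once init is done. */
#define __read_only __attribute__((__section__(".data..read_only")))

struct example_ops {
        const char *name;
        int (*check)(int cap);
};

static int example_check(int cap)
{
        return cap == 0;
}

static struct example_ops example_ops __read_only = {
        .name  = "example",
        .check = example_check,
};
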
71115 diff -urNp linux-3.0.4/security/commoncap.c linux-3.0.4/security/commoncap.c
71116 --- linux-3.0.4/security/commoncap.c    2011-07-21 22:17:23.000000000 -0400
71117 +++ linux-3.0.4/security/commoncap.c    2011-08-23 21:48:14.000000000 -0400
71118 @@ -28,6 +28,7 @@
71119  #include <linux/prctl.h>
71120  #include <linux/securebits.h>
71121  #include <linux/user_namespace.h>
71122 +#include <net/sock.h>
71123  
71124  /*
71125   * If a non-root user executes a setuid-root binary in
71126 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
71127  
71128  int cap_netlink_recv(struct sk_buff *skb, int cap)
71129  {
71130 -       if (!cap_raised(current_cap(), cap))
71131 +       if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
71132                 return -EPERM;
71133         return 0;
71134  }
71135 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_bin
71136  {
71137         const struct cred *cred = current_cred();
71138  
71139 +       if (gr_acl_enable_at_secure())
71140 +               return 1;
71141 +
71142         if (cred->uid != 0) {
71143                 if (bprm->cap_effective)
71144                         return 1;
71145 diff -urNp linux-3.0.4/security/integrity/ima/ima_api.c linux-3.0.4/security/integrity/ima/ima_api.c
71146 --- linux-3.0.4/security/integrity/ima/ima_api.c        2011-07-21 22:17:23.000000000 -0400
71147 +++ linux-3.0.4/security/integrity/ima/ima_api.c        2011-08-23 21:47:56.000000000 -0400
71148 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
71149         int result;
71150  
71151         /* can overflow, only indicator */
71152 -       atomic_long_inc(&ima_htable.violations);
71153 +       atomic_long_inc_unchecked(&ima_htable.violations);
71154  
71155         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
71156         if (!entry) {
71157 diff -urNp linux-3.0.4/security/integrity/ima/ima_fs.c linux-3.0.4/security/integrity/ima/ima_fs.c
71158 --- linux-3.0.4/security/integrity/ima/ima_fs.c 2011-07-21 22:17:23.000000000 -0400
71159 +++ linux-3.0.4/security/integrity/ima/ima_fs.c 2011-08-23 21:47:56.000000000 -0400
71160 @@ -28,12 +28,12 @@
71161  static int valid_policy = 1;
71162  #define TMPBUFLEN 12
71163  static ssize_t ima_show_htable_value(char __user *buf, size_t count,
71164 -                                    loff_t *ppos, atomic_long_t *val)
71165 +                                    loff_t *ppos, atomic_long_unchecked_t *val)
71166  {
71167         char tmpbuf[TMPBUFLEN];
71168         ssize_t len;
71169  
71170 -       len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
71171 +       len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
71172         return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
71173  }
71174  
71175 diff -urNp linux-3.0.4/security/integrity/ima/ima.h linux-3.0.4/security/integrity/ima/ima.h
71176 --- linux-3.0.4/security/integrity/ima/ima.h    2011-07-21 22:17:23.000000000 -0400
71177 +++ linux-3.0.4/security/integrity/ima/ima.h    2011-08-23 21:47:56.000000000 -0400
71178 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
71179  extern spinlock_t ima_queue_lock;
71180  
71181  struct ima_h_table {
71182 -       atomic_long_t len;      /* number of stored measurements in the list */
71183 -       atomic_long_t violations;
71184 +       atomic_long_unchecked_t len;    /* number of stored measurements in the list */
71185 +       atomic_long_unchecked_t violations;
71186         struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
71187  };
71188  extern struct ima_h_table ima_htable;
71189 diff -urNp linux-3.0.4/security/integrity/ima/ima_queue.c linux-3.0.4/security/integrity/ima/ima_queue.c
71190 --- linux-3.0.4/security/integrity/ima/ima_queue.c      2011-07-21 22:17:23.000000000 -0400
71191 +++ linux-3.0.4/security/integrity/ima/ima_queue.c      2011-08-23 21:47:56.000000000 -0400
71192 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
71193         INIT_LIST_HEAD(&qe->later);
71194         list_add_tail_rcu(&qe->later, &ima_measurements);
71195  
71196 -       atomic_long_inc(&ima_htable.len);
71197 +       atomic_long_inc_unchecked(&ima_htable.len);
71198         key = ima_hash_key(entry->digest);
71199         hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
71200         return 0;
71201 diff -urNp linux-3.0.4/security/Kconfig linux-3.0.4/security/Kconfig
71202 --- linux-3.0.4/security/Kconfig        2011-07-21 22:17:23.000000000 -0400
71203 +++ linux-3.0.4/security/Kconfig        2011-08-23 21:48:14.000000000 -0400
71204 @@ -4,6 +4,554 @@
71205  
71206  menu "Security options"
71207  
71208 +source grsecurity/Kconfig
71209 +
71210 +menu "PaX"
71211 +
71212 +       config ARCH_TRACK_EXEC_LIMIT
71213 +       bool
71214 +
71215 +       config PAX_PER_CPU_PGD
71216 +       bool
71217 +
71218 +       config TASK_SIZE_MAX_SHIFT
71219 +       int
71220 +       depends on X86_64
71221 +       default 47 if !PAX_PER_CPU_PGD
71222 +       default 42 if PAX_PER_CPU_PGD
71223 +
71224 +       config PAX_ENABLE_PAE
71225 +       bool
71226 +       default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
71227 +       
71228 +config PAX
71229 +       bool "Enable various PaX features"
71230 +       depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
71231 +       help
71232 +         This allows you to enable various PaX features.  PaX adds
71233 +         intrusion prevention mechanisms to the kernel that reduce
71234 +         the risks posed by exploitable memory corruption bugs.
71235 +
71236 +menu "PaX Control"
71237 +       depends on PAX
71238 +
71239 +config PAX_SOFTMODE
71240 +       bool 'Support soft mode'
71241 +       select PAX_PT_PAX_FLAGS
71242 +       help
71243 +         Enabling this option will allow you to run PaX in soft mode, that
71244 +         is, PaX features will not be enforced by default, only on executables
71245 +         marked explicitly.  You must also enable PT_PAX_FLAGS support as it
71246 +         is the only way to mark executables for soft mode use.
71247 +
71248 +         Soft mode can be activated by using the "pax_softmode=1" kernel command
71249 +         line option on boot.  Furthermore you can control various PaX features
71250 +         at runtime via the entries in /proc/sys/kernel/pax.
71251 +
71252 +config PAX_EI_PAX
71253 +       bool 'Use legacy ELF header marking'
71254 +       help
71255 +         Enabling this option will allow you to control PaX features on
71256 +         a per executable basis via the 'chpax' utility available at
71257 +         http://pax.grsecurity.net/.  The control flags will be read from
71258 +         an otherwise reserved part of the ELF header.  This marking has
71259 +         numerous drawbacks (no support for soft-mode, toolchain does not
71260 +         know about the non-standard use of the ELF header) therefore it
71261 +         has been deprecated in favour of PT_PAX_FLAGS support.
71262 +
71263 +         Note that if you enable PT_PAX_FLAGS marking support as well,
71264 +         the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
71265 +
71266 +config PAX_PT_PAX_FLAGS
71267 +       bool 'Use ELF program header marking'
71268 +       help
71269 +         Enabling this option will allow you to control PaX features on
71270 +         a per executable basis via the 'paxctl' utility available at
71271 +         http://pax.grsecurity.net/.  The control flags will be read from
71272 +         a PaX specific ELF program header (PT_PAX_FLAGS).  This marking
71273 +         has the benefits of supporting both soft mode and being fully
71274 +         integrated into the toolchain (the binutils patch is available
71275 +         from http://pax.grsecurity.net).
71276 +
71277 +         If your toolchain does not support PT_PAX_FLAGS markings,
71278 +         you can create one in most cases with 'paxctl -C'.
71279 +
71280 +         Note that if you enable the legacy EI_PAX marking support as well,
71281 +         the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
71282 +
71283 +choice
71284 +       prompt 'MAC system integration'
71285 +       default PAX_HAVE_ACL_FLAGS
71286 +       help
71287 +         Mandatory Access Control systems have the option of controlling
71288 +         PaX flags on a per executable basis, choose the method supported
71289 +         by your particular system.
71290 +
71291 +         - "none": if your MAC system does not interact with PaX,
71292 +         - "direct": if your MAC system defines pax_set_initial_flags() itself,
71293 +         - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
71294 +
71295 +         NOTE: this option is for developers/integrators only.
71296 +
71297 +       config PAX_NO_ACL_FLAGS
71298 +               bool 'none'
71299 +
71300 +       config PAX_HAVE_ACL_FLAGS
71301 +               bool 'direct'
71302 +
71303 +       config PAX_HOOK_ACL_FLAGS
71304 +               bool 'hook'
71305 +endchoice
71306 +
71307 +endmenu
71308 +
71309 +menu "Non-executable pages"
71310 +       depends on PAX
71311 +
71312 +config PAX_NOEXEC
71313 +       bool "Enforce non-executable pages"
71314 +       depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
71315 +       help
71316 +         By design some architectures do not allow for protecting memory
71317 +         pages against execution or even if they do, Linux does not make
71318 +         use of this feature.  In practice this means that if a page is
71319 +         readable (such as the stack or heap) it is also executable.
71320 +
71321 +         There is a well known exploit technique that makes use of this
71322 +         fact and a common programming mistake where an attacker can
71323 +         introduce code of his choice somewhere in the attacked program's
71324 +         memory (typically the stack or the heap) and then execute it.
71325 +
71326 +         If the attacked program was running with different (typically
71327 +         higher) privileges than that of the attacker, then he can elevate
71328 +         his own privilege level (e.g. get a root shell, write to files for
71329 +         which he does not have write access to, etc).
71330 +
71331 +         Enabling this option will let you choose from various features
71332 +         that prevent the injection and execution of 'foreign' code in
71333 +         a program.
71334 +
71335 +         This will also break programs that rely on the old behaviour and
71336 +         expect that dynamically allocated memory via the malloc() family
71337 +         of functions is executable (which it is not).  Notable examples
71338 +         are the XFree86 4.x server, the java runtime and wine.
71339 +
71340 +config PAX_PAGEEXEC
71341 +       bool "Paging based non-executable pages"
71342 +       depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
71343 +       select S390_SWITCH_AMODE if S390
71344 +       select S390_EXEC_PROTECT if S390
71345 +       select ARCH_TRACK_EXEC_LIMIT if X86_32
71346 +       help
71347 +         This implementation is based on the paging feature of the CPU.
71348 +         On i386 without hardware non-executable bit support there is a
71349 +         variable but usually low performance impact, however on Intel's
71350 +         P4 core based CPUs it is very high so you should not enable this
71351 +         for kernels meant to be used on such CPUs.
71352 +
71353 +         On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
71354 +         with hardware non-executable bit support there is no performance
71355 +         impact, on ppc the impact is negligible.
71356 +
71357 +         Note that several architectures require various emulations due to
71358 +         badly designed userland ABIs, this will cause a performance impact
71359 +         but will disappear as soon as userland is fixed. For example, ppc
71360 +         userland MUST have been built with secure-plt by a recent toolchain.
71361 +
71362 +config PAX_SEGMEXEC
71363 +       bool "Segmentation based non-executable pages"
71364 +       depends on PAX_NOEXEC && X86_32
71365 +       help
71366 +         This implementation is based on the segmentation feature of the
71367 +         CPU and has a very small performance impact, however applications
71368 +         will be limited to a 1.5 GB address space instead of the normal
71369 +         3 GB.
71370 +
71371 +config PAX_EMUTRAMP
71372 +       bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
71373 +       default y if PARISC
71374 +       help
71375 +         There are some programs and libraries that for one reason or
71376 +         another attempt to execute special small code snippets from
71377 +         non-executable memory pages.  Most notable examples are the
71378 +         signal handler return code generated by the kernel itself and
71379 +         the GCC trampolines.
71380 +
71381 +         If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
71382 +         such programs will no longer work under your kernel.
71383 +
71384 +         As a remedy you can say Y here and use the 'chpax' or 'paxctl'
71385 +         utilities to enable trampoline emulation for the affected programs
71386 +         yet still have the protection provided by the non-executable pages.
71387 +
71388 +         On parisc you MUST enable this option and EMUSIGRT as well, otherwise
71389 +         your system will not even boot.
71390 +
71391 +         Alternatively you can say N here and use the 'chpax' or 'paxctl'
71392 +         utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
71393 +         for the affected files.
71394 +
71395 +         NOTE: enabling this feature *may* open up a loophole in the
71396 +         protection provided by non-executable pages that an attacker
71397 +         could abuse.  Therefore the best solution is to not have any
71398 +         files on your system that would require this option.  This can
71399 +         be achieved by not using libc5 (which relies on the kernel
71400 +         signal handler return code) and not using or rewriting programs
71401 +         that make use of the nested function implementation of GCC.
71402 +         Skilled users can just fix GCC itself so that it implements
71403 +         nested function calls in a way that does not interfere with PaX.
71404 +
71405 +config PAX_EMUSIGRT
71406 +       bool "Automatically emulate sigreturn trampolines"
71407 +       depends on PAX_EMUTRAMP && PARISC
71408 +       default y
71409 +       help
71410 +         Enabling this option will have the kernel automatically detect
71411 +         and emulate signal return trampolines executing on the stack
71412 +         that would otherwise lead to task termination.
71413 +
71414 +         This solution is intended as a temporary one for users with
71415 +         legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
71416 +         Modula-3 runtime, etc) or executables linked to such, basically
71417 +         everything that does not specify its own SA_RESTORER function in
71418 +         normal executable memory like glibc 2.1+ does.
71419 +
71420 +         On parisc you MUST enable this option, otherwise your system will
71421 +         not even boot.
71422 +
71423 +         NOTE: this feature cannot be disabled on a per executable basis
71424 +         and since it *does* open up a loophole in the protection provided
71425 +         by non-executable pages, the best solution is to not have any
71426 +         files on your system that would require this option.
71427 +
71428 +config PAX_MPROTECT
71429 +       bool "Restrict mprotect()"
71430 +       depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
71431 +       help
71432 +         Enabling this option will prevent programs from
71433 +          - changing the executable status of memory pages that were
71434 +            not originally created as executable,
71435 +          - making read-only executable pages writable again,
71436 +          - creating executable pages from anonymous memory,
71437 +          - making read-only-after-relocations (RELRO) data pages writable again.
71438 +
71439 +         You should say Y here to complete the protection provided by
71440 +         the enforcement of non-executable pages.
71441 +
71442 +         NOTE: you can use the 'chpax' or 'paxctl' utilities to control
71443 +         this feature on a per file basis.
71444 +
71445 +config PAX_MPROTECT_COMPAT
71446 +       bool "Use legacy/compat protection demoting (read help)"
71447 +       depends on PAX_MPROTECT
71448 +       default n
71449 +       help
71450 +         The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
71451 +         by sending the proper error code to the application.  For some broken 
71452 +         userland, this can cause problems with Python or other applications.  The
71453 +         current implementation however allows for applications like clamav to
71454 +         detect if JIT compilation/execution is allowed and to fall back gracefully
71455 +         to an interpreter-based mode if it does not.  While we encourage everyone
71456 +         to use the current implementation as-is and push upstream to fix broken
71457 +         userland (note that the RWX logging option can assist with this), in some
71458 +         environments this may not be possible.  Having to disable MPROTECT
71459 +         completely on certain binaries reduces the security benefit of PaX,
71460 +         so this option is provided for those environments to revert to the old
71461 +         behavior.
71462 +         
71463 +config PAX_ELFRELOCS
71464 +       bool "Allow ELF text relocations (read help)"
71465 +       depends on PAX_MPROTECT
71466 +       default n
71467 +       help
71468 +         Non-executable pages and mprotect() restrictions are effective
71469 +         in preventing the introduction of new executable code into an
71470 +         attacked task's address space.  There remain only two venues
71471 +         for this kind of attack: if the attacker can execute already
71472 +         existing code in the attacked task then he can either have it
71473 +         create and mmap() a file containing his code or have it mmap()
71474 +         an already existing ELF library that does not have position
71475 +         independent code in it and use mprotect() on it to make it
71476 +         writable and copy his code there.  While protecting against
71477 +         the former approach is beyond PaX, the latter can be prevented
71478 +         by having only PIC ELF libraries on one's system (which do not
71479 +         need to relocate their code).  If you are sure this is your case,
71480 +         as is the case with all modern Linux distributions, then leave
71481 +         this option disabled.  You should say 'n' here.
71482 +
71483 +config PAX_ETEXECRELOCS
71484 +       bool "Allow ELF ET_EXEC text relocations"
71485 +       depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
71486 +       select PAX_ELFRELOCS
71487 +       default y
71488 +       help
71489 +         On some architectures there are incorrectly created applications
71490 +         that require text relocations and would not work without enabling
71491 +         this option.  If you are an alpha, ia64 or parisc user, you should
71492 +         enable this option and disable it once you have made sure that
71493 +         none of your applications need it.
71494 +
71495 +config PAX_EMUPLT
71496 +       bool "Automatically emulate ELF PLT"
71497 +       depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
71498 +       default y
71499 +       help
71500 +         Enabling this option will have the kernel automatically detect
71501 +         and emulate the Procedure Linkage Table entries in ELF files.
71502 +         On some architectures such entries are in writable memory, and
71503 +         become non-executable leading to task termination.  Therefore
71504 +         it is mandatory that you enable this option on alpha, parisc,
71505 +         sparc and sparc64, otherwise your system would not even boot.
71506 +
71507 +         NOTE: this feature *does* open up a loophole in the protection
71508 +         provided by the non-executable pages, therefore the proper
71509 +         solution is to modify the toolchain to produce a PLT that does
71510 +         not need to be writable.
71511 +
71512 +config PAX_DLRESOLVE
71513 +       bool 'Emulate old glibc resolver stub'
71514 +       depends on PAX_EMUPLT && SPARC
71515 +       default n
71516 +       help
71517 +         This option is needed if userland has an old glibc (before 2.4)
71518 +         that puts a 'save' instruction into the runtime generated resolver
71519 +         stub that needs special emulation.
71520 +
71521 +config PAX_KERNEXEC
71522 +       bool "Enforce non-executable kernel pages"
71523 +       depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
71524 +       select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
71525 +       help
71526 +         This is the kernel land equivalent of PAGEEXEC and MPROTECT,
71527 +         that is, enabling this option will make it harder to inject
71528 +         and execute 'foreign' code in kernel memory itself.
71529 +
71530 +         Note that on x86_64 kernels there is a known regression when
71531 +         this feature and KVM/VMX are both enabled in the host kernel.
71532 +
71533 +config PAX_KERNEXEC_MODULE_TEXT
71534 +       int "Minimum amount of memory reserved for module code"
71535 +       default "4"
71536 +       depends on PAX_KERNEXEC && X86_32 && MODULES
71537 +       help
71538 +         Due to implementation details the kernel must reserve a fixed
71539 +         amount of memory for module code at compile time that cannot be
71540 +         changed at runtime.  Here you can specify the minimum amount
71541 +         in MB that will be reserved.  Due to the same implementation
71542 +         details this size will always be rounded up to the next 2/4 MB
71543 +         boundary (depends on PAE) so the actually available memory for
71544 +         module code will usually be more than this minimum.
71545 +
71546 +         The default 4 MB should be enough for most users but if you have
71547 +         an excessive number of modules (e.g., most distribution configs
71548 +         compile many drivers as modules) or use huge modules such as
71549 +         nvidia's kernel driver, you will need to adjust this amount.
71550 +         A good rule of thumb is to look at your currently loaded kernel
71551 +         modules and add up their sizes.
71552 +
71553 +endmenu
71554 +
71555 +menu "Address Space Layout Randomization"
71556 +       depends on PAX
71557 +
71558 +config PAX_ASLR
71559 +       bool "Address Space Layout Randomization"
71560 +       depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
71561 +       help
71562 +         Many if not most exploit techniques rely on the knowledge of
71563 +         certain addresses in the attacked program.  The following options
71564 +         will allow the kernel to apply a certain amount of randomization
71565 +         to specific parts of the program thereby forcing an attacker to
71566 +         guess them in most cases.  Any failed guess will most likely crash
71567 +         the attacked program which allows the kernel to detect such attempts
71568 +         and react on them.  PaX itself provides no reaction mechanisms,
71569 +         instead it is strongly encouraged that you make use of Nergal's
71570 +         segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
71571 +         (http://www.grsecurity.net/) built-in crash detection features or
71572 +         develop one yourself.
71573 +
71574 +         By saying Y here you can choose to randomize the following areas:
71575 +          - top of the task's kernel stack
71576 +          - top of the task's userland stack
71577 +          - base address for mmap() requests that do not specify one
71578 +            (this includes all libraries)
71579 +          - base address of the main executable
71580 +
71581 +         It is strongly recommended to say Y here as address space layout
71582 +         randomization has negligible impact on performance yet it provides
71583 +         a very effective protection.
71584 +
71585 +         NOTE: you can use the 'chpax' or 'paxctl' utilities to control
71586 +         this feature on a per file basis.
71587 +
71588 +config PAX_RANDKSTACK
71589 +       bool "Randomize kernel stack base"
71590 +       depends on PAX_ASLR && X86_TSC && X86
71591 +       help
71592 +         By saying Y here the kernel will randomize every task's kernel
71593 +         stack on every system call.  This will not only force an attacker
71594 +         to guess it but also prevent him from making use of possible
71595 +         leaked information about it.
71596 +
71597 +         Since the kernel stack is a rather scarce resource, randomization
71598 +         may cause unexpected stack overflows; therefore you should test
71599 +         your system very carefully.  Note that once enabled in the kernel
71600 +         configuration, this feature cannot be disabled on a per-file basis.
71601 +
71602 +config PAX_RANDUSTACK
71603 +       bool "Randomize user stack base"
71604 +       depends on PAX_ASLR
71605 +       help
71606 +         By saying Y here the kernel will randomize every task's userland
71607 +         stack.  The randomization is done in two steps, where the second
71608 +         one may apply a large shift to the top of the stack and
71609 +         cause problems for programs that want to use lots of memory (more
71610 +         than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
71611 +         For this reason the second step can be controlled by 'chpax' or
71612 +         'paxctl' on a per-file basis.
71613 +
71614 +config PAX_RANDMMAP
71615 +       bool "Randomize mmap() base"
71616 +       depends on PAX_ASLR
71617 +       help
71618 +         By saying Y here the kernel will use a randomized base address for
71619 +         mmap() requests that do not specify one themselves.  As a result,
71620 +         all dynamically loaded libraries will appear at random addresses
71621 +         and therefore be harder to exploit by a technique where an attacker
71622 +         attempts to execute library code for his purposes (e.g. spawn a
71623 +         shell from an exploited program that is running at an elevated
71624 +         privilege level).
71625 +
71626 +         Furthermore, if a program is relinked as a dynamic ELF file, its
71627 +         base address will be randomized as well, completing the full
71628 +         randomization of the address space layout.  Attacking such programs
71629 +         becomes a guessing game.  You can find an example of doing this at
71630 +         http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
71631 +         http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
71632 +
71633 +         NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
71634 +         feature on a per-file basis.
71635 +
71636 +endmenu
71637 +
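The effect of the ASLR options above can be observed from userland with a minimal sketch such as the following (illustrative only; the program and its identifiers are not part of the patch).  Run it several times and compare the printed addresses: with PAX_RANDUSTACK and PAX_RANDMMAP active, the stack, heap, mmap() and library addresses change between runs, and the executable's own address changes too once it is built as an ET_DYN (PIE) binary as described under PAX_RANDMMAP.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

static int in_executable;	/* lives in the executable's data segment */

int main(void)
{
	int on_stack;
	void *on_heap = malloc(16);
	void *anon = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (on_heap == NULL || anon == MAP_FAILED)
		return 1;

	printf("stack %p\n", (void *)&on_stack);
	printf("heap  %p\n", on_heap);
	printf("mmap  %p\n", anon);
	printf("libc  %p\n", (void *)stdin);		/* data inside the C library */
	printf("exec  %p\n", (void *)&in_executable);	/* randomized for ET_DYN binaries */

	munmap(anon, 4096);
	free(on_heap);
	return 0;
}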
71638 +menu "Miscellaneous hardening features"
71639 +
71640 +config PAX_MEMORY_SANITIZE
71641 +       bool "Sanitize all freed memory"
71642 +       help
71643 +         By saying Y here the kernel will erase memory pages as soon as they
71644 +         are freed.  This in turn reduces the lifetime of data stored in the
71645 +         pages, making it less likely that sensitive information such as
71646 +         passwords, cryptographic secrets, etc. stays in memory for too long.
71647 +
71648 +         This is especially useful for programs whose runtime is short;
71649 +         long-lived processes and the kernel itself also benefit as long as
71650 +         they operate on whole memory pages and ensure timely freeing of pages
71651 +         that may hold sensitive information.
71652 +
71653 +         The tradeoff is a performance impact: on a single-CPU system, kernel
71654 +         compilation sees a 3% slowdown; other systems and workloads may vary,
71655 +         and you are advised to test this feature on your expected workload
71656 +         before deploying it.
71657 +
71658 +         Note that this feature does not protect data stored in live pages,
71659 +         e.g., process memory swapped to disk may stay there for a long time.
71660 +
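The idea behind the option above can be sketched in userland with a hypothetical helper (sanitize_free() below is illustrative, not the kernel implementation): wipe a buffer before handing it back to the allocator so its contents do not linger in freed memory.  The kernel variant does the equivalent at the page level when pages are returned to the page allocator.

#include <stdlib.h>
#include <string.h>

static void sanitize_free(void *p, size_t len)
{
	if (!p)
		return;
	/* a real implementation must not let the compiler elide the wipe,
	 * e.g. by using explicit_bzero() or a volatile byte-wise loop */
	memset(p, 0, len);
	free(p);
}

int main(void)
{
	char *secret = malloc(64);

	if (!secret)
		return 1;
	strcpy(secret, "hunter2");
	sanitize_free(secret, 64);	/* secret is wiped before release */
	return 0;
}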
71661 +config PAX_MEMORY_STACKLEAK
71662 +       bool "Sanitize kernel stack"
71663 +       depends on X86
71664 +       help
71665 +         By saying Y here the kernel will erase the kernel stack before it
71666 +         returns from a system call.  This in turn reduces the information
71667 +         that a kernel stack leak bug can reveal.
71668 +
71669 +         Note that such a bug can still leak information that was put on
71670 +         the stack by the current system call (the one eventually triggering
71671 +         the bug) but traces of earlier system calls on the kernel stack
71672 +         cannot leak anymore.
71673 +
71674 +         The tradeoff is a performance impact: on a single-CPU system, kernel
71675 +         compilation sees a 1% slowdown; other systems and workloads may vary,
71676 +         and you are advised to test this feature on your expected workload
71677 +         before deploying it.
71678 +
71679 +         Note: full support for this feature requires gcc with plugin support,
71680 +         so make sure your compiler is at least gcc 4.5.0 (cross compilation
71681 +         is not supported).  With older gcc versions, functions that have
71682 +         large enough stack frames may leave uninitialized memory behind
71683 +         that a later stack-leaking syscall may then expose.
71684 +
71685 +config PAX_MEMORY_UDEREF
71686 +       bool "Prevent invalid userland pointer dereference"
71687 +       depends on X86 && !UML_X86 && !XEN
71688 +       select PAX_PER_CPU_PGD if X86_64
71689 +       help
71690 +         By saying Y here the kernel will be prevented from dereferencing
71691 +         userland pointers in contexts where the kernel expects only kernel
71692 +         pointers.  This is both a useful runtime debugging feature and a
71693 +         security measure that prevents exploiting a class of kernel bugs.
71694 +
71695 +         The tradeoff is that some virtualization solutions may experience
71696 +         a huge slowdown, and therefore you should not enable this feature
71697 +         for kernels meant to run in such environments.  Whether a given VM
71698 +         solution is affected or not is best determined by simply trying it
71699 +         out; the performance impact will be obvious right from boot, as this
71700 +         mechanism engages very early on.  A good rule of thumb is that
71701 +         VMs running on CPUs without hardware virtualization support (i.e.,
71702 +         the majority of IA-32 CPUs) will likely experience the slowdown.
71703 +
71704 +config PAX_REFCOUNT
71705 +       bool "Prevent various kernel object reference counter overflows"
71706 +       depends on GRKERNSEC && (X86 || SPARC64)
71707 +       help
71708 +         By saying Y here the kernel will detect and prevent overflowing
71709 +         various (but not all) kinds of object reference counters.  Such
71710 +         overflows normally occur only due to bugs and are often, if
71711 +         not always, exploitable.
71712 +
71713 +         The tradeoff is that data structures protected by an overflowed
71714 +         refcount will never be freed and therefore will leak memory.  Note
71715 +         that this leak also happens even without this protection, but in
71716 +         that case the overflow can eventually trigger the freeing of the
71717 +         data structure while it is still being used elsewhere, resulting
71718 +         in the exploitable situation that this feature prevents.
71719 +
71720 +         Since this has a negligible performance impact, you should enable
71721 +         this feature.
71722 +
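A minimal userland sketch of the bug class described above (illustrative only, using a plain unsigned counter in place of the kernel's atomic_t): once the counter wraps, a single "last put" releases the object even though countless references to it remain, which is exactly the use-after-free situation that PAX_REFCOUNT turns into a mere memory leak.

#include <stdio.h>
#include <limits.h>

struct obj {
	unsigned int refcount;	/* stands in for the kernel's atomic_t */
	char payload[32];
};

static void obj_get(struct obj *o)
{
	o->refcount++;			/* no overflow check */
}

static int obj_put(struct obj *o)
{
	return --o->refcount == 0;	/* caller frees the object on zero */
}

int main(void)
{
	struct obj o = { .refcount = UINT_MAX };	/* attacker drove the count up here */

	obj_get(&o);		/* wraps to 0 */
	obj_get(&o);		/* now 1, although billions of references still exist */
	if (obj_put(&o))
		printf("object would be freed while still referenced elsewhere\n");
	return 0;
}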
71723 +config PAX_USERCOPY
71724 +       bool "Harden heap object copies between kernel and userland"
71725 +       depends on X86 || PPC || SPARC || ARM
71726 +       depends on GRKERNSEC && (SLAB || SLUB || SLOB)
71727 +       help
71728 +         By saying Y here the kernel will enforce the size of heap objects
71729 +         when they are copied in either direction between the kernel and
71730 +         userland, even if only a part of the heap object is copied.
71731 +
71732 +         Specifically, this checking prevents information leaking from the
71733 +         kernel heap during kernel-to-userland copies (if the kernel heap
71734 +         object is otherwise fully initialized) and prevents kernel heap
71735 +         overflows during userland-to-kernel copies.
71736 +
71737 +         Note that the current implementation provides the strictest bounds
71738 +         checks for the SLUB allocator.
71739 +
71740 +         Enabling this option also enables per-slab-cache protection against
71741 +         data in a given cache being copied to or from userland via the
71742 +         usercopy accessors.  Though the whitelist of regions will be reduced
71743 +         over time, it notably protects important data structures like task structs.
71744 +
71745 +         If frame pointers are enabled on x86, this option will also restrict
71746 +         copies into and out of the kernel stack to local variables within a
71747 +         single frame.
71748 +
71749 +         Since this has a negligible performance impact, you should enable
71750 +         this feature.
71751 +
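A userland sketch of the check described above (the helper names object_size_of() and checked_copy() are hypothetical, not the kernel implementation): before copying, the length requested by the caller is compared against the size of the underlying heap object, and oversized copies are refused instead of silently leaking or overflowing.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for looking up the slab object size behind a heap pointer */
static size_t object_size_of(const void *ptr, size_t alloc_size)
{
	(void)ptr;
	return alloc_size;
}

/* stand-in for the hardened copy helper: refuse copies past the object's end */
static int checked_copy(void *dst, const void *src, size_t len, size_t obj_size)
{
	if (len > obj_size) {
		fprintf(stderr, "usercopy: refusing %zu byte copy from a %zu byte object\n",
			len, obj_size);
		return -1;
	}
	memcpy(dst, src, len);
	return 0;
}

int main(void)
{
	size_t obj_size = 64;
	char *heap_obj = calloc(1, obj_size);
	char userbuf[256];
	size_t requested = 200;		/* attacker-influenced length */

	if (!heap_obj)
		return 1;
	checked_copy(userbuf, heap_obj, requested,
		     object_size_of(heap_obj, obj_size));
	free(heap_obj);
	return 0;
}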
71752 +endmenu
71753 +
71754 +endmenu
71755 +
71756  config KEYS
71757         bool "Enable access key retention support"
71758         help
71759 @@ -167,7 +715,7 @@ config INTEL_TXT
71760  config LSM_MMAP_MIN_ADDR
71761         int "Low address space for LSM to protect from user allocation"
71762         depends on SECURITY && SECURITY_SELINUX
71763 -       default 32768 if ARM
71764 +       default 32768 if ALPHA || ARM || PARISC || SPARC32
71765         default 65536
71766         help
71767           This is the portion of low virtual memory which should be protected
71768 diff -urNp linux-3.0.4/security/keys/keyring.c linux-3.0.4/security/keys/keyring.c
71769 --- linux-3.0.4/security/keys/keyring.c 2011-07-21 22:17:23.000000000 -0400
71770 +++ linux-3.0.4/security/keys/keyring.c 2011-08-23 21:47:56.000000000 -0400
71771 @@ -215,15 +215,15 @@ static long keyring_read(const struct ke
71772                         ret = -EFAULT;
71773  
71774                         for (loop = 0; loop < klist->nkeys; loop++) {
71775 +                               key_serial_t serial;
71776                                 key = klist->keys[loop];
71777 +                               serial = key->serial;
71778  
71779                                 tmp = sizeof(key_serial_t);
71780                                 if (tmp > buflen)
71781                                         tmp = buflen;
71782  
71783 -                               if (copy_to_user(buffer,
71784 -                                                &key->serial,
71785 -                                                tmp) != 0)
71786 +                               if (copy_to_user(buffer, &serial, tmp))
71787                                         goto error;
71788  
71789                                 buflen -= tmp;
71790 diff -urNp linux-3.0.4/security/min_addr.c linux-3.0.4/security/min_addr.c
71791 --- linux-3.0.4/security/min_addr.c     2011-07-21 22:17:23.000000000 -0400
71792 +++ linux-3.0.4/security/min_addr.c     2011-08-23 21:48:14.000000000 -0400
71793 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
71794   */
71795  static void update_mmap_min_addr(void)
71796  {
71797 +#ifndef SPARC
71798  #ifdef CONFIG_LSM_MMAP_MIN_ADDR
71799         if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
71800                 mmap_min_addr = dac_mmap_min_addr;
71801 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
71802  #else
71803         mmap_min_addr = dac_mmap_min_addr;
71804  #endif
71805 +#endif
71806  }
71807  
71808  /*
71809 diff -urNp linux-3.0.4/security/security.c linux-3.0.4/security/security.c
71810 --- linux-3.0.4/security/security.c     2011-07-21 22:17:23.000000000 -0400
71811 +++ linux-3.0.4/security/security.c     2011-08-23 21:48:14.000000000 -0400
71812 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
71813  /* things that live in capability.c */
71814  extern void __init security_fixup_ops(struct security_operations *ops);
71815  
71816 -static struct security_operations *security_ops;
71817 -static struct security_operations default_security_ops = {
71818 +static struct security_operations *security_ops __read_only;
71819 +static struct security_operations default_security_ops __read_only = {
71820         .name   = "default",
71821  };
71822  
71823 @@ -67,7 +67,9 @@ int __init security_init(void)
71824  
71825  void reset_security_ops(void)
71826  {
71827 +       pax_open_kernel();
71828         security_ops = &default_security_ops;
71829 +       pax_close_kernel();
71830  }
71831  
71832  /* Save user chosen LSM */
71833 diff -urNp linux-3.0.4/security/selinux/hooks.c linux-3.0.4/security/selinux/hooks.c
71834 --- linux-3.0.4/security/selinux/hooks.c        2011-07-21 22:17:23.000000000 -0400
71835 +++ linux-3.0.4/security/selinux/hooks.c        2011-08-23 21:48:14.000000000 -0400
71836 @@ -93,7 +93,6 @@
71837  #define NUM_SEL_MNT_OPTS 5
71838  
71839  extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
71840 -extern struct security_operations *security_ops;
71841  
71842  /* SECMARK reference count */
71843  atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
71844 @@ -5454,7 +5453,7 @@ static int selinux_key_getsecurity(struc
71845  
71846  #endif
71847  
71848 -static struct security_operations selinux_ops = {
71849 +static struct security_operations selinux_ops __read_only = {
71850         .name =                         "selinux",
71851  
71852         .ptrace_access_check =          selinux_ptrace_access_check,
71853 diff -urNp linux-3.0.4/security/selinux/include/xfrm.h linux-3.0.4/security/selinux/include/xfrm.h
71854 --- linux-3.0.4/security/selinux/include/xfrm.h 2011-07-21 22:17:23.000000000 -0400
71855 +++ linux-3.0.4/security/selinux/include/xfrm.h 2011-08-23 21:47:56.000000000 -0400
71856 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
71857  
71858  static inline void selinux_xfrm_notify_policyload(void)
71859  {
71860 -       atomic_inc(&flow_cache_genid);
71861 +       atomic_inc_unchecked(&flow_cache_genid);
71862  }
71863  #else
71864  static inline int selinux_xfrm_enabled(void)
71865 diff -urNp linux-3.0.4/security/selinux/ss/services.c linux-3.0.4/security/selinux/ss/services.c
71866 --- linux-3.0.4/security/selinux/ss/services.c  2011-07-21 22:17:23.000000000 -0400
71867 +++ linux-3.0.4/security/selinux/ss/services.c  2011-08-23 21:48:14.000000000 -0400
71868 @@ -1814,6 +1814,8 @@ int security_load_policy(void *data, siz
71869         int rc = 0;
71870         struct policy_file file = { data, len }, *fp = &file;
71871  
71872 +       pax_track_stack();
71873 +
71874         if (!ss_initialized) {
71875                 avtab_cache_init();
71876                 rc = policydb_read(&policydb, fp);
71877 diff -urNp linux-3.0.4/security/smack/smack_lsm.c linux-3.0.4/security/smack/smack_lsm.c
71878 --- linux-3.0.4/security/smack/smack_lsm.c      2011-07-21 22:17:23.000000000 -0400
71879 +++ linux-3.0.4/security/smack/smack_lsm.c      2011-08-23 21:47:56.000000000 -0400
71880 @@ -3392,7 +3392,7 @@ static int smack_inode_getsecctx(struct 
71881         return 0;
71882  }
71883  
71884 -struct security_operations smack_ops = {
71885 +struct security_operations smack_ops __read_only = {
71886         .name =                         "smack",
71887  
71888         .ptrace_access_check =          smack_ptrace_access_check,
71889 diff -urNp linux-3.0.4/security/tomoyo/tomoyo.c linux-3.0.4/security/tomoyo/tomoyo.c
71890 --- linux-3.0.4/security/tomoyo/tomoyo.c        2011-07-21 22:17:23.000000000 -0400
71891 +++ linux-3.0.4/security/tomoyo/tomoyo.c        2011-08-23 21:47:56.000000000 -0400
71892 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
71893   * tomoyo_security_ops is a "struct security_operations" which is used for
71894   * registering TOMOYO.
71895   */
71896 -static struct security_operations tomoyo_security_ops = {
71897 +static struct security_operations tomoyo_security_ops __read_only = {
71898         .name                = "tomoyo",
71899         .cred_alloc_blank    = tomoyo_cred_alloc_blank,
71900         .cred_prepare        = tomoyo_cred_prepare,
71901 diff -urNp linux-3.0.4/sound/aoa/codecs/onyx.c linux-3.0.4/sound/aoa/codecs/onyx.c
71902 --- linux-3.0.4/sound/aoa/codecs/onyx.c 2011-07-21 22:17:23.000000000 -0400
71903 +++ linux-3.0.4/sound/aoa/codecs/onyx.c 2011-08-23 21:47:56.000000000 -0400
71904 @@ -54,7 +54,7 @@ struct onyx {
71905                                 spdif_locked:1,
71906                                 analog_locked:1,
71907                                 original_mute:2;
71908 -       int                     open_count;
71909 +       local_t                 open_count;
71910         struct codec_info       *codec_info;
71911  
71912         /* mutex serializes concurrent access to the device
71913 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
71914         struct onyx *onyx = cii->codec_data;
71915  
71916         mutex_lock(&onyx->mutex);
71917 -       onyx->open_count++;
71918 +       local_inc(&onyx->open_count);
71919         mutex_unlock(&onyx->mutex);
71920  
71921         return 0;
71922 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
71923         struct onyx *onyx = cii->codec_data;
71924  
71925         mutex_lock(&onyx->mutex);
71926 -       onyx->open_count--;
71927 -       if (!onyx->open_count)
71928 +       if (local_dec_and_test(&onyx->open_count))
71929                 onyx->spdif_locked = onyx->analog_locked = 0;
71930         mutex_unlock(&onyx->mutex);
71931  
71932 diff -urNp linux-3.0.4/sound/aoa/codecs/onyx.h linux-3.0.4/sound/aoa/codecs/onyx.h
71933 --- linux-3.0.4/sound/aoa/codecs/onyx.h 2011-07-21 22:17:23.000000000 -0400
71934 +++ linux-3.0.4/sound/aoa/codecs/onyx.h 2011-08-23 21:47:56.000000000 -0400
71935 @@ -11,6 +11,7 @@
71936  #include <linux/i2c.h>
71937  #include <asm/pmac_low_i2c.h>
71938  #include <asm/prom.h>
71939 +#include <asm/local.h>
71940  
71941  /* PCM3052 register definitions */
71942  
71943 diff -urNp linux-3.0.4/sound/core/seq/seq_device.c linux-3.0.4/sound/core/seq/seq_device.c
71944 --- linux-3.0.4/sound/core/seq/seq_device.c     2011-07-21 22:17:23.000000000 -0400
71945 +++ linux-3.0.4/sound/core/seq/seq_device.c     2011-08-23 21:47:56.000000000 -0400
71946 @@ -63,7 +63,7 @@ struct ops_list {
71947         int argsize;            /* argument size */
71948  
71949         /* operators */
71950 -       struct snd_seq_dev_ops ops;
71951 +       struct snd_seq_dev_ops *ops;
71952  
71953         /* registred devices */
71954         struct list_head dev_list;      /* list of devices */
71955 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char 
71956  
71957         mutex_lock(&ops->reg_mutex);
71958         /* copy driver operators */
71959 -       ops->ops = *entry;
71960 +       ops->ops = entry;
71961         ops->driver |= DRIVER_LOADED;
71962         ops->argsize = argsize;
71963  
71964 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
71965                            dev->name, ops->id, ops->argsize, dev->argsize);
71966                 return -EINVAL;
71967         }
71968 -       if (ops->ops.init_device(dev) >= 0) {
71969 +       if (ops->ops->init_device(dev) >= 0) {
71970                 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
71971                 ops->num_init_devices++;
71972         } else {
71973 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
71974                            dev->name, ops->id, ops->argsize, dev->argsize);
71975                 return -EINVAL;
71976         }
71977 -       if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
71978 +       if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
71979                 dev->status = SNDRV_SEQ_DEVICE_FREE;
71980                 dev->driver_data = NULL;
71981                 ops->num_init_devices--;
71982 diff -urNp linux-3.0.4/sound/drivers/mts64.c linux-3.0.4/sound/drivers/mts64.c
71983 --- linux-3.0.4/sound/drivers/mts64.c   2011-07-21 22:17:23.000000000 -0400
71984 +++ linux-3.0.4/sound/drivers/mts64.c   2011-08-23 21:47:56.000000000 -0400
71985 @@ -28,6 +28,7 @@
71986  #include <sound/initval.h>
71987  #include <sound/rawmidi.h>
71988  #include <sound/control.h>
71989 +#include <asm/local.h>
71990  
71991  #define CARD_NAME "Miditerminal 4140"
71992  #define DRIVER_NAME "MTS64"
71993 @@ -66,7 +67,7 @@ struct mts64 {
71994         struct pardevice *pardev;
71995         int pardev_claimed;
71996  
71997 -       int open_count;
71998 +       local_t open_count;
71999         int current_midi_output_port;
72000         int current_midi_input_port;
72001         u8 mode[MTS64_NUM_INPUT_PORTS];
72002 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
72003  {
72004         struct mts64 *mts = substream->rmidi->private_data;
72005  
72006 -       if (mts->open_count == 0) {
72007 +       if (local_read(&mts->open_count) == 0) {
72008                 /* We don't need a spinlock here, because this is just called 
72009                    if the device has not been opened before. 
72010                    So there aren't any IRQs from the device */
72011 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
72012  
72013                 msleep(50);
72014         }
72015 -       ++(mts->open_count);
72016 +       local_inc(&mts->open_count);
72017  
72018         return 0;
72019  }
72020 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
72021         struct mts64 *mts = substream->rmidi->private_data;
72022         unsigned long flags;
72023  
72024 -       --(mts->open_count);
72025 -       if (mts->open_count == 0) {
72026 +       if (local_dec_return(&mts->open_count) == 0) {
72027                 /* We need the spinlock_irqsave here because we can still
72028                    have IRQs at this point */
72029                 spin_lock_irqsave(&mts->lock, flags);
72030 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
72031  
72032                 msleep(500);
72033  
72034 -       } else if (mts->open_count < 0)
72035 -               mts->open_count = 0;
72036 +       } else if (local_read(&mts->open_count) < 0)
72037 +               local_set(&mts->open_count, 0);
72038  
72039         return 0;
72040  }
72041 diff -urNp linux-3.0.4/sound/drivers/opl4/opl4_lib.c linux-3.0.4/sound/drivers/opl4/opl4_lib.c
72042 --- linux-3.0.4/sound/drivers/opl4/opl4_lib.c   2011-07-21 22:17:23.000000000 -0400
72043 +++ linux-3.0.4/sound/drivers/opl4/opl4_lib.c   2011-08-23 21:47:56.000000000 -0400
72044 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
72045  MODULE_DESCRIPTION("OPL4 driver");
72046  MODULE_LICENSE("GPL");
72047  
72048 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
72049 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
72050  {
72051         int timeout = 10;
72052         while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
72053 diff -urNp linux-3.0.4/sound/drivers/portman2x4.c linux-3.0.4/sound/drivers/portman2x4.c
72054 --- linux-3.0.4/sound/drivers/portman2x4.c      2011-07-21 22:17:23.000000000 -0400
72055 +++ linux-3.0.4/sound/drivers/portman2x4.c      2011-08-23 21:47:56.000000000 -0400
72056 @@ -47,6 +47,7 @@
72057  #include <sound/initval.h>
72058  #include <sound/rawmidi.h>
72059  #include <sound/control.h>
72060 +#include <asm/local.h>
72061  
72062  #define CARD_NAME "Portman 2x4"
72063  #define DRIVER_NAME "portman"
72064 @@ -84,7 +85,7 @@ struct portman {
72065         struct pardevice *pardev;
72066         int pardev_claimed;
72067  
72068 -       int open_count;
72069 +       local_t open_count;
72070         int mode[PORTMAN_NUM_INPUT_PORTS];
72071         struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
72072  };
72073 diff -urNp linux-3.0.4/sound/firewire/amdtp.c linux-3.0.4/sound/firewire/amdtp.c
72074 --- linux-3.0.4/sound/firewire/amdtp.c  2011-07-21 22:17:23.000000000 -0400
72075 +++ linux-3.0.4/sound/firewire/amdtp.c  2011-08-23 21:47:56.000000000 -0400
72076 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
72077                 ptr = s->pcm_buffer_pointer + data_blocks;
72078                 if (ptr >= pcm->runtime->buffer_size)
72079                         ptr -= pcm->runtime->buffer_size;
72080 -               ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
72081 +               ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
72082  
72083                 s->pcm_period_pointer += data_blocks;
72084                 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
72085 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
72086   */
72087  void amdtp_out_stream_update(struct amdtp_out_stream *s)
72088  {
72089 -       ACCESS_ONCE(s->source_node_id_field) =
72090 +       ACCESS_ONCE_RW(s->source_node_id_field) =
72091                 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
72092  }
72093  EXPORT_SYMBOL(amdtp_out_stream_update);
72094 diff -urNp linux-3.0.4/sound/firewire/amdtp.h linux-3.0.4/sound/firewire/amdtp.h
72095 --- linux-3.0.4/sound/firewire/amdtp.h  2011-07-21 22:17:23.000000000 -0400
72096 +++ linux-3.0.4/sound/firewire/amdtp.h  2011-08-23 21:47:56.000000000 -0400
72097 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
72098  static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
72099                                                 struct snd_pcm_substream *pcm)
72100  {
72101 -       ACCESS_ONCE(s->pcm) = pcm;
72102 +       ACCESS_ONCE_RW(s->pcm) = pcm;
72103  }
72104  
72105  /**
72106 diff -urNp linux-3.0.4/sound/firewire/isight.c linux-3.0.4/sound/firewire/isight.c
72107 --- linux-3.0.4/sound/firewire/isight.c 2011-07-21 22:17:23.000000000 -0400
72108 +++ linux-3.0.4/sound/firewire/isight.c 2011-08-23 21:47:56.000000000 -0400
72109 @@ -97,7 +97,7 @@ static void isight_update_pointers(struc
72110         ptr += count;
72111         if (ptr >= runtime->buffer_size)
72112                 ptr -= runtime->buffer_size;
72113 -       ACCESS_ONCE(isight->buffer_pointer) = ptr;
72114 +       ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
72115  
72116         isight->period_counter += count;
72117         if (isight->period_counter >= runtime->period_size) {
72118 @@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_p
72119         if (err < 0)
72120                 return err;
72121  
72122 -       ACCESS_ONCE(isight->pcm_active) = true;
72123 +       ACCESS_ONCE_RW(isight->pcm_active) = true;
72124  
72125         return 0;
72126  }
72127 @@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm
72128  {
72129         struct isight *isight = substream->private_data;
72130  
72131 -       ACCESS_ONCE(isight->pcm_active) = false;
72132 +       ACCESS_ONCE_RW(isight->pcm_active) = false;
72133  
72134         mutex_lock(&isight->mutex);
72135         isight_stop_streaming(isight);
72136 @@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm
72137  
72138         switch (cmd) {
72139         case SNDRV_PCM_TRIGGER_START:
72140 -               ACCESS_ONCE(isight->pcm_running) = true;
72141 +               ACCESS_ONCE_RW(isight->pcm_running) = true;
72142                 break;
72143         case SNDRV_PCM_TRIGGER_STOP:
72144 -               ACCESS_ONCE(isight->pcm_running) = false;
72145 +               ACCESS_ONCE_RW(isight->pcm_running) = false;
72146                 break;
72147         default:
72148                 return -EINVAL;
72149 diff -urNp linux-3.0.4/sound/isa/cmi8330.c linux-3.0.4/sound/isa/cmi8330.c
72150 --- linux-3.0.4/sound/isa/cmi8330.c     2011-07-21 22:17:23.000000000 -0400
72151 +++ linux-3.0.4/sound/isa/cmi8330.c     2011-08-23 21:47:56.000000000 -0400
72152 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
72153  
72154         struct snd_pcm *pcm;
72155         struct snd_cmi8330_stream {
72156 -               struct snd_pcm_ops ops;
72157 +               snd_pcm_ops_no_const ops;
72158                 snd_pcm_open_callback_t open;
72159                 void *private_data; /* sb or wss */
72160         } streams[2];
72161 diff -urNp linux-3.0.4/sound/oss/sb_audio.c linux-3.0.4/sound/oss/sb_audio.c
72162 --- linux-3.0.4/sound/oss/sb_audio.c    2011-07-21 22:17:23.000000000 -0400
72163 +++ linux-3.0.4/sound/oss/sb_audio.c    2011-08-23 21:47:56.000000000 -0400
72164 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
72165                 buf16 = (signed short *)(localbuf + localoffs);
72166                 while (c)
72167                 {
72168 -                       locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
72169 +                       locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
72170                         if (copy_from_user(lbuf8,
72171                                            userbuf+useroffs + p,
72172                                            locallen))
72173 diff -urNp linux-3.0.4/sound/oss/swarm_cs4297a.c linux-3.0.4/sound/oss/swarm_cs4297a.c
72174 --- linux-3.0.4/sound/oss/swarm_cs4297a.c       2011-07-21 22:17:23.000000000 -0400
72175 +++ linux-3.0.4/sound/oss/swarm_cs4297a.c       2011-08-23 21:47:56.000000000 -0400
72176 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
72177  {
72178         struct cs4297a_state *s;
72179         u32 pwr, id;
72180 -       mm_segment_t fs;
72181         int rval;
72182  #ifndef CONFIG_BCM_CS4297A_CSWARM
72183         u64 cfg;
72184 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
72185          if (!rval) {
72186                 char *sb1250_duart_present;
72187  
72188 +#if 0
72189 +                mm_segment_t fs;
72190                  fs = get_fs();
72191                  set_fs(KERNEL_DS);
72192 -#if 0
72193                  val = SOUND_MASK_LINE;
72194                  mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
72195                  for (i = 0; i < ARRAY_SIZE(initvol); i++) {
72196                          val = initvol[i].vol;
72197                          mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
72198                  }
72199 +                set_fs(fs);
72200  //                cs4297a_write_ac97(s, 0x18, 0x0808);
72201  #else
72202                  //                cs4297a_write_ac97(s, 0x5e, 0x180);
72203                  cs4297a_write_ac97(s, 0x02, 0x0808);
72204                  cs4297a_write_ac97(s, 0x18, 0x0808);
72205  #endif
72206 -                set_fs(fs);
72207  
72208                  list_add(&s->list, &cs4297a_devs);
72209  
72210 diff -urNp linux-3.0.4/sound/pci/hda/hda_codec.h linux-3.0.4/sound/pci/hda/hda_codec.h
72211 --- linux-3.0.4/sound/pci/hda/hda_codec.h       2011-07-21 22:17:23.000000000 -0400
72212 +++ linux-3.0.4/sound/pci/hda/hda_codec.h       2011-08-23 21:47:56.000000000 -0400
72213 @@ -615,7 +615,7 @@ struct hda_bus_ops {
72214         /* notify power-up/down from codec to controller */
72215         void (*pm_notify)(struct hda_bus *bus);
72216  #endif
72217 -};
72218 +} __no_const;
72219  
72220  /* template to pass to the bus constructor */
72221  struct hda_bus_template {
72222 @@ -713,6 +713,7 @@ struct hda_codec_ops {
72223  #endif
72224         void (*reboot_notify)(struct hda_codec *codec);
72225  };
72226 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
72227  
72228  /* record for amp information cache */
72229  struct hda_cache_head {
72230 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
72231                        struct snd_pcm_substream *substream);
72232         int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
72233                        struct snd_pcm_substream *substream);
72234 -};
72235 +} __no_const;
72236  
72237  /* PCM information for each substream */
72238  struct hda_pcm_stream {
72239 @@ -801,7 +802,7 @@ struct hda_codec {
72240         const char *modelname;  /* model name for preset */
72241  
72242         /* set by patch */
72243 -       struct hda_codec_ops patch_ops;
72244 +       hda_codec_ops_no_const patch_ops;
72245  
72246         /* PCM to create, set by patch_ops.build_pcms callback */
72247         unsigned int num_pcms;
72248 diff -urNp linux-3.0.4/sound/pci/ice1712/ice1712.h linux-3.0.4/sound/pci/ice1712/ice1712.h
72249 --- linux-3.0.4/sound/pci/ice1712/ice1712.h     2011-07-21 22:17:23.000000000 -0400
72250 +++ linux-3.0.4/sound/pci/ice1712/ice1712.h     2011-08-23 21:47:56.000000000 -0400
72251 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
72252         unsigned int mask_flags;        /* total mask bits */
72253         struct snd_akm4xxx_ops {
72254                 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
72255 -       } ops;
72256 +       } __no_const ops;
72257  };
72258  
72259  struct snd_ice1712_spdif {
72260 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
72261                 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
72262                 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
72263                 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
72264 -       } ops;
72265 +       } __no_const ops;
72266  };
72267  
72268  
72269 diff -urNp linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c
72270 --- linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c  2011-07-21 22:17:23.000000000 -0400
72271 +++ linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c  2011-08-23 21:47:56.000000000 -0400
72272 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
72273                 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
72274                         break;
72275         }
72276 -       if (atomic_read(&chip->interrupt_sleep_count)) {
72277 -               atomic_set(&chip->interrupt_sleep_count, 0);
72278 +       if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
72279 +               atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72280                 wake_up(&chip->interrupt_sleep);
72281         }
72282        __end:
72283 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
72284                         continue;
72285                 init_waitqueue_entry(&wait, current);
72286                 add_wait_queue(&chip->interrupt_sleep, &wait);
72287 -               atomic_inc(&chip->interrupt_sleep_count);
72288 +               atomic_inc_unchecked(&chip->interrupt_sleep_count);
72289                 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
72290                 remove_wait_queue(&chip->interrupt_sleep, &wait);
72291         }
72292 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
72293                 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
72294                 spin_unlock(&chip->reg_lock);
72295  
72296 -               if (atomic_read(&chip->interrupt_sleep_count)) {
72297 -                       atomic_set(&chip->interrupt_sleep_count, 0);
72298 +               if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
72299 +                       atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72300                         wake_up(&chip->interrupt_sleep);
72301                 }
72302         }
72303 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
72304         spin_lock_init(&chip->reg_lock);
72305         spin_lock_init(&chip->voice_lock);
72306         init_waitqueue_head(&chip->interrupt_sleep);
72307 -       atomic_set(&chip->interrupt_sleep_count, 0);
72308 +       atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72309         chip->card = card;
72310         chip->pci = pci;
72311         chip->irq = -1;
72312 diff -urNp linux-3.0.4/sound/soc/soc-core.c linux-3.0.4/sound/soc/soc-core.c
72313 --- linux-3.0.4/sound/soc/soc-core.c    2011-08-23 21:44:40.000000000 -0400
72314 +++ linux-3.0.4/sound/soc/soc-core.c    2011-08-23 21:47:56.000000000 -0400
72315 @@ -1021,7 +1021,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
72316  }
72317  
72318  /* ASoC PCM operations */
72319 -static struct snd_pcm_ops soc_pcm_ops = {
72320 +static snd_pcm_ops_no_const soc_pcm_ops = {
72321         .open           = soc_pcm_open,
72322         .close          = soc_codec_close,
72323         .hw_params      = soc_pcm_hw_params,
72324 @@ -2128,6 +2128,7 @@ static int soc_new_pcm(struct snd_soc_pc
72325         rtd->pcm = pcm;
72326         pcm->private_data = rtd;
72327         if (platform->driver->ops) {
72328 +               /* this whole logic is broken... */
72329                 soc_pcm_ops.mmap = platform->driver->ops->mmap;
72330                 soc_pcm_ops.pointer = platform->driver->ops->pointer;
72331                 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
72332 diff -urNp linux-3.0.4/sound/usb/card.h linux-3.0.4/sound/usb/card.h
72333 --- linux-3.0.4/sound/usb/card.h        2011-07-21 22:17:23.000000000 -0400
72334 +++ linux-3.0.4/sound/usb/card.h        2011-08-23 21:47:56.000000000 -0400
72335 @@ -44,6 +44,7 @@ struct snd_urb_ops {
72336         int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
72337         int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
72338  };
72339 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
72340  
72341  struct snd_usb_substream {
72342         struct snd_usb_stream *stream;
72343 @@ -93,7 +94,7 @@ struct snd_usb_substream {
72344         struct snd_pcm_hw_constraint_list rate_list;    /* limited rates */
72345         spinlock_t lock;
72346  
72347 -       struct snd_urb_ops ops;         /* callbacks (must be filled at init) */
72348 +       snd_urb_ops_no_const ops;               /* callbacks (must be filled at init) */
72349  };
72350  
72351  struct snd_usb_stream {
72352 diff -urNp linux-3.0.4/tools/gcc/constify_plugin.c linux-3.0.4/tools/gcc/constify_plugin.c
72353 --- linux-3.0.4/tools/gcc/constify_plugin.c     1969-12-31 19:00:00.000000000 -0500
72354 +++ linux-3.0.4/tools/gcc/constify_plugin.c     2011-08-29 22:01:36.000000000 -0400
72355 @@ -0,0 +1,289 @@
72356 +/*
72357 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
72358 + * Licensed under the GPL v2, or (at your option) v3
72359 + *
72360 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
72361 + *
72362 + * Usage:
72363 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
72364 + * $ gcc -fplugin=constify_plugin.so test.c  -O2
72365 + */
72366 +
72367 +#include "gcc-plugin.h"
72368 +#include "config.h"
72369 +#include "system.h"
72370 +#include "coretypes.h"
72371 +#include "tree.h"
72372 +#include "tree-pass.h"
72373 +#include "intl.h"
72374 +#include "plugin-version.h"
72375 +#include "tm.h"
72376 +#include "toplev.h"
72377 +#include "function.h"
72378 +#include "tree-flow.h"
72379 +#include "plugin.h"
72380 +#include "diagnostic.h"
72381 +//#include "c-tree.h"
72382 +
72383 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
72384 +
72385 +int plugin_is_GPL_compatible;
72386 +
72387 +static struct plugin_info const_plugin_info = {
72388 +       .version        = "20110826",
72389 +       .help           = "no-constify\tturn off constification\n",
72390 +};
72391 +
72392 +static void constify_type(tree type);
72393 +static bool walk_struct(tree node);
72394 +
72395 +static tree deconstify_type(tree old_type)
72396 +{
72397 +       tree new_type, field;
72398 +
72399 +       new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
72400 +       TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
72401 +       for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
72402 +               DECL_FIELD_CONTEXT(field) = new_type;
72403 +       TYPE_READONLY(new_type) = 0;
72404 +       C_TYPE_FIELDS_READONLY(new_type) = 0;
72405 +       return new_type;
72406 +}
72407 +
72408 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
72409 +{
72410 +       tree type;
72411 +
72412 +       *no_add_attrs = true;
72413 +       if (TREE_CODE(*node) == FUNCTION_DECL) {
72414 +               error("%qE attribute does not apply to functions", name);
72415 +               return NULL_TREE;
72416 +       }
72417 +
72418 +       if (TREE_CODE(*node) == VAR_DECL) {
72419 +               error("%qE attribute does not apply to variables", name);
72420 +               return NULL_TREE;
72421 +       }
72422 +
72423 +       if (TYPE_P(*node)) {
72424 +               if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
72425 +                       *no_add_attrs = false;
72426 +               else
72427 +                       error("%qE attribute applies to struct and union types only", name);
72428 +               return NULL_TREE;
72429 +       }
72430 +
72431 +       type = TREE_TYPE(*node);
72432 +
72433 +       if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
72434 +               error("%qE attribute applies to struct and union types only", name);
72435 +               return NULL_TREE;
72436 +       }
72437 +
72438 +       if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
72439 +               error("%qE attribute is already applied to the type", name);
72440 +               return NULL_TREE;
72441 +       }
72442 +
72443 +       if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
72444 +               error("%qE attribute used on type that is not constified", name);
72445 +               return NULL_TREE;
72446 +       }
72447 +
72448 +       if (TREE_CODE(*node) == TYPE_DECL) {
72449 +               TREE_TYPE(*node) = deconstify_type(type);
72450 +               TREE_READONLY(*node) = 0;
72451 +               return NULL_TREE;
72452 +       }
72453 +
72454 +       return NULL_TREE;
72455 +}
72456 +
72457 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
72458 +{
72459 +       *no_add_attrs = true;
72460 +       if (!TYPE_P(*node)) {
72461 +               error("%qE attribute applies to types only", name);
72462 +               return NULL_TREE;
72463 +       }
72464 +
72465 +       if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
72466 +               error("%qE attribute applies to struct and union types only", name);
72467 +               return NULL_TREE;
72468 +       }
72469 +
72470 +       *no_add_attrs = false;
72471 +       constify_type(*node);
72472 +       return NULL_TREE;
72473 +}
72474 +
72475 +static struct attribute_spec no_const_attr = {
72476 +       .name                   = "no_const",
72477 +       .min_length             = 0,
72478 +       .max_length             = 0,
72479 +       .decl_required          = false,
72480 +       .type_required          = false,
72481 +       .function_type_required = false,
72482 +       .handler                = handle_no_const_attribute
72483 +};
72484 +
72485 +static struct attribute_spec do_const_attr = {
72486 +       .name                   = "do_const",
72487 +       .min_length             = 0,
72488 +       .max_length             = 0,
72489 +       .decl_required          = false,
72490 +       .type_required          = false,
72491 +       .function_type_required = false,
72492 +       .handler                = handle_do_const_attribute
72493 +};
72494 +
72495 +static void register_attributes(void *event_data, void *data)
72496 +{
72497 +       register_attribute(&no_const_attr);
72498 +       register_attribute(&do_const_attr);
72499 +}
72500 +
72501 +static void constify_type(tree type)
72502 +{
72503 +       TYPE_READONLY(type) = 1;
72504 +       C_TYPE_FIELDS_READONLY(type) = 1;
72505 +}
72506 +
72507 +static bool is_fptr(tree field)
72508 +{
72509 +       tree ptr = TREE_TYPE(field);
72510 +
72511 +       if (TREE_CODE(ptr) != POINTER_TYPE)
72512 +               return false;
72513 +
72514 +       return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
72515 +}
72516 +
72517 +static bool walk_struct(tree node)
72518 +{
72519 +       tree field;
72520 +
72521 +       if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
72522 +               return false;
72523 +
72524 +       if (TYPE_FIELDS(node) == NULL_TREE)
72525 +               return false;
72526 +
72527 +       for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
72528 +               tree type = TREE_TYPE(field);
72529 +               enum tree_code code = TREE_CODE(type);
72530 +               if (code == RECORD_TYPE || code == UNION_TYPE) {
72531 +                       if (!(walk_struct(type)))
72532 +                               return false;
72533 +               } else if (!is_fptr(field) && !TREE_READONLY(field))
72534 +                       return false;
72535 +       }
72536 +       return true;
72537 +}
72538 +
72539 +static void finish_type(void *event_data, void *data)
72540 +{
72541 +       tree type = (tree)event_data;
72542 +
72543 +       if (type == NULL_TREE)
72544 +               return;
72545 +
72546 +       if (TYPE_READONLY(type))
72547 +               return;
72548 +
72549 +       if (walk_struct(type))
72550 +               constify_type(type);
72551 +}
72552 +
72553 +static unsigned int check_local_variables(void);
72554 +
72555 +struct gimple_opt_pass pass_local_variable = {
72556 +       {
72557 +               .type                   = GIMPLE_PASS,
72558 +               .name                   = "check_local_variables",
72559 +               .gate                   = NULL,
72560 +               .execute                = check_local_variables,
72561 +               .sub                    = NULL,
72562 +               .next                   = NULL,
72563 +               .static_pass_number     = 0,
72564 +               .tv_id                  = TV_NONE,
72565 +               .properties_required    = 0,
72566 +               .properties_provided    = 0,
72567 +               .properties_destroyed   = 0,
72568 +               .todo_flags_start       = 0,
72569 +               .todo_flags_finish      = 0
72570 +       }
72571 +};
72572 +
72573 +static unsigned int check_local_variables(void)
72574 +{
72575 +       tree var;
72576 +       referenced_var_iterator rvi;
72577 +
72578 +#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
72579 +       FOR_EACH_REFERENCED_VAR(var, rvi) {
72580 +#else
72581 +       FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
72582 +#endif
72583 +               tree type = TREE_TYPE(var);
72584 +
72585 +               if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
72586 +                       continue;
72587 +
72588 +               if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
72589 +                       continue;
72590 +
72591 +               if (!TYPE_READONLY(type))
72592 +                       continue;
72593 +
72594 +//             if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
72595 +//                     continue;
72596 +
72597 +//             if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
72598 +//                     continue;
72599 +
72600 +               if (walk_struct(type)) {
72601 +                       error("constified variable %qE cannot be local", var);
72602 +                       return 1;
72603 +               }
72604 +       }
72605 +       return 0;
72606 +}
72607 +
72608 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
72609 +{
72610 +       const char * const plugin_name = plugin_info->base_name;
72611 +       const int argc = plugin_info->argc;
72612 +       const struct plugin_argument * const argv = plugin_info->argv;
72613 +       int i;
72614 +       bool constify = true;
72615 +
72616 +       struct register_pass_info local_variable_pass_info = {
72617 +               .pass                           = &pass_local_variable.pass,
72618 +               .reference_pass_name            = "*referenced_vars",
72619 +               .ref_pass_instance_number       = 0,
72620 +               .pos_op                         = PASS_POS_INSERT_AFTER
72621 +       };
72622 +
72623 +       if (!plugin_default_version_check(version, &gcc_version)) {
72624 +               error(G_("incompatible gcc/plugin versions"));
72625 +               return 1;
72626 +       }
72627 +
72628 +       for (i = 0; i < argc; ++i) {
72629 +               if (!(strcmp(argv[i].key, "no-constify"))) {
72630 +                       constify = false;
72631 +                       continue;
72632 +               }
72633 +               error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72634 +       }
72635 +
72636 +       register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
72637 +       if (constify) {
72638 +               register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
72639 +               register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
72640 +       }
72641 +       register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
72642 +
72643 +       return 0;
72644 +}
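To illustrate what the plugin above acts on, consider the following sketch (the struct names are illustrative; compile it with -fplugin=constify_plugin.so as shown in the plugin's header comment, without the plugin the attribute is merely ignored with a warning): a structure whose members are all function pointers is implicitly made read-only, and the no_const attribute defined above opts a type back out of that.

struct fops {				/* only function-pointer members ... */
	int (*open)(void *priv);
	int (*close)(void *priv);
};					/* ... so the plugin constifies this type */

struct hotplug_ops {
	int (*probe)(void *dev);
	int (*remove)(void *dev);
} __attribute__((no_const));		/* would qualify, but stays writable */

static int dummy_open(void *priv)
{
	(void)priv;
	return 0;
}

static struct fops my_fops = {		/* static initialization is still allowed */
	.open = dummy_open,
};

int use_fops(void)
{
	/* my_fops.open = dummy_open;	with the plugin this store is rejected,
	 *				since the type (and thus the object) is const */
	return my_fops.open ? my_fops.open(NULL) : 0;
}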
72645 diff -urNp linux-3.0.4/tools/gcc/Makefile linux-3.0.4/tools/gcc/Makefile
72646 --- linux-3.0.4/tools/gcc/Makefile      1969-12-31 19:00:00.000000000 -0500
72647 +++ linux-3.0.4/tools/gcc/Makefile      2011-08-23 21:47:56.000000000 -0400
72648 @@ -0,0 +1,12 @@
72649 +#CC := gcc
72650 +#PLUGIN_SOURCE_FILES := pax_plugin.c
72651 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
72652 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
72653 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
72654 +
72655 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
72656 +
72657 +hostlibs-y := stackleak_plugin.so constify_plugin.so
72658 +always := $(hostlibs-y)
72659 +stackleak_plugin-objs := stackleak_plugin.o
72660 +constify_plugin-objs := constify_plugin.o
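The stackleak_plugin.c source that follows inserts calls to pax_track_stack() so the kernel can record the lowest stack pointer reached and erase that region on syscall exit.  Conceptually, an instrumented function ends up looking like the sketch below (pax_track_stack() is stubbed here purely for illustration; in a PaX kernel it is provided by the patch itself, and the entry-point call survives only if the frame size reaches the track-lowest-sp threshold).

#include <stdio.h>
#include <string.h>
#include <alloca.h>

static void pax_track_stack(void)	/* stub for illustration only */
{
	int marker;
	printf("stack pointer reached: %p\n", (void *)&marker);
}

static void handler(size_t n)		/* roughly what the plugin emits */
{
	char fixed[512];
	char *dynamic;

	pax_track_stack();		/* inserted at function entry */

	dynamic = alloca(n);
	pax_track_stack();		/* inserted after each __builtin_alloca() */

	memset(fixed, 0, sizeof(fixed));
	memset(dynamic, 0, n);
}

int main(void)
{
	handler(256);
	return 0;
}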
72661 diff -urNp linux-3.0.4/tools/gcc/stackleak_plugin.c linux-3.0.4/tools/gcc/stackleak_plugin.c
72662 --- linux-3.0.4/tools/gcc/stackleak_plugin.c    1969-12-31 19:00:00.000000000 -0500
72663 +++ linux-3.0.4/tools/gcc/stackleak_plugin.c    2011-08-23 21:47:56.000000000 -0400
72664 @@ -0,0 +1,243 @@
72665 +/*
72666 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
72667 + * Licensed under the GPL v2
72668 + *
72669 + * Note: the choice of the license means that the compilation process is
72670 + *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
72671 + *       but for the kernel it doesn't matter since it doesn't link against
72672 + *       any of the gcc libraries
72673 + *
72674 + * gcc plugin to help implement various PaX features
72675 + *
72676 + * - track lowest stack pointer
72677 + *
72678 + * TODO:
72679 + * - initialize all local variables
72680 + *
72681 + * BUGS:
72682 + * - cloned functions are instrumented twice
72683 + */
72684 +#include "gcc-plugin.h"
72685 +#include "config.h"
72686 +#include "system.h"
72687 +#include "coretypes.h"
72688 +#include "tree.h"
72689 +#include "tree-pass.h"
72690 +#include "intl.h"
72691 +#include "plugin-version.h"
72692 +#include "tm.h"
72693 +#include "toplev.h"
72694 +#include "basic-block.h"
72695 +#include "gimple.h"
72696 +//#include "expr.h" where are you...
72697 +#include "diagnostic.h"
72698 +#include "rtl.h"
72699 +#include "emit-rtl.h"
72700 +#include "function.h"
72701 +
72702 +int plugin_is_GPL_compatible;
72703 +
72704 +static int track_frame_size = -1;
72705 +static const char track_function[] = "pax_track_stack";
72706 +static bool init_locals;
72707 +
72708 +static struct plugin_info stackleak_plugin_info = {
72709 +       .version        = "201106030000",
72710 +       .help           = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
72711 +//                       "initialize-locals\t\tforcibly initialize all stack frames\n"
72712 +};
72713 +
72714 +static bool gate_stackleak_track_stack(void);
72715 +static unsigned int execute_stackleak_tree_instrument(void);
72716 +static unsigned int execute_stackleak_final(void);
72717 +
72718 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
72719 +       .pass = {
72720 +               .type                   = GIMPLE_PASS,
72721 +               .name                   = "stackleak_tree_instrument",
72722 +               .gate                   = gate_stackleak_track_stack,
72723 +               .execute                = execute_stackleak_tree_instrument,
72724 +               .sub                    = NULL,
72725 +               .next                   = NULL,
72726 +               .static_pass_number     = 0,
72727 +               .tv_id                  = TV_NONE,
72728 +               .properties_required    = PROP_gimple_leh | PROP_cfg,
72729 +               .properties_provided    = 0,
72730 +               .properties_destroyed   = 0,
72731 +               .todo_flags_start       = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
72732 +               .todo_flags_finish      = TODO_verify_stmts // | TODO_dump_func
72733 +       }
72734 +};
72735 +
72736 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
72737 +       .pass = {
72738 +               .type                   = RTL_PASS,
72739 +               .name                   = "stackleak_final",
72740 +               .gate                   = gate_stackleak_track_stack,
72741 +               .execute                = execute_stackleak_final,
72742 +               .sub                    = NULL,
72743 +               .next                   = NULL,
72744 +               .static_pass_number     = 0,
72745 +               .tv_id                  = TV_NONE,
72746 +               .properties_required    = 0,
72747 +               .properties_provided    = 0,
72748 +               .properties_destroyed   = 0,
72749 +               .todo_flags_start       = 0,
72750 +               .todo_flags_finish      = 0
72751 +       }
72752 +};
72753 +
72754 +static bool gate_stackleak_track_stack(void)
72755 +{
72756 +       return track_frame_size >= 0;
72757 +}
72758 +
72759 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
72760 +{
72761 +       gimple call;
72762 +       tree decl, type;
72763 +
72764 +       // insert call to void pax_track_stack(void)
72765 +       type = build_function_type_list(void_type_node, NULL_TREE);
72766 +       decl = build_fn_decl(track_function, type);
72767 +       DECL_ASSEMBLER_NAME(decl); // for LTO
72768 +       call = gimple_build_call(decl, 0);
72769 +       if (before)
72770 +               gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
72771 +       else
72772 +               gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
72773 +}
72774 +
72775 +static unsigned int execute_stackleak_tree_instrument(void)
72776 +{
72777 +       basic_block bb;
72778 +       gimple_stmt_iterator gsi;
72779 +
72780 +       // 1. loop through BBs and GIMPLE statements
72781 +       FOR_EACH_BB(bb) {
72782 +               for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
72783 +                       // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
72784 +                       tree decl;
72785 +                       gimple stmt = gsi_stmt(gsi);
72786 +
72787 +                       if (!is_gimple_call(stmt))
72788 +                               continue;
72789 +                       decl = gimple_call_fndecl(stmt);
72790 +                       if (!decl)
72791 +                               continue;
72792 +                       if (TREE_CODE(decl) != FUNCTION_DECL)
72793 +                               continue;
72794 +                       if (!DECL_BUILT_IN(decl))
72795 +                               continue;
72796 +                       if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
72797 +                               continue;
72798 +                       if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
72799 +                               continue;
72800 +
72801 +                       // 2. insert track call after each __builtin_alloca call
72802 +                       stackleak_add_instrumentation(&gsi, false);
72803 +//                     print_node(stderr, "pax", decl, 4);
72804 +               }
72805 +       }
72806 +
72807 +       // 3. insert track call at the beginning
72808 +       bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
72809 +       gsi = gsi_start_bb(bb);
72810 +       stackleak_add_instrumentation(&gsi, true);
72811 +
72812 +       return 0;
72813 +}
72814 +
72815 +static unsigned int execute_stackleak_final(void)
72816 +{
72817 +       rtx insn;
72818 +
72819 +       if (cfun->calls_alloca)
72820 +               return 0;
72821 +
72822 +       // 1. find pax_track_stack calls
72823 +       for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
72824 +               // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
72825 +               rtx body;
72826 +
72827 +               if (!CALL_P(insn))
72828 +                       continue;
72829 +               body = PATTERN(insn);
72830 +               if (GET_CODE(body) != CALL)
72831 +                       continue;
72832 +               body = XEXP(body, 0);
72833 +               if (GET_CODE(body) != MEM)
72834 +                       continue;
72835 +               body = XEXP(body, 0);
72836 +               if (GET_CODE(body) != SYMBOL_REF)
72837 +                       continue;
72838 +               if (strcmp(XSTR(body, 0), track_function))
72839 +                       continue;
72840 +//             warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72841 +               // 2. delete call if function frame is not big enough
72842 +               if (get_frame_size() >= track_frame_size)
72843 +                       continue;
72844 +               delete_insn_and_edges(insn);
72845 +       }
72846 +
72847 +//     print_simple_rtl(stderr, get_insns());
72848 +//     print_rtl(stderr, get_insns());
72849 +//     warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72850 +
72851 +       return 0;
72852 +}
72853 +
72854 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
72855 +{
72856 +       const char * const plugin_name = plugin_info->base_name;
72857 +       const int argc = plugin_info->argc;
72858 +       const struct plugin_argument * const argv = plugin_info->argv;
72859 +       int i;
72860 +       struct register_pass_info stackleak_tree_instrument_pass_info = {
72861 +               .pass                           = &stackleak_tree_instrument_pass.pass,
72862 +//             .reference_pass_name            = "tree_profile",
72863 +               .reference_pass_name            = "optimized",
72864 +               .ref_pass_instance_number       = 0,
72865 +               .pos_op                         = PASS_POS_INSERT_AFTER
72866 +       };
72867 +       struct register_pass_info stackleak_final_pass_info = {
72868 +               .pass                           = &stackleak_final_rtl_opt_pass.pass,
72869 +               .reference_pass_name            = "final",
72870 +               .ref_pass_instance_number       = 0,
72871 +               .pos_op                         = PASS_POS_INSERT_BEFORE
72872 +       };
72873 +
72874 +       if (!plugin_default_version_check(version, &gcc_version)) {
72875 +               error(G_("incompatible gcc/plugin versions"));
72876 +               return 1;
72877 +       }
72878 +
72879 +       register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
72880 +
72881 +       for (i = 0; i < argc; ++i) {
72882 +               if (!strcmp(argv[i].key, "track-lowest-sp")) {
72883 +                       if (!argv[i].value) {
72884 +                               error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72885 +                               continue;
72886 +                       }
72887 +                       track_frame_size = atoi(argv[i].value);
72888 +                       if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
72889 +                               error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72890 +                       continue;
72891 +               }
72892 +               if (!strcmp(argv[i].key, "initialize-locals")) {
72893 +                       if (argv[i].value) {
72894 +                               error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72895 +                               continue;
72896 +                       }
72897 +                       init_locals = true;
72898 +                       continue;
72899 +               }
72900 +               error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72901 +       }
72902 +
72903 +       register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
72904 +       register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
72905 +
72906 +       return 0;
72907 +}
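Background, not part of the patch itself: taken together, the two passes registered above in plugin_init() behave as if every function that uses __builtin_alloca, or whose frame is at least track-lowest-sp bytes, had been written with explicit calls to the kernel's void pax_track_stack(void) helper (which, broadly, records how deep the kernel stack got so it can later be sanitized). A minimal, self-contained sketch of that intended transformation, with a no-op stub standing in for the real kernel routine:

/* Illustrative sketch only, not patch code; pax_track_stack() here is a
 * stand-in for the kernel-provided helper of the same prototype. */
#include <stddef.h>

void pax_track_stack(void) { /* the kernel would note the stack depth here */ }

static int parse_request(size_t n)
{
        pax_track_stack();                  /* call inserted at function entry */
        char *buf = __builtin_alloca(n);    /* variable-sized stack allocation */
        pax_track_stack();                  /* call inserted after each alloca */
        buf[0] = '\0';                      /* ... use buf ... */
        return 0;
}

int main(void)
{
        return parse_request(64);
}

The execute_stackleak_final() RTL pass then strips these calls again from any function that neither calls alloca nor has a frame of at least track-lowest-sp bytes, so only stack-heavy functions keep the instrumentation.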
72908 diff -urNp linux-3.0.4/usr/gen_init_cpio.c linux-3.0.4/usr/gen_init_cpio.c
72909 --- linux-3.0.4/usr/gen_init_cpio.c     2011-07-21 22:17:23.000000000 -0400
72910 +++ linux-3.0.4/usr/gen_init_cpio.c     2011-08-23 21:47:56.000000000 -0400
72911 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name,
72912         int retval;
72913         int rc = -1;
72914         int namesize;
72915 -       int i;
72916 +       unsigned int i;
72917  
72918         mode |= S_IFREG;
72919  
72920 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_
72921                         *env_var = *expanded = '\0';
72922                         strncat(env_var, start + 2, end - start - 2);
72923                         strncat(expanded, new_location, start - new_location);
72924 -                       strncat(expanded, getenv(env_var), PATH_MAX);
72925 -                       strncat(expanded, end + 1, PATH_MAX);
72926 +                       strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
72927 +                       strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
72928                         strncpy(new_location, expanded, PATH_MAX);
72929 +                       new_location[PATH_MAX] = 0;
72930                 } else
72931                         break;
72932         }
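Background on the gen_init_cpio.c hunk above, not part of the patch itself: strncat()'s third argument bounds how many characters are appended, not the total size of the destination, so passing PATH_MAX there can overflow once expanded already holds text. strncpy() likewise does not NUL-terminate when the source fills the buffer, hence the added new_location[PATH_MAX] = 0. A standalone illustration of the corrected idiom, using a deliberately tiny demo buffer:

/* Standalone demo, not patch code: why the strncat() bound must shrink as
 * the destination fills up. */
#include <stdio.h>
#include <string.h>

#define PATH_MAX_DEMO 16

int main(void)
{
        char dst[PATH_MAX_DEMO + 1] = "/usr/share";

        /* Wrong: permits up to PATH_MAX_DEMO extra bytes although only six
         * remain, overflowing dst:
         *      strncat(dst, "/initramfs/files", PATH_MAX_DEMO);
         */

        /* Correct: bound by the space actually left in dst. */
        strncat(dst, "/initramfs/files", PATH_MAX_DEMO - strlen(dst));
        printf("%s\n", dst);    /* safely truncated to 16 characters */
        return 0;
}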
72933 diff -urNp linux-3.0.4/virt/kvm/kvm_main.c linux-3.0.4/virt/kvm/kvm_main.c
72934 --- linux-3.0.4/virt/kvm/kvm_main.c     2011-07-21 22:17:23.000000000 -0400
72935 +++ linux-3.0.4/virt/kvm/kvm_main.c     2011-08-23 21:47:56.000000000 -0400
72936 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
72937  
72938  static cpumask_var_t cpus_hardware_enabled;
72939  static int kvm_usage_count = 0;
72940 -static atomic_t hardware_enable_failed;
72941 +static atomic_unchecked_t hardware_enable_failed;
72942  
72943  struct kmem_cache *kvm_vcpu_cache;
72944  EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
72945 @@ -2176,7 +2176,7 @@ static void hardware_enable_nolock(void 
72946  
72947         if (r) {
72948                 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
72949 -               atomic_inc(&hardware_enable_failed);
72950 +               atomic_inc_unchecked(&hardware_enable_failed);
72951                 printk(KERN_INFO "kvm: enabling virtualization on "
72952                                  "CPU%d failed\n", cpu);
72953         }
72954 @@ -2230,10 +2230,10 @@ static int hardware_enable_all(void)
72955  
72956         kvm_usage_count++;
72957         if (kvm_usage_count == 1) {
72958 -               atomic_set(&hardware_enable_failed, 0);
72959 +               atomic_set_unchecked(&hardware_enable_failed, 0);
72960                 on_each_cpu(hardware_enable_nolock, NULL, 1);
72961  
72962 -               if (atomic_read(&hardware_enable_failed)) {
72963 +               if (atomic_read_unchecked(&hardware_enable_failed)) {
72964                         hardware_disable_all_nolock();
72965                         r = -EBUSY;
72966                 }
72967 @@ -2498,7 +2498,7 @@ static void kvm_sched_out(struct preempt
72968         kvm_arch_vcpu_put(vcpu);
72969  }
72970  
72971 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
72972 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
72973                   struct module *module)
72974  {
72975         int r;
72976 @@ -2561,7 +2561,7 @@ int kvm_init(void *opaque, unsigned vcpu
72977         if (!vcpu_align)
72978                 vcpu_align = __alignof__(struct kvm_vcpu);
72979         kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
72980 -                                          0, NULL);
72981 +                                          SLAB_USERCOPY, NULL);
72982         if (!kvm_vcpu_cache) {
72983                 r = -ENOMEM;
72984                 goto out_free_3;
72985 @@ -2571,9 +2571,11 @@ int kvm_init(void *opaque, unsigned vcpu
72986         if (r)
72987                 goto out_free;
72988  
72989 -       kvm_chardev_ops.owner = module;
72990 -       kvm_vm_fops.owner = module;
72991 -       kvm_vcpu_fops.owner = module;
72992 +       pax_open_kernel();
72993 +       *(void **)&kvm_chardev_ops.owner = module;
72994 +       *(void **)&kvm_vm_fops.owner = module;
72995 +       *(void **)&kvm_vcpu_fops.owner = module;
72996 +       pax_close_kernel();
72997  
72998         r = misc_register(&kvm_dev);
72999         if (r) {
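Background on the kvm_main.c hunks above, not part of the patch itself: hardware_enable_failed counts CPUs on which enabling virtualization failed and is not a reference count, so under PaX's REFCOUNT hardening it is moved to the unchecked atomic type whose operations skip overflow detection; SLAB_USERCOPY marks the kvm_vcpu cache as one whose objects may legitimately be copied to and from userland under PAX_USERCOPY; and the pax_open_kernel()/pax_close_kernel() pair briefly lifts write protection so the .owner fields of the constified file_operations structures can be set. A rough, self-contained sketch of what the unchecked counter amounts to (simplified, demo-suffixed names, not the patch's own definitions):

/* Conceptual demo only: a counter type whose increment carries no overflow
 * detection, mirroring the intent of atomic_unchecked_t. */
#include <stdio.h>

typedef struct { int counter; } atomic_unchecked_t_demo;

static void atomic_inc_unchecked_demo(atomic_unchecked_t_demo *v)
{
        __sync_fetch_and_add(&v->counter, 1);   /* plain atomic add, no trap */
}

static int atomic_read_unchecked_demo(const atomic_unchecked_t_demo *v)
{
        return *(volatile const int *)&v->counter;
}

int main(void)
{
        atomic_unchecked_t_demo failed = { 0 };

        atomic_inc_unchecked_demo(&failed);
        printf("failures: %d\n", atomic_read_unchecked_demo(&failed));
        return 0;
}

The trade-off is that such a counter can wrap silently, which is acceptable for a statistic like this failure count but not for an object's reference count.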