diff -urNp linux-2.6.22.1/arch/alpha/kernel/module.c linux-2.6.22.1/arch/alpha/kernel/module.c --- linux-2.6.22.1/arch/alpha/kernel/module.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/alpha/kernel/module.c 2007-08-02 11:38:45.000000000 -0400 @@ -177,7 +177,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, /* The small sections were sorted to the end of the segment. The following should definitely cover them. */ - gp = (u64)me->module_core + me->core_size - 0x8000; + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000; got = sechdrs[me->arch.gotsecindex].sh_addr; for (i = 0; i < n; i++) { diff -urNp linux-2.6.22.1/arch/alpha/kernel/osf_sys.c linux-2.6.22.1/arch/alpha/kernel/osf_sys.c --- linux-2.6.22.1/arch/alpha/kernel/osf_sys.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/alpha/kernel/osf_sys.c 2007-08-02 11:38:45.000000000 -0400 @@ -1288,6 +1288,10 @@ arch_get_unmapped_area(struct file *filp merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? */ +#ifdef CONFIG_PAX_RANDMMAP + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP) || !filp) +#endif + if (addr) { addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); if (addr != (unsigned long) -ENOMEM) @@ -1295,8 +1299,8 @@ arch_get_unmapped_area(struct file *filp } /* Next, try allocating at TASK_UNMAPPED_BASE. */ - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), - len, limit); + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit); + if (addr != (unsigned long) -ENOMEM) return addr; diff -urNp linux-2.6.22.1/arch/alpha/kernel/ptrace.c linux-2.6.22.1/arch/alpha/kernel/ptrace.c --- linux-2.6.22.1/arch/alpha/kernel/ptrace.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/alpha/kernel/ptrace.c 2007-08-02 11:09:14.000000000 -0400 @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -289,6 +290,11 @@ do_sys_ptrace(long request, long pid, lo goto out; } + if (gr_handle_ptrace(child, request)) { + ret = -EPERM; + goto out; + } + if (request == PTRACE_ATTACH) { ret = ptrace_attach(child); goto out; diff -urNp linux-2.6.22.1/arch/alpha/mm/fault.c linux-2.6.22.1/arch/alpha/mm/fault.c --- linux-2.6.22.1/arch/alpha/mm/fault.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/alpha/mm/fault.c 2007-08-02 11:38:45.000000000 -0400 @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -54,6 +55,124 @@ __load_new_mm_context(struct mm_struct * __reload_thread(pcb); } +#ifdef CONFIG_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (regs->pc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: patched PLT emulation #1 */ + unsigned int ldah, ldq, jmp; + + err = get_user(ldah, (unsigned int *)regs->pc); + err |= get_user(ldq, (unsigned int *)(regs->pc+4)); + err |= get_user(jmp, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((ldah & 0xFFFF0000U) == 0x277B0000U && + (ldq & 0xFFFF0000U) == 0xA77B0000U && + jmp == 0x6BFB0000U) + { + unsigned long r27, addr; + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL; + + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); + err = get_user(r27, (unsigned long *)addr); + if 
(err) + break; + + regs->r27 = r27; + regs->pc = r27; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #2 */ + unsigned int ldah, lda, br; + + err = get_user(ldah, (unsigned int *)regs->pc); + err |= get_user(lda, (unsigned int *)(regs->pc+4)); + err |= get_user(br, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((ldah & 0xFFFF0000U) == 0x277B0000U && + (lda & 0xFFFF0000U) == 0xA77B0000U && + (br & 0xFFE00000U) == 0xC3E00000U) + { + unsigned long addr = br | 0xFFFFFFFFFFE00000UL; + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL; + + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation */ + unsigned int br; + + err = get_user(br, (unsigned int *)regs->pc); + + if (!err && (br & 0xFFE00000U) == 0xC3800000U) { + unsigned int br2, ldq, nop, jmp; + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver; + + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); + err = get_user(br2, (unsigned int *)addr); + err |= get_user(ldq, (unsigned int *)(addr+4)); + err |= get_user(nop, (unsigned int *)(addr+8)); + err |= get_user(jmp, (unsigned int *)(addr+12)); + err |= get_user(resolver, (unsigned long *)(addr+16)); + + if (err) + break; + + if (br2 == 0xC3600000U && + ldq == 0xA77B000CU && + nop == 0x47FF041FU && + jmp == 0x6B7B0000U) + { + regs->r28 = regs->pc+4; + regs->r27 = addr+16; + regs->pc = resolver; + return 3; + } + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk("???????? "); + else + printk("%08x ", c); + } + printk("\n"); +} +#endif /* * This routine handles page faults. 
It determines the address, @@ -131,8 +250,29 @@ do_page_fault(unsigned long address, uns good_area: si_code = SEGV_ACCERR; if (cause < 0) { - if (!(vma->vm_flags & VM_EXEC)) + if (!(vma->vm_flags & VM_EXEC)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc) + goto bad_area; + + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + return; +#endif + + } + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp()); + do_exit(SIGKILL); +#else goto bad_area; +#endif + + } } else if (!cause) { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) diff -urNp linux-2.6.22.1/arch/arm/mm/mmap.c linux-2.6.22.1/arch/arm/mm/mmap.c --- linux-2.6.22.1/arch/arm/mm/mmap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/arm/mm/mmap.c 2007-08-02 11:38:45.000000000 -0400 @@ -60,6 +60,10 @@ arch_get_unmapped_area(struct file *filp if (len > TASK_SIZE) return -ENOMEM; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || !filp) +#endif + if (addr) { if (do_align) addr = COLOUR_ALIGN(addr, pgoff); @@ -72,10 +76,10 @@ arch_get_unmapped_area(struct file *filp return addr; } if (len > mm->cached_hole_size) { - start_addr = addr = mm->free_area_cache; + start_addr = addr = mm->free_area_cache; } else { - start_addr = addr = TASK_UNMAPPED_BASE; - mm->cached_hole_size = 0; + start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; } full_search: @@ -91,8 +95,8 @@ full_search: * Start a new search - just in case we missed * some holes. */ - if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = addr = TASK_UNMAPPED_BASE; + if (start_addr != mm->mmap_base) { + start_addr = addr = mm->mmap_base; mm->cached_hole_size = 0; goto full_search; } diff -urNp linux-2.6.22.1/arch/avr32/mm/fault.c linux-2.6.22.1/arch/avr32/mm/fault.c --- linux-2.6.22.1/arch/avr32/mm/fault.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/avr32/mm/fault.c 2007-08-02 11:38:45.000000000 -0400 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru int exception_trace = 1; +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 20; i++) { + unsigned char c; + if (get_user(c, (unsigned char *)pc+i)) + printk("???????? "); + else + printk("%02x ", c); + } + printk("\n"); +} +#endif + /* * This routine handles page faults. It determines the address and the * problem, and then passes it off to one of the appropriate routines. 
@@ -158,6 +175,16 @@ bad_area: up_read(&mm->mmap_sem); if (user_mode(regs)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (mm->pax_flags & MF_PAX_PAGEEXEC) { + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) { + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp); + do_exit(SIGKILL); + } + } +#endif + if (exception_trace && printk_ratelimit()) printk("%s%s[%d]: segfault at %08lx pc %08lx " "sp %08lx ecr %lu\n", diff -urNp linux-2.6.22.1/arch/i386/boot/setup.S linux-2.6.22.1/arch/i386/boot/setup.S --- linux-2.6.22.1/arch/i386/boot/setup.S 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/boot/setup.S 2007-08-02 11:38:45.000000000 -0400 @@ -893,11 +893,13 @@ startup_32: movl %eax, %gs movl %eax, %ss + movl 0x00000000, %ecx xorl %eax, %eax 1: incl %eax # check that A20 really IS enabled movl %eax, 0x00000000 # loop forever if it isn't cmpl %eax, 0x00100000 je 1b + movl %ecx, 0x00000000 # Jump to the 32bit entry point jmpl *(code32_start - start + (DELTA_INITSEG << 4))(%esi) diff -urNp linux-2.6.22/arch/i386/boot/video.S linux-2.6.22/arch/i386/boot/video.S --- linux-2.6.22/arch/i386/boot/video.S 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22/arch/i386/boot/video.S 2007-07-10 14:56:30.000000000 -0400 @@ -96,6 +96,7 @@ #define PARAM_LFB_PAGES 0x32 #define PARAM_VESA_ATTRIB 0x34 #define PARAM_CAPABILITIES 0x36 +#define PARAM_VESAPM_SIZE 0x3a /* Define DO_STORE according to CONFIG_VIDEO_RETAIN */ #ifdef CONFIG_VIDEO_RETAIN @@ -280,6 +281,7 @@ dac_done: movw %es, %fs:(PARAM_VESAPM_SEG) movw %di, %fs:(PARAM_VESAPM_OFF) + movw %cx, %fs:(PARAM_VESAPM_SIZE) no_pm: ret # The video mode menu diff -urNp linux-2.6.22.1/arch/i386/Kconfig linux-2.6.22.1/arch/i386/Kconfig --- linux-2.6.22.1/arch/i386/Kconfig 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/Kconfig 2007-08-03 12:36:16.000000000 -0400 @@ -586,7 +586,7 @@ config PAGE_OFFSET hex default 0xB0000000 if VMSPLIT_3G_OPT default 0x80000000 if VMSPLIT_2G - default 0x78000000 if VMSPLIT_2G_OPT + default 0x70000000 if VMSPLIT_2G_OPT default 0x40000000 if VMSPLIT_1G default 0xC0000000 @@ -815,7 +815,7 @@ config CRASH_DUMP config PHYSICAL_START hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) - default "0x100000" + default "0x200000" help This gives the physical address where the kernel is loaded. @@ -900,7 +900,7 @@ config HOTPLUG_CPU config COMPAT_VDSO bool "Compat VDSO support" - default y + default n help Map the VDSO to the predictable old-style address too. 
---help--- diff -urNp linux-2.6.22.1/arch/i386/Kconfig.cpu linux-2.6.22.1/arch/i386/Kconfig.cpu --- linux-2.6.22.1/arch/i386/Kconfig.cpu 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/Kconfig.cpu 2007-08-02 11:38:45.000000000 -0400 @@ -274,7 +274,7 @@ config X86_PPRO_FENCE config X86_F00F_BUG bool - depends on M586MMX || M586TSC || M586 || M486 || M386 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC default y config X86_WP_WORKS_OK @@ -304,7 +304,7 @@ config X86_CMPXCHG64 config X86_ALIGNMENT_16 bool - depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 + depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 default y config X86_GOOD_APIC diff -urNp linux-2.6.22.1/arch/i386/Kconfig.debug linux-2.6.22.1/arch/i386/Kconfig.debug --- linux-2.6.22.1/arch/i386/Kconfig.debug 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/Kconfig.debug 2007-08-02 11:38:45.000000000 -0400 @@ -46,16 +46,6 @@ config DEBUG_PAGEALLOC This results in a large slowdown, but helps to find certain types of memory corruptions. -config DEBUG_RODATA - bool "Write protect kernel read-only data structures" - depends on DEBUG_KERNEL - help - Mark the kernel read-only data as write-protected in the pagetables, - in order to catch accidental (and incorrect) writes to such const - data. This option may have a slight performance impact because a - portion of the kernel code won't be covered by a 2MB TLB anymore. - If in doubt, say "N". - config 4KSTACKS bool "Use 4Kb for kernel stacks instead of 8Kb" depends on DEBUG_KERNEL diff -urNp linux-2.6.22.1/arch/i386/kernel/acpi/boot.c linux-2.6.22.1/arch/i386/kernel/acpi/boot.c --- linux-2.6.22.1/arch/i386/kernel/acpi/boot.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/acpi/boot.c 2007-08-02 11:38:45.000000000 -0400 @@ -1095,7 +1095,7 @@ static struct dmi_system_id __initdata a DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, - {} + { NULL, NULL, {{0, NULL}}, NULL} }; #endif /* __i386__ */ diff -urNp linux-2.6.22.1/arch/i386/kernel/acpi/sleep.c linux-2.6.22.1/arch/i386/kernel/acpi/sleep.c --- linux-2.6.22.1/arch/i386/kernel/acpi/sleep.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/acpi/sleep.c 2007-08-02 11:38:45.000000000 -0400 @@ -94,7 +94,7 @@ static __initdata struct dmi_system_id a DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), }, }, - {} + { NULL, NULL, {{0, NULL}}, NULL} }; static int __init acpisleep_dmi_init(void) diff -urNp linux-2.6.22.1/arch/i386/kernel/acpi/wakeup.S linux-2.6.22.1/arch/i386/kernel/acpi/wakeup.S --- linux-2.6.22.1/arch/i386/kernel/acpi/wakeup.S 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/acpi/wakeup.S 2007-08-02 11:38:45.000000000 -0400 @@ -2,6 +2,7 @@ #include #include #include +#include # # wakeup_code runs in real mode, and at unknown address (determined at run-time). @@ -64,7 +65,7 @@ wakeup_code: # restore efer setting movl real_save_efer_edx - wakeup_code, %edx movl real_save_efer_eax - wakeup_code, %eax - mov $0xc0000080, %ecx + mov $MSR_EFER, %ecx wrmsr 4: # make sure %cr4 is set correctly (features, etc) @@ -205,13 +206,11 @@ wakeup_pmode_return: # and restore the stack ... 
but you need gdt for this to work movl saved_context_esp, %esp - movl %cs:saved_magic, %eax - cmpl $0x12345678, %eax + cmpl $0x12345678, saved_magic jne bogus_magic # jump to place where we left off - movl saved_eip,%eax - jmp *%eax + jmp *(saved_eip) bogus_magic: movw $0x0e00 + 'B', 0xb8018 @@ -243,7 +242,7 @@ ENTRY(acpi_copy_wakeup_routine) # save efer setting pushl %eax movl %eax, %ebx - mov $0xc0000080, %ecx + mov $MSR_EFER, %ecx rdmsr movl %edx, real_save_efer_edx - wakeup_start (%ebx) movl %eax, real_save_efer_eax - wakeup_start (%ebx) diff -urNp linux-2.6.22.1/arch/i386/kernel/alternative.c linux-2.6.22.1/arch/i386/kernel/alternative.c --- linux-2.6.22.1/arch/i386/kernel/alternative.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/alternative.c 2007-08-02 11:38:45.000000000 -0400 @@ -4,6 +4,7 @@ #include #include #include +#include static int noreplace_smp = 0; static int smp_alt_once = 0; @@ -165,12 +166,18 @@ void apply_alternatives(struct alt_instr u8 *instr; int diff; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; + + pax_open_kernel(cr0); +#endif + DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end); for (a = start; a < end; a++) { BUG_ON(a->replacementlen > a->instrlen); if (!boot_cpu_has(a->cpuid)) continue; - instr = a->instr; + instr = a->instr + __KERNEL_TEXT_OFFSET; #ifdef CONFIG_X86_64 /* vsyscall code is not mapped yet. resolve it manually. */ if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) { @@ -183,6 +190,11 @@ void apply_alternatives(struct alt_instr diff = a->instrlen - a->replacementlen; nop_out(instr + a->replacementlen, diff); } + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + } #ifdef CONFIG_SMP @@ -191,29 +203,53 @@ static void alternatives_smp_lock(u8 **s { u8 **ptr; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; + + pax_open_kernel(cr0); +#endif + for (ptr = start; ptr < end; ptr++) { if (*ptr < text) continue; if (*ptr > text_end) continue; - **ptr = 0xf0; /* lock prefix */ - }; + *(*ptr + __KERNEL_TEXT_OFFSET) = 0xf0; /* lock prefix */ + } + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + } static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end) { u8 **ptr; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + if (noreplace_smp) return; +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + for (ptr = start; ptr < end; ptr++) { if (*ptr < text) continue; if (*ptr > text_end) continue; - nop_out(*ptr, 1); - }; + nop_out(*ptr + __KERNEL_TEXT_OFFSET, 1); + } + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + } struct smp_alt_module { @@ -340,21 +376,34 @@ void apply_paravirt(struct paravirt_patc { struct paravirt_patch_site *p; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + if (noreplace_paravirt) return; +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + for (p = start; p < end; p++) { unsigned int used; + u8 *instr = p->instr + __KERNEL_TEXT_OFFSET; - used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr, + used = paravirt_ops.patch(p->instrtype, p->clobbers, instr, p->len); BUG_ON(used > p->len); /* Pad the rest with nops */ - nop_out(p->instr + used, p->len - used); + nop_out(instr + used, p->len - used); } +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + /* Sync to be conservative, in case we patched following * instructions */ sync_core(); diff -urNp linux-2.6.22.1/arch/i386/kernel/apm.c linux-2.6.22.1/arch/i386/kernel/apm.c --- linux-2.6.22.1/arch/i386/kernel/apm.c 
2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/apm.c 2007-08-02 11:38:45.000000000 -0400 @@ -600,9 +600,18 @@ static u8 apm_bios_call(u32 func, u32 eb struct desc_struct save_desc_40; struct desc_struct *gdt; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + cpus = apm_save_cpus(); cpu = get_cpu(); + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + gdt = get_cpu_gdt_table(cpu); save_desc_40 = gdt[0x40 / 8]; gdt[0x40 / 8] = bad_bios_desc; @@ -613,6 +622,11 @@ static u8 apm_bios_call(u32 func, u32 eb APM_DO_RESTORE_SEGS; apm_irq_restore(flags); gdt[0x40 / 8] = save_desc_40; + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + put_cpu(); apm_restore_cpus(cpus); @@ -643,9 +657,18 @@ static u8 apm_bios_call_simple(u32 func, struct desc_struct save_desc_40; struct desc_struct *gdt; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + cpus = apm_save_cpus(); cpu = get_cpu(); + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + gdt = get_cpu_gdt_table(cpu); save_desc_40 = gdt[0x40 / 8]; gdt[0x40 / 8] = bad_bios_desc; @@ -656,6 +679,11 @@ static u8 apm_bios_call_simple(u32 func, APM_DO_RESTORE_SEGS; apm_irq_restore(flags); gdt[0x40 / 8] = save_desc_40; + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + put_cpu(); apm_restore_cpus(cpus); return error; @@ -923,7 +951,7 @@ recalc: static void apm_power_off(void) { - unsigned char po_bios_call[] = { + const unsigned char po_bios_call[] = { 0xb8, 0x00, 0x10, /* movw $0x1000,ax */ 0x8e, 0xd0, /* movw ax,ss */ 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */ @@ -1863,7 +1891,10 @@ static const struct file_operations apm_ static struct miscdevice apm_device = { APM_MINOR_DEV, "apm_bios", - &apm_bios_fops + &apm_bios_fops, + {NULL, NULL}, + NULL, + NULL }; @@ -1973,210 +2004,210 @@ static struct dmi_system_id __initdata a print_if_true, KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.", { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), - DMI_MATCH(DMI_BIOS_VERSION, "1AET38WW (1.01b)"), }, + DMI_MATCH(DMI_BIOS_VERSION, "1AET38WW (1.01b)"), }, NULL }, { /* Handle problems with APM on the C600 */ broken_ps2_resume, "Dell Latitude C600", { DMI_MATCH(DMI_SYS_VENDOR, "Dell"), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C600"), }, + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C600"), }, NULL }, { /* Allow interrupts during suspend on Dell Latitude laptops*/ set_apm_ints, "Dell Latitude", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C510"), } + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C510"), }, NULL }, { /* APM crashes */ apm_is_horked, "Dell Inspiron 2500", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"), DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), - DMI_MATCH(DMI_BIOS_VERSION,"A11"), }, + DMI_MATCH(DMI_BIOS_VERSION,"A11"), }, NULL }, { /* Allow interrupts during suspend on Dell Inspiron laptops*/ set_apm_ints, "Dell Inspiron", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 4000"), }, + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 4000"), }, NULL }, { /* Handle problems with APM on Inspiron 5000e */ broken_apm_power, "Dell Inspiron 5000e", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A04"), - DMI_MATCH(DMI_BIOS_DATE, "08/24/2000"), }, + DMI_MATCH(DMI_BIOS_DATE, "08/24/2000"), }, NULL }, { /* Handle problems with APM on Inspiron 2500 */ broken_apm_power, "Dell 
Inspiron 2500", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A12"), - DMI_MATCH(DMI_BIOS_DATE, "02/04/2002"), }, + DMI_MATCH(DMI_BIOS_DATE, "02/04/2002"), }, NULL }, { /* APM crashes */ apm_is_horked, "Dell Dimension 4100", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"), DMI_MATCH(DMI_BIOS_VENDOR,"Intel Corp."), - DMI_MATCH(DMI_BIOS_VERSION,"A11"), }, + DMI_MATCH(DMI_BIOS_VERSION,"A11"), }, NULL }, { /* Allow interrupts during suspend on Compaq Laptops*/ set_apm_ints, "Compaq 12XL125", { DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), DMI_MATCH(DMI_PRODUCT_NAME, "Compaq PC"), DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), - DMI_MATCH(DMI_BIOS_VERSION,"4.06"), }, + DMI_MATCH(DMI_BIOS_VERSION,"4.06"), }, NULL }, { /* Allow interrupts during APM or the clock goes slow */ set_apm_ints, "ASUSTeK", { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "L8400K series Notebook PC"), }, + DMI_MATCH(DMI_PRODUCT_NAME, "L8400K series Notebook PC"), }, NULL }, { /* APM blows on shutdown */ apm_is_horked, "ABIT KX7-333[R]", { DMI_MATCH(DMI_BOARD_VENDOR, "ABIT"), - DMI_MATCH(DMI_BOARD_NAME, "VT8367-8233A (KX7-333[R])"), }, + DMI_MATCH(DMI_BOARD_NAME, "VT8367-8233A (KX7-333[R])"), }, NULL }, { /* APM crashes */ apm_is_horked, "Trigem Delhi3", { DMI_MATCH(DMI_SYS_VENDOR, "TriGem Computer, Inc"), - DMI_MATCH(DMI_PRODUCT_NAME, "Delhi3"), }, + DMI_MATCH(DMI_PRODUCT_NAME, "Delhi3"), }, NULL }, { /* APM crashes */ apm_is_horked, "Fujitsu-Siemens", { DMI_MATCH(DMI_BIOS_VENDOR, "hoenix/FUJITSU SIEMENS"), - DMI_MATCH(DMI_BIOS_VERSION, "Version1.01"), }, + DMI_MATCH(DMI_BIOS_VERSION, "Version1.01"), }, NULL }, { /* APM crashes */ apm_is_horked_d850md, "Intel D850MD", { DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), - DMI_MATCH(DMI_BIOS_VERSION, "MV85010A.86A.0016.P07.0201251536"), }, + DMI_MATCH(DMI_BIOS_VERSION, "MV85010A.86A.0016.P07.0201251536"), }, NULL }, { /* APM crashes */ apm_is_horked, "Intel D810EMO", { DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), - DMI_MATCH(DMI_BIOS_VERSION, "MO81010A.86A.0008.P04.0004170800"), }, + DMI_MATCH(DMI_BIOS_VERSION, "MO81010A.86A.0008.P04.0004170800"), }, NULL }, { /* APM crashes */ apm_is_horked, "Dell XPS-Z", { DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "A11"), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"), }, + DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"), }, NULL }, { /* APM crashes */ apm_is_horked, "Sharp PC-PJ/AX", { DMI_MATCH(DMI_SYS_VENDOR, "SHARP"), DMI_MATCH(DMI_PRODUCT_NAME, "PC-PJ/AX"), DMI_MATCH(DMI_BIOS_VENDOR,"SystemSoft"), - DMI_MATCH(DMI_BIOS_VERSION,"Version R2.08"), }, + DMI_MATCH(DMI_BIOS_VERSION,"Version R2.08"), }, NULL }, { /* APM crashes */ apm_is_horked, "Dell Inspiron 2500", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"), DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), - DMI_MATCH(DMI_BIOS_VERSION,"A11"), }, + DMI_MATCH(DMI_BIOS_VERSION,"A11"), }, NULL }, { /* APM idle hangs */ apm_likes_to_melt, "Jabil AMD", { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), - DMI_MATCH(DMI_BIOS_VERSION, "0AASNP06"), }, + DMI_MATCH(DMI_BIOS_VERSION, "0AASNP06"), }, NULL }, { /* APM idle hangs */ apm_likes_to_melt, "AMI Bios", { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), - DMI_MATCH(DMI_BIOS_VERSION, "0AASNP05"), }, + DMI_MATCH(DMI_BIOS_VERSION, "0AASNP05"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-N505X(DE) */ swab_apm_power_in_minutes, "Sony 
VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0206H"), - DMI_MATCH(DMI_BIOS_DATE, "08/23/99"), }, + DMI_MATCH(DMI_BIOS_DATE, "08/23/99"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-N505VX */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "W2K06H0"), - DMI_MATCH(DMI_BIOS_DATE, "02/03/00"), }, + DMI_MATCH(DMI_BIOS_DATE, "02/03/00"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-XG29 */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0117A0"), - DMI_MATCH(DMI_BIOS_DATE, "04/25/00"), }, + DMI_MATCH(DMI_BIOS_DATE, "04/25/00"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-Z600NE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0121Z1"), - DMI_MATCH(DMI_BIOS_DATE, "05/11/00"), }, + DMI_MATCH(DMI_BIOS_DATE, "05/11/00"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-Z600NE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "WME01Z1"), - DMI_MATCH(DMI_BIOS_DATE, "08/11/00"), }, + DMI_MATCH(DMI_BIOS_DATE, "08/11/00"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-Z600LEK(DE) */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0206Z3"), - DMI_MATCH(DMI_BIOS_DATE, "12/25/00"), }, + DMI_MATCH(DMI_BIOS_DATE, "12/25/00"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-Z505LS */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0203D0"), - DMI_MATCH(DMI_BIOS_DATE, "05/12/00"), }, + DMI_MATCH(DMI_BIOS_DATE, "05/12/00"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-Z505LS */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0203Z3"), - DMI_MATCH(DMI_BIOS_DATE, "08/25/00"), }, + DMI_MATCH(DMI_BIOS_DATE, "08/25/00"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-Z505LS (with updated BIOS) */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0209Z3"), - DMI_MATCH(DMI_BIOS_DATE, "05/12/01"), }, + DMI_MATCH(DMI_BIOS_DATE, "05/12/01"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-F104K */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0204K2"), - DMI_MATCH(DMI_BIOS_DATE, "08/28/00"), }, + DMI_MATCH(DMI_BIOS_DATE, "08/28/00"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-C1VN/C1VE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0208P1"), - DMI_MATCH(DMI_BIOS_DATE, "11/09/00"), }, + DMI_MATCH(DMI_BIOS_DATE, "11/09/00"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-C1VE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0204P1"), - DMI_MATCH(DMI_BIOS_DATE, "09/12/00"), }, + DMI_MATCH(DMI_BIOS_DATE, "09/12/00"), }, NULL }, { /* Handle problems with APM on Sony Vaio PCG-C1VE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), 
DMI_MATCH(DMI_BIOS_VERSION, "WXPO1Z3"), - DMI_MATCH(DMI_BIOS_DATE, "10/26/01"), }, + DMI_MATCH(DMI_BIOS_DATE, "10/26/01"), }, NULL }, { /* broken PM poweroff bios */ set_realmode_power_off, "Award Software v4.60 PGMA", { DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."), DMI_MATCH(DMI_BIOS_VERSION, "4.60 PGMA"), - DMI_MATCH(DMI_BIOS_DATE, "134526184"), }, + DMI_MATCH(DMI_BIOS_DATE, "134526184"), }, NULL }, /* Generic per vendor APM settings */ { /* Allow interrupts during suspend on IBM laptops */ set_apm_ints, "IBM", - { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, + { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, NULL }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL} }; /* @@ -2195,6 +2226,10 @@ static int __init apm_init(void) struct desc_struct *gdt; int err; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + dmi_check_system(apm_dmi_table); if (apm_info.bios.version == 0 || paravirt_enabled()) { @@ -2291,6 +2326,11 @@ static int __init apm_init(void) * code to that CPU. */ gdt = get_cpu_gdt_table(0); + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + set_base(gdt[APM_CS >> 3], __va((unsigned long)apm_info.bios.cseg << 4)); set_base(gdt[APM_CS_16 >> 3], @@ -2298,6 +2338,10 @@ static int __init apm_init(void) set_base(gdt[APM_DS >> 3], __va((unsigned long)apm_info.bios.dseg << 4)); +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + apm_proc = create_proc_entry("apm", 0, NULL); if (apm_proc) apm_proc->proc_fops = &apm_file_ops; diff -urNp linux-2.6.22.1/arch/i386/kernel/asm-offsets.c linux-2.6.22.1/arch/i386/kernel/asm-offsets.c --- linux-2.6.22.1/arch/i386/kernel/asm-offsets.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/asm-offsets.c 2007-08-02 11:38:45.000000000 -0400 @@ -55,6 +55,7 @@ void foo(void) OFFSET(TI_exec_domain, thread_info, exec_domain); OFFSET(TI_flags, thread_info, flags); OFFSET(TI_status, thread_info, status); + OFFSET(TI_cpu, thread_info, cpu); OFFSET(TI_preempt_count, thread_info, preempt_count); OFFSET(TI_addr_limit, thread_info, addr_limit); OFFSET(TI_restart_block, thread_info, restart_block); @@ -101,6 +102,7 @@ void foo(void) DEFINE(PTRS_PER_PTE, PTRS_PER_PTE); DEFINE(PTRS_PER_PMD, PTRS_PER_PMD); DEFINE(PTRS_PER_PGD, PTRS_PER_PGD); + DEFINE(PERCPU_MODULE_RESERVE, PERCPU_MODULE_RESERVE); DEFINE(VDSO_PRELINK_asm, VDSO_PRELINK); @@ -114,5 +116,6 @@ void foo(void) OFFSET(PARAVIRT_irq_enable_sysexit, paravirt_ops, irq_enable_sysexit); OFFSET(PARAVIRT_iret, paravirt_ops, iret); OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0); + OFFSET(PARAVIRT_write_cr0, paravirt_ops, write_cr0); #endif } diff -urNp linux-2.6.22.1/arch/i386/kernel/cpu/common.c linux-2.6.22.1/arch/i386/kernel/cpu/common.c --- linux-2.6.22.1/arch/i386/kernel/cpu/common.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/cpu/common.c 2007-08-02 11:38:45.000000000 -0400 @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include @@ -21,39 +20,15 @@ #include "cpu.h" -DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { - [GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 }, - [GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 }, - [GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 }, - [GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 }, - /* - * Segments used for calling PnP BIOS have byte granularity. - * They code segments and data segments have fixed 64k limits, - * the transfer segment sizes are set at run time. 
- */ - [GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */ - [GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 },/* 16-bit code */ - [GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 }, /* 16-bit data */ - [GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 },/* 16-bit data */ - [GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 },/* 16-bit data */ - /* - * The APM segments have byte granularity and their bases - * are set at run time. All have 64k limits. - */ - [GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */ - /* 16-bit code */ - [GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 }, - [GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 }, /* data */ - - [GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 }, - [GDT_ENTRY_PERCPU] = { 0x00000000, 0x00000000 }, -} }; -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); - static int cachesize_override __cpuinitdata = -1; static int disable_x86_fxsr __cpuinitdata; static int disable_x86_serial_nr __cpuinitdata = 1; -static int disable_x86_sep __cpuinitdata; + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) +int disable_x86_sep __cpuinitdata = 1; +#else +int disable_x86_sep __cpuinitdata; +#endif struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; @@ -261,10 +236,10 @@ static int __cpuinit have_cpuid_p(void) void __init cpu_detect(struct cpuinfo_x86 *c) { /* Get vendor name */ - cpuid(0x00000000, &c->cpuid_level, - (int *)&c->x86_vendor_id[0], - (int *)&c->x86_vendor_id[8], - (int *)&c->x86_vendor_id[4]); + cpuid(0x00000000, (unsigned int *)&c->cpuid_level, + (unsigned int *)&c->x86_vendor_id[0], + (unsigned int *)&c->x86_vendor_id[8], + (unsigned int *)&c->x86_vendor_id[4]); c->x86 = 4; if (c->cpuid_level >= 0x00000001) { @@ -304,15 +279,14 @@ static void __init early_cpu_detect(void static void __cpuinit generic_identify(struct cpuinfo_x86 * c) { - u32 tfms, xlvl; - int ebx; + u32 tfms, xlvl, ebx; if (have_cpuid_p()) { /* Get vendor name */ - cpuid(0x00000000, &c->cpuid_level, - (int *)&c->x86_vendor_id[0], - (int *)&c->x86_vendor_id[8], - (int *)&c->x86_vendor_id[4]); + cpuid(0x00000000, (unsigned int *)&c->cpuid_level, + (unsigned int *)&c->x86_vendor_id[0], + (unsigned int *)&c->x86_vendor_id[8], + (unsigned int *)&c->x86_vendor_id[4]); get_cpu_vendor(c, 0); /* Initialize the standard set of capabilities */ @@ -644,7 +618,7 @@ void switch_to_new_gdt(void) { struct Xgt_desc_struct gdt_descr; - gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); + gdt_descr.address = get_cpu_gdt_table(smp_processor_id()); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory"); @@ -660,7 +634,7 @@ void __cpuinit cpu_init(void) { int cpu = smp_processor_id(); struct task_struct *curr = current; - struct tss_struct * t = &per_cpu(init_tss, cpu); + struct tss_struct *t = init_tss + cpu; struct thread_struct *thread = &curr->thread; if (cpu_test_and_set(cpu, cpu_initialized)) { diff -urNp linux-2.6.22.1/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c linux-2.6.22.1/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c --- linux-2.6.22.1/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c 2007-08-02 11:38:45.000000000 -0400 @@ -563,7 +563,7 @@ static struct dmi_system_id sw_any_bug_d DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL } }; #endif diff -urNp 
linux-2.6.22.1/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c linux-2.6.22.1/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c --- linux-2.6.22.1/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c 2007-08-02 11:38:45.000000000 -0400 @@ -229,7 +229,7 @@ static struct cpu_model models[] = { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL }, { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL }, - { NULL, } + { NULL, NULL, 0, NULL} }; #undef _BANIAS #undef BANIAS @@ -404,7 +404,7 @@ static struct dmi_system_id sw_any_bug_d DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL } }; #endif diff -urNp linux-2.6.22.1/arch/i386/kernel/cpu/intel_cacheinfo.c linux-2.6.22.1/arch/i386/kernel/cpu/intel_cacheinfo.c --- linux-2.6.22.1/arch/i386/kernel/cpu/intel_cacheinfo.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/cpu/intel_cacheinfo.c 2007-08-02 11:38:45.000000000 -0400 @@ -318,8 +318,8 @@ unsigned int __cpuinit init_intel_cachei */ if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) { /* supports eax=2 call */ - int i, j, n; - int regs[4]; + int j, n; + unsigned int regs[4]; unsigned char *dp = (unsigned char *)regs; int only_trace = 0; @@ -334,7 +334,7 @@ unsigned int __cpuinit init_intel_cachei /* If bit 31 is set, this is an unknown format */ for ( j = 0 ; j < 3 ; j++ ) { - if ( regs[j] < 0 ) regs[j] = 0; + if ( (int)regs[j] < 0 ) regs[j] = 0; } /* Byte 0 is level count, not a descriptor */ diff -urNp linux-2.6.22.1/arch/i386/kernel/cpu/mcheck/therm_throt.c linux-2.6.22.1/arch/i386/kernel/cpu/mcheck/therm_throt.c --- linux-2.6.22.1/arch/i386/kernel/cpu/mcheck/therm_throt.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/cpu/mcheck/therm_throt.c 2007-08-02 11:38:45.000000000 -0400 @@ -150,7 +150,7 @@ static __cpuinit int thermal_throttle_cp return NOTIFY_OK; } -static struct notifier_block thermal_throttle_cpu_notifier = +static __cpuinitdata struct notifier_block thermal_throttle_cpu_notifier = { .notifier_call = thermal_throttle_cpu_callback, }; diff -urNp linux-2.6.22.1/arch/i386/kernel/cpu/mtrr/generic.c linux-2.6.22.1/arch/i386/kernel/cpu/mtrr/generic.c --- linux-2.6.22.1/arch/i386/kernel/cpu/mtrr/generic.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/cpu/mtrr/generic.c 2007-08-02 11:38:45.000000000 -0400 @@ -29,11 +29,11 @@ static struct fixed_range_block fixed_ra { MTRRfix64K_00000_MSR, 1 }, /* one 64k MTRR */ { MTRRfix16K_80000_MSR, 2 }, /* two 16k MTRRs */ { MTRRfix4K_C0000_MSR, 8 }, /* eight 4k MTRRs */ - {} + { 0, 0 } }; static unsigned long smp_changes_mask; -static struct mtrr_state mtrr_state = {}; +static struct mtrr_state mtrr_state; #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "mtrr." 
@@ -79,7 +79,7 @@ static void print_fixed(unsigned base, u } /* Grab all of the MTRR state for this CPU into *state */ -void get_mtrr_state(void) +void __init get_mtrr_state(void) { unsigned int i; struct mtrr_var_range *vrs; diff -urNp linux-2.6.22.1/arch/i386/kernel/crash.c linux-2.6.22.1/arch/i386/kernel/crash.c --- linux-2.6.22.1/arch/i386/kernel/crash.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/crash.c 2007-08-02 11:38:45.000000000 -0400 @@ -55,7 +55,7 @@ static int crash_nmi_callback(struct not return NOTIFY_STOP; local_irq_disable(); - if (!user_mode_vm(regs)) { + if (!user_mode(regs)) { crash_fixup_ss_esp(&fixed_regs, regs); regs = &fixed_regs; } diff -urNp linux-2.6.22.1/arch/i386/kernel/doublefault.c linux-2.6.22.1/arch/i386/kernel/doublefault.c --- linux-2.6.22.1/arch/i386/kernel/doublefault.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/doublefault.c 2007-08-02 11:38:45.000000000 -0400 @@ -11,17 +11,17 @@ #define DOUBLEFAULT_STACKSIZE (1024) static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2) #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM) static void doublefault_fn(void) { - struct Xgt_desc_struct gdt_desc = {0, 0}; + struct Xgt_desc_struct gdt_desc = {0, NULL, 0}; unsigned long gdt, tss; store_gdt(&gdt_desc); - gdt = gdt_desc.address; + gdt = (unsigned long)gdt_desc.address; printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); @@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cach /* 0x2 bit is always set */ .eflags = X86_EFLAGS_SF | 0x2, .esp = STACK_START, - .es = __USER_DS, + .es = __KERNEL_DS, .cs = __KERNEL_CS, .ss = __KERNEL_DS, - .ds = __USER_DS, + .ds = __KERNEL_DS, .fs = __KERNEL_PERCPU, .__cr3 = __pa(swapper_pg_dir) diff -urNp linux-2.6.22.1/arch/i386/kernel/efi.c linux-2.6.22.1/arch/i386/kernel/efi.c --- linux-2.6.22.1/arch/i386/kernel/efi.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/efi.c 2007-08-02 11:38:45.000000000 -0400 @@ -63,45 +63,23 @@ extern void * boot_ioremap(unsigned long static unsigned long efi_rt_eflags; static DEFINE_SPINLOCK(efi_rt_lock); -static pgd_t efi_bak_pg_dir_pointer[2]; +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS] __attribute__ ((aligned (4096))); static void efi_call_phys_prelog(void) __acquires(efi_rt_lock) { - unsigned long cr4; - unsigned long temp; struct Xgt_desc_struct gdt_descr; spin_lock(&efi_rt_lock); local_irq_save(efi_rt_eflags); - /* - * If I don't have PSE, I should just duplicate two entries in page - * directory. If I have PSE, I just need to duplicate one entry in - * page directory. 
- */ - cr4 = read_cr4(); - - if (cr4 & X86_CR4_PSE) { - efi_bak_pg_dir_pointer[0].pgd = - swapper_pg_dir[pgd_index(0)].pgd; - swapper_pg_dir[0].pgd = - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd; - } else { - efi_bak_pg_dir_pointer[0].pgd = - swapper_pg_dir[pgd_index(0)].pgd; - efi_bak_pg_dir_pointer[1].pgd = - swapper_pg_dir[pgd_index(0x400000)].pgd; - swapper_pg_dir[pgd_index(0)].pgd = - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd; - temp = PAGE_OFFSET + 0x400000; - swapper_pg_dir[pgd_index(0x400000)].pgd = - swapper_pg_dir[pgd_index(temp)].pgd; - } + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS); + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, + min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); /* * After the lock is released, the original page table is restored. */ - local_flush_tlb(); + __flush_tlb_all(); gdt_descr.address = __pa(get_cpu_gdt_table(0)); gdt_descr.size = GDT_SIZE - 1; @@ -110,35 +88,24 @@ static void efi_call_phys_prelog(void) _ static void efi_call_phys_epilog(void) __releases(efi_rt_lock) { - unsigned long cr4; struct Xgt_desc_struct gdt_descr; - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); + gdt_descr.address = get_cpu_gdt_table(0); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); - cr4 = read_cr4(); - - if (cr4 & X86_CR4_PSE) { - swapper_pg_dir[pgd_index(0)].pgd = - efi_bak_pg_dir_pointer[0].pgd; - } else { - swapper_pg_dir[pgd_index(0)].pgd = - efi_bak_pg_dir_pointer[0].pgd; - swapper_pg_dir[pgd_index(0x400000)].pgd = - efi_bak_pg_dir_pointer[1].pgd; - } + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS); /* * After the lock is released, the original page table is restored. */ - local_flush_tlb(); + __flush_tlb_all(); local_irq_restore(efi_rt_eflags); spin_unlock(&efi_rt_lock); } -static efi_status_t +static efi_status_t __init phys_efi_set_virtual_address_map(unsigned long memory_map_size, unsigned long descriptor_size, u32 descriptor_version, @@ -154,7 +121,7 @@ phys_efi_set_virtual_address_map(unsigne return status; } -static efi_status_t +static efi_status_t __init phys_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { efi_status_t status; diff -urNp linux-2.6.22.1/arch/i386/kernel/efi_stub.S linux-2.6.22.1/arch/i386/kernel/efi_stub.S --- linux-2.6.22.1/arch/i386/kernel/efi_stub.S 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/efi_stub.S 2007-08-02 11:38:45.000000000 -0400 @@ -6,6 +6,7 @@ */ #include +#include #include /* @@ -20,7 +21,7 @@ * service functions will comply with gcc calling convention, too. */ -.text +__INIT ENTRY(efi_call_phys) /* * 0. The function can only be called in Linux kernel. So CS has been @@ -36,9 +37,7 @@ ENTRY(efi_call_phys) * The mapping of lower virtual memory has been created in prelog and * epilog. */ - movl $1f, %edx - subl $__PAGE_OFFSET, %edx - jmp *%edx + jmp 1f-__PAGE_OFFSET 1: /* @@ -47,14 +46,8 @@ ENTRY(efi_call_phys) * parameter 2, ..., param n. To make things easy, we save the return * address of efi_call_phys in a global variable. */ - popl %edx - movl %edx, saved_return_addr - /* get the function pointer into ECX*/ - popl %ecx - movl %ecx, efi_rt_function_ptr - movl $2f, %edx - subl $__PAGE_OFFSET, %edx - pushl %edx + popl (saved_return_addr) + popl (efi_rt_function_ptr) /* * 3. Clear PG bit in %CR0. @@ -73,9 +66,8 @@ ENTRY(efi_call_phys) /* * 5. Call the physical function. */ - jmp *%ecx + call *(efi_rt_function_ptr-__PAGE_OFFSET) -2: /* * 6. 
After EFI runtime service returns, control will return to * following instruction. We'd better readjust stack pointer first. @@ -85,37 +77,29 @@ ENTRY(efi_call_phys) /* * 7. Restore PG bit */ - movl %cr0, %edx - orl $0x80000000, %edx - movl %edx, %cr0 - jmp 1f -1: /* * 8. Now restore the virtual mode from flat mode by * adding EIP with PAGE_OFFSET. */ - movl $1f, %edx - jmp *%edx + movl %cr0, %edx + orl $0x80000000, %edx + movl %edx, %cr0 + jmp 1f+__PAGE_OFFSET 1: /* * 9. Balance the stack. And because EAX contain the return value, * we'd better not clobber it. */ - leal efi_rt_function_ptr, %edx - movl (%edx), %ecx - pushl %ecx + pushl (efi_rt_function_ptr) /* - * 10. Push the saved return address onto the stack and return. + * 10. Return to the saved return address. */ - leal saved_return_addr, %edx - movl (%edx), %ecx - pushl %ecx - ret + jmpl *(saved_return_addr) .previous -.data +__INITDATA saved_return_addr: .long 0 efi_rt_function_ptr: diff -urNp linux-2.6.22.1/arch/i386/kernel/entry.S linux-2.6.22.1/arch/i386/kernel/entry.S --- linux-2.6.22.1/arch/i386/kernel/entry.S 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/entry.S 2007-08-02 11:38:45.000000000 -0400 @@ -97,7 +97,7 @@ VM_MASK = 0x00020000 #define resume_userspace_sig resume_userspace #endif -#define SAVE_ALL \ +#define __SAVE_ALL(_DS) \ cld; \ pushl %fs; \ CFI_ADJUST_CFA_OFFSET 4;\ @@ -129,12 +129,26 @@ VM_MASK = 0x00020000 pushl %ebx; \ CFI_ADJUST_CFA_OFFSET 4;\ CFI_REL_OFFSET ebx, 0;\ - movl $(__USER_DS), %edx; \ + movl $(_DS), %edx; \ movl %edx, %ds; \ movl %edx, %es; \ movl $(__KERNEL_PERCPU), %edx; \ movl %edx, %fs +#ifdef CONFIG_PAX_KERNEXEC +#define SAVE_ALL \ + __SAVE_ALL(__KERNEL_DS); \ + GET_CR0_INTO_EDX; \ + movl %edx, %esi; \ + orl $X86_CR0_WP, %edx; \ + xorl %edx, %esi; \ + SET_CR0_FROM_EDX +#elif defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) +#define SAVE_ALL __SAVE_ALL(__KERNEL_DS) +#else +#define SAVE_ALL __SAVE_ALL(__USER_DS) +#endif + #define RESTORE_INT_REGS \ popl %ebx; \ CFI_ADJUST_CFA_OFFSET -4;\ @@ -248,7 +262,17 @@ check_userspace: movb PT_CS(%esp), %al andl $(VM_MASK | SEGMENT_RPL_MASK), %eax cmpl $USER_RPL, %eax + +#ifdef CONFIG_PAX_KERNEXEC + jae resume_userspace + + GET_CR0_INTO_EDX + xorl %esi, %edx + SET_CR0_FROM_EDX + jmp resume_kernel +#else jb resume_kernel # not returning to v8086 or userspace +#endif ENTRY(resume_userspace) DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt @@ -307,10 +331,9 @@ sysenter_past_esp: /*CFI_REL_OFFSET cs, 0*/ /* * Push current_thread_info()->sysenter_return to the stack. - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words - * pushed above; +8 corresponds to copy_thread's esp0 setting. */ - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) + GET_THREAD_INFO(%ebp) + pushl TI_sysenter_return(%ebp) CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET eip, 0 @@ -318,9 +341,17 @@ sysenter_past_esp: * Load the potential sixth argument from user stack. * Careful about security. 
*/ + movl 12(%esp),%ebp + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov 16(%esp),%ds +1: movl %ds:(%ebp),%ebp +#else cmpl $__PAGE_OFFSET-3,%ebp jae syscall_fault 1: movl (%ebp),%ebp +#endif + .section __ex_table,"a" .align 4 .long 1b,syscall_fault @@ -343,20 +374,37 @@ sysenter_past_esp: movl TI_flags(%ebp), %ecx testw $_TIF_ALLWORK_MASK, %cx jne syscall_exit_work + +#ifdef CONFIG_PAX_RANDKSTACK + pushl %eax + CFI_ADJUST_CFA_OFFSET 4 + call pax_randomize_kstack + popl %eax + CFI_ADJUST_CFA_OFFSET -4 +#endif + /* if something modifies registers it must also disable sysexit */ movl PT_EIP(%esp), %edx movl PT_OLDESP(%esp), %ecx xorl %ebp,%ebp TRACE_IRQS_ON 1: mov PT_FS(%esp), %fs +2: mov PT_DS(%esp), %ds +3: mov PT_ES(%esp), %es ENABLE_INTERRUPTS_SYSEXIT CFI_ENDPROC .pushsection .fixup,"ax" -2: movl $0,PT_FS(%esp) +4: movl $0,PT_FS(%esp) jmp 1b +5: movl $0,PT_DS(%esp) + jmp 2b +6: movl $0,PT_ES(%esp) + jmp 3b .section __ex_table,"a" .align 4 - .long 1b,2b + .long 1b,4b + .long 2b,5b + .long 3b,6b .popsection ENDPROC(sysenter_entry) @@ -389,6 +437,10 @@ no_singlestep: testw $_TIF_ALLWORK_MASK, %cx # current->work jne syscall_exit_work +#ifdef CONFIG_PAX_RANDKSTACK + call pax_randomize_kstack +#endif + restore_all: movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS # Warning: PT_OLDSS(%esp) contains the wrong/random values if we @@ -554,17 +606,24 @@ syscall_badsys: END(syscall_badsys) CFI_ENDPROC -#define FIXUP_ESPFIX_STACK \ - /* since we are on a wrong stack, we cant make it a C code :( */ \ - PER_CPU(gdt_page, %ebx); \ - GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \ - addl %esp, %eax; \ - pushl $__KERNEL_DS; \ - CFI_ADJUST_CFA_OFFSET 4; \ - pushl %eax; \ - CFI_ADJUST_CFA_OFFSET 4; \ - lss (%esp), %esp; \ +.macro FIXUP_ESPFIX_STACK + /* since we are on a wrong stack, we cant make it a C code :( */ +#ifdef CONFIG_SMP + movl PER_CPU_VAR(cpu_number), %ebx; + shll $PAGE_SHIFT_asm, %ebx; + addl $cpu_gdt_table, %ebx; +#else + movl $cpu_gdt_table, %ebx; +#endif + GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); + addl %esp, %eax; + pushl $__KERNEL_DS; + CFI_ADJUST_CFA_OFFSET 4; + pushl %eax; + CFI_ADJUST_CFA_OFFSET 4; + lss (%esp), %esp; CFI_ADJUST_CFA_OFFSET -8; +.endm #define UNWIND_ESPFIX_STACK \ movl %ss, %eax; \ /* see if on espfix stack */ \ @@ -581,7 +640,7 @@ END(syscall_badsys) * Build the entry stubs and pointer table with * some assembler magic. 
*/ -.data +.section .rodata,"a",@progbits ENTRY(interrupt) .text @@ -681,12 +740,21 @@ error_code: popl %ecx CFI_ADJUST_CFA_OFFSET -4 /*CFI_REGISTER es, ecx*/ + +#ifdef CONFIG_PAX_KERNEXEC + GET_CR0_INTO_EDX + movl %edx, %esi + orl $X86_CR0_WP, %edx + xorl %edx, %esi + SET_CR0_FROM_EDX +#endif + movl PT_FS(%esp), %edi # get the function address movl PT_ORIG_EAX(%esp), %edx # get the error code movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart mov %ecx, PT_FS(%esp) /*CFI_REL_OFFSET fs, ES*/ - movl $(__USER_DS), %ecx + movl $(__KERNEL_DS), %ecx movl %ecx, %ds movl %ecx, %es movl %esp,%eax # pt_regs pointer @@ -820,6 +888,13 @@ nmi_stack_correct: xorl %edx,%edx # zero error code movl %esp,%eax # pt_regs pointer call do_nmi + +#ifdef CONFIG_PAX_KERNEXEC + GET_CR0_INTO_EDX + xorl %esi, %edx + SET_CR0_FROM_EDX +#endif + jmp restore_nocheck_notrace CFI_ENDPROC @@ -860,6 +935,13 @@ nmi_espfix_stack: FIXUP_ESPFIX_STACK # %eax == %esp xorl %edx,%edx # zero error code call do_nmi + +#ifdef CONFIG_PAX_KERNEXEC + GET_CR0_INTO_EDX + xorl %esi, %edx + SET_CR0_FROM_EDX +#endif + RESTORE_REGS lss 12+4(%esp), %esp # back to espfix stack CFI_ADJUST_CFA_OFFSET -24 @@ -1023,7 +1105,6 @@ ENTRY(kernel_thread_helper) CFI_ENDPROC ENDPROC(kernel_thread_helper) -.section .rodata,"a" #include "syscall_table.S" syscall_table_size=(.-sys_call_table) diff -urNp linux-2.6.22.1/arch/i386/kernel/head.S linux-2.6.22.1/arch/i386/kernel/head.S --- linux-2.6.22.1/arch/i386/kernel/head.S 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/head.S 2007-08-03 12:34:39.000000000 -0400 @@ -18,6 +18,7 @@ #include #include #include +#include /* * References to members of the new_cpu_data structure. @@ -51,16 +52,23 @@ */ LOW_PAGES = 1<<(32-PAGE_SHIFT_asm) -#if PTRS_PER_PMD > 1 -PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD -#else -PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PGD) -#endif +PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PTE) BOOTBITMAP_SIZE = LOW_PAGES / 8 ALLOCATOR_SLOP = 4 INIT_MAP_BEYOND_END = BOOTBITMAP_SIZE + (PAGE_TABLE_SIZE + ALLOCATOR_SLOP)*PAGE_SIZE_asm +#ifdef CONFIG_PAX_KERNEXEC +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */ +.fill 4096,1,0xcc +#endif + +/* + * Real beginning of normal "text" segment + */ +ENTRY(stext) +ENTRY(_stext) + /* * 32-bit kernel entrypoint; only used by the boot CPU. On entry, * %esi points to the real-mode code as a 32-bit pointer. 
@@ -82,6 +90,43 @@ ENTRY(startup_32) movl %eax,%fs movl %eax,%gs + movl $__per_cpu_start,%eax + movw %ax,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_PERCPU + 2) + rorl $16,%eax + movb %al,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_PERCPU + 4) + movb %ah,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_PERCPU + 7) + movl $__per_cpu_end + PERCPU_MODULE_RESERVE,%eax + subl $__per_cpu_start,%eax + movw %ax,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_PERCPU + 0) + +#ifdef CONFIG_PAX_MEMORY_UDEREF + /* check for VMware */ + movl $0x564d5868,%eax + xorl %ebx,%ebx + movl $0xa,%ecx + movl $0x5658,%edx + in (%dx),%eax + cmpl $0x564d5868,%ebx + jz 1f + + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),%eax + movl %eax,(cpu_gdt_table - __PAGE_OFFSET + GDT_ENTRY_KERNEL_DS * 8 + 4) +1: +#endif + +#ifdef CONFIG_PAX_KERNEXEC + movl $__KERNEL_TEXT_OFFSET,%eax + movw %ax,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_CS + 2) + rorl $16,%eax + movb %al,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_CS + 4) + movb %ah,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_CS + 7) + + movb %al,(boot_gdt - __PAGE_OFFSET + __BOOT_CS + 4) + movb %ah,(boot_gdt - __PAGE_OFFSET + __BOOT_CS + 7) + rorl $16,%eax + movw %ax,(boot_gdt - __PAGE_OFFSET + __BOOT_CS + 2) +#endif + /* * Clear BSS first so that there are no surprises... * No need to cld as DF is already clear from cld above... @@ -129,24 +174,42 @@ ENTRY(startup_32) * Warning: don't use %esi or the stack in this code. However, %esp * can be used as a GPR if you really need it... */ -page_pde_offset = (__PAGE_OFFSET >> 20); - +#ifdef CONFIG_X86_PAE +page_pde_offset = ((__PAGE_OFFSET >> 21) * (PAGE_SIZE_asm / PTRS_PER_PTE)); +#else +page_pde_offset = ((__PAGE_OFFSET >> 22) * (PAGE_SIZE_asm / PTRS_PER_PTE)); +#endif movl $(pg0 - __PAGE_OFFSET), %edi +#ifdef CONFIG_X86_PAE + movl $(swapper_pm_dir - __PAGE_OFFSET), %edx +#else movl $(swapper_pg_dir - __PAGE_OFFSET), %edx - movl $0x007, %eax /* 0x007 = PRESENT+RW+USER */ +#endif + movl $0x063, %eax /* 0x063 = DIRTY+ACCESSED+PRESENT+RW */ 10: - leal 0x007(%edi),%ecx /* Create PDE entry */ + leal 0x063(%edi),%ecx /* Create PDE entry */ movl %ecx,(%edx) /* Store identity PDE entry */ movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */ +#ifdef CONFIG_X86_PAE + movl $0,4(%edx) + movl $0,page_pde_offset+4(%edx) + addl $8,%edx + movl $512, %ecx +#else addl $4,%edx movl $1024, %ecx +#endif 11: stosl +#ifdef CONFIG_X86_PAE + movl $0,(%edi) + addl $4,%edi +#endif addl $0x1000,%eax loop 11b /* End condition: we must map up to and including INIT_MAP_BEYOND_END */ - /* bytes beyond the end of our own page tables; the +0x007 is the attribute bits */ - leal (INIT_MAP_BEYOND_END+0x007)(%edi),%ebp + /* bytes beyond the end of our own page tables; the +0x063 is the attribute bits */ + leal (INIT_MAP_BEYOND_END+0x063)(%edi),%ebp cmpl %ebp,%eax jb 10b movl %edi,(init_pg_tables_end - __PAGE_OFFSET) @@ -177,6 +240,11 @@ ENTRY(startup_32_smp) movl %eax,%fs movl %eax,%gs + /* This is a secondary processor (AP) */ + xorl %ebx,%ebx + incl %ebx +#endif /* CONFIG_SMP */ + /* * New page tables may be in 4Mbyte page mode and may * be using the global pages. @@ -192,42 +260,47 @@ ENTRY(startup_32_smp) * not yet offset PAGE_OFFSET.. */ #define cr4_bits mmu_cr4_features-__PAGE_OFFSET +3: movl cr4_bits,%edx andl %edx,%edx - jz 6f + jz 5f movl %cr4,%eax # Turn on paging options (PSE,PAE,..) 
orl %edx,%eax movl %eax,%cr4 - btl $5, %eax # check if PAE is enabled - jnc 6f +#ifdef CONFIG_X86_PAE + movl %ebx,%edi /* Check if extended functions are implemented */ movl $0x80000000, %eax cpuid cmpl $0x80000000, %eax - jbe 6f + jbe 4f mov $0x80000001, %eax cpuid /* Execute Disable bit supported? */ btl $20, %edx - jnc 6f + jnc 4f /* Setup EFER (Extended Feature Enable Register) */ - movl $0xc0000080, %ecx + movl $MSR_EFER, %ecx rdmsr btsl $11, %eax /* Make changes effective */ wrmsr -6: - /* This is a secondary processor (AP) */ - xorl %ebx,%ebx - incl %ebx + btsl $63-32,__supported_pte_mask+4-__PAGE_OFFSET + movl $1,nx_enabled-__PAGE_OFFSET -#endif /* CONFIG_SMP */ -3: +#if !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC) && !defined(CONFIG_PAX_MEMORY_UDEREF) + movl $0,disable_x86_sep-__PAGE_OFFSET +#endif + +4: + movl %edi,%ebx +#endif +5: /* * Enable paging @@ -252,9 +325,7 @@ ENTRY(startup_32_smp) #ifdef CONFIG_SMP andl %ebx,%ebx - jz 1f /* Initial CPU cleans BSS */ - jmp checkCPUtype -1: + jnz checkCPUtype /* Initial CPU cleans BSS */ #endif /* CONFIG_SMP */ /* @@ -331,12 +402,12 @@ is386: movl $2,%ecx # set MP ljmp $(__KERNEL_CS),$1f 1: movl $(__KERNEL_DS),%eax # reload all the segment registers movl %eax,%ss # after changing gdt. - movl %eax,%fs # gets reset once there's real percpu - - movl $(__USER_DS),%eax # DS/ES contains default USER segment movl %eax,%ds movl %eax,%es + movl $(__KERNEL_PERCPU), %eax + movl %eax,%fs # set this cpu's percpu + xorl %eax,%eax # Clear GS and LDT movl %eax,%gs lldt %ax @@ -347,11 +418,7 @@ is386: movl $2,%ecx # set MP movb ready, %cl movb $1, ready cmpb $0,%cl # the first CPU calls start_kernel - je 1f - movl $(__KERNEL_PERCPU), %eax - movl %eax,%fs # set this cpu's percpu - jmp initialize_secondary # all other CPUs call initialize_secondary -1: + jne initialize_secondary # all other CPUs call initialize_secondary #endif /* CONFIG_SMP */ jmp start_kernel @@ -462,8 +529,8 @@ hlt_loop: /* This is the default interrupt "handler" :-) */ ALIGN ignore_int: - cld #ifdef CONFIG_PRINTK + cld pushl %eax pushl %ecx pushl %edx @@ -494,28 +561,54 @@ ignore_int: #endif iret -.section .text -/* - * Real beginning of normal "text" segment - */ -ENTRY(stext) -ENTRY(_stext) - /* * BSS section */ -.section ".bss.page_aligned","w" +.section .swapper_pg_dir,"a",@progbits ENTRY(swapper_pg_dir) +#ifdef CONFIG_X86_PAE + .long swapper_pm_dir-__PAGE_OFFSET+1 + .long 0 + .long swapper_pm_dir+512*8-__PAGE_OFFSET+1 + .long 0 + .long swapper_pm_dir+512*16-__PAGE_OFFSET+1 + .long 0 + .long swapper_pm_dir+512*24-__PAGE_OFFSET+1 + .long 0 +#else .fill 1024,4,0 +#endif + +#ifdef CONFIG_X86_PAE +.section .swapper_pm_dir,"a",@progbits +ENTRY(swapper_pm_dir) + .fill 512,8,0 + .fill 512,8,0 + .fill 512,8,0 + .fill 512,8,0 +#endif + +.section .empty_zero_page,"a",@progbits ENTRY(empty_zero_page) .fill 4096,1,0 /* + * The IDT has to be page-aligned to simplify the Pentium + * F0 0F bug workaround.. We have a special link segment + * for this. + */ +.section .idt,"a",@progbits +ENTRY(idt_table) + .fill 256,8,0 + +/* * This starts the data section. 
*/ .data + +.section .rodata,"a",@progbits ENTRY(stack_start) - .long init_thread_union+THREAD_SIZE + .long init_thread_union+THREAD_SIZE-8 .long __BOOT_DS ready: .byte 0 @@ -556,7 +649,7 @@ idt_descr: .word 0 # 32 bit align gdt_desc.address ENTRY(early_gdt_descr) .word GDT_ENTRIES*8-1 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */ + .long cpu_gdt_table /* Overwritten for secondary CPUs */ /* * The boot_gdt must mirror the equivalent in setup.S and is @@ -565,5 +658,61 @@ ENTRY(early_gdt_descr) .align L1_CACHE_BYTES ENTRY(boot_gdt) .fill GDT_ENTRY_BOOT_CS,8,0 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */ - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */ + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */ + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */ + + .align PAGE_SIZE_asm +ENTRY(cpu_gdt_table) + .quad 0x0000000000000000 /* NULL descriptor */ + .quad 0x0000000000000000 /* 0x0b reserved */ + .quad 0x0000000000000000 /* 0x13 reserved */ + .quad 0x0000000000000000 /* 0x1b reserved */ + .quad 0x0000000000000000 /* 0x20 unused */ + .quad 0x0000000000000000 /* 0x28 unused */ + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */ + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */ + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */ + .quad 0x0000000000000000 /* 0x4b reserved */ + .quad 0x0000000000000000 /* 0x53 reserved */ + .quad 0x0000000000000000 /* 0x5b reserved */ + + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */ + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */ + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */ + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */ + + .quad 0x0000000000000000 /* 0x80 TSS descriptor */ + .quad 0x0000000000000000 /* 0x88 LDT descriptor */ + + /* + * Segments used for calling PnP BIOS have byte granularity. + * The code segments and data segments have fixed 64k limits, + * the transfer segment sizes are set at run time. + */ + .quad 0x00409b000000ffff /* 0x90 32-bit code */ + .quad 0x00009b000000ffff /* 0x98 16-bit code */ + .quad 0x000093000000ffff /* 0xa0 16-bit data */ + .quad 0x0000930000000000 /* 0xa8 16-bit data */ + .quad 0x0000930000000000 /* 0xb0 16-bit data */ + + /* + * The APM segments have byte granularity and their bases + * are set at run time. All have 64k limits. 
+ */ + .quad 0x00409b000000ffff /* 0xb8 APM CS code */ + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */ + .quad 0x004093000000ffff /* 0xc8 APM DS data */ + + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */ + .quad 0x0040930000000000 /* 0xd8 - PERCPU */ + .quad 0x0000000000000000 /* 0xe0 - PCIBIOS_CS */ + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_DS */ + .quad 0x0000000000000000 /* 0xf0 - unused */ + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ + + /* Be sure this is zeroed to avoid false validations in Xen */ + .fill PAGE_SIZE_asm - GDT_ENTRIES,1,0 + +#ifdef CONFIG_SMP + .fill (NR_CPUS-1) * (PAGE_SIZE_asm),1,0 /* other CPU's GDT */ +#endif diff -urNp linux-2.6.22.1/arch/i386/kernel/hpet.c linux-2.6.22.1/arch/i386/kernel/hpet.c --- linux-2.6.22.1/arch/i386/kernel/hpet.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/hpet.c 2007-08-02 11:38:45.000000000 -0400 @@ -95,7 +95,7 @@ static void hpet_reserve_platform_timers hd.hd_irq[1] = HPET_LEGACY_RTC; for (i = 2; i < nrtimers; timer++, i++) - hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >> + hd.hd_irq[i] = (readl(&timer->hpet_config) & Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT; hpet_alloc(&hd); diff -urNp linux-2.6.22.1/arch/i386/kernel/i386_ksyms.c linux-2.6.22.1/arch/i386/kernel/i386_ksyms.c --- linux-2.6.22.1/arch/i386/kernel/i386_ksyms.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/i386_ksyms.c 2007-08-02 11:38:45.000000000 -0400 @@ -2,12 +2,16 @@ #include #include +EXPORT_SYMBOL_GPL(cpu_gdt_table); + EXPORT_SYMBOL(__down_failed); EXPORT_SYMBOL(__down_failed_interruptible); EXPORT_SYMBOL(__down_failed_trylock); EXPORT_SYMBOL(__up_wakeup); /* Networking helper routines. */ EXPORT_SYMBOL(csum_partial_copy_generic); +EXPORT_SYMBOL(csum_partial_copy_generic_to_user); +EXPORT_SYMBOL(csum_partial_copy_generic_from_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); diff -urNp linux-2.6.22.1/arch/i386/kernel/i8259.c linux-2.6.22.1/arch/i386/kernel/i8259.c --- linux-2.6.22.1/arch/i386/kernel/i8259.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/i8259.c 2007-08-02 11:38:45.000000000 -0400 @@ -350,7 +350,7 @@ static irqreturn_t math_error_irq(int cp * New motherboards sometimes make IRQ 13 be a PCI interrupt, * so allow interrupt sharing. */ -static struct irqaction fpu_irq = { math_error_irq, 0, CPU_MASK_NONE, "fpu", NULL, NULL }; +static struct irqaction fpu_irq = { math_error_irq, 0, CPU_MASK_NONE, "fpu", NULL, NULL, 0, NULL }; void __init init_ISA_irqs (void) { diff -urNp linux-2.6.22.1/arch/i386/kernel/init_task.c linux-2.6.22.1/arch/i386/kernel/init_task.c --- linux-2.6.22.1/arch/i386/kernel/init_task.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/init_task.c 2007-08-02 11:38:45.000000000 -0400 @@ -42,5 +42,5 @@ EXPORT_SYMBOL(init_task); * per-CPU TSS segments. Threads are completely 'soft' on Linux, * no more per-task TSS's. */ -DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS; +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS }; diff -urNp linux-2.6.22.1/arch/i386/kernel/io_apic.c linux-2.6.22.1/arch/i386/kernel/io_apic.c --- linux-2.6.22.1/arch/i386/kernel/io_apic.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/io_apic.c 2007-08-02 11:38:45.000000000 -0400 @@ -357,8 +357,8 @@ static void set_ioapic_affinity_irq(unsi # define TDprintk(x...) 
do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0) # define Dprintk(x...) do { TDprintk(x); } while (0) # else -# define TDprintk(x...) -# define Dprintk(x...) +# define TDprintk(x...) do {} while (0) +# define Dprintk(x...) do {} while (0) # endif #define IRQBALANCE_CHECK_ARCH -999 diff -urNp linux-2.6.22.1/arch/i386/kernel/ioport.c linux-2.6.22.1/arch/i386/kernel/ioport.c --- linux-2.6.22.1/arch/i386/kernel/ioport.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/ioport.c 2007-08-02 11:38:45.000000000 -0400 @@ -16,6 +16,7 @@ #include #include #include +#include /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value) @@ -64,9 +65,16 @@ asmlinkage long sys_ioperm(unsigned long if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) return -EINVAL; +#ifdef CONFIG_GRKERNSEC_IO + if (turn_on) { + gr_handle_ioperm(); +#else if (turn_on && !capable(CAP_SYS_RAWIO)) +#endif return -EPERM; - +#ifdef CONFIG_GRKERNSEC_IO + } +#endif /* * If it's the first ioperm() call in this thread's lifetime, set the * IO bitmap up. ioperm() is much less timing critical than clone(), @@ -89,7 +97,7 @@ asmlinkage long sys_ioperm(unsigned long * because the ->io_bitmap_max value must match the bitmap * contents: */ - tss = &per_cpu(init_tss, get_cpu()); + tss = init_tss + get_cpu(); set_bitmap(t->io_bitmap_ptr, from, num, !turn_on); @@ -143,8 +151,13 @@ asmlinkage long sys_iopl(unsigned long u return -EINVAL; /* Trying to gain more privileges? */ if (level > old) { +#ifdef CONFIG_GRKERNSEC_IO + gr_handle_iopl(); + return -EPERM; +#else if (!capable(CAP_SYS_RAWIO)) return -EPERM; +#endif } t->iopl = level << 12; regs->eflags = (regs->eflags & ~X86_EFLAGS_IOPL) | t->iopl; diff -urNp linux-2.6.22.1/arch/i386/kernel/irq.c linux-2.6.22.1/arch/i386/kernel/irq.c --- linux-2.6.22.1/arch/i386/kernel/irq.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/irq.c 2007-08-02 11:38:45.000000000 -0400 @@ -117,7 +117,7 @@ fastcall unsigned int do_IRQ(struct pt_r int arg1, arg2, ebx; /* build the stack frame on the IRQ stack */ - isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); + isp = (u32*) ((char*)irqctx + sizeof(*irqctx)) - 2; irqctx->tinfo.task = curctx->tinfo.task; irqctx->tinfo.previous_esp = current_stack_pointer; @@ -154,10 +154,10 @@ fastcall unsigned int do_IRQ(struct pt_r * gcc's 3.0 and earlier don't handle that correctly. 
*/ static char softirq_stack[NR_CPUS * THREAD_SIZE] - __attribute__((__aligned__(THREAD_SIZE))); + __attribute__((__aligned__(THREAD_SIZE), __section__(".bss.page_aligned"))); static char hardirq_stack[NR_CPUS * THREAD_SIZE] - __attribute__((__aligned__(THREAD_SIZE))); + __attribute__((__aligned__(THREAD_SIZE), __section__(".bss.page_aligned"))); /* * allocate per-cpu stacks for hardirq and for softirq processing @@ -217,7 +217,7 @@ asmlinkage void do_softirq(void) irqctx->tinfo.previous_esp = current_stack_pointer; /* build the stack frame on the softirq stack */ - isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); + isp = (u32*) ((char*)irqctx + sizeof(*irqctx)) - 2; asm volatile( " xchgl %%ebx,%%esp \n" diff -urNp linux-2.6.22/arch/i386/kernel/kprobes.c linux-2.6.22/arch/i386/kernel/kprobes.c --- linux-2.6.22/arch/i386/kernel/kprobes.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22/arch/i386/kernel/kprobes.c 2007-07-10 14:56:30.000000000 -0400 @@ -48,9 +48,24 @@ static __always_inline void set_jmp_op(v char op; long raddr; } __attribute__((packed)) *jop; - jop = (struct __arch_jmp_op *)from; + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + + jop = (struct __arch_jmp_op *)(from + __KERNEL_TEXT_OFFSET); + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + jop->raddr = (long)(to) - ((long)(from) + 5); jop->op = RELATIVEJUMP_INSTRUCTION; + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + } /* @@ -152,12 +167,26 @@ static int __kprobes is_IF_modifier(kpro int __kprobes arch_prepare_kprobe(struct kprobe *p) { + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + /* insn: must be on special executable page on i386. */ p->ainsn.insn = get_insn_slot(); if (!p->ainsn.insn) return -ENOMEM; - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + + memcpy(p->ainsn.insn, p->addr + __KERNEL_TEXT_OFFSET, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + p->opcode = *p->addr; if (can_boost(p->addr)) { p->ainsn.boostable = 0; @@ -222,7 +251,7 @@ static void __kprobes prepare_singlestep if (p->opcode == BREAKPOINT_INSTRUCTION) regs->eip = (unsigned long)p->addr; else - regs->eip = (unsigned long)p->ainsn.insn; + regs->eip = (unsigned long)p->ainsn.insn - __KERNEL_TEXT_OFFSET; } /* Called with kretprobe_lock held */ @@ -328,7 +357,7 @@ ss_probe: if (p->ainsn.boostable == 1 && !p->post_handler){ /* Boost up -- we can execute copied instructions directly */ reset_current_kprobe(); - regs->eip = (unsigned long)p->ainsn.insn; + regs->eip = (unsigned long)p->ainsn.insn - __KERNEL_TEXT_OFFSET; preempt_enable_no_resched(); return 1; } @@ -478,7 +507,7 @@ static void __kprobes resume_execution(s struct pt_regs *regs, struct kprobe_ctlblk *kcb) { unsigned long *tos = (unsigned long *)®s->esp; - unsigned long copy_eip = (unsigned long)p->ainsn.insn; + unsigned long copy_eip = (unsigned long)p->ainsn.insn - __KERNEL_TEXT_OFFSET; unsigned long orig_eip = (unsigned long)p->addr; regs->eflags &= ~TF_MASK; @@ -651,7 +680,7 @@ int __kprobes kprobe_exceptions_notify(s struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; - if (args->regs && user_mode_vm(args->regs)) + if (args->regs && user_mode(args->regs)) return ret; switch (val) { diff -urNp linux-2.6.22.1/arch/i386/kernel/ldt.c linux-2.6.22.1/arch/i386/kernel/ldt.c --- linux-2.6.22.1/arch/i386/kernel/ldt.c 2007-07-10 14:56:30.000000000 -0400 +++ 
linux-2.6.22.1/arch/i386/kernel/ldt.c 2007-08-02 11:38:45.000000000 -0400 @@ -58,7 +58,7 @@ static int alloc_ldt(mm_context_t *pc, i #ifdef CONFIG_SMP cpumask_t mask; preempt_disable(); - load_LDT(pc); + load_LDT_nolock(pc); mask = cpumask_of_cpu(smp_processor_id()); if (!cpus_equal(current->mm->cpu_vm_mask, mask)) smp_call_function(flush_ldt, NULL, 1, 1); @@ -102,6 +102,22 @@ int init_new_context(struct task_struct retval = copy_ldt(&mm->context, &old_mm->context); up(&old_mm->context.sem); } + + if (tsk == current) { + mm->context.vdso = ~0UL; + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + mm->context.user_cs_base = 0UL; + mm->context.user_cs_limit = ~0UL; + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + cpus_clear(mm->context.cpu_user_cs_mask); +#endif + +#endif + + } + return retval; } @@ -212,6 +228,13 @@ static int write_ldt(void __user * ptr, } } +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) { + error = -EINVAL; + goto out_unlock; + } +#endif + entry_1 = LDT_entry_a(&ldt_info); entry_2 = LDT_entry_b(&ldt_info); if (oldmode) diff -urNp linux-2.6.22.1/arch/i386/kernel/machine_kexec.c linux-2.6.22.1/arch/i386/kernel/machine_kexec.c --- linux-2.6.22.1/arch/i386/kernel/machine_kexec.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/machine_kexec.c 2007-08-02 11:38:45.000000000 -0400 @@ -29,25 +29,25 @@ static u32 kexec_pmd1[1024] PAGE_ALIGNED static u32 kexec_pte0[1024] PAGE_ALIGNED; static u32 kexec_pte1[1024] PAGE_ALIGNED; -static void set_idt(void *newidt, __u16 limit) +static void set_idt(struct desc_struct *newidt, __u16 limit) { struct Xgt_desc_struct curidt; /* ia32 supports unaliged loads & stores */ curidt.size = limit; - curidt.address = (unsigned long)newidt; + curidt.address = newidt; load_idt(&curidt); }; -static void set_gdt(void *newgdt, __u16 limit) +static void set_gdt(struct desc_struct *newgdt, __u16 limit) { struct Xgt_desc_struct curgdt; /* ia32 supports unaligned loads & stores */ curgdt.size = limit; - curgdt.address = (unsigned long)newgdt; + curgdt.address = newgdt; load_gdt(&curgdt); }; @@ -110,10 +110,10 @@ NORET_TYPE void machine_kexec(struct kim local_irq_disable(); control_page = page_address(image->control_code_page); - memcpy(control_page, relocate_kernel, PAGE_SIZE); + memcpy(control_page, relocate_kernel + __KERNEL_TEXT_OFFSET, PAGE_SIZE); page_list[PA_CONTROL_PAGE] = __pa(control_page); - page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel; + page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel + __KERNEL_TEXT_OFFSET; page_list[PA_PGD] = __pa(kexec_pgd); page_list[VA_PGD] = (unsigned long)kexec_pgd; #ifdef CONFIG_X86_PAE diff -urNp linux-2.6.22.1/arch/i386/kernel/module.c linux-2.6.22.1/arch/i386/kernel/module.c --- linux-2.6.22.1/arch/i386/kernel/module.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/module.c 2007-08-02 11:38:45.000000000 -0400 @@ -23,6 +23,8 @@ #include #include +#include + #if 0 #define DEBUGP printk #else @@ -33,9 +35,30 @@ void *module_alloc(unsigned long size) { if (size == 0) return NULL; + +#ifdef CONFIG_PAX_KERNEXEC + return vmalloc(size); +#else return vmalloc_exec(size); +#endif + } +#ifdef CONFIG_PAX_KERNEXEC +void *module_alloc_exec(unsigned long size) +{ + struct vm_struct *area; + + if (size == 0) + return NULL; + + area = __get_vm_area(size, 0, (unsigned long)&MODULES_VADDR, (unsigned long)&MODULES_END); + if (area) + return area->addr; + + return 
NULL; +} +#endif /* Free memory returned from module_alloc */ void module_free(struct module *mod, void *module_region) @@ -45,6 +68,45 @@ void module_free(struct module *mod, voi table entries. */ } +#ifdef CONFIG_PAX_KERNEXEC +void module_free_exec(struct module *mod, void *module_region) +{ + struct vm_struct **p, *tmp; + + if (!module_region) + return; + + if ((PAGE_SIZE-1) & (unsigned long)module_region) { + printk(KERN_ERR "Trying to module_free_exec() bad address (%p)\n", module_region); + WARN_ON(1); + return; + } + + write_lock(&vmlist_lock); + for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) + if (tmp->addr == module_region) + break; + + if (tmp) { + unsigned long cr0; + + pax_open_kernel(cr0); + memset(tmp->addr, 0xCC, tmp->size); + pax_close_kernel(cr0); + + *p = tmp->next; + kfree(tmp); + } + write_unlock(&vmlist_lock); + + if (!tmp) { + printk(KERN_ERR "Trying to module_free_exec() nonexistent vm area (%p)\n", + module_region); + WARN_ON(1); + } +} +#endif + /* We don't need anything special. */ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, @@ -63,14 +125,16 @@ int apply_relocate(Elf32_Shdr *sechdrs, unsigned int i; Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr; Elf32_Sym *sym; - uint32_t *location; + uint32_t *plocation, location; DEBUGP("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr - + rel[i].r_offset; + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; + location = (uint32_t)plocation; + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR) + plocation = (void *)plocation + __KERNEL_TEXT_OFFSET; /* This is the symbol it is referring to. Note that all undefined symbols have been resolved. 
*/ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr @@ -79,11 +143,11 @@ int apply_relocate(Elf32_Shdr *sechdrs, switch (ELF32_R_TYPE(rel[i].r_info)) { case R_386_32: /* We add the value into the location given */ - *location += sym->st_value; + *plocation += sym->st_value; break; case R_386_PC32: /* Add the value, subtract its postition */ - *location += sym->st_value - (uint32_t)location; + *plocation += sym->st_value - location; break; default: printk(KERN_ERR "module %s: Unknown relocation: %u\n", diff -urNp linux-2.6.22.1/arch/i386/kernel/paravirt.c linux-2.6.22.1/arch/i386/kernel/paravirt.c --- linux-2.6.22.1/arch/i386/kernel/paravirt.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/paravirt.c 2007-08-02 11:38:45.000000000 -0400 @@ -188,7 +188,7 @@ unsigned paravirt_patch_insns(void *site if (insn_len > len || start == NULL) insn_len = len; else - memcpy(site, start, insn_len); + memcpy(site, start + __KERNEL_TEXT_OFFSET, insn_len); return insn_len; } @@ -228,7 +228,7 @@ static int __init print_banner(void) } core_initcall(print_banner); -struct paravirt_ops paravirt_ops = { +struct paravirt_ops paravirt_ops __read_only = { .name = "bare hardware", .paravirt_enabled = 0, .kernel_rpl = 0, diff -urNp linux-2.6.22.1/arch/i386/kernel/process.c linux-2.6.22.1/arch/i386/kernel/process.c --- linux-2.6.22.1/arch/i386/kernel/process.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/process.c 2007-08-02 11:38:45.000000000 -0400 @@ -68,15 +68,17 @@ EXPORT_SYMBOL(boot_option_idle_override) DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; EXPORT_PER_CPU_SYMBOL(current_task); +#ifdef CONFIG_SMP DEFINE_PER_CPU(int, cpu_number); EXPORT_PER_CPU_SYMBOL(cpu_number); +#endif /* * Return saved PC of a blocked thread. 
*/ unsigned long thread_saved_pc(struct task_struct *tsk) { - return ((unsigned long *)tsk->thread.esp)[3]; + return tsk->thread.eip; } /* @@ -306,7 +308,7 @@ void show_regs(struct pt_regs * regs) 0xffff & regs->xcs,regs->eip, smp_processor_id()); print_symbol("EIP is at %s\n", regs->eip); - if (user_mode_vm(regs)) + if (user_mode(regs)) printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); printk(" EFLAGS: %08lx %s (%s %.*s)\n", regs->eflags, print_tainted(), init_utsname()->release, @@ -346,8 +348,8 @@ int kernel_thread(int (*fn)(void *), voi regs.ebx = (unsigned long) fn; regs.edx = (unsigned long) arg; - regs.xds = __USER_DS; - regs.xes = __USER_DS; + regs.xds = __KERNEL_DS; + regs.xes = __KERNEL_DS; regs.xfs = __KERNEL_PERCPU; regs.orig_eax = -1; regs.eip = (unsigned long) kernel_thread_helper; @@ -369,7 +371,7 @@ void exit_thread(void) struct task_struct *tsk = current; struct thread_struct *t = &tsk->thread; int cpu = get_cpu(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); + struct tss_struct *tss = init_tss + cpu; kfree(t->io_bitmap_ptr); t->io_bitmap_ptr = NULL; @@ -390,6 +392,7 @@ void flush_thread(void) { struct task_struct *tsk = current; + __asm__("mov %0,%%gs\n" : : "r" (0) : "memory"); memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); clear_tsk_thread_flag(tsk, TIF_DEBUG); @@ -423,7 +426,7 @@ int copy_thread(int nr, unsigned long cl struct task_struct *tsk; int err; - childregs = task_pt_regs(p); + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8; *childregs = *regs; childregs->eax = 0; childregs->esp = esp; @@ -465,6 +468,11 @@ int copy_thread(int nr, unsigned long cl if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) goto out; +#ifdef CONFIG_PAX_SEGMEXEC + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE)) + goto out; +#endif + desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; desc->a = LDT_entry_a(&info); desc->b = LDT_entry_b(&info); @@ -644,7 +652,7 @@ struct task_struct fastcall * __switch_t struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); + struct tss_struct *tss = init_tss + cpu; /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ @@ -672,6 +680,11 @@ struct task_struct fastcall * __switch_t */ savesegment(gs, prev->gs); +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (!segment_eq(task_thread_info(prev_p)->addr_limit, task_thread_info(next_p)->addr_limit)) + __set_fs(task_thread_info(next_p)->addr_limit, cpu); +#endif + /* * Load the per-thread Thread-Local Storage descriptor. 
*/ @@ -838,6 +851,12 @@ asmlinkage int sys_set_thread_area(struc if (copy_from_user(&info, u_info, sizeof(info))) return -EFAULT; + +#ifdef CONFIG_PAX_SEGMEXEC + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE)) + return -EINVAL; +#endif + idx = info.entry_number; /* @@ -926,9 +945,28 @@ asmlinkage int sys_get_thread_area(struc return 0; } -unsigned long arch_align_stack(unsigned long sp) +#ifdef CONFIG_PAX_RANDKSTACK +asmlinkage void pax_randomize_kstack(void) { - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() % 8192; - return sp & ~0xf; + struct tss_struct *tss; + unsigned long time; + + if (!randomize_va_space) + return; + + tss = init_tss + smp_processor_id(); + rdtscl(time); + + /* P4 seems to return a 0 LSB, ignore it */ +#ifdef CONFIG_MPENTIUM4 + time &= 0x1EUL; + time <<= 2; +#else + time &= 0xFUL; + time <<= 3; +#endif + + tss->x86_tss.esp0 ^= time; + current->thread.esp0 = tss->x86_tss.esp0; } +#endif diff -urNp linux-2.6.22.1/arch/i386/kernel/ptrace.c linux-2.6.22.1/arch/i386/kernel/ptrace.c --- linux-2.6.22.1/arch/i386/kernel/ptrace.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/ptrace.c 2007-08-02 11:38:45.000000000 -0400 @@ -161,17 +162,20 @@ static unsigned long convert_eip_to_line * and APM bios ones we just ignore here. */ if (seg & LDT_SEGMENT) { - u32 *desc; + struct desc_struct *desc; unsigned long base; down(&child->mm->context.sem); - desc = child->mm->context.ldt + (seg & ~7); - base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000); - - /* 16-bit code segment? */ - if (!((desc[1] >> 22) & 1)) - addr &= 0xffff; - addr += base; + if ((seg >> 3) < child->mm->context.size) { + desc = &child->mm->context.ldt[seg >> 3]; + base = (desc->a >> 16) | ((desc->b & 0xff) << 16) | (desc->b & 0xff000000); + + /* 16-bit code segment? */ + if (!((desc->b >> 22) & 1)) + addr &= 0xffff; + addr += base; + } else + addr = -EINVAL; up(&child->mm->context.sem); } return addr; @@ -183,6 +187,9 @@ static inline int is_setting_trap_flag(s unsigned char opcode[15]; unsigned long addr = convert_eip_to_linear(child, regs); + if (addr == -EINVAL) + return 0; + copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); for (i = 0; i < copied; i++) { switch (opcode[i]) { @@ -334,6 +341,11 @@ ptrace_set_thread_area(struct task_struc if (copy_from_user(&info, user_desc, sizeof(info))) return -EFAULT; +#ifdef CONFIG_PAX_SEGMEXEC + if ((child->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE)) + return -EINVAL; +#endif + if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; @@ -424,6 +436,17 @@ long arch_ptrace(struct task_struct *chi if(addr == (long) &dummy->u_debugreg[5]) break; if(addr < (long) &dummy->u_debugreg[4] && ((unsigned long) data) >= TASK_SIZE-3) break; + +#ifdef CONFIG_GRKERNSEC + if(addr >= (long) &dummy->u_debugreg[0] && + addr <= (long) &dummy->u_debugreg[3]){ + long reg = (addr - (long) &dummy->u_debugreg[0]) >> 2; + long type = (child->thread.debugreg[7] >> (DR_CONTROL_SHIFT + 4*reg)) & 3; + long align = (child->thread.debugreg[7] >> (DR_CONTROL_SHIFT + 2 + 4*reg)) & 3; + if((type & 1) && (data & align)) + break; + } +#endif /* Sanity-check data. Take one half-byte at once with * check = (val >> (16 + 4*i)) & 0xf. It contains the @@ -640,7 +663,7 @@ void send_sigtrap(struct task_struct *ts info.si_code = TRAP_BRKPT; /* User-mode eip? */ - info.si_addr = user_mode_vm(regs) ? 
(void __user *) regs->eip : NULL; + info.si_addr = user_mode(regs) ? (void __user *) regs->eip : NULL; /* Send us the fakey SIGTRAP */ force_sig_info(SIGTRAP, &info, tsk); diff -urNp linux-2.6.22.1/arch/i386/kernel/reboot.c linux-2.6.22.1/arch/i386/kernel/reboot.c --- linux-2.6.22.1/arch/i386/kernel/reboot.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/reboot.c 2007-08-02 11:38:45.000000000 -0400 @@ -26,7 +26,7 @@ void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); -static int reboot_mode; +static unsigned short reboot_mode; static int reboot_thru_bios; #ifdef CONFIG_SMP @@ -129,7 +129,7 @@ static struct dmi_system_id __initdata r DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), }, }, - { } + { NULL, NULL, {{0, NULL}}, NULL} }; static int __init reboot_init(void) @@ -147,18 +147,18 @@ core_initcall(reboot_init); doesn't work with at least one type of 486 motherboard. It is easy to stop this code working; hence the copious comments. */ -static unsigned long long -real_mode_gdt_entries [3] = +static struct desc_struct +real_mode_gdt_entries [3] __read_only = { - 0x0000000000000000ULL, /* Null descriptor */ - 0x00009a000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */ - 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ + {0x00000000, 0x00000000}, /* Null descriptor */ + {0x0000ffff, 0x00009b00}, /* 16-bit real-mode 64k code at 0x00000000 */ + {0x0100ffff, 0x00009300} /* 16-bit real-mode 64k data at 0x00000100 */ }; -static struct Xgt_desc_struct -real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries }, -real_mode_idt = { 0x3ff, 0 }, -no_idt = { 0, 0 }; +static const struct Xgt_desc_struct +real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (struct desc_struct *)__pa(real_mode_gdt_entries), 0 }, +real_mode_idt = { 0x3ff, NULL, 0 }, +no_idt = { 0, NULL, 0 }; /* This is 16-bit protected mode code to disable paging and the cache, @@ -180,7 +180,7 @@ no_idt = { 0, 0 }; More could be done here to set up the registers as if a CPU reset had occurred; hopefully real BIOSs don't assume much. */ -static unsigned char real_mode_switch [] = +static const unsigned char real_mode_switch [] = { 0x66, 0x0f, 0x20, 0xc0, /* movl %cr0,%eax */ 0x66, 0x83, 0xe0, 0x11, /* andl $0x00000011,%eax */ @@ -194,7 +194,7 @@ static unsigned char real_mode_switch [] 0x24, 0x10, /* f: andb $0x10,al */ 0x66, 0x0f, 0x22, 0xc0 /* movl %eax,%cr0 */ }; -static unsigned char jump_to_bios [] = +static const unsigned char jump_to_bios [] = { 0xea, 0x00, 0x00, 0xff, 0xff /* ljmp $0xffff,$0x0000 */ }; @@ -204,8 +204,13 @@ static unsigned char jump_to_bios [] = * specified by the code and length parameters. * We assume that length will aways be less that 100! */ -void machine_real_restart(unsigned char *code, int length) +void machine_real_restart(const unsigned char *code, unsigned int length) { + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + local_irq_disable(); /* Write zero to CMOS register number 0x0f, which the BIOS POST @@ -226,8 +231,16 @@ void machine_real_restart(unsigned char from the kernel segment. This assumes the kernel segment starts at virtual address PAGE_OFFSET. 
*/ - memcpy (swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, - sizeof (swapper_pg_dir [0]) * KERNEL_PGD_PTRS); +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, + min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif /* * Use `swapper_pg_dir' as our page directory. @@ -240,7 +253,7 @@ void machine_real_restart(unsigned char REBOOT.COM programs, and the previous reset routine did this too. */ - *((unsigned short *)0x472) = reboot_mode; + *(unsigned short *)(__va(0x472)) = reboot_mode; /* For the switch to real mode, copy some code to low memory. It has to be in the first 64k because it is running in 16-bit mode, and it @@ -248,9 +261,8 @@ void machine_real_restart(unsigned char off paging. Copy it near the end of the first page, out of the way of BIOS variables. */ - memcpy ((void *) (0x1000 - sizeof (real_mode_switch) - 100), - real_mode_switch, sizeof (real_mode_switch)); - memcpy ((void *) (0x1000 - 100), code, length); + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch)); + memcpy(__va(0x1000 - 100), code, length); /* Set up the IDT for real mode. */ @@ -336,7 +348,7 @@ static void native_machine_emergency_res __asm__ __volatile__("int3"); } /* rebooting needs to touch the page at absolute addr 0 */ - *((unsigned short *)__va(0x472)) = reboot_mode; + *(unsigned short *)(__va(0x472)) = reboot_mode; for (;;) { mach_reboot_fixups(); /* for board specific fixups */ mach_reboot(); diff -urNp linux-2.6.22.1/arch/i386/kernel/setup.c linux-2.6.22.1/arch/i386/kernel/setup.c --- linux-2.6.22.1/arch/i386/kernel/setup.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/setup.c 2007-08-02 11:38:45.000000000 -0400 @@ -82,7 +82,11 @@ struct cpuinfo_x86 new_cpu_data __cpuini struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; EXPORT_SYMBOL(boot_cpu_data); +#ifdef CONFIG_X86_PAE +unsigned long mmu_cr4_features = X86_CR4_PAE; +#else unsigned long mmu_cr4_features; +#endif /* for MCA, but anyone else can use it if they want */ unsigned int machine_id; @@ -404,8 +408,8 @@ void __init setup_bootmem_allocator(void * the (very unlikely) case of us accidentally initializing the * bootmem allocator with an invalid RAM area. 
*/ - reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) + - bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text)); + reserve_bootmem(LOAD_PHYSICAL_ADDR, (PFN_PHYS(min_low_pfn) + + bootmap_size + PAGE_SIZE-1) - LOAD_PHYSICAL_ADDR); /* * reserve physical page 0 - it's a special BIOS page on many boxes, @@ -559,14 +563,14 @@ void __init setup_arch(char **cmdline_p) if (!MOUNT_ROOT_RDONLY) root_mountflags &= ~MS_RDONLY; - init_mm.start_code = (unsigned long) _text; - init_mm.end_code = (unsigned long) _etext; + init_mm.start_code = (unsigned long) _text + __KERNEL_TEXT_OFFSET; + init_mm.end_code = (unsigned long) _etext + __KERNEL_TEXT_OFFSET; init_mm.end_data = (unsigned long) _edata; init_mm.brk = init_pg_tables_end + PAGE_OFFSET; - code_resource.start = virt_to_phys(_text); - code_resource.end = virt_to_phys(_etext)-1; - data_resource.start = virt_to_phys(_etext); + code_resource.start = virt_to_phys(_text + __KERNEL_TEXT_OFFSET); + code_resource.end = virt_to_phys(_etext + __KERNEL_TEXT_OFFSET)-1; + data_resource.start = virt_to_phys(_data); data_resource.end = virt_to_phys(_edata)-1; parse_early_param(); @@ -658,3 +662,24 @@ void __init setup_arch(char **cmdline_p) #endif #endif } + +unsigned long __per_cpu_offset[NR_CPUS] __read_only; + +EXPORT_SYMBOL(__per_cpu_offset); + +void __init setup_per_cpu_areas(void) +{ + unsigned long size, i; + char *ptr; + unsigned long nr_possible_cpus = num_possible_cpus(); + + /* Copy section for each CPU (we discard the original) */ + size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE); + ptr = alloc_bootmem_pages(size * nr_possible_cpus); + + for_each_possible_cpu(i) { + __per_cpu_offset[i] = (unsigned long)ptr; + memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); + ptr += size; + } +} diff -urNp linux-2.6.22.1/arch/i386/kernel/signal.c linux-2.6.22.1/arch/i386/kernel/signal.c --- linux-2.6.22.1/arch/i386/kernel/signal.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/signal.c 2007-08-02 11:38:45.000000000 -0400 @@ -350,9 +350,9 @@ static int setup_frame(int sig, struct k } if (current->binfmt->hasvdso) - restorer = (void *)VDSO_SYM(&__kernel_sigreturn); + restorer = (void __user *)VDSO_SYM(&__kernel_sigreturn); else - restorer = (void *)&frame->retcode; + restorer = (void __user *)&frame->retcode; if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; @@ -448,7 +448,8 @@ static int setup_rt_frame(int sig, struc goto give_sigsegv; /* Set up to return from userspace. */ - restorer = (void *)VDSO_SYM(&__kernel_rt_sigreturn); + + restorer = (void __user *)VDSO_SYM(&__kernel_rt_sigreturn); if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; err |= __put_user(restorer, &frame->pretcode); @@ -581,7 +582,7 @@ static void fastcall do_signal(struct pt * before reaching here, so testing against kernel * CS suffices. */ - if (!user_mode(regs)) + if (!user_mode_novm(regs)) return; if (test_thread_flag(TIF_RESTORE_SIGMASK)) diff -urNp linux-2.6.22.1/arch/i386/kernel/smpboot.c linux-2.6.22.1/arch/i386/kernel/smpboot.c --- linux-2.6.22.1/arch/i386/kernel/smpboot.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/smpboot.c 2007-08-03 12:34:39.000000000 -0400 @@ -118,7 +118,7 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 }; * has made sure it's suitably aligned. 
*/ -static unsigned long __devinit setup_trampoline(void) +static unsigned long __cpuinit setup_trampoline(void) { memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data); return virt_to_phys(trampoline_base); @@ -773,6 +773,10 @@ static int __cpuinit do_boot_cpu(int api unsigned long start_eip; unsigned short nmi_high = 0, nmi_low = 0; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + /* * Save current MTRR state in case it was changed since early boot * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: @@ -789,7 +793,16 @@ static int __cpuinit do_boot_cpu(int api init_gdt(cpu); per_cpu(current_task, cpu) = idle; - early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + + early_gdt_descr.address = get_cpu_gdt_table(cpu); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif idle->thread.eip = (unsigned long) start_secondary; /* start_eip had better be page-aligned! */ diff -urNp linux-2.6.22.1/arch/i386/kernel/smp.c linux-2.6.22.1/arch/i386/kernel/smp.c --- linux-2.6.22.1/arch/i386/kernel/smp.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/smp.c 2007-08-02 11:38:45.000000000 -0400 @@ -103,7 +103,7 @@ * about nothing of note with C stepping upwards. */ -DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, }; +DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, {0} }; /* * the following functions deal with sending IPIs between CPUs. diff -urNp linux-2.6.22.1/arch/i386/kernel/smpcommon.c linux-2.6.22.1/arch/i386/kernel/smpcommon.c --- linux-2.6.22.1/arch/i386/kernel/smpcommon.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/smpcommon.c 2007-08-03 12:34:39.000000000 -0400 @@ -3,6 +3,7 @@ */ #include #include +#include DEFINE_PER_CPU(unsigned long, this_cpu_off); EXPORT_PER_CPU_SYMBOL(this_cpu_off); @@ -14,10 +15,29 @@ __cpuinit void init_gdt(int cpu) { struct desc_struct *gdt = get_cpu_gdt_table(cpu); - pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a, - (u32 *)&gdt[GDT_ENTRY_PERCPU].b, - __per_cpu_offset[cpu], 0xFFFFF, - 0x80 | DESCTYPE_S | 0x2, 0x8); +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; + + pax_open_kernel(cr0); +#endif + + if (cpu) + memcpy(gdt, cpu_gdt_table, GDT_SIZE); + + if (PERCPU_ENOUGH_ROOM <= 64*1024*1024) + pack_descriptor((__u32 *)&gdt[GDT_ENTRY_PERCPU].a, + (__u32 *)&gdt[GDT_ENTRY_PERCPU].b, + __per_cpu_offset[cpu], PERCPU_ENOUGH_ROOM-1, + 0x80 | DESCTYPE_S | 0x3, 0x4); + else + pack_descriptor((__u32 *)&gdt[GDT_ENTRY_PERCPU].a, + (__u32 *)&gdt[GDT_ENTRY_PERCPU].b, + __per_cpu_offset[cpu], ((PERCPU_ENOUGH_ROOM-1) >> PAGE_SHIFT), + 0x80 | DESCTYPE_S | 0x3, 0xC); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; per_cpu(cpu_number, cpu) = cpu; diff -urNp linux-2.6.22.1/arch/i386/kernel/syscall_table.S linux-2.6.22.1/arch/i386/kernel/syscall_table.S --- linux-2.6.22.1/arch/i386/kernel/syscall_table.S 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/syscall_table.S 2007-08-02 11:38:45.000000000 -0400 @@ -1,3 +1,4 @@ +.section .rodata,"a",@progbits ENTRY(sys_call_table) .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ .long sys_exit diff -urNp linux-2.6.22.1/arch/i386/kernel/sysenter.c linux-2.6.22.1/arch/i386/kernel/sysenter.c --- linux-2.6.22.1/arch/i386/kernel/sysenter.c 2007-07-10 14:56:30.000000000 -0400 +++ 
linux-2.6.22.1/arch/i386/kernel/sysenter.c 2007-08-02 11:38:45.000000000 -0400 @@ -176,7 +176,7 @@ static __init void relocate_vdso(Elf32_E void enable_sep_cpu(void) { int cpu = get_cpu(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); + struct tss_struct *tss = init_tss + cpu; if (!boot_cpu_has(X86_FEATURE_SEP)) { put_cpu(); @@ -199,7 +199,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; - gate_vma.vm_page_prot = __P101; + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); /* * Make sure the vDSO gets into every core dump. * Dumping its contents makes post-mortem fully interpretable later @@ -282,7 +282,7 @@ int arch_setup_additional_pages(struct l if (compat) addr = VDSO_HIGH_BASE; else { - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; @@ -307,7 +307,7 @@ int arch_setup_additional_pages(struct l goto up_fail; } - current->mm->context.vdso = (void *)addr; + current->mm->context.vdso = addr; current_thread_info()->sysenter_return = (void *)VDSO_SYM(&SYSENTER_RETURN); @@ -319,8 +319,14 @@ int arch_setup_additional_pages(struct l const char *arch_vma_name(struct vm_area_struct *vma) { - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) return "[vdso]"; + +#ifdef CONFIG_PAX_SEGMEXEC + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso) + return "[vdso]"; +#endif + return NULL; } @@ -329,7 +335,7 @@ struct vm_area_struct *get_gate_vma(stru struct mm_struct *mm = tsk->mm; /* Check to see if this task was created in compat vdso mode */ - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE) + if (mm && mm->context.vdso == VDSO_HIGH_BASE) return &gate_vma; return NULL; } diff -urNp linux-2.6.22.1/arch/i386/kernel/sys_i386.c linux-2.6.22.1/arch/i386/kernel/sys_i386.c --- linux-2.6.22.1/arch/i386/kernel/sys_i386.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/sys_i386.c 2007-08-02 11:38:45.000000000 -0400 @@ -40,6 +40,21 @@ asmlinkage int sys_pipe(unsigned long __ return error; } +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) +{ + unsigned long task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) + task_size = SEGMEXEC_TASK_SIZE; +#endif + + if (len > task_size || addr > task_size - len) + return -EINVAL; + + return 0; +} + asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) @@ -99,6 +114,205 @@ out: return err; } +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long start_addr, task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + task_size = SEGMEXEC_TASK_SIZE; +#endif + + if (len > task_size) + return -ENOMEM; + + if (flags & MAP_FIXED) + return addr; + +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || !filp) +#endif + + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (task_size - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + 
if (len > mm->cached_hole_size) { + start_addr = addr = mm->free_area_cache; + } else { + start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + } + +#ifdef CONFIG_PAX_PAGEEXEC + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) { + start_addr = 0x00110000UL; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + start_addr += mm->delta_mmap & 0x03FFF000UL; +#endif + + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base) + start_addr = addr = mm->mmap_base; + else + addr = start_addr; + } +#endif + +full_search: + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). */ + if (task_size - len < addr) { + /* + * Start a new search - just in case we missed + * some holes. + */ + if (start_addr != mm->mmap_base) { + start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } + if (!vma || addr + len <= vma->vm_start) { + /* + * Remember the place where we stopped the search: + */ + mm->free_area_cache = addr + len; + return addr; + } + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = vma->vm_end; + if (mm->start_brk <= addr && addr < mm->mmap_base) { + start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + } +} + +unsigned long +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, const unsigned long pgoff, + const unsigned long flags) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long base = mm->mmap_base, addr = addr0, task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + task_size = SEGMEXEC_TASK_SIZE; +#endif + + /* requested length too big for entire address space */ + if (len > task_size) + return -ENOMEM; + + if (flags & MAP_FIXED) + return addr; + +#ifdef CONFIG_PAX_PAGEEXEC + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) + goto bottomup; +#endif + +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || !filp) +#endif + + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (task_size - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + + /* check if free_area_cache is useful for us */ + if (len <= mm->cached_hole_size) { + mm->cached_hole_size = 0; + mm->free_area_cache = mm->mmap_base; + } + + /* either no address requested or can't fit in requested address hole */ + addr = mm->free_area_cache; + + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); + if (!vma || addr <= vma->vm_start) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } + + if (mm->mmap_base < len) + goto bottomup; + + addr = mm->mmap_base-len; + + do { + /* + * Lookup failure means no vma is above this address, + * else if new region fits below vma->vm_start, + * return with success: + */ + vma = find_vma(mm, addr); + if (!vma || addr+len <= vma->vm_start) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + + /* remember the largest hole we saw so far */ + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ + addr = vma->vm_start-len; + 
} while (len < vma->vm_start); + +bottomup: + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; + else +#endif + + mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + + mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ + mm->mmap_base = base; + mm->free_area_cache = base; + mm->cached_hole_size = ~0UL; + + return addr; +} struct sel_arg_struct { unsigned long n; diff -urNp linux-2.6.22.1/arch/i386/kernel/time.c linux-2.6.22.1/arch/i386/kernel/time.c --- linux-2.6.22.1/arch/i386/kernel/time.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/time.c 2007-08-02 11:38:45.000000000 -0400 @@ -132,20 +132,30 @@ unsigned long profile_pc(struct pt_regs if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->xcs) && in_lock_functions(pc)) { #ifdef CONFIG_FRAME_POINTER - return *(unsigned long *)(regs->ebp + 4); + return *(unsigned long *)(regs->ebp + 4) + __KERNEL_TEXT_OFFSET; #else unsigned long *sp = (unsigned long *)®s->esp; /* Return address is either directly at stack pointer or above a saved eflags. Eflags has bits 22-31 zero, kernel addresses don't. */ + +#ifdef CONFIG_PAX_KERNEXEC + return sp[0] + __KERNEL_TEXT_OFFSET; +#else if (sp[0] >> 22) return sp[0]; if (sp[1] >> 22) return sp[1]; #endif + +#endif } #endif + + if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->xcs)) + pc += __KERNEL_TEXT_OFFSET; + return pc; } EXPORT_SYMBOL(profile_pc); diff -urNp linux-2.6.22.1/arch/i386/kernel/traps.c linux-2.6.22.1/arch/i386/kernel/traps.c --- linux-2.6.22.1/arch/i386/kernel/traps.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/traps.c 2007-08-02 11:38:45.000000000 -0400 @@ -31,6 +31,7 @@ #include #include #include +#include #ifdef CONFIG_EISA #include @@ -66,12 +67,7 @@ asmlinkage int system_call(void); /* Do we ignore FPU interrupts ? */ char ignore_fpu_irq = 0; -/* - * The IDT has to be page-aligned to simplify the Pentium - * F0 0F bug workaround.. We have a special link segment - * for this. 
- */ -struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, }; +extern struct desc_struct idt_table[256]; asmlinkage void divide_error(void); asmlinkage void debug(void); @@ -283,7 +279,7 @@ void show_registers(struct pt_regs *regs esp = (unsigned long) (®s->esp); savesegment(ss, ss); savesegment(gs, gs); - if (user_mode_vm(regs)) { + if (user_mode(regs)) { in_kernel = 0; esp = regs->esp; ss = regs->xss & 0xffff; @@ -315,17 +311,18 @@ void show_registers(struct pt_regs *regs unsigned int code_prologue = code_bytes * 43 / 64; unsigned int code_len = code_bytes; unsigned char c; + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->xcs) >> 3]); printk("\n" KERN_EMERG "Stack: "); show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG); printk(KERN_EMERG "Code: "); - eip = (u8 *)regs->eip - code_prologue; + eip = (u8 *)regs->eip - code_prologue + cs_base; if (eip < (u8 *)PAGE_OFFSET || probe_kernel_address(eip, c)) { /* try starting at EIP */ - eip = (u8 *)regs->eip; + eip = (u8 *)regs->eip + cs_base; code_len = code_len - code_prologue + 1; } for (i = 0; i < code_len; i++, eip++) { @@ -334,7 +330,7 @@ void show_registers(struct pt_regs *regs printk(" Bad EIP value."); break; } - if (eip == (u8 *)regs->eip) + if (eip == (u8 *)regs->eip + cs_base) printk("<%02x> ", c); else printk("%02x ", c); @@ -347,6 +343,7 @@ int is_valid_bugaddr(unsigned long eip) { unsigned short ud2; + eip += __KERNEL_TEXT_OFFSET; if (eip < PAGE_OFFSET) return 0; if (probe_kernel_address((unsigned short *)eip, ud2)) @@ -453,7 +450,7 @@ void die(const char * str, struct pt_reg static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err) { - if (!user_mode_vm(regs)) + if (!user_mode(regs)) die(str, regs, err); } @@ -469,7 +466,7 @@ static void __kprobes do_trap(int trapnr goto trap_signal; } - if (!user_mode(regs)) + if (!user_mode_novm(regs)) goto kernel_trap; trap_signal: { @@ -572,7 +569,7 @@ fastcall void __kprobes do_general_prote long error_code) { int cpu = get_cpu(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); + struct tss_struct *tss = &init_tss[cpu]; struct thread_struct *thread = ¤t->thread; /* @@ -605,9 +602,25 @@ fastcall void __kprobes do_general_prote if (regs->eflags & VM_MASK) goto gp_in_vm86; - if (!user_mode(regs)) + if (!user_mode_novm(regs)) goto gp_in_kernel; +#ifdef CONFIG_PAX_PAGEEXEC + if (!nx_enabled && current->mm && (current->mm->pax_flags & MF_PAX_PAGEEXEC)) { + struct mm_struct *mm = current->mm; + unsigned long limit; + + down_write(&mm->mmap_sem); + limit = mm->context.user_cs_limit; + if (limit < TASK_SIZE) { + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC); + up_write(&mm->mmap_sem); + return; + } + up_write(&mm->mmap_sem); + } +#endif + current->thread.error_code = error_code; current->thread.trap_no = 13; force_sig(SIGSEGV, current); @@ -625,6 +638,13 @@ gp_in_kernel: if (notify_die(DIE_GPF, "general protection fault", regs, error_code, 13, SIGSEGV) == NOTIFY_STOP) return; + +#ifdef CONFIG_PAX_KERNEXEC + if ((regs->xcs & 0xFFFF) == __KERNEL_CS) + die("PAX: suspicious general protection fault", regs, error_code); + else +#endif + die("general protection fault", regs, error_code); } } @@ -706,7 +726,7 @@ void __kprobes die_nmi(struct pt_regs *r /* If we are in kernel we are probably nested up pretty bad * and might aswell get out now while we still can. 
*/ - if (!user_mode_vm(regs)) { + if (!user_mode(regs)) { current->thread.trap_no = 2; crash_kexec(regs); } @@ -838,7 +858,7 @@ fastcall void __kprobes do_debug(struct * check for kernel mode by just checking the CPL * of CS. */ - if (!user_mode(regs)) + if (!user_mode_novm(regs)) goto clear_TF_reenable; } @@ -1016,18 +1036,14 @@ fastcall void do_spurious_interrupt_bug( fastcall unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp) { - struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt; unsigned long base = (kesp - uesp) & -THREAD_SIZE; unsigned long new_kesp = kesp - base; unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT; - __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS]; + __u32 a, b; + /* Set up base for espfix segment */ - desc &= 0x00f0ff0000000000ULL; - desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) | - ((((__u64)base) << 32) & 0xff00000000000000ULL) | - ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) | - (lim_pages & 0xffff); - *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc; + pack_descriptor(&a, &b, base, lim_pages, 0x93, 0xC); + write_gdt_entry(get_cpu_gdt_table(smp_processor_id()), GDT_ENTRY_ESPFIX_SS, a, b); return new_kesp; } @@ -1075,7 +1091,7 @@ void __init trap_init_f00f_bug(void) * Update the IDT descriptor and reload the IDT so that * it uses the read-only mapped virtual address. */ - idt_descr.address = fix_to_virt(FIX_F00F_IDT); + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT); load_idt(&idt_descr); } #endif diff -urNp linux-2.6.22.1/arch/i386/kernel/tsc.c linux-2.6.22.1/arch/i386/kernel/tsc.c --- linux-2.6.22.1/arch/i386/kernel/tsc.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/tsc.c 2007-08-02 11:38:45.000000000 -0400 @@ -308,7 +308,7 @@ static struct dmi_system_id __initdata b DMI_MATCH(DMI_BOARD_NAME, "2635FA0"), }, }, - {} + { NULL, NULL, {{0, NULL}}, NULL} }; /* diff -urNp linux-2.6.22.1/arch/i386/kernel/vm86.c linux-2.6.22.1/arch/i386/kernel/vm86.c --- linux-2.6.22.1/arch/i386/kernel/vm86.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/vm86.c 2007-08-02 11:38:45.000000000 -0400 @@ -148,7 +148,7 @@ struct pt_regs * fastcall save_v86_state do_exit(SIGSEGV); } - tss = &per_cpu(init_tss, get_cpu()); + tss = init_tss + get_cpu(); current->thread.esp0 = current->thread.saved_esp0; current->thread.sysenter_cs = __KERNEL_CS; load_esp0(tss, ¤t->thread); @@ -324,7 +324,7 @@ static void do_sys_vm86(struct kernel_vm tsk->thread.saved_fs = info->regs32->xfs; savesegment(gs, tsk->thread.saved_gs); - tss = &per_cpu(init_tss, get_cpu()); + tss = init_tss + get_cpu(); tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0; if (cpu_has_sep) tsk->thread.sysenter_cs = 0; diff -urNp linux-2.6.22/arch/i386/kernel/vmi.c linux-2.6.22/arch/i386/kernel/vmi.c --- linux-2.6.22/arch/i386/kernel/vmi.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22/arch/i386/kernel/vmi.c 2007-07-10 14:56:30.000000000 -0400 @@ -96,18 +96,43 @@ static unsigned patch_internal(int call, { u64 reloc; struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc; + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + reloc = call_vrom_long_func(vmi_rom, get_reloc, call); switch(rel->type) { case VMI_RELOCATION_CALL_REL: BUG_ON(len < 5); + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + *(char *)insns = MNEM_CALL; patch_offset(insns, rel->eip); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + return 5; case VMI_RELOCATION_JUMP_REL: 
BUG_ON(len < 5); + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + *(char *)insns = MNEM_JMP; patch_offset(insns, rel->eip); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + return 5; case VMI_RELOCATION_NOP: @@ -485,14 +510,14 @@ static void vmi_set_pud(pud_t *pudp, pud static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - const pte_t pte = { 0 }; + const pte_t pte = __pte(0ULL); vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); } static void vmi_pmd_clear(pmd_t *pmd) { - const pte_t pte = { 0 }; + const pte_t pte = __pte(0ULL); vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD); vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD); } @@ -521,8 +546,8 @@ vmi_startup_ipi_hook(int phys_apicid, un ap.ss = __KERNEL_DS; ap.esp = (unsigned long) start_esp; - ap.ds = __USER_DS; - ap.es = __USER_DS; + ap.ds = __KERNEL_DS; + ap.es = __KERNEL_DS; ap.fs = __KERNEL_PERCPU; ap.gs = 0; @@ -719,12 +744,20 @@ static inline int __init activate_vmi(vo u64 reloc; const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + if (call_vrom_func(vmi_rom, vmi_init) != 0) { printk(KERN_ERR "VMI ROM failed to initialize!"); return 0; } savesegment(cs, kernel_cs); +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + paravirt_ops.paravirt_enabled = 1; paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK; @@ -903,6 +936,10 @@ static inline int __init activate_vmi(vo para_fill(safe_halt, Halt); +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + /* * Alternative instruction rewriting doesn't happen soon enough * to convert VMI_IRET to a call instead of a jump; so we have diff -urNp linux-2.6.22.1/arch/i386/kernel/vmlinux.lds.S linux-2.6.22.1/arch/i386/kernel/vmlinux.lds.S --- linux-2.6.22.1/arch/i386/kernel/vmlinux.lds.S 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/kernel/vmlinux.lds.S 2007-08-02 11:38:45.000000000 -0400 @@ -21,6 +21,13 @@ #include #include #include +#include + +#ifdef CONFIG_X86_PAE +#define PMD_SHIFT 21 +#else +#define PMD_SHIFT 22 +#endif OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") OUTPUT_ARCH(i386) @@ -28,22 +35,124 @@ ENTRY(phys_startup_32) jiffies = jiffies_64; PHDRS { - text PT_LOAD FLAGS(5); /* R_E */ - data PT_LOAD FLAGS(7); /* RWE */ - note PT_NOTE FLAGS(0); /* ___ */ + initdata PT_LOAD FLAGS(6); /* RW_ */ + percpu PT_LOAD FLAGS(6); /* RW_ */ + inittext PT_LOAD FLAGS(5); /* R_E */ + text PT_LOAD FLAGS(5); /* R_E */ + rodata PT_LOAD FLAGS(4); /* R__ */ + data PT_LOAD FLAGS(6); /* RW_ */ + note PT_NOTE FLAGS(0); /* ___ */ } SECTIONS { . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; - phys_startup_32 = startup_32 - LOAD_OFFSET; + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; + + .text.startup : AT(ADDR(.text.startup) - LOAD_OFFSET) { + BYTE(0xEA) /* jmp far */ + LONG(phys_startup_32) + SHORT(__BOOT_CS) + } :initdata - .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) { - _text = .; /* Text and read-only data */ + /* might get freed after init */ + . = ALIGN(4096); + .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { + __smp_locks = .; + *(.smp_locks) + __smp_locks_end = .; + } + /* will be freed after init + * Following ALIGN() is required to make sure no other data falls on the + * same page where __smp_alt_end is pointing as that page might be freed + * after boot. 
Always make sure that ALIGN() directive is present after + * the section which contains __smp_alt_end. + */ + . = ALIGN(4096); + + /* will be freed after init */ + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { + __init_begin = .; + *(.init.data) + } + . = ALIGN(16); + .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { + __setup_start = .; + *(.init.setup) + __setup_end = .; + } + .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { + __initcall_start = .; + INITCALLS + __initcall_end = .; + } + .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { + __con_initcall_start = .; + *(.con_initcall.init) + __con_initcall_end = .; + } + SECURITY_INIT + . = ALIGN(4); + .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } + .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { + *(.altinstr_replacement) + } + . = ALIGN(4); + .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { + __parainstructions = .; + *(.parainstructions) + __parainstructions_end = .; + } + .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) } +#if defined(CONFIG_BLK_DEV_INITRD) + . = ALIGN(4096); + .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { + __initramfs_start = .; + *(.init.ramfs) + __initramfs_end = .; + } +#endif + . = ALIGN(4096); + per_cpu_start = .; + .data.percpu (0) : AT(ADDR(.data.percpu) - LOAD_OFFSET + per_cpu_start) { + __per_cpu_start = . + per_cpu_start; + LONG(0) + *(.data.percpu) + __per_cpu_end = . + per_cpu_start; + } :percpu + . += per_cpu_start; + + /* read-only */ + + . = ALIGN(4096); /* Init code and data */ + .init.text (. - __KERNEL_TEXT_OFFSET) : AT(ADDR(.init.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { + _sinittext = .; + *(.init.text) + _einittext = .; + } :inittext + + /* .exit.text is discard at runtime, not link time, to deal with references + from .altinstructions and .eh_frame */ + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { *(.exit.text) } + + .filler : AT(ADDR(.filler) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { + BYTE(0) + . = ALIGN(4*1024*1024) - 1; + } + + /* freed after init ends here */ + + .text.head : AT(ADDR(.text.head) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { + __init_end = . + __KERNEL_TEXT_OFFSET; + _text = .; /* Text and read-only data */ *(.text.head) } :text = 0x9090 /* read-only */ - .text : AT(ADDR(.text) - LOAD_OFFSET) { + .text : AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { TEXT_TEXT SCHED_TEXT LOCK_TEXT @@ -53,12 +162,13 @@ SECTIONS _etext = .; /* End of text section */ } :text = 0x9090 + . += __KERNEL_TEXT_OFFSET; . = ALIGN(16); /* Exception table */ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { __start___ex_table = .; *(__ex_table) __stop___ex_table = .; - } + } :rodata BUG_TABLE @@ -71,9 +181,37 @@ SECTIONS RODATA + . = ALIGN(4096); + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) { + *(.empty_zero_page) + +#ifdef CONFIG_X86_PAE + *(.swapper_pm_dir) +#endif + + *(.swapper_pg_dir) + *(.idt) + } + +#ifdef CONFIG_PAX_KERNEXEC + . = ALIGN(4096); + + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) { + MODULES_VADDR = .; + BYTE(0) + . += (4 * 1024 * 1024); + . = ALIGN(1 << PMD_SHIFT) - 1; + MODULES_END = .; + } + +#else + . = ALIGN(32); +#endif + /* writeable */ . = ALIGN(4096); .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ + _data = .; DATA_DATA CONSTRUCTORS } :data @@ -86,11 +224,6 @@ SECTIONS __nosave_end = .; } - . 
= ALIGN(4096); - .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { - *(.data.idt) - } - . = ALIGN(32); .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) { *(.data.cacheline_aligned) @@ -108,85 +241,9 @@ SECTIONS *(.data.init_task) } - /* might get freed after init */ - . = ALIGN(4096); - .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { - __smp_locks = .; - *(.smp_locks) - __smp_locks_end = .; - } - /* will be freed after init - * Following ALIGN() is required to make sure no other data falls on the - * same page where __smp_alt_end is pointing as that page might be freed - * after boot. Always make sure that ALIGN() directive is present after - * the section which contains __smp_alt_end. - */ . = ALIGN(4096); - /* will be freed after init */ - . = ALIGN(4096); /* Init code and data */ - .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { - __init_begin = .; - _sinittext = .; - *(.init.text) - _einittext = .; - } - .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) } - . = ALIGN(16); - .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { - __setup_start = .; - *(.init.setup) - __setup_end = .; - } - .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { - __initcall_start = .; - INITCALLS - __initcall_end = .; - } - .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { - __con_initcall_start = .; - *(.con_initcall.init) - __con_initcall_end = .; - } - SECURITY_INIT - . = ALIGN(4); - .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { - __alt_instructions = .; - *(.altinstructions) - __alt_instructions_end = .; - } - .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { - *(.altinstr_replacement) - } - . = ALIGN(4); - .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { - __parainstructions = .; - *(.parainstructions) - __parainstructions_end = .; - } - /* .exit.text is discard at runtime, not link time, to deal with references - from .altinstructions and .eh_frame */ - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) } - .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) } -#if defined(CONFIG_BLK_DEV_INITRD) - . = ALIGN(4096); - .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { - __initramfs_start = .; - *(.init.ramfs) - __initramfs_end = .; - } -#endif - . = ALIGN(4096); - .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { - __per_cpu_start = .; - *(.data.percpu) - __per_cpu_end = .; - } - . = ALIGN(4096); - /* freed after init ends here */ - .bss : AT(ADDR(.bss) - LOAD_OFFSET) { - __init_end = .; __bss_start = .; /* BSS */ *(.bss.page_aligned) *(.bss) diff -urNp linux-2.6.22.1/arch/i386/lib/checksum.S linux-2.6.22.1/arch/i386/lib/checksum.S --- linux-2.6.22.1/arch/i386/lib/checksum.S 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/lib/checksum.S 2007-08-02 11:38:45.000000000 -0400 @@ -28,7 +28,8 @@ #include #include #include - +#include + /* * computes a partial checksum, e.g. 
for TCP/UDP fragments */ @@ -304,9 +305,22 @@ unsigned int csum_partial_copy_generic ( #define ARGBASE 16 #define FP 12 - -ENTRY(csum_partial_copy_generic) + +ENTRY(csum_partial_copy_generic_to_user) CFI_STARTPROC + pushl $(__USER_DS) + CFI_ADJUST_CFA_OFFSET 4 + popl %es + CFI_ADJUST_CFA_OFFSET -4 + jmp csum_partial_copy_generic + +ENTRY(csum_partial_copy_generic_from_user) + pushl $(__USER_DS) + CFI_ADJUST_CFA_OFFSET 4 + popl %ds + CFI_ADJUST_CFA_OFFSET -4 + +ENTRY(csum_partial_copy_generic) subl $4,%esp CFI_ADJUST_CFA_OFFSET 4 pushl %edi @@ -331,7 +345,7 @@ ENTRY(csum_partial_copy_generic) jmp 4f SRC(1: movw (%esi), %bx ) addl $2, %esi -DST( movw %bx, (%edi) ) +DST( movw %bx, %es:(%edi) ) addl $2, %edi addw %bx, %ax adcl $0, %eax @@ -343,30 +357,30 @@ DST( movw %bx, (%edi) ) SRC(1: movl (%esi), %ebx ) SRC( movl 4(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, (%edi) ) +DST( movl %ebx, %es:(%edi) ) adcl %edx, %eax -DST( movl %edx, 4(%edi) ) +DST( movl %edx, %es:4(%edi) ) SRC( movl 8(%esi), %ebx ) SRC( movl 12(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 8(%edi) ) +DST( movl %ebx, %es:8(%edi) ) adcl %edx, %eax -DST( movl %edx, 12(%edi) ) +DST( movl %edx, %es:12(%edi) ) SRC( movl 16(%esi), %ebx ) SRC( movl 20(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 16(%edi) ) +DST( movl %ebx, %es:16(%edi) ) adcl %edx, %eax -DST( movl %edx, 20(%edi) ) +DST( movl %edx, %es:20(%edi) ) SRC( movl 24(%esi), %ebx ) SRC( movl 28(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 24(%edi) ) +DST( movl %ebx, %es:24(%edi) ) adcl %edx, %eax -DST( movl %edx, 28(%edi) ) +DST( movl %edx, %es:28(%edi) ) lea 32(%esi), %esi lea 32(%edi), %edi @@ -380,7 +394,7 @@ DST( movl %edx, 28(%edi) ) shrl $2, %edx # This clears CF SRC(3: movl (%esi), %ebx ) adcl %ebx, %eax -DST( movl %ebx, (%edi) ) +DST( movl %ebx, %es:(%edi) ) lea 4(%esi), %esi lea 4(%edi), %edi dec %edx @@ -392,12 +406,12 @@ DST( movl %ebx, (%edi) ) jb 5f SRC( movw (%esi), %cx ) leal 2(%esi), %esi -DST( movw %cx, (%edi) ) +DST( movw %cx, %es:(%edi) ) leal 2(%edi), %edi je 6f shll $16,%ecx SRC(5: movb (%esi), %cl ) -DST( movb %cl, (%edi) ) +DST( movb %cl, %es:(%edi) ) 6: addl %ecx, %eax adcl $0, %eax 7: @@ -408,7 +422,7 @@ DST( movb %cl, (%edi) ) 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr - movl $-EFAULT, (%ebx) + movl $-EFAULT, %ss:(%ebx) # zero the complete destination - computing the rest # is too much work @@ -421,11 +435,19 @@ DST( movb %cl, (%edi) ) 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr - movl $-EFAULT,(%ebx) + movl $-EFAULT,%ss:(%ebx) jmp 5000b .previous + pushl %ss + CFI_ADJUST_CFA_OFFSET 4 + popl %ds + CFI_ADJUST_CFA_OFFSET -4 + pushl %ss + CFI_ADJUST_CFA_OFFSET 4 + popl %es + CFI_ADJUST_CFA_OFFSET -4 popl %ebx CFI_ADJUST_CFA_OFFSET -4 CFI_RESTORE ebx @@ -439,26 +461,41 @@ DST( movb %cl, (%edi) ) CFI_ADJUST_CFA_OFFSET -4 ret CFI_ENDPROC -ENDPROC(csum_partial_copy_generic) +ENDPROC(csum_partial_copy_generic_to_user) #else /* Version for PentiumII/PPro */ #define ROUND1(x) \ + nop; nop; nop; \ SRC(movl x(%esi), %ebx ) ; \ addl %ebx, %eax ; \ - DST(movl %ebx, x(%edi) ) ; + DST(movl %ebx, %es:x(%edi)) ; #define ROUND(x) \ + nop; nop; nop; \ SRC(movl x(%esi), %ebx ) ; \ adcl %ebx, %eax ; \ - DST(movl %ebx, x(%edi) ) ; + DST(movl %ebx, %es:x(%edi)) ; #define ARGBASE 12 - -ENTRY(csum_partial_copy_generic) + +ENTRY(csum_partial_copy_generic_to_user) CFI_STARTPROC + pushl $(__USER_DS) + CFI_ADJUST_CFA_OFFSET 4 + popl %es + CFI_ADJUST_CFA_OFFSET -4 + jmp csum_partial_copy_generic + +ENTRY(csum_partial_copy_generic_from_user) + pushl $(__USER_DS) + 
CFI_ADJUST_CFA_OFFSET 4 + popl %ds + CFI_ADJUST_CFA_OFFSET -4 + +ENTRY(csum_partial_copy_generic) pushl %ebx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ebx, 0 @@ -482,7 +519,7 @@ ENTRY(csum_partial_copy_generic) subl %ebx, %edi lea -1(%esi),%edx andl $-32,%edx - lea 3f(%ebx,%ebx), %ebx + lea 3f(%ebx,%ebx,2), %ebx testl %esi, %esi jmp *%ebx 1: addl $64,%esi @@ -503,19 +540,19 @@ ENTRY(csum_partial_copy_generic) jb 5f SRC( movw (%esi), %dx ) leal 2(%esi), %esi -DST( movw %dx, (%edi) ) +DST( movw %dx, %es:(%edi) ) leal 2(%edi), %edi je 6f shll $16,%edx 5: SRC( movb (%esi), %dl ) -DST( movb %dl, (%edi) ) +DST( movb %dl, %es:(%edi) ) 6: addl %edx, %eax adcl $0, %eax 7: .section .fixup, "ax" 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr - movl $-EFAULT, (%ebx) + movl $-EFAULT, %ss:(%ebx) # zero the complete destination (computing the rest is too much work) movl ARGBASE+8(%esp),%edi # dst movl ARGBASE+12(%esp),%ecx # len @@ -523,10 +560,18 @@ DST( movb %dl, (%edi) ) rep; stosb jmp 7b 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr - movl $-EFAULT, (%ebx) + movl $-EFAULT, %ss:(%ebx) jmp 7b .previous + pushl %ss + CFI_ADJUST_CFA_OFFSET 4 + popl %ds + CFI_ADJUST_CFA_OFFSET -4 + pushl %ss + CFI_ADJUST_CFA_OFFSET 4 + popl %es + CFI_ADJUST_CFA_OFFSET -4 popl %esi CFI_ADJUST_CFA_OFFSET -4 CFI_RESTORE esi @@ -538,7 +583,7 @@ DST( movb %dl, (%edi) ) CFI_RESTORE ebx ret CFI_ENDPROC -ENDPROC(csum_partial_copy_generic) +ENDPROC(csum_partial_copy_generic_to_user) #undef ROUND #undef ROUND1 diff -urNp linux-2.6.22.1/arch/i386/lib/getuser.S linux-2.6.22.1/arch/i386/lib/getuser.S --- linux-2.6.22.1/arch/i386/lib/getuser.S 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/lib/getuser.S 2007-08-02 11:38:45.000000000 -0400 @@ -11,7 +11,7 @@ #include #include #include - +#include /* * __get_user_X @@ -31,7 +31,11 @@ ENTRY(__get_user_1) GET_THREAD_INFO(%edx) cmpl TI_addr_limit(%edx),%eax jae bad_get_user + pushl $(__USER_DS) + popl %ds 1: movzbl (%eax),%edx + pushl %ss + pop %ds xorl %eax,%eax ret CFI_ENDPROC @@ -44,7 +48,11 @@ ENTRY(__get_user_2) GET_THREAD_INFO(%edx) cmpl TI_addr_limit(%edx),%eax jae bad_get_user + pushl $(__USER_DS) + popl %ds 2: movzwl -1(%eax),%edx + pushl %ss + pop %ds xorl %eax,%eax ret CFI_ENDPROC @@ -57,7 +65,11 @@ ENTRY(__get_user_4) GET_THREAD_INFO(%edx) cmpl TI_addr_limit(%edx),%eax jae bad_get_user + pushl $(__USER_DS) + popl %ds 3: movl -3(%eax),%edx + pushl %ss + pop %ds xorl %eax,%eax ret CFI_ENDPROC @@ -65,6 +77,8 @@ ENDPROC(__get_user_4) bad_get_user: CFI_STARTPROC + pushl %ss + pop %ds xorl %edx,%edx movl $-14,%eax ret diff -urNp linux-2.6.22.1/arch/i386/lib/mmx.c linux-2.6.22.1/arch/i386/lib/mmx.c --- linux-2.6.22.1/arch/i386/lib/mmx.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/lib/mmx.c 2007-08-02 11:38:45.000000000 -0400 @@ -30,6 +30,7 @@ void *_mmx_memcpy(void *to, const void * { void *p; int i; + unsigned long cr0; if (unlikely(in_interrupt())) return __memcpy(to, from, len); @@ -40,52 +41,80 @@ void *_mmx_memcpy(void *to, const void * kernel_fpu_begin(); __asm__ __volatile__ ( - "1: prefetch (%0)\n" /* This set is 28 bytes */ - " prefetch 64(%0)\n" - " prefetch 128(%0)\n" - " prefetch 192(%0)\n" - " prefetch 256(%0)\n" + "1: prefetch (%1)\n" /* This set is 28 bytes */ + " prefetch 64(%1)\n" + " prefetch 128(%1)\n" + " prefetch 192(%1)\n" + " prefetch 256(%1)\n" "2: \n" ".section .fixup, \"ax\"\n" - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + "3: \n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl 
$0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b, 3b\n" ".previous" - : : "r" (from) ); + : "=&r" (cr0) : "r" (from) : "ax"); for(; i>5; i--) { __asm__ __volatile__ ( - "1: prefetch 320(%0)\n" - "2: movq (%0), %%mm0\n" - " movq 8(%0), %%mm1\n" - " movq 16(%0), %%mm2\n" - " movq 24(%0), %%mm3\n" - " movq %%mm0, (%1)\n" - " movq %%mm1, 8(%1)\n" - " movq %%mm2, 16(%1)\n" - " movq %%mm3, 24(%1)\n" - " movq 32(%0), %%mm0\n" - " movq 40(%0), %%mm1\n" - " movq 48(%0), %%mm2\n" - " movq 56(%0), %%mm3\n" - " movq %%mm0, 32(%1)\n" - " movq %%mm1, 40(%1)\n" - " movq %%mm2, 48(%1)\n" - " movq %%mm3, 56(%1)\n" + "1: prefetch 320(%1)\n" + "2: movq (%1), %%mm0\n" + " movq 8(%1), %%mm1\n" + " movq 16(%1), %%mm2\n" + " movq 24(%1), %%mm3\n" + " movq %%mm0, (%2)\n" + " movq %%mm1, 8(%2)\n" + " movq %%mm2, 16(%2)\n" + " movq %%mm3, 24(%2)\n" + " movq 32(%1), %%mm0\n" + " movq 40(%1), %%mm1\n" + " movq 48(%1), %%mm2\n" + " movq 56(%1), %%mm3\n" + " movq %%mm0, 32(%2)\n" + " movq %%mm1, 40(%2)\n" + " movq %%mm2, 48(%2)\n" + " movq %%mm3, 56(%2)\n" ".section .fixup, \"ax\"\n" - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + "3:\n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b, 3b\n" ".previous" - : : "r" (from), "r" (to) : "memory"); + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); from+=64; to+=64; } @@ -164,6 +193,7 @@ static void fast_clear_page(void *page) static void fast_copy_page(void *to, void *from) { int i; + unsigned long cr0; kernel_fpu_begin(); @@ -171,51 +201,79 @@ static void fast_copy_page(void *to, voi * but that is for later. 
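
Every fixup added in these mmx.c hunks relies on the kernel's exception-table mechanism: a faulting instruction label is paired with a fixup label in __ex_table, and the page-fault handler rewrites the saved %eip to the fixup instead of oopsing. A self-contained sketch of that pattern (illustrative only; example_get_byte is an assumed name, not code from this patch):

static int example_get_byte(const unsigned char __user *p, unsigned char *val)
{
        int err;
        unsigned char c;

        __asm__ __volatile__(
                "1:     movb (%2),%b1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     movl %3,%0\n"           /* fault: report -EFAULT        */
                "       xorb %b1,%b1\n"         /* and hand back a zeroed value */
                "       jmp 2b\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .align 4\n"
                "       .long 1b,3b\n"          /* if 1: faults, resume at 3:   */
                ".previous"
                : "=r" (err), "=q" (c)
                : "r" (p), "i" (-EFAULT), "0" (0));
        *val = c;
        return err;
}

fixup_exception() in arch/i386/mm/extable.c (also touched further down in this patch) is the consumer of these table entries.
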
-AV */ __asm__ __volatile__ ( - "1: prefetch (%0)\n" - " prefetch 64(%0)\n" - " prefetch 128(%0)\n" - " prefetch 192(%0)\n" - " prefetch 256(%0)\n" + "1: prefetch (%1)\n" + " prefetch 64(%1)\n" + " prefetch 128(%1)\n" + " prefetch 192(%1)\n" + " prefetch 256(%1)\n" "2: \n" ".section .fixup, \"ax\"\n" - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + "3: \n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b, 3b\n" ".previous" - : : "r" (from) ); + : "=&r" (cr0) : "r" (from) : "ax"); for(i=0; i<(4096-320)/64; i++) { __asm__ __volatile__ ( - "1: prefetch 320(%0)\n" - "2: movq (%0), %%mm0\n" - " movntq %%mm0, (%1)\n" - " movq 8(%0), %%mm1\n" - " movntq %%mm1, 8(%1)\n" - " movq 16(%0), %%mm2\n" - " movntq %%mm2, 16(%1)\n" - " movq 24(%0), %%mm3\n" - " movntq %%mm3, 24(%1)\n" - " movq 32(%0), %%mm4\n" - " movntq %%mm4, 32(%1)\n" - " movq 40(%0), %%mm5\n" - " movntq %%mm5, 40(%1)\n" - " movq 48(%0), %%mm6\n" - " movntq %%mm6, 48(%1)\n" - " movq 56(%0), %%mm7\n" - " movntq %%mm7, 56(%1)\n" + "1: prefetch 320(%1)\n" + "2: movq (%1), %%mm0\n" + " movntq %%mm0, (%2)\n" + " movq 8(%1), %%mm1\n" + " movntq %%mm1, 8(%2)\n" + " movq 16(%1), %%mm2\n" + " movntq %%mm2, 16(%2)\n" + " movq 24(%1), %%mm3\n" + " movntq %%mm3, 24(%2)\n" + " movq 32(%1), %%mm4\n" + " movntq %%mm4, 32(%2)\n" + " movq 40(%1), %%mm5\n" + " movntq %%mm5, 40(%2)\n" + " movq 48(%1), %%mm6\n" + " movntq %%mm6, 48(%2)\n" + " movq 56(%1), %%mm7\n" + " movntq %%mm7, 56(%2)\n" ".section .fixup, \"ax\"\n" - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + "3:\n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b, 3b\n" ".previous" - : : "r" (from), "r" (to) : "memory"); + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); from+=64; to+=64; } @@ -296,56 +354,84 @@ static void fast_clear_page(void *page) static void fast_copy_page(void *to, void *from) { int i; - - + unsigned long cr0; + kernel_fpu_begin(); __asm__ __volatile__ ( - "1: prefetch (%0)\n" - " prefetch 64(%0)\n" - " prefetch 128(%0)\n" - " prefetch 192(%0)\n" - " prefetch 256(%0)\n" + "1: prefetch (%1)\n" + " prefetch 64(%1)\n" + " prefetch 128(%1)\n" + " prefetch 192(%1)\n" + " prefetch 256(%1)\n" "2: \n" ".section .fixup, \"ax\"\n" - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + "3: \n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b, 3b\n" ".previous" - : : "r" (from) ); + : "=&r" (cr0) : "r" (from) : "ax"); for(i=0; i<4096/64; i++) { __asm__ __volatile__ ( - "1: prefetch 320(%0)\n" - "2: movq (%0), %%mm0\n" - " movq 8(%0), %%mm1\n" - " movq 16(%0), %%mm2\n" - " movq 24(%0), %%mm3\n" - " movq %%mm0, (%1)\n" - " movq %%mm1, 8(%1)\n" - " movq %%mm2, 16(%1)\n" - " movq %%mm3, 24(%1)\n" - " movq 32(%0), %%mm0\n" - " movq 40(%0), %%mm1\n" - " movq 48(%0), 
%%mm2\n" - " movq 56(%0), %%mm3\n" - " movq %%mm0, 32(%1)\n" - " movq %%mm1, 40(%1)\n" - " movq %%mm2, 48(%1)\n" - " movq %%mm3, 56(%1)\n" + "1: prefetch 320(%1)\n" + "2: movq (%1), %%mm0\n" + " movq 8(%1), %%mm1\n" + " movq 16(%1), %%mm2\n" + " movq 24(%1), %%mm3\n" + " movq %%mm0, (%2)\n" + " movq %%mm1, 8(%2)\n" + " movq %%mm2, 16(%2)\n" + " movq %%mm3, 24(%2)\n" + " movq 32(%1), %%mm0\n" + " movq 40(%1), %%mm1\n" + " movq 48(%1), %%mm2\n" + " movq 56(%1), %%mm3\n" + " movq %%mm0, 32(%2)\n" + " movq %%mm1, 40(%2)\n" + " movq %%mm2, 48(%2)\n" + " movq %%mm3, 56(%2)\n" ".section .fixup, \"ax\"\n" - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + "3:\n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b, 3b\n" ".previous" - : : "r" (from), "r" (to) : "memory"); + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); from+=64; to+=64; } diff -urNp linux-2.6.22.1/arch/i386/lib/putuser.S linux-2.6.22.1/arch/i386/lib/putuser.S --- linux-2.6.22.1/arch/i386/lib/putuser.S 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/lib/putuser.S 2007-08-02 11:38:45.000000000 -0400 @@ -11,7 +11,7 @@ #include #include #include - +#include /* * __put_user_X @@ -41,7 +41,11 @@ ENTRY(__put_user_1) ENTER cmpl TI_addr_limit(%ebx),%ecx jae bad_put_user + pushl $(__USER_DS) + popl %ds 1: movb %al,(%ecx) + pushl %ss + popl %ds xorl %eax,%eax EXIT ENDPROC(__put_user_1) @@ -52,7 +56,11 @@ ENTRY(__put_user_2) subl $1,%ebx cmpl %ebx,%ecx jae bad_put_user + pushl $(__USER_DS) + popl %ds 2: movw %ax,(%ecx) + pushl %ss + popl %ds xorl %eax,%eax EXIT ENDPROC(__put_user_2) @@ -63,7 +71,11 @@ ENTRY(__put_user_4) subl $3,%ebx cmpl %ebx,%ecx jae bad_put_user + pushl $(__USER_DS) + popl %ds 3: movl %eax,(%ecx) + pushl %ss + popl %ds xorl %eax,%eax EXIT ENDPROC(__put_user_4) @@ -74,8 +86,12 @@ ENTRY(__put_user_8) subl $7,%ebx cmpl %ebx,%ecx jae bad_put_user + pushl $(__USER_DS) + popl %ds 4: movl %eax,(%ecx) 5: movl %edx,4(%ecx) + pushl %ss + popl %ds xorl %eax,%eax EXIT ENDPROC(__put_user_8) @@ -85,6 +101,10 @@ bad_put_user: CFI_DEF_CFA esp, 2*4 CFI_OFFSET eip, -1*4 CFI_OFFSET ebx, -2*4 + pushl %ss + CFI_ADJUST_CFA_OFFSET 4 + popl %ds + CFI_ADJUST_CFA_OFFSET -4 movl $-14,%eax EXIT END(bad_put_user) diff -urNp linux-2.6.22.1/arch/i386/lib/usercopy.c linux-2.6.22.1/arch/i386/lib/usercopy.c --- linux-2.6.22.1/arch/i386/lib/usercopy.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/lib/usercopy.c 2007-08-02 11:38:45.000000000 -0400 @@ -29,34 +29,41 @@ static inline int __movsl_is_ok(unsigned * Copy a null terminated string from userspace. 
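
The checksum.S, getuser.S and putuser.S changes above all repeat one idiom: load __USER_DS into %ds or %es before the userland access, perform the access through that segment, then restore the kernel's flat segment by copying %ss back. Under PAX_MEMORY_UDEREF the user data segment is shrunk so that kernel addresses simply do not fit inside it. A C-level sketch of the same idiom for a single byte store (illustrative; example_put_user_byte is an assumed name, not the patch's code):

static inline int example_put_user_byte(unsigned char val, unsigned char __user *dst)
{
        int err;

        __asm__ __volatile__(
                "       movw %w3,%%es\n"        /* switch %es to the user data segment */
                "1:     movb %b2,%%es:(%1)\n"   /* the store goes through that segment */
                "2:     pushl %%ss\n"           /* kernel %ss is always a flat segment */
                "       popl %%es\n"            /* so restore %es from it              */
                ".section .fixup,\"ax\"\n"
                "3:     movl %4,%0\n"
                "       jmp 2b\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .align 4\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r" (err)
                : "r" (dst), "q" (val), "r" (__USER_DS), "i" (-EFAULT), "0" (0)
                : "memory");
        return err;
}

Restoring from %ss rather than loading a constant works because in kernel mode %ss still holds a flat data selector even when the user segment has been shrunk.
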
*/ -#define __do_strncpy_from_user(dst,src,count,res) \ -do { \ - int __d0, __d1, __d2; \ - might_sleep(); \ - __asm__ __volatile__( \ - " testl %1,%1\n" \ - " jz 2f\n" \ - "0: lodsb\n" \ - " stosb\n" \ - " testb %%al,%%al\n" \ - " jz 1f\n" \ - " decl %1\n" \ - " jnz 0b\n" \ - "1: subl %1,%0\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl %5,%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 0b,3b\n" \ - ".previous" \ - : "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \ - "=&D" (__d2) \ - : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ - : "memory"); \ -} while (0) +static long __do_strncpy_from_user(char *dst, const char __user *src, long count) +{ + int __d0, __d1, __d2; + long res = -EFAULT; + + might_sleep(); + __asm__ __volatile__( + " movw %w10,%%ds\n" + " testl %1,%1\n" + " jz 2f\n" + "0: lodsb\n" + " stosb\n" + " testb %%al,%%al\n" + " jz 1f\n" + " decl %1\n" + " jnz 0b\n" + "1: subl %1,%0\n" + "2:\n" + " pushl %%ss\n" + " popl %%ds\n" + ".section .fixup,\"ax\"\n" + "3: movl %5,%0\n" + " jmp 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 0b,3b\n" + ".previous" + : "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), + "=&D" (__d2) + : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst), + "r"(__USER_DS) + : "memory"); + return res; +} /** * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. @@ -81,9 +88,7 @@ do { \ long __strncpy_from_user(char *dst, const char __user *src, long count) { - long res; - __do_strncpy_from_user(dst, src, count, res); - return res; + return __do_strncpy_from_user(dst, src, count); } EXPORT_SYMBOL(__strncpy_from_user); @@ -110,7 +115,7 @@ strncpy_from_user(char *dst, const char { long res = -EFAULT; if (access_ok(VERIFY_READ, src, 1)) - __do_strncpy_from_user(dst, src, count, res); + res = __do_strncpy_from_user(dst, src, count); return res; } EXPORT_SYMBOL(strncpy_from_user); @@ -119,27 +124,33 @@ EXPORT_SYMBOL(strncpy_from_user); * Zero Userspace */ -#define __do_clear_user(addr,size) \ -do { \ - int __d0; \ - might_sleep(); \ - __asm__ __volatile__( \ - "0: rep; stosl\n" \ - " movl %2,%0\n" \ - "1: rep; stosb\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: lea 0(%2,%0,4),%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 0b,3b\n" \ - " .long 1b,2b\n" \ - ".previous" \ - : "=&c"(size), "=&D" (__d0) \ - : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \ -} while (0) +static unsigned long __do_clear_user(void __user *addr, unsigned long size) +{ + int __d0; + + might_sleep(); + __asm__ __volatile__( + " movw %w6,%%es\n" + "0: rep; stosl\n" + " movl %2,%0\n" + "1: rep; stosb\n" + "2:\n" + " pushl %%ss\n" + " popl %%es\n" + ".section .fixup,\"ax\"\n" + "3: lea 0(%2,%0,4),%0\n" + " jmp 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 0b,3b\n" + " .long 1b,2b\n" + ".previous" + : "=&c"(size), "=&D" (__d0) + : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0), + "r"(__USER_DS)); + return size; +} /** * clear_user: - Zero a block of memory in user space. 
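
__do_clear_user() is now a real function that returns the number of bytes it could not zero, instead of a macro mutating its size argument, so the clear_user()/__clear_user() wrappers just below capture that return value. The external calling convention is unchanged; a small illustrative caller (not part of the patch):

static int example_zero_user_buffer(void __user *ubuf, unsigned long len)
{
        unsigned long left = clear_user(ubuf, len);     /* bytes left unzeroed */

        return left ? -EFAULT : 0;
}
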
@@ -156,7 +167,7 @@ clear_user(void __user *to, unsigned lon { might_sleep(); if (access_ok(VERIFY_WRITE, to, n)) - __do_clear_user(to, n); + n = __do_clear_user(to, n); return n; } EXPORT_SYMBOL(clear_user); @@ -175,8 +186,7 @@ EXPORT_SYMBOL(clear_user); unsigned long __clear_user(void __user *to, unsigned long n) { - __do_clear_user(to, n); - return n; + return __do_clear_user(to, n); } EXPORT_SYMBOL(__clear_user); @@ -199,14 +209,17 @@ long strnlen_user(const char __user *s, might_sleep(); __asm__ __volatile__( + " movw %w8,%%es\n" " testl %0, %0\n" " jz 3f\n" - " andl %0,%%ecx\n" + " movl %0,%%ecx\n" "0: repne; scasb\n" " setne %%al\n" " subl %%ecx,%0\n" " addl %0,%%eax\n" "1:\n" + " pushl %%ss\n" + " popl %%es\n" ".section .fixup,\"ax\"\n" "2: xorl %%eax,%%eax\n" " jmp 1b\n" @@ -218,7 +231,7 @@ long strnlen_user(const char __user *s, " .long 0b,2b\n" ".previous" :"=r" (n), "=D" (s), "=a" (res), "=c" (tmp) - :"0" (n), "1" (s), "2" (0), "3" (mask) + :"0" (n), "1" (s), "2" (0), "3" (mask), "r" (__USER_DS) :"cc"); return res & mask; } @@ -226,10 +239,121 @@ EXPORT_SYMBOL(strnlen_user); #ifdef CONFIG_X86_INTEL_USERCOPY static unsigned long -__copy_user_intel(void __user *to, const void *from, unsigned long size) +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size) +{ + int d0, d1; + __asm__ __volatile__( + " movw %w6, %%es\n" + " .align 2,0x90\n" + "1: movl 32(%4), %%eax\n" + " cmpl $67, %0\n" + " jbe 3f\n" + "2: movl 64(%4), %%eax\n" + " .align 2,0x90\n" + "3: movl 0(%4), %%eax\n" + "4: movl 4(%4), %%edx\n" + "5: movl %%eax, %%es:0(%3)\n" + "6: movl %%edx, %%es:4(%3)\n" + "7: movl 8(%4), %%eax\n" + "8: movl 12(%4),%%edx\n" + "9: movl %%eax, %%es:8(%3)\n" + "10: movl %%edx, %%es:12(%3)\n" + "11: movl 16(%4), %%eax\n" + "12: movl 20(%4), %%edx\n" + "13: movl %%eax, %%es:16(%3)\n" + "14: movl %%edx, %%es:20(%3)\n" + "15: movl 24(%4), %%eax\n" + "16: movl 28(%4), %%edx\n" + "17: movl %%eax, %%es:24(%3)\n" + "18: movl %%edx, %%es:28(%3)\n" + "19: movl 32(%4), %%eax\n" + "20: movl 36(%4), %%edx\n" + "21: movl %%eax, %%es:32(%3)\n" + "22: movl %%edx, %%es:36(%3)\n" + "23: movl 40(%4), %%eax\n" + "24: movl 44(%4), %%edx\n" + "25: movl %%eax, %%es:40(%3)\n" + "26: movl %%edx, %%es:44(%3)\n" + "27: movl 48(%4), %%eax\n" + "28: movl 52(%4), %%edx\n" + "29: movl %%eax, %%es:48(%3)\n" + "30: movl %%edx, %%es:52(%3)\n" + "31: movl 56(%4), %%eax\n" + "32: movl 60(%4), %%edx\n" + "33: movl %%eax, %%es:56(%3)\n" + "34: movl %%edx, %%es:60(%3)\n" + " addl $-64, %0\n" + " addl $64, %4\n" + " addl $64, %3\n" + " cmpl $63, %0\n" + " ja 1b\n" + "35: movl %0, %%eax\n" + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" + "99: rep; movsl\n" + "36: movl %%eax, %0\n" + "37: rep; movsb\n" + "100:\n" + " pushl %%ss\n" + " popl %%es\n" + ".section .fixup,\"ax\"\n" + "101: lea 0(%%eax,%0,4),%0\n" + " jmp 100b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 1b,100b\n" + " .long 2b,100b\n" + " .long 3b,100b\n" + " .long 4b,100b\n" + " .long 5b,100b\n" + " .long 6b,100b\n" + " .long 7b,100b\n" + " .long 8b,100b\n" + " .long 9b,100b\n" + " .long 10b,100b\n" + " .long 11b,100b\n" + " .long 12b,100b\n" + " .long 13b,100b\n" + " .long 14b,100b\n" + " .long 15b,100b\n" + " .long 16b,100b\n" + " .long 17b,100b\n" + " .long 18b,100b\n" + " .long 19b,100b\n" + " .long 20b,100b\n" + " .long 21b,100b\n" + " .long 22b,100b\n" + " .long 23b,100b\n" + " .long 24b,100b\n" + " .long 25b,100b\n" + " .long 26b,100b\n" + " .long 27b,100b\n" + " .long 28b,100b\n" + " .long 
29b,100b\n" + " .long 30b,100b\n" + " .long 31b,100b\n" + " .long 32b,100b\n" + " .long 33b,100b\n" + " .long 34b,100b\n" + " .long 35b,100b\n" + " .long 36b,100b\n" + " .long 37b,100b\n" + " .long 99b,101b\n" + ".previous" + : "=&c"(size), "=&D" (d0), "=&S" (d1) + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) + : "eax", "edx", "memory"); + return size; +} + +static unsigned long +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size) { int d0, d1; __asm__ __volatile__( + " movw %w6, %%ds\n" " .align 2,0x90\n" "1: movl 32(%4), %%eax\n" " cmpl $67, %0\n" @@ -238,36 +362,36 @@ __copy_user_intel(void __user *to, const " .align 2,0x90\n" "3: movl 0(%4), %%eax\n" "4: movl 4(%4), %%edx\n" - "5: movl %%eax, 0(%3)\n" - "6: movl %%edx, 4(%3)\n" + "5: movl %%eax, %%es:0(%3)\n" + "6: movl %%edx, %%es:4(%3)\n" "7: movl 8(%4), %%eax\n" "8: movl 12(%4),%%edx\n" - "9: movl %%eax, 8(%3)\n" - "10: movl %%edx, 12(%3)\n" + "9: movl %%eax, %%es:8(%3)\n" + "10: movl %%edx, %%es:12(%3)\n" "11: movl 16(%4), %%eax\n" "12: movl 20(%4), %%edx\n" - "13: movl %%eax, 16(%3)\n" - "14: movl %%edx, 20(%3)\n" + "13: movl %%eax, %%es:16(%3)\n" + "14: movl %%edx, %%es:20(%3)\n" "15: movl 24(%4), %%eax\n" "16: movl 28(%4), %%edx\n" - "17: movl %%eax, 24(%3)\n" - "18: movl %%edx, 28(%3)\n" + "17: movl %%eax, %%es:24(%3)\n" + "18: movl %%edx, %%es:28(%3)\n" "19: movl 32(%4), %%eax\n" "20: movl 36(%4), %%edx\n" - "21: movl %%eax, 32(%3)\n" - "22: movl %%edx, 36(%3)\n" + "21: movl %%eax, %%es:32(%3)\n" + "22: movl %%edx, %%es:36(%3)\n" "23: movl 40(%4), %%eax\n" "24: movl 44(%4), %%edx\n" - "25: movl %%eax, 40(%3)\n" - "26: movl %%edx, 44(%3)\n" + "25: movl %%eax, %%es:40(%3)\n" + "26: movl %%edx, %%es:44(%3)\n" "27: movl 48(%4), %%eax\n" "28: movl 52(%4), %%edx\n" - "29: movl %%eax, 48(%3)\n" - "30: movl %%edx, 52(%3)\n" + "29: movl %%eax, %%es:48(%3)\n" + "30: movl %%edx, %%es:52(%3)\n" "31: movl 56(%4), %%eax\n" "32: movl 60(%4), %%edx\n" - "33: movl %%eax, 56(%3)\n" - "34: movl %%edx, 60(%3)\n" + "33: movl %%eax, %%es:56(%3)\n" + "34: movl %%edx, %%es:60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" @@ -281,6 +405,8 @@ __copy_user_intel(void __user *to, const "36: movl %%eax, %0\n" "37: rep; movsb\n" "100:\n" + " pushl %%ss\n" + " popl %%ds\n" ".section .fixup,\"ax\"\n" "101: lea 0(%%eax,%0,4),%0\n" " jmp 100b\n" @@ -327,7 +453,7 @@ __copy_user_intel(void __user *to, const " .long 99b,101b\n" ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) - : "1"(to), "2"(from), "0"(size) + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) : "eax", "edx", "memory"); return size; } @@ -337,6 +463,7 @@ __copy_user_zeroing_intel(void *to, cons { int d0, d1; __asm__ __volatile__( + " movw %w6, %%ds\n" " .align 2,0x90\n" "0: movl 32(%4), %%eax\n" " cmpl $67, %0\n" @@ -345,36 +472,36 @@ __copy_user_zeroing_intel(void *to, cons " .align 2,0x90\n" "2: movl 0(%4), %%eax\n" "21: movl 4(%4), %%edx\n" - " movl %%eax, 0(%3)\n" - " movl %%edx, 4(%3)\n" + " movl %%eax, %%es:0(%3)\n" + " movl %%edx, %%es:4(%3)\n" "3: movl 8(%4), %%eax\n" "31: movl 12(%4),%%edx\n" - " movl %%eax, 8(%3)\n" - " movl %%edx, 12(%3)\n" + " movl %%eax, %%es:8(%3)\n" + " movl %%edx, %%es:12(%3)\n" "4: movl 16(%4), %%eax\n" "41: movl 20(%4), %%edx\n" - " movl %%eax, 16(%3)\n" - " movl %%edx, 20(%3)\n" + " movl %%eax, %%es:16(%3)\n" + " movl %%edx, %%es:20(%3)\n" "10: movl 24(%4), %%eax\n" "51: movl 28(%4), %%edx\n" - " movl %%eax, 24(%3)\n" - " movl %%edx, 28(%3)\n" + " movl %%eax, %%es:24(%3)\n" + " movl %%edx, %%es:28(%3)\n" "11: 
movl 32(%4), %%eax\n" "61: movl 36(%4), %%edx\n" - " movl %%eax, 32(%3)\n" - " movl %%edx, 36(%3)\n" + " movl %%eax, %%es:32(%3)\n" + " movl %%edx, %%es:36(%3)\n" "12: movl 40(%4), %%eax\n" "71: movl 44(%4), %%edx\n" - " movl %%eax, 40(%3)\n" - " movl %%edx, 44(%3)\n" + " movl %%eax, %%es:40(%3)\n" + " movl %%edx, %%es:44(%3)\n" "13: movl 48(%4), %%eax\n" "81: movl 52(%4), %%edx\n" - " movl %%eax, 48(%3)\n" - " movl %%edx, 52(%3)\n" + " movl %%eax, %%es:48(%3)\n" + " movl %%edx, %%es:52(%3)\n" "14: movl 56(%4), %%eax\n" "91: movl 60(%4), %%edx\n" - " movl %%eax, 56(%3)\n" - " movl %%edx, 60(%3)\n" + " movl %%eax, %%es:56(%3)\n" + " movl %%edx, %%es:60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" @@ -388,6 +515,8 @@ __copy_user_zeroing_intel(void *to, cons " movl %%eax,%0\n" "7: rep; movsb\n" "8:\n" + " pushl %%ss\n" + " popl %%ds\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" "16: pushl %0\n" @@ -422,7 +551,7 @@ __copy_user_zeroing_intel(void *to, cons " .long 7b,16b\n" ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) - : "1"(to), "2"(from), "0"(size) + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) : "eax", "edx", "memory"); return size; } @@ -438,6 +567,7 @@ static unsigned long __copy_user_zeroing int d0, d1; __asm__ __volatile__( + " movw %w6, %%ds\n" " .align 2,0x90\n" "0: movl 32(%4), %%eax\n" " cmpl $67, %0\n" @@ -446,36 +576,36 @@ static unsigned long __copy_user_zeroing " .align 2,0x90\n" "2: movl 0(%4), %%eax\n" "21: movl 4(%4), %%edx\n" - " movnti %%eax, 0(%3)\n" - " movnti %%edx, 4(%3)\n" + " movnti %%eax, %%es:0(%3)\n" + " movnti %%edx, %%es:4(%3)\n" "3: movl 8(%4), %%eax\n" "31: movl 12(%4),%%edx\n" - " movnti %%eax, 8(%3)\n" - " movnti %%edx, 12(%3)\n" + " movnti %%eax, %%es:8(%3)\n" + " movnti %%edx, %%es:12(%3)\n" "4: movl 16(%4), %%eax\n" "41: movl 20(%4), %%edx\n" - " movnti %%eax, 16(%3)\n" - " movnti %%edx, 20(%3)\n" + " movnti %%eax, %%es:16(%3)\n" + " movnti %%edx, %%es:20(%3)\n" "10: movl 24(%4), %%eax\n" "51: movl 28(%4), %%edx\n" - " movnti %%eax, 24(%3)\n" - " movnti %%edx, 28(%3)\n" + " movnti %%eax, %%es:24(%3)\n" + " movnti %%edx, %%es:28(%3)\n" "11: movl 32(%4), %%eax\n" "61: movl 36(%4), %%edx\n" - " movnti %%eax, 32(%3)\n" - " movnti %%edx, 36(%3)\n" + " movnti %%eax, %%es:32(%3)\n" + " movnti %%edx, %%es:36(%3)\n" "12: movl 40(%4), %%eax\n" "71: movl 44(%4), %%edx\n" - " movnti %%eax, 40(%3)\n" - " movnti %%edx, 44(%3)\n" + " movnti %%eax, %%es:40(%3)\n" + " movnti %%edx, %%es:44(%3)\n" "13: movl 48(%4), %%eax\n" "81: movl 52(%4), %%edx\n" - " movnti %%eax, 48(%3)\n" - " movnti %%edx, 52(%3)\n" + " movnti %%eax, %%es:48(%3)\n" + " movnti %%edx, %%es:52(%3)\n" "14: movl 56(%4), %%eax\n" "91: movl 60(%4), %%edx\n" - " movnti %%eax, 56(%3)\n" - " movnti %%edx, 60(%3)\n" + " movnti %%eax, %%es:56(%3)\n" + " movnti %%edx, %%es:60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" @@ -490,6 +620,8 @@ static unsigned long __copy_user_zeroing " movl %%eax,%0\n" "7: rep; movsb\n" "8:\n" + " pushl %%ss\n" + " popl %%ds\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" "16: pushl %0\n" @@ -524,7 +656,7 @@ static unsigned long __copy_user_zeroing " .long 7b,16b\n" ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) - : "1"(to), "2"(from), "0"(size) + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) : "eax", "edx", "memory"); return size; } @@ -535,6 +667,7 @@ static unsigned long __copy_user_intel_n int d0, d1; __asm__ __volatile__( + " movw %w6, %%ds\n" " .align 2,0x90\n" "0: movl 32(%4), %%eax\n" " cmpl $67, %0\n" @@ 
-543,36 +676,36 @@ static unsigned long __copy_user_intel_n " .align 2,0x90\n" "2: movl 0(%4), %%eax\n" "21: movl 4(%4), %%edx\n" - " movnti %%eax, 0(%3)\n" - " movnti %%edx, 4(%3)\n" + " movnti %%eax, %%es:0(%3)\n" + " movnti %%edx, %%es:4(%3)\n" "3: movl 8(%4), %%eax\n" "31: movl 12(%4),%%edx\n" - " movnti %%eax, 8(%3)\n" - " movnti %%edx, 12(%3)\n" + " movnti %%eax, %%es:8(%3)\n" + " movnti %%edx, %%es:12(%3)\n" "4: movl 16(%4), %%eax\n" "41: movl 20(%4), %%edx\n" - " movnti %%eax, 16(%3)\n" - " movnti %%edx, 20(%3)\n" + " movnti %%eax, %%es:16(%3)\n" + " movnti %%edx, %%es:20(%3)\n" "10: movl 24(%4), %%eax\n" "51: movl 28(%4), %%edx\n" - " movnti %%eax, 24(%3)\n" - " movnti %%edx, 28(%3)\n" + " movnti %%eax, %%es:24(%3)\n" + " movnti %%edx, %%es:28(%3)\n" "11: movl 32(%4), %%eax\n" "61: movl 36(%4), %%edx\n" - " movnti %%eax, 32(%3)\n" - " movnti %%edx, 36(%3)\n" + " movnti %%eax, %%es:32(%3)\n" + " movnti %%edx, %%es:36(%3)\n" "12: movl 40(%4), %%eax\n" "71: movl 44(%4), %%edx\n" - " movnti %%eax, 40(%3)\n" - " movnti %%edx, 44(%3)\n" + " movnti %%eax, %%es:40(%3)\n" + " movnti %%edx, %%es:44(%3)\n" "13: movl 48(%4), %%eax\n" "81: movl 52(%4), %%edx\n" - " movnti %%eax, 48(%3)\n" - " movnti %%edx, 52(%3)\n" + " movnti %%eax, %%es:48(%3)\n" + " movnti %%edx, %%es:52(%3)\n" "14: movl 56(%4), %%eax\n" "91: movl 60(%4), %%edx\n" - " movnti %%eax, 56(%3)\n" - " movnti %%edx, 60(%3)\n" + " movnti %%eax, %%es:56(%3)\n" + " movnti %%edx, %%es:60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" @@ -587,6 +720,8 @@ static unsigned long __copy_user_intel_n " movl %%eax,%0\n" "7: rep; movsb\n" "8:\n" + " pushl %%ss\n" + " popl %%ds\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" "16: jmp 8b\n" @@ -615,7 +750,7 @@ static unsigned long __copy_user_intel_n " .long 7b,16b\n" ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) - : "1"(to), "2"(from), "0"(size) + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) : "eax", "edx", "memory"); return size; } @@ -628,90 +763,146 @@ static unsigned long __copy_user_intel_n */ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size); -unsigned long __copy_user_intel(void __user *to, const void *from, +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from, + unsigned long size); +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size); unsigned long __copy_user_zeroing_intel_nocache(void *to, const void __user *from, unsigned long size); #endif /* CONFIG_X86_INTEL_USERCOPY */ /* Generic arbitrary sized copy. 
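
When the copiers in this file fault mid-stream they must report how many bytes were left uncopied. During the rep; movsl phase %ecx counts longs, so the fixups rescale it, e.g. "lea 0(%%eax,%0,4),%0" in the Intel variants above or "lea 0(%3,%0,4),%0" in the generic copiers below: remaining_bytes = tail_bytes + 4 * remaining_longs. The same arithmetic in plain C, for reference (illustrative helper, not in the patch):

/* What the lea in the .fixup paths computes: one operand holds the longs
 * still to move, the other the size & 3 byte remainder that never started. */
static unsigned long example_fixup_residue(unsigned long remaining_longs,
                                           unsigned long tail_bytes)
{
        return tail_bytes + 4 * remaining_longs;
}
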
*/ -#define __copy_user(to,from,size) \ -do { \ - int __d0, __d1, __d2; \ - __asm__ __volatile__( \ - " cmp $7,%0\n" \ - " jbe 1f\n" \ - " movl %1,%0\n" \ - " negl %0\n" \ - " andl $7,%0\n" \ - " subl %0,%3\n" \ - "4: rep; movsb\n" \ - " movl %3,%0\n" \ - " shrl $2,%0\n" \ - " andl $3,%3\n" \ - " .align 2,0x90\n" \ - "0: rep; movsl\n" \ - " movl %3,%0\n" \ - "1: rep; movsb\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "5: addl %3,%0\n" \ - " jmp 2b\n" \ - "3: lea 0(%3,%0,4),%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 4b,5b\n" \ - " .long 0b,3b\n" \ - " .long 1b,2b\n" \ - ".previous" \ - : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ - : "3"(size), "0"(size), "1"(to), "2"(from) \ - : "memory"); \ -} while (0) - -#define __copy_user_zeroing(to,from,size) \ -do { \ - int __d0, __d1, __d2; \ - __asm__ __volatile__( \ - " cmp $7,%0\n" \ - " jbe 1f\n" \ - " movl %1,%0\n" \ - " negl %0\n" \ - " andl $7,%0\n" \ - " subl %0,%3\n" \ - "4: rep; movsb\n" \ - " movl %3,%0\n" \ - " shrl $2,%0\n" \ - " andl $3,%3\n" \ - " .align 2,0x90\n" \ - "0: rep; movsl\n" \ - " movl %3,%0\n" \ - "1: rep; movsb\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "5: addl %3,%0\n" \ - " jmp 6f\n" \ - "3: lea 0(%3,%0,4),%0\n" \ - "6: pushl %0\n" \ - " pushl %%eax\n" \ - " xorl %%eax,%%eax\n" \ - " rep; stosb\n" \ - " popl %%eax\n" \ - " popl %0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 4b,5b\n" \ - " .long 0b,3b\n" \ - " .long 1b,6b\n" \ - ".previous" \ - : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ - : "3"(size), "0"(size), "1"(to), "2"(from) \ - : "memory"); \ -} while (0) +static unsigned long +__generic_copy_to_user(void __user *to, const void *from, unsigned long size) +{ + int __d0, __d1, __d2; + + __asm__ __volatile__( + " movw %w8,%%es\n" + " cmp $7,%0\n" + " jbe 1f\n" + " movl %1,%0\n" + " negl %0\n" + " andl $7,%0\n" + " subl %0,%3\n" + "4: rep; movsb\n" + " movl %3,%0\n" + " shrl $2,%0\n" + " andl $3,%3\n" + " .align 2,0x90\n" + "0: rep; movsl\n" + " movl %3,%0\n" + "1: rep; movsb\n" + "2:\n" + " pushl %%ss\n" + " popl %%es\n" + ".section .fixup,\"ax\"\n" + "5: addl %3,%0\n" + " jmp 2b\n" + "3: lea 0(%3,%0,4),%0\n" + " jmp 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 4b,5b\n" + " .long 0b,3b\n" + " .long 1b,2b\n" + ".previous" + : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) + : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS) + : "memory"); + return size; +} + +static unsigned long +__generic_copy_from_user(void *to, const void __user *from, unsigned long size) +{ + int __d0, __d1, __d2; + + __asm__ __volatile__( + " movw %w8,%%ds\n" + " cmp $7,%0\n" + " jbe 1f\n" + " movl %1,%0\n" + " negl %0\n" + " andl $7,%0\n" + " subl %0,%3\n" + "4: rep; movsb\n" + " movl %3,%0\n" + " shrl $2,%0\n" + " andl $3,%3\n" + " .align 2,0x90\n" + "0: rep; movsl\n" + " movl %3,%0\n" + "1: rep; movsb\n" + "2:\n" + " pushl %%ss\n" + " popl %%ds\n" + ".section .fixup,\"ax\"\n" + "5: addl %3,%0\n" + " jmp 2b\n" + "3: lea 0(%3,%0,4),%0\n" + " jmp 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 4b,5b\n" + " .long 0b,3b\n" + " .long 1b,2b\n" + ".previous" + : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) + : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS) + : "memory"); + return size; +} + +static unsigned long +__copy_user_zeroing(void *to, const void __user *from, unsigned long size) +{ + int __d0, 
__d1, __d2; + + __asm__ __volatile__( + " movw %w8,%%ds\n" + " cmp $7,%0\n" + " jbe 1f\n" + " movl %1,%0\n" + " negl %0\n" + " andl $7,%0\n" + " subl %0,%3\n" + "4: rep; movsb\n" + " movl %3,%0\n" + " shrl $2,%0\n" + " andl $3,%3\n" + " .align 2,0x90\n" + "0: rep; movsl\n" + " movl %3,%0\n" + "1: rep; movsb\n" + "2:\n" + " pushl %%ss\n" + " popl %%ds\n" + ".section .fixup,\"ax\"\n" + "5: addl %3,%0\n" + " jmp 6f\n" + "3: lea 0(%3,%0,4),%0\n" + "6: pushl %0\n" + " pushl %%eax\n" + " xorl %%eax,%%eax\n" + " rep; stosb\n" + " popl %%eax\n" + " popl %0\n" + " jmp 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 4b,5b\n" + " .long 0b,3b\n" + " .long 1b,6b\n" + ".previous" + : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) + : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS) + : "memory"); + return size; +} unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n) @@ -774,9 +965,9 @@ survive: } #endif if (movsl_is_ok(to, from, n)) - __copy_user(to, from, n); + n = __generic_copy_to_user(to, from, n); else - n = __copy_user_intel(to, from, n); + n = __generic_copy_to_user_intel(to, from, n); return n; } EXPORT_SYMBOL(__copy_to_user_ll); @@ -785,7 +976,7 @@ unsigned long __copy_from_user_ll(void * unsigned long n) { if (movsl_is_ok(to, from, n)) - __copy_user_zeroing(to, from, n); + n = __copy_user_zeroing(to, from, n); else n = __copy_user_zeroing_intel(to, from, n); return n; @@ -796,9 +987,9 @@ unsigned long __copy_from_user_ll_nozero unsigned long n) { if (movsl_is_ok(to, from, n)) - __copy_user(to, from, n); + n = __generic_copy_from_user(to, from, n); else - n = __copy_user_intel((void __user *)to, + n = __generic_copy_from_user_intel((void __user *)to, (const void *)from, n); return n; } @@ -809,11 +1000,11 @@ unsigned long __copy_from_user_ll_nocach { #ifdef CONFIG_X86_INTEL_USERCOPY if ( n > 64 && cpu_has_xmm2) - n = __copy_user_zeroing_intel_nocache(to, from, n); + n = __copy_user_zeroing_intel_nocache(to, from, n); else - __copy_user_zeroing(to, from, n); + n = __copy_user_zeroing(to, from, n); #else - __copy_user_zeroing(to, from, n); + n = __copy_user_zeroing(to, from, n); #endif return n; } @@ -823,11 +1014,11 @@ unsigned long __copy_from_user_ll_nocach { #ifdef CONFIG_X86_INTEL_USERCOPY if ( n > 64 && cpu_has_xmm2) - n = __copy_user_intel_nocache(to, from, n); + n = __copy_user_intel_nocache(to, from, n); else - __copy_user(to, from, n); + n = __generic_copy_from_user(to, from, n); #else - __copy_user(to, from, n); + n = __generic_copy_from_user(to, from, n); #endif return n; } @@ -880,3 +1071,30 @@ copy_from_user(void *to, const void __us return n; } EXPORT_SYMBOL(copy_from_user); + +#ifdef CONFIG_PAX_MEMORY_UDEREF +void __set_fs(mm_segment_t x, int cpu) +{ + unsigned long limit = x.seg; + __u32 a, b; + + current_thread_info()->addr_limit = x; + if (likely(limit)) + limit = (limit - 1UL) >> PAGE_SHIFT; + pack_descriptor(&a, &b, 0UL, limit, 0xF3, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_DS, a, b); +} + +void set_fs(mm_segment_t x) +{ + __set_fs(x, get_cpu()); + put_cpu_no_resched(); +} +#else +void set_fs(mm_segment_t x) +{ + current_thread_info()->addr_limit = x; +} +#endif + +EXPORT_SYMBOL(set_fs); diff -urNp linux-2.6.22.1/arch/i386/mach-default/setup.c linux-2.6.22.1/arch/i386/mach-default/setup.c --- linux-2.6.22.1/arch/i386/mach-default/setup.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/mach-default/setup.c 2007-08-02 11:38:45.000000000 -0400 @@ -35,7 +35,7 
@@ void __init pre_intr_init_hook(void) /* * IRQ2 is cascade interrupt to second interrupt controller */ -static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL}; +static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL, 0, NULL}; /** * intr_init_hook - post gate setup interrupt initialisation diff -urNp linux-2.6.22.1/arch/i386/mach-voyager/voyager_basic.c linux-2.6.22.1/arch/i386/mach-voyager/voyager_basic.c --- linux-2.6.22.1/arch/i386/mach-voyager/voyager_basic.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/mach-voyager/voyager_basic.c 2007-08-02 11:38:45.000000000 -0400 @@ -130,7 +130,7 @@ voyager_memory_detect(int region, __u32 __u8 cmos[4]; ClickMap_t *map; unsigned long map_addr; - unsigned long old; + pte_t old; if(region >= CLICK_ENTRIES) { printk("Voyager: Illegal ClickMap region %d\n", region); @@ -144,7 +144,7 @@ voyager_memory_detect(int region, __u32 /* steal page 0 for this */ old = pg0[0]; - pg0[0] = ((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT); + pg0[0] = __pte((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT); local_flush_tlb(); /* now clear everything out but page 0 */ map = (ClickMap_t *)(map_addr & (~PAGE_MASK)); diff -urNp linux-2.6.22.1/arch/i386/mach-voyager/voyager_smp.c linux-2.6.22.1/arch/i386/mach-voyager/voyager_smp.c --- linux-2.6.22.1/arch/i386/mach-voyager/voyager_smp.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/mach-voyager/voyager_smp.c 2007-08-02 11:38:45.000000000 -0400 @@ -554,6 +554,10 @@ do_boot_cpu(__u8 cpu) __u32 *hijack_vector; __u32 start_phys_address = setup_trampoline(); +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + /* There's a clever trick to this: The linux trampoline is * compiled to begin at absolute location zero, so make the * address zero but have the data segment selector compensate @@ -573,7 +577,17 @@ do_boot_cpu(__u8 cpu) init_gdt(cpu); per_cpu(current_task, cpu) = idle; - early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + + early_gdt_descr.address = get_cpu_gdt_table(cpu); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + irq_ctx_init(cpu); /* Note: Don't modify initial ss override */ @@ -1276,7 +1290,7 @@ smp_local_timer_interrupt(void) per_cpu(prof_counter, cpu); } - update_process_times(user_mode_vm(get_irq_regs())); + update_process_times(user_mode(get_irq_regs())); } if( ((1< */ - -/* - * We need to use the 2-level pagetable functions, but CONFIG_X86_PAE - * keeps that from happenning. If anyone has a better way, I'm listening. - * - * boot_pte_t is defined only if this all works correctly - */ - -#undef CONFIG_X86_PAE #undef CONFIG_PARAVIRT #include #include #include #include #include - -/* - * I'm cheating here. It is known that the two boot PTE pages are - * allocated next to each other. I'm pretending that they're just - * one big array. 
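
The __boot_ioremap() rewrite just below drops the pg0 arithmetic and instead walks the regular (folded) page-table levels, the same walk pax_get_pmd() performs later in the fault handler. As a standalone reference, that lookup reads as follows (assumed helper name, not code from the patch):

/* Walk the kernel page tables down to the PTE mapping vaddr.
 * Returns NULL if any intermediate level is not present. */
static pte_t *example_kernel_pte(unsigned long vaddr)
{
        pgd_t *pgd = pgd_offset_k(vaddr);
        pud_t *pud;
        pmd_t *pmd;

        if (!pgd_present(*pgd))
                return NULL;
        pud = pud_offset(pgd, vaddr);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, vaddr);
        if (!pmd_present(*pmd))
                return NULL;
        return pte_offset_kernel(pmd, vaddr);
}
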
- */ - -#define BOOT_PTE_PTRS (PTRS_PER_PTE*2) - -static unsigned long boot_pte_index(unsigned long vaddr) -{ - return __pa(vaddr) >> PAGE_SHIFT; -} - -static inline boot_pte_t* boot_vaddr_to_pte(void *address) -{ - boot_pte_t* boot_pg = (boot_pte_t*)pg0; - return &boot_pg[boot_pte_index((unsigned long)address)]; -} +#include /* * This is only for a caller who is clever enough to page-align * phys_addr and virtual_source, and who also has a preference * about which virtual address from which to steal ptes */ -static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages, - void* virtual_source) +static void __init __boot_ioremap(unsigned long phys_addr, unsigned long nrpages, + char* virtual_source) { - boot_pte_t* pte; - int i; - char *vaddr = virtual_source; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t* pte; + unsigned int i; + unsigned long vaddr = (unsigned long)virtual_source; + + pgd = pgd_offset_k(vaddr); + pud = pud_offset(pgd, vaddr); + pmd = pmd_offset(pud, vaddr); + pte = pte_offset_kernel(pmd, vaddr); - pte = boot_vaddr_to_pte(virtual_source); for (i=0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) { set_pte(pte, pfn_pte(phys_addr>>PAGE_SHIFT, PAGE_KERNEL)); - __flush_tlb_one(&vaddr[i*PAGE_SIZE]); + __flush_tlb_one(&virtual_source[i*PAGE_SIZE]); } } diff -urNp linux-2.6.22.1/arch/i386/mm/extable.c linux-2.6.22.1/arch/i386/mm/extable.c --- linux-2.6.22.1/arch/i386/mm/extable.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/mm/extable.c 2007-08-02 11:38:45.000000000 -0400 @@ -11,7 +11,7 @@ int fixup_exception(struct pt_regs *regs const struct exception_table_entry *fixup; #ifdef CONFIG_PNPBIOS - if (unlikely(SEGMENT_IS_PNP_CODE(regs->xcs))) + if (unlikely(!(regs->eflags & VM_MASK) && SEGMENT_IS_PNP_CODE(regs->xcs))) { extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; extern u32 pnp_bios_is_utter_crap; diff -urNp linux-2.6.22.1/arch/i386/mm/fault.c linux-2.6.22.1/arch/i386/mm/fault.c --- linux-2.6.22.1/arch/i386/mm/fault.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/mm/fault.c 2007-08-02 11:45:43.000000000 -0400 @@ -26,10 +26,14 @@ #include #include #include +#include +#include +#include #include #include #include +#include extern void die(const char *,struct pt_regs *,long); @@ -79,7 +83,8 @@ static inline unsigned long get_segment_ { unsigned long eip = regs->eip; unsigned seg = regs->xcs & 0xffff; - u32 seg_ar, seg_limit, base, *desc; + u32 seg_ar, seg_limit, base; + struct desc_struct *desc; /* Unlikely, but must come before segment checks. */ if (unlikely(regs->eflags & VM_MASK)) { @@ -93,7 +98,7 @@ static inline unsigned long get_segment_ /* By far the most common cases. */ if (likely(SEGMENT_IS_FLAT_CODE(seg))) - return eip; + return eip + (seg == __KERNEL_CS ? __KERNEL_TEXT_OFFSET : 0); /* Check the segment exists, is within the current LDT/GDT size, that kernel/user (ring 0..3) has the appropriate privilege, @@ -111,16 +116,19 @@ static inline unsigned long get_segment_ if (seg & (1<<2)) { /* Must lock the LDT while reading it. */ down(¤t->mm->context.sem); - desc = current->mm->context.ldt; - desc = (void *)desc + (seg & ~7); + if ((seg >> 3) >= current->mm->context.size) { + up(¤t->mm->context.sem); + *eip_limit = 0; + return 1; /* So that returned eip > *eip_limit. */ + } + desc = ¤t->mm->context.ldt[seg >> 3]; } else { /* Must disable preemption while reading the GDT. 
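
get_segment_eip() now indexes typed descriptor arrays and gained a bounds check on the LDT slot, so the selector layout is worth keeping in mind: bits 3 and up are the descriptor index, bit 2 chooses the LDT over the GDT, and bits 0-1 are the requested privilege level. A small illustrative decoder (assumed names, not part of the patch):

struct example_selector_fields {
        unsigned int index;     /* descriptor slot: selector >> 3   */
        unsigned int uses_ldt;  /* table indicator: selector bit 2  */
        unsigned int rpl;       /* requested privilege: bits 0-1    */
};

static struct example_selector_fields example_decode_selector(unsigned int seg)
{
        struct example_selector_fields f;

        f.index    = seg >> 3;
        f.uses_ldt = (seg >> 2) & 1;
        f.rpl      = seg & 3;
        return f;
}
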
*/ - desc = (u32 *)get_cpu_gdt_table(get_cpu()); - desc = (void *)desc + (seg & ~7); + desc = &get_cpu_gdt_table(get_cpu())[seg >> 3]; } /* Decode the code segment base from the descriptor */ - base = get_desc_base((unsigned long *)desc); + base = get_desc_base(desc); if (seg & (1<<2)) { up(¤t->mm->context.sem); @@ -221,6 +229,30 @@ static noinline void force_sig_info_faul fastcall void do_invalid_op(struct pt_regs *, unsigned long); +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) +static int pax_handle_fetch_fault(struct pt_regs *regs); +#endif + +#ifdef CONFIG_PAX_PAGEEXEC +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + return NULL; + pud = pud_offset(pgd, address); + if (!pud_present(*pud)) + return NULL; + pmd = pmd_offset(pud, address); + if (!pmd_present(*pmd)) + return NULL; + return pmd; +} +#endif + static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) { unsigned index = pgd_index(address); @@ -301,13 +333,20 @@ fastcall void __kprobes do_page_fault(st struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct * vma; - unsigned long address; int write, si_code; + pte_t *pte; + +#ifdef CONFIG_PAX_PAGEEXEC + pmd_t *pmd; + spinlock_t *ptl; + unsigned char pte_mask; +#endif /* get the address */ - address = read_cr2(); + const unsigned long address = read_cr2(); tsk = current; + mm = tsk->mm; si_code = SEGV_MAPERR; @@ -344,14 +383,12 @@ fastcall void __kprobes do_page_fault(st if (regs->eflags & (X86_EFLAGS_IF|VM_MASK)) local_irq_enable(); - mm = tsk->mm; - /* * If we're in an interrupt, have no user context or are running in an * atomic region then we must not take the fault.. */ if (in_atomic() || !mm) - goto bad_area_nosemaphore; + goto bad_area_nopax; /* When running in the kernel we expect faults to occur only to * addresses in user space. 
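
The PAGEEXEC path just below keys everything off the x86 page-fault error code, so the bit meanings are load-bearing: bit 0 set means a protection violation on a present page, bit 1 a write, bit 2 a user-mode fault, and bit 4 an instruction fetch as reported by an NX-capable CPU. That is why (error_code & 5) == 5 identifies a user-mode protection fault worth emulating and error_code & 16 a hardware-detected execute attempt. Illustrative helpers (assumed names, not in the patch):

#define EXAMPLE_PF_PROT  0x01   /* protection violation on a present page */
#define EXAMPLE_PF_WRITE 0x02   /* write access                           */
#define EXAMPLE_PF_USER  0x04   /* fault raised in user mode              */
#define EXAMPLE_PF_INSTR 0x10   /* instruction fetch (NX/XD hardware)     */

static inline int example_user_prot_fault(unsigned long error_code)
{
        return (error_code & (EXAMPLE_PF_USER | EXAMPLE_PF_PROT))
                          == (EXAMPLE_PF_USER | EXAMPLE_PF_PROT);
}

static inline int example_exec_fault(unsigned long error_code)
{
        return error_code & EXAMPLE_PF_INSTR;
}
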
All other faults represent errors in the @@ -371,10 +408,104 @@ fastcall void __kprobes do_page_fault(st if (!down_read_trylock(&mm->mmap_sem)) { if ((error_code & 4) == 0 && !search_exception_tables(regs->eip)) - goto bad_area_nosemaphore; + goto bad_area_nopax; down_read(&mm->mmap_sem); } +#ifdef CONFIG_PAX_PAGEEXEC + if (nx_enabled || (error_code & 5) != 5 || (regs->eflags & X86_EFLAGS_VM) || + !(mm->pax_flags & MF_PAX_PAGEEXEC)) + goto not_pax_fault; + + /* PaX: it's our fault, let's handle it if we can */ + + /* PaX: take a look at read faults before acquiring any locks */ + if (unlikely(!(error_code & 2) && (regs->eip == address))) { + /* instruction fetch attempt from a protected page in user mode */ + up_read(&mm->mmap_sem); + +#ifdef CONFIG_PAX_EMUTRAMP + switch (pax_handle_fetch_fault(regs)) { + case 2: + return; + } +#endif + + pax_report_fault(regs, (void *)regs->eip, (void *)regs->esp); + do_exit(SIGKILL); + } + + pmd = pax_get_pmd(mm, address); + if (unlikely(!pmd)) + goto not_pax_fault; + + pte = pte_offset_map_lock(mm, pmd, address, &ptl); + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) { + pte_unmap_unlock(pte, ptl); + goto not_pax_fault; + } + + if (unlikely((error_code & 2) && !pte_write(*pte))) { + /* write attempt to a protected page in user mode */ + pte_unmap_unlock(pte, ptl); + goto not_pax_fault; + } + +#ifdef CONFIG_SMP + if (likely(address > get_limit(regs->xcs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask))) +#else + if (likely(address > get_limit(regs->xcs))) +#endif + { + set_pte(pte, pte_mkread(*pte)); + __flush_tlb_one(address); + pte_unmap_unlock(pte, ptl); + up_read(&mm->mmap_sem); + return; + } + + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & 2) << (_PAGE_BIT_DIRTY-1)); + + /* + * PaX: fill DTLB with user rights and retry + */ + __asm__ __volatile__ ( +#ifdef CONFIG_PAX_MEMORY_UDEREF + "movw %w4,%%es\n" +#endif + "orb %2,(%1)\n" +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC) +/* + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any* + * page fault when examined during a TLB load attempt. this is true not only + * for PTEs holding a non-present entry but also present entries that will + * raise a page fault (such as those set up by PaX, or the copy-on-write + * mechanism). in effect it means that we do *not* need to flush the TLBs + * for our target pages since their PTEs are simply not in the TLBs at all. + + * the best thing in omitting it is that we gain around 15-20% speed in the + * fast path of the page fault handler and can get rid of tracing since we + * can no longer flush unintended entries. 
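
The pte_mask computed above ORs into the PTE exactly the bits needed for the transient DTLB fill: _PAGE_ACCESSED and _PAGE_USER always, plus _PAGE_DIRTY when the fault was a write, obtained by shifting error-code bit 1 (value 2) up to the dirty bit's position via (error_code & 2) << (_PAGE_BIT_DIRTY - 1). Spelled out as an equivalent helper (illustrative only):

/* Equivalent, more explicit form of the pte_mask expression above. */
static inline unsigned char example_pte_mask(unsigned long error_code)
{
        unsigned char mask = _PAGE_ACCESSED | _PAGE_USER;

        if (error_code & 2)             /* write fault: pre-set the dirty bit too */
                mask |= _PAGE_DIRTY;    /* 0x40 == 2 << (_PAGE_BIT_DIRTY - 1)     */
        return mask;
}
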
+ */ + "invlpg (%0)\n" +#endif + "testb $0,%%es:(%0)\n" + "xorb %3,(%1)\n" +#ifdef CONFIG_PAX_MEMORY_UDEREF + "pushl %%ss\n" + "popl %%es\n" +#endif + : + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER), "r" (__USER_DS) + : "memory", "cc"); + pte_unmap_unlock(pte, ptl); + up_read(&mm->mmap_sem); + return; + +not_pax_fault: +#endif + vma = find_vma(mm, address); if (!vma) goto bad_area; @@ -392,6 +523,12 @@ fastcall void __kprobes do_page_fault(st if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp) goto bad_area; } + +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1) + goto bad_area; +#endif + if (expand_stack(vma, address)) goto bad_area; /* @@ -401,6 +538,8 @@ fastcall void __kprobes do_page_fault(st good_area: si_code = SEGV_ACCERR; write = 0; + if (nx_enabled && (error_code & 16) && !(vma->vm_flags & VM_EXEC)) + goto bad_area; switch (error_code & 3) { default: /* 3: write, present */ /* fall through */ @@ -456,6 +595,41 @@ bad_area: up_read(&mm->mmap_sem); bad_area_nosemaphore: + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (mm && (error_code & 4) && !(regs->eflags & X86_EFLAGS_VM)) { + /* + * It's possible to have interrupts off here. + */ + local_irq_enable(); + +#ifdef CONFIG_PAX_PAGEEXEC + if ((nx_enabled && (error_code & 16)) || + ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(error_code & 3) && (regs->eip == address))) { + pax_report_fault(regs, (void *)regs->eip, (void *)regs->esp); + do_exit(SIGKILL); + } +#endif + +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & 3) && (regs->eip + SEGMEXEC_TASK_SIZE == address)) { + +#ifdef CONFIG_PAX_EMUTRAMP + switch (pax_handle_fetch_fault(regs)) { + case 2: + return; + } +#endif + + pax_report_fault(regs, (void *)regs->eip, (void *)regs->esp); + do_exit(SIGKILL); + } +#endif + + } +#endif + +bad_area_nopax: /* User mode accesses just cause a SIGSEGV */ if (error_code & 4) { /* @@ -485,7 +659,7 @@ bad_area_nosemaphore: if (boot_cpu_data.f00f_bug) { unsigned long nr; - nr = (address - idt_descr.address) >> 3; + nr = (address - (unsigned long)idt_descr.address) >> 3; if (nr == 6) { do_invalid_op(regs, 0); @@ -518,18 +692,34 @@ no_context: __typeof__(pte_val(__pte(0))) page; #ifdef CONFIG_X86_PAE - if (error_code & 16) { - pte_t *pte = lookup_address(address); + if (nx_enabled && (error_code & 16)) { + pte = lookup_address(address); if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) printk(KERN_CRIT "kernel tried to execute " "NX-protected page - exploit attempt? 
" - "(uid: %d)\n", current->uid); + "(uid: %d, task: %s, pid: %d)\n", + current->uid, current->comm, current->pid); } #endif if (address < PAGE_SIZE) printk(KERN_ALERT "BUG: unable to handle kernel NULL " "pointer dereference"); + +#ifdef CONFIG_PAX_KERNEXEC +#ifdef CONFIG_MODULES + else if (init_mm.start_code <= address && address < (unsigned long)MODULES_END) +#else + else if (init_mm.start_code <= address && address < init_mm.end_code) +#endif + if (tsk->signal->curr_ip) + printk(KERN_ERR "PAX: From %u.%u.%u.%u: %s:%d, uid/euid: %u/%u, attempted to modify kernel code", + NIPQUAD(tsk->signal->curr_ip), tsk->comm, tsk->pid, tsk->uid, tsk->euid); + else + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code", + tsk->comm, tsk->pid, tsk->uid, tsk->euid); +#endif + else printk(KERN_ALERT "BUG: unable to handle kernel paging" " request"); @@ -560,7 +750,7 @@ no_context: * it's allocated already. */ if ((page >> PAGE_SHIFT) < max_low_pfn - && (page & _PAGE_PRESENT)) { + && (page & (_PAGE_PRESENT | _PAGE_PSE)) == _PAGE_PRESENT) { page &= PAGE_MASK; page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)]; @@ -645,3 +835,109 @@ void vmalloc_sync_all(void) start = address + PGDIR_SIZE; } } + +#ifdef CONFIG_PAX_EMUTRAMP +/* + * PaX: decide what to do with offenders (regs->eip = fault address) + * + * returns 1 when task should be killed + * 2 when gcc trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + static const unsigned char trans[8] = { + offsetof(struct pt_regs, eax) / 4, + offsetof(struct pt_regs, ecx) / 4, + offsetof(struct pt_regs, edx) / 4, + offsetof(struct pt_regs, ebx) / 4, + offsetof(struct pt_regs, esp) / 4, + offsetof(struct pt_regs, ebp) / 4, + offsetof(struct pt_regs, esi) / 4, + offsetof(struct pt_regs, edi) / 4, + }; + int err; + + if (regs->eflags & X86_EFLAGS_VM) + return 1; + + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) + return 1; + + do { /* PaX: gcc trampoline emulation #1 */ + unsigned char mov1, mov2; + unsigned short jmp; + unsigned long addr1, addr2; + + err = get_user(mov1, (unsigned char __user *)regs->eip); + err |= get_user(addr1, (unsigned long __user *)(regs->eip + 1)); + err |= get_user(mov2, (unsigned char __user *)(regs->eip + 5)); + err |= get_user(addr2, (unsigned long __user *)(regs->eip + 6)); + err |= get_user(jmp, (unsigned short __user *)(regs->eip + 10)); + + if (err) + break; + + if ((mov1 & 0xF8) == 0xB8 && + (mov2 & 0xF8) == 0xB8 && + (mov1 & 0x07) != (mov2 & 0x07) && + (jmp & 0xF8FF) == 0xE0FF && + (mov2 & 0x07) == ((jmp>>8) & 0x07)) + { + ((unsigned long *)regs)[trans[mov1 & 0x07]] = addr1; + ((unsigned long *)regs)[trans[mov2 & 0x07]] = addr2; + regs->eip = addr2; + return 2; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #2 */ + unsigned char mov, jmp; + unsigned long addr1, addr2; + + err = get_user(mov, (unsigned char __user *)regs->eip); + err |= get_user(addr1, (unsigned long __user *)(regs->eip + 1)); + err |= get_user(jmp, (unsigned char __user *)(regs->eip + 5)); + err |= get_user(addr2, (unsigned long __user *)(regs->eip + 6)); + + if (err) + break; + + if ((mov & 0xF8) == 0xB8 && + jmp == 0xE9) + { + ((unsigned long *)regs)[trans[mov & 0x07]] = addr1; + regs->eip += addr2 + 10; + return 2; + } + } while (0); + + return 1; /* PaX in action */ +} +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) +void pax_report_insns(void *pc, void *sp) +{ + long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for 
(i = 0; i < 20; i++) { + unsigned char c; + if (get_user(c, (unsigned char __user *)pc+i)) + printk("?? "); + else + printk("%02x ", c); + } + printk("\n"); + + printk(KERN_ERR "PAX: bytes at SP-4: "); + for (i = -1; i < 20; i++) { + unsigned long c; + if (get_user(c, (unsigned long __user *)sp+i)) + printk("???????? "); + else + printk("%08lx ", c); + } + printk("\n"); +} +#endif diff -urNp linux-2.6.22.1/arch/i386/mm/hugetlbpage.c linux-2.6.22.1/arch/i386/mm/hugetlbpage.c --- linux-2.6.22.1/arch/i386/mm/hugetlbpage.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/mm/hugetlbpage.c 2007-08-02 11:38:45.000000000 -0400 @@ -229,13 +229,18 @@ static unsigned long hugetlb_get_unmappe { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; - unsigned long start_addr; + unsigned long start_addr, task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + task_size = SEGMEXEC_TASK_SIZE; +#endif if (len > mm->cached_hole_size) { - start_addr = mm->free_area_cache; + start_addr = mm->free_area_cache; } else { - start_addr = TASK_UNMAPPED_BASE; - mm->cached_hole_size = 0; + start_addr = mm->mmap_base; + mm->cached_hole_size = 0; } full_search: @@ -243,13 +248,13 @@ full_search: for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ - if (TASK_SIZE - len < addr) { + if (task_size - len < addr) { /* * Start a new search - just in case we missed * some holes. */ - if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = TASK_UNMAPPED_BASE; + if (start_addr != mm->mmap_base) { + start_addr = mm->mmap_base; mm->cached_hole_size = 0; goto full_search; } @@ -271,9 +276,8 @@ static unsigned long hugetlb_get_unmappe { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev_vma; - unsigned long base = mm->mmap_base, addr = addr0; + unsigned long base = mm->mmap_base, addr; unsigned long largest_hole = mm->cached_hole_size; - int first_time = 1; /* don't allow allocations above current base */ if (mm->free_area_cache > base) @@ -283,7 +287,7 @@ static unsigned long hugetlb_get_unmappe largest_hole = 0; mm->free_area_cache = base; } -try_again: + /* make sure it can fit in the remaining address space */ if (mm->free_area_cache < len) goto fail; @@ -325,22 +329,26 @@ try_again: fail: /* - * if hint left us with no space for the requested - * mapping then try again: - */ - if (first_time) { - mm->free_area_cache = base; - largest_hole = 0; - first_time = 0; - goto try_again; - } - /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. 
*/ - mm->free_area_cache = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; + else +#endif + + mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + + mm->free_area_cache = mm->mmap_base; mm->cached_hole_size = ~0UL; addr = hugetlb_get_unmapped_area_bottomup(file, addr0, len, pgoff, flags); @@ -348,6 +343,7 @@ fail: /* * Restore the topdown base: */ + mm->mmap_base = base; mm->free_area_cache = base; mm->cached_hole_size = ~0UL; @@ -360,10 +356,17 @@ hugetlb_get_unmapped_area(struct file *f { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; + unsigned long task_size = TASK_SIZE; if (len & ~HPAGE_MASK) return -EINVAL; - if (len > TASK_SIZE) + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + task_size = SEGMEXEC_TASK_SIZE; +#endif + + if (len > task_size) return -ENOMEM; if (flags & MAP_FIXED) { @@ -375,7 +378,7 @@ hugetlb_get_unmapped_area(struct file *f if (addr) { addr = ALIGN(addr, HPAGE_SIZE); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && + if (task_size - len >= addr && (!vma || addr + len <= vma->vm_start)) return addr; } diff -urNp linux-2.6.22.1/arch/i386/mm/init.c linux-2.6.22.1/arch/i386/mm/init.c --- linux-2.6.22.1/arch/i386/mm/init.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/mm/init.c 2007-08-02 11:38:45.000000000 -0400 @@ -44,6 +44,7 @@ #include #include #include +#include unsigned int __VMALLOC_RESERVE = 128 << 20; @@ -53,32 +54,6 @@ unsigned long highstart_pfn, highend_pfn static int noinline do_test_wp_bit(void); /* - * Creates a middle page table and puts a pointer to it in the - * given global directory entry. This only returns the gd entry - * in non-PAE compilation mode, since the middle layer is folded. - */ -static pmd_t * __init one_md_table_init(pgd_t *pgd) -{ - pud_t *pud; - pmd_t *pmd_table; - -#ifdef CONFIG_X86_PAE - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { - pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE); - - paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT); - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); - pud = pud_offset(pgd, 0); - if (pmd_table != pmd_offset(pud, 0)) - BUG(); - } -#endif - pud = pud_offset(pgd, 0); - pmd_table = pmd_offset(pud, 0); - return pmd_table; -} - -/* * Create a page table and place a pointer to it in a middle page * directory entry. 
*/ @@ -88,7 +63,11 @@ static pte_t * __init one_page_table_ini pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT); +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE)); +#else set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); +#endif BUG_ON(page_table != pte_offset_kernel(pmd, 0)); } @@ -109,6 +88,7 @@ static pte_t * __init one_page_table_ini static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base) { pgd_t *pgd; + pud_t *pud; pmd_t *pmd; int pgd_idx, pmd_idx; unsigned long vaddr; @@ -119,8 +99,13 @@ static void __init page_table_range_init pgd = pgd_base + pgd_idx; for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { - pmd = one_md_table_init(pgd); - pmd = pmd + pmd_index(vaddr); + pud = pud_offset(pgd, vaddr); + pmd = pmd_offset(pud, vaddr); + +#ifdef CONFIG_X86_PAE + paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT); +#endif + for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) { one_page_table_init(pmd); @@ -130,11 +115,23 @@ static void __init page_table_range_init } } -static inline int is_kernel_text(unsigned long addr) +static inline int is_kernel_text(unsigned long start, unsigned long end) { - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end) - return 1; - return 0; + unsigned long etext; + +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) + etext = (unsigned long)&MODULES_END - __KERNEL_TEXT_OFFSET; +#else + etext = (unsigned long)&_etext; +#endif + + if ((start > etext + __KERNEL_TEXT_OFFSET || + end <= (unsigned long)_stext + __KERNEL_TEXT_OFFSET) && + (start > (unsigned long)_einittext + __KERNEL_TEXT_OFFSET || + end <= (unsigned long)_sinittext + __KERNEL_TEXT_OFFSET) && + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000))) + return 0; + return 1; } /* @@ -146,25 +143,29 @@ static void __init kernel_physical_mappi { unsigned long pfn; pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte; - int pgd_idx, pmd_idx, pte_ofs; + unsigned int pgd_idx, pmd_idx, pte_ofs; pgd_idx = pgd_index(PAGE_OFFSET); pgd = pgd_base + pgd_idx; pfn = 0; - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { - pmd = one_md_table_init(pgd); - if (pfn >= max_low_pfn) - continue; + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) { + pud = pud_offset(pgd, 0); + pmd = pmd_offset(pud, 0); + +#ifdef CONFIG_X86_PAE + paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT); +#endif + for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) { - unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET; + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET; /* Map with big pages if possible, otherwise create normal page tables. 
*/ - if (cpu_has_pse) { - unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1; - if (is_kernel_text(address) || is_kernel_text(address2)) + if (cpu_has_pse && address >= (unsigned long)__va(0x100000)) { + if (is_kernel_text(address, address + PMD_SIZE)) set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC)); else set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE)); @@ -176,7 +177,7 @@ static void __init kernel_physical_mappi for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++, address += PAGE_SIZE) { - if (is_kernel_text(address)) + if (is_kernel_text(address, address + PAGE_SIZE)) set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); else set_pte(pte, pfn_pte(pfn, PAGE_KERNEL)); @@ -326,9 +327,9 @@ static void __init set_highmem_pages_ini #define set_highmem_pages_init(bad_ppro) do { } while (0) #endif /* CONFIG_HIGHMEM */ -unsigned long long __PAGE_KERNEL = _PAGE_KERNEL; +unsigned long long __PAGE_KERNEL __read_only = _PAGE_KERNEL; EXPORT_SYMBOL(__PAGE_KERNEL); -unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC; +unsigned long long __PAGE_KERNEL_EXEC __read_only = _PAGE_KERNEL_EXEC; #ifdef CONFIG_NUMA extern void __init remap_numa_kva(void); @@ -339,26 +340,10 @@ extern void __init remap_numa_kva(void); void __init native_pagetable_setup_start(pgd_t *base) { #ifdef CONFIG_X86_PAE - int i; + unsigned int i; - /* - * Init entries of the first-level page table to the - * zero page, if they haven't already been set up. - * - * In a normal native boot, we'll be running on a - * pagetable rooted in swapper_pg_dir, but not in PAE - * mode, so this will end up clobbering the mappings - * for the lower 24Mbytes of the address space, - * without affecting the kernel address space. - */ - for (i = 0; i < USER_PTRS_PER_PGD; i++) - set_pgd(&base[i], - __pgd(__pa(empty_zero_page) | _PAGE_PRESENT)); - - /* Make sure kernel address space is empty so that a pagetable - will be allocated for it. */ - memset(&base[USER_PTRS_PER_PGD], 0, - KERNEL_PGD_PTRS * sizeof(pgd_t)); + for (i = 0; i < PTRS_PER_PGD; i++) + paravirt_alloc_pd(__pa(swapper_pm_dir + i) >> PAGE_SHIFT); #else paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT); #endif @@ -366,16 +351,6 @@ void __init native_pagetable_setup_start void __init native_pagetable_setup_done(pgd_t *base) { -#ifdef CONFIG_X86_PAE - /* - * Add low memory identity-mappings - SMP needs it when - * starting up on an AP from real-mode. In the non-PAE - * case we already have these mappings through head.S. - * All user-space mappings are explicitly cleared after - * SMP startup. - */ - set_pgd(&base[0], base[USER_PTRS_PER_PGD]); -#endif } /* @@ -437,12 +412,12 @@ static void __init pagetable_init (void) * Swap suspend & friends need this for resume because things like the intel-agp * driver might have split up a kernel 4MB mapping. 
*/ -char __nosavedata swsusp_pg_dir[PAGE_SIZE] +pgd_t __nosavedata swsusp_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned (PAGE_SIZE))); static inline void save_pg_dir(void) { - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE); + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD); } #else static inline void save_pg_dir(void) @@ -471,8 +446,7 @@ void zap_low_mappings (void) flush_tlb_all(); } -static int disable_nx __initdata = 0; -u64 __supported_pte_mask __read_mostly = ~_PAGE_NX; +u64 __supported_pte_mask __read_only = ~_PAGE_NX; /* * noexec = on|off @@ -482,39 +456,34 @@ u64 __supported_pte_mask __read_mostly = * on Enable * off Disable */ +#if !defined(CONFIG_PAX_PAGEEXEC) static int __init noexec_setup(char *str) { if (!str || !strcmp(str, "on")) { - if (cpu_has_nx) { - __supported_pte_mask |= _PAGE_NX; - disable_nx = 0; - } + if (cpu_has_nx) + nx_enabled = 1; } else if (!strcmp(str,"off")) { - disable_nx = 1; - __supported_pte_mask &= ~_PAGE_NX; + nx_enabled = 0; } else return -EINVAL; return 0; } early_param("noexec", noexec_setup); +#endif int nx_enabled = 0; #ifdef CONFIG_X86_PAE static void __init set_nx(void) { - unsigned int v[4], l, h; + if (!nx_enabled && cpu_has_nx) { + unsigned l, h; - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) { - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]); - if ((v[3] & (1 << 20)) && !disable_nx) { - rdmsr(MSR_EFER, l, h); - l |= EFER_NX; - wrmsr(MSR_EFER, l, h); - nx_enabled = 1; - __supported_pte_mask |= _PAGE_NX; - } + __supported_pte_mask &= ~_PAGE_NX; + rdmsr(MSR_EFER, l, h); + l &= ~EFER_NX; + wrmsr(MSR_EFER, l, h); } } @@ -567,14 +536,6 @@ void __init paging_init(void) load_cr3(swapper_pg_dir); -#ifdef CONFIG_X86_PAE - /* - * We will bail out later - printk doesn't work right now so - * the user would just see a hanging kernel. 
- */ - if (cpu_has_pae) - set_in_cr4(X86_CR4_PAE); -#endif __flush_tlb_all(); kmap_init(); @@ -645,7 +606,7 @@ void __init mem_init(void) set_highmem_pages_init(bad_ppro); codesize = (unsigned long) &_etext - (unsigned long) &_text; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; + datasize = (unsigned long) &_edata - (unsigned long) &_data; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); @@ -690,10 +651,10 @@ void __init mem_init(void) (unsigned long)&__init_begin, (unsigned long)&__init_end, ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10, - (unsigned long)&_etext, (unsigned long)&_edata, - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, + (unsigned long)&_data, (unsigned long)&_edata, + ((unsigned long)&_edata - (unsigned long)&_data) >> 10, - (unsigned long)&_text, (unsigned long)&_etext, + (unsigned long)&_text + __KERNEL_TEXT_OFFSET, (unsigned long)&_etext + __KERNEL_TEXT_OFFSET, ((unsigned long)&_etext - (unsigned long)&_text) >> 10); #ifdef CONFIG_HIGHMEM @@ -704,10 +665,6 @@ void __init mem_init(void) BUG_ON((unsigned long)high_memory > VMALLOC_START); #endif /* double-sanity-check paranoia */ -#ifdef CONFIG_X86_PAE - if (!cpu_has_pae) - panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!"); -#endif if (boot_cpu_data.wp_works_ok < 0) test_wp_bit(); @@ -843,6 +800,38 @@ void free_init_pages(char *what, unsigne void free_initmem(void) { + +#ifdef CONFIG_PAX_KERNEXEC + /* PaX: limit KERNEL_CS to actual size */ + unsigned long addr, limit; + __u32 a, b; + int cpu; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + +#ifdef CONFIG_MODULES + limit = (unsigned long)&MODULES_END - __KERNEL_TEXT_OFFSET; +#else + limit = (unsigned long)&_etext; +#endif + limit = (limit - 1UL) >> PAGE_SHIFT; + + for (cpu = 0; cpu < NR_CPUS; cpu++) { + pack_descriptor(&a, &b, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, a, b); + } + + /* PaX: make KERNEL_CS read-only */ + for (addr = __KERNEL_TEXT_OFFSET; addr < (unsigned long)&_data; addr += PMD_SIZE) { + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); + } + flush_tlb_all(); +#endif + free_init_pages("unused kernel memory", (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); diff -urNp linux-2.6.22.1/arch/i386/mm/mmap.c linux-2.6.22.1/arch/i386/mm/mmap.c --- linux-2.6.22.1/arch/i386/mm/mmap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/mm/mmap.c 2007-08-02 11:38:45.000000000 -0400 @@ -35,12 +35,18 @@ * Leave an at least ~128 MB hole. 
*/ #define MIN_GAP (128*1024*1024) -#define MAX_GAP (TASK_SIZE/6*5) +#define MAX_GAP (task_size/6*5) static inline unsigned long mmap_base(struct mm_struct *mm) { unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur; unsigned long random_factor = 0; + unsigned long task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + task_size = SEGMEXEC_TASK_SIZE; +#endif if (current->flags & PF_RANDOMIZE) random_factor = get_random_int() % (1024*1024); @@ -50,7 +56,7 @@ static inline unsigned long mmap_base(st else if (gap > MAX_GAP) gap = MAX_GAP; - return PAGE_ALIGN(TASK_SIZE - gap - random_factor); + return PAGE_ALIGN(task_size - gap - random_factor); } /* @@ -66,11 +72,30 @@ void arch_pick_mmap_layout(struct mm_str if (sysctl_legacy_va_layout || (current->personality & ADDR_COMPAT_LAYOUT) || current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) { + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; + else +#endif + mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(mm); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff -urNp linux-2.6.22.1/arch/i386/mm/pageattr.c linux-2.6.22.1/arch/i386/mm/pageattr.c --- linux-2.6.22.1/arch/i386/mm/pageattr.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/mm/pageattr.c 2007-08-02 11:38:45.000000000 -0400 @@ -13,6 +13,7 @@ #include #include #include +#include static DEFINE_SPINLOCK(cpa_lock); static struct list_head df_list = LIST_HEAD_INIT(df_list); @@ -37,16 +38,16 @@ pte_t *lookup_address(unsigned long addr } static struct page *split_large_page(unsigned long address, pgprot_t prot, - pgprot_t ref_prot) + pgprot_t ref_prot, unsigned long flags) { int i; unsigned long addr; struct page *base; pte_t *pbase; - spin_unlock_irq(&cpa_lock); + spin_unlock_irqrestore(&cpa_lock, flags); base = alloc_pages(GFP_KERNEL, 0); - spin_lock_irq(&cpa_lock); + spin_lock_irqsave(&cpa_lock, flags); if (!base) return NULL; @@ -99,7 +100,18 @@ static void set_pmd_pte(pte_t *kpte, uns struct page *page; unsigned long flags; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; + + pax_open_kernel(cr0); +#endif + set_pte_atomic(kpte, pte); /* change init_mm */ + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + if (SHARED_KERNEL_PMD) return; @@ -126,7 +138,7 @@ static inline void revert_page(struct pa pte_t *linear; ref_prot = - ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext) + ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext + __KERNEL_TEXT_OFFSET) ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE; linear = (pte_t *) @@ -137,7 +149,7 @@ static inline void revert_page(struct pa } static int -__change_page_attr(struct page *page, pgprot_t prot) +__change_page_attr(struct page *page, pgprot_t prot, unsigned long flags) { pte_t *kpte; unsigned long address; @@ -158,13 +170,20 @@ __change_page_attr(struct page *page, pg struct page *split; ref_prot = - ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext) + ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext + __KERNEL_TEXT_OFFSET) ? 
PAGE_KERNEL_EXEC : PAGE_KERNEL; - split = split_large_page(address, prot, ref_prot); + split = split_large_page(address, prot, ref_prot, flags); if (!split) return -ENOMEM; - set_pmd_pte(kpte,address,mk_pte(split, ref_prot)); - kpte_page = split; + if (pte_huge(*kpte)) { + set_pmd_pte(kpte,address,mk_pte(split, ref_prot)); + kpte_page = split; + } else { + __free_pages(split, 0); + kpte = lookup_address(address); + kpte_page = virt_to_page(kpte); + set_pte_atomic(kpte, mk_pte(page, prot)); + } } page_private(kpte_page)++; } else if (!pte_huge(*kpte)) { @@ -216,7 +235,7 @@ int change_page_attr(struct page *page, spin_lock_irqsave(&cpa_lock, flags); for (i = 0; i < numpages; i++, page++) { - err = __change_page_attr(page, prot); + err = __change_page_attr(page, prot, flags); if (err) break; } diff -urNp linux-2.6.22.1/arch/i386/oprofile/backtrace.c linux-2.6.22.1/arch/i386/oprofile/backtrace.c --- linux-2.6.22.1/arch/i386/oprofile/backtrace.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/oprofile/backtrace.c 2007-08-02 11:38:45.000000000 -0400 @@ -22,7 +22,7 @@ struct frame_head { static struct frame_head * dump_kernel_backtrace(struct frame_head * head) { - oprofile_add_trace(head->ret); + oprofile_add_trace(head->ret + __KERNEL_TEXT_OFFSET); /* frame pointers should strictly progress back up the stack * (towards higher addresses) */ @@ -116,7 +116,7 @@ x86_backtrace(struct pt_regs * const reg head = (struct frame_head *)regs->ebp; #endif - if (!user_mode_vm(regs)) { + if (!user_mode(regs)) { while (depth-- && valid_kernel_stack(head, regs)) head = dump_kernel_backtrace(head); return; diff -urNp linux-2.6.22.1/arch/i386/oprofile/op_model_p4.c linux-2.6.22.1/arch/i386/oprofile/op_model_p4.c --- linux-2.6.22.1/arch/i386/oprofile/op_model_p4.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/oprofile/op_model_p4.c 2007-08-02 11:38:45.000000000 -0400 @@ -47,7 +47,7 @@ static inline void setup_num_counters(vo #endif } -static int inline addr_increment(void) +static inline int addr_increment(void) { #ifdef CONFIG_SMP return smp_num_siblings == 2 ? 2 : 1; diff -urNp linux-2.6.22.1/arch/i386/pci/common.c linux-2.6.22.1/arch/i386/pci/common.c --- linux-2.6.22.1/arch/i386/pci/common.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/pci/common.c 2007-08-02 11:38:45.000000000 -0400 @@ -287,7 +287,7 @@ static struct dmi_system_id __devinitdat DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"), }, }, - {} + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL} }; struct pci_bus * __devinit pcibios_scan_root(int busnum) diff -urNp linux-2.6.22.1/arch/i386/pci/early.c linux-2.6.22.1/arch/i386/pci/early.c --- linux-2.6.22.1/arch/i386/pci/early.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/pci/early.c 2007-08-02 11:38:45.000000000 -0400 @@ -7,7 +7,7 @@ /* Direct PCI access. This is used for PCI accesses in early boot before the PCI subsystem works. */ -#define PDprintk(x...) +#define PDprintk(x...) 
do {} while (0) u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset) { diff -urNp linux-2.6.22.1/arch/i386/pci/fixup.c linux-2.6.22.1/arch/i386/pci/fixup.c --- linux-2.6.22.1/arch/i386/pci/fixup.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/pci/fixup.c 2007-08-02 11:38:45.000000000 -0400 @@ -389,7 +389,7 @@ static struct dmi_system_id __devinitdat DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL } }; static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev) diff -urNp linux-2.6.22.1/arch/i386/pci/irq.c linux-2.6.22.1/arch/i386/pci/irq.c --- linux-2.6.22.1/arch/i386/pci/irq.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/pci/irq.c 2007-08-02 11:38:45.000000000 -0400 @@ -507,7 +507,7 @@ static __init int intel_router_probe(str static struct pci_device_id __initdata pirq_440gx[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) }, - { }, + { PCI_DEVICE(0, 0) } }; /* 440GX has a proprietary PIRQ router -- don't use it */ @@ -1049,7 +1049,7 @@ static struct dmi_system_id __initdata p DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL } }; static int __init pcibios_irq_init(void) diff -urNp linux-2.6.22/arch/i386/pci/pcbios.c linux-2.6.22/arch/i386/pci/pcbios.c --- linux-2.6.22/arch/i386/pci/pcbios.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22/arch/i386/pci/pcbios.c 2007-07-10 14:56:30.000000000 -0400 @@ -57,50 +57,120 @@ union bios32 { static struct { unsigned long address; unsigned short segment; -} bios32_indirect = { 0, __KERNEL_CS }; +} bios32_indirect __read_only = { 0, __PCIBIOS_CS }; /* * Returns the entry point for the given service, NULL on error */ -static unsigned long bios32_service(unsigned long service) +static unsigned long __devinit bios32_service(unsigned long service) { unsigned char return_code; /* %al */ unsigned long address; /* %ebx */ unsigned long length; /* %ecx */ unsigned long entry; /* %edx */ unsigned long flags; + struct desc_struct *gdt; + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif local_irq_save(flags); - __asm__("lcall *(%%edi); cld" + + gdt = get_cpu_gdt_table(smp_processor_id()); + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + + pack_descriptor((__u32 *)&gdt[GDT_ENTRY_PCIBIOS_CS].a, + (__u32 *)&gdt[GDT_ENTRY_PCIBIOS_CS].b, + 0UL, 0xFFFFFUL, 0x9B, 0xC); + pack_descriptor((__u32 *)&gdt[GDT_ENTRY_PCIBIOS_DS].a, + (__u32 *)&gdt[GDT_ENTRY_PCIBIOS_DS].b, + 0UL, 0xFFFFFUL, 0x93, 0xC); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld" : "=a" (return_code), "=b" (address), "=c" (length), "=d" (entry) : "0" (service), "1" (0), - "D" (&bios32_indirect)); + "D" (&bios32_indirect), + "r"(__PCIBIOS_DS) + : "memory"); + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0; + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0; + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0; + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0; + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + local_irq_restore(flags); switch (return_code) { - case 0: - return address + entry; - case 0x80: /* Not present */ - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); - return 0; - default: /* Shouldn't happen */ - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", - service, 
return_code); - return 0; + case 0: { + int cpu; + unsigned char flags; + + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry); + address = address + PAGE_OFFSET; + length += 16UL; /* some BIOSs underreport this... */ + flags = 4; + if (length >= 64*1024*1024) { + length >>= PAGE_SHIFT; + flags |= 8; + } + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + + for (cpu = 0; cpu < NR_CPUS; cpu++) { + gdt = get_cpu_gdt_table(cpu); + pack_descriptor((__u32 *)&gdt[GDT_ENTRY_PCIBIOS_CS].a, + (__u32 *)&gdt[GDT_ENTRY_PCIBIOS_CS].b, + address, length, 0x9b, flags); + pack_descriptor((__u32 *)&gdt[GDT_ENTRY_PCIBIOS_DS].a, + (__u32 *)&gdt[GDT_ENTRY_PCIBIOS_DS].b, + address, length, 0x93, flags); + } + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + + return entry; + } + case 0x80: /* Not present */ + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); + return 0; + default: /* Shouldn't happen */ + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", + service, return_code); + return 0; } } static struct { unsigned long address; unsigned short segment; -} pci_indirect = { 0, __KERNEL_CS }; +} pci_indirect __read_only = { 0, __PCIBIOS_CS }; -static int pci_bios_present; +static int pci_bios_present __read_only; static int __devinit check_pcibios(void) { @@ -109,11 +178,13 @@ static int __devinit check_pcibios(void) unsigned long flags, pcibios_entry; if ((pcibios_entry = bios32_service(PCI_SERVICE))) { - pci_indirect.address = pcibios_entry + PAGE_OFFSET; + pci_indirect.address = pcibios_entry; local_irq_save(flags); - __asm__( - "lcall *(%%edi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%edi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -122,7 +193,8 @@ static int __devinit check_pcibios(void) "=b" (ebx), "=c" (ecx) : "1" (PCIBIOS_PCI_BIOS_PRESENT), - "D" (&pci_indirect) + "D" (&pci_indirect), + "r" (__PCIBIOS_DS) : "memory"); local_irq_restore(flags); @@ -158,7 +230,10 @@ static int __devinit pci_bios_find_devic unsigned short bx; unsigned short ret; - __asm__("lcall *(%%edi); cld\n\t" + __asm__("movw %w7, %%ds\n\t" + "lcall *%%ss:(%%edi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -168,7 +243,8 @@ static int __devinit pci_bios_find_devic "c" (device_id), "d" (vendor), "S" ((int) index), - "D" (&pci_indirect)); + "D" (&pci_indirect), + "r" (__PCIBIOS_DS)); *bus = (bx >> 8) & 0xff; *device_fn = bx & 0xff; return (int) (ret & 0xff00) >> 8; @@ -188,7 +264,10 @@ static int pci_bios_read(unsigned int se switch (len) { case 1: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -197,10 +276,14 @@ static int pci_bios_read(unsigned int se : "1" (PCIBIOS_READ_CONFIG_BYTE), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; case 2: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -209,10 +292,14 @@ static int pci_bios_read(unsigned int se : "1" (PCIBIOS_READ_CONFIG_WORD), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; case 4: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop 
%%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -221,7 +308,8 @@ static int pci_bios_read(unsigned int se : "1" (PCIBIOS_READ_CONFIG_DWORD), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; } @@ -244,7 +332,10 @@ static int pci_bios_write(unsigned int s switch (len) { case 1: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -253,10 +344,14 @@ static int pci_bios_write(unsigned int s "c" (value), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; case 2: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -265,10 +360,14 @@ static int pci_bios_write(unsigned int s "c" (value), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; case 4: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -277,7 +376,8 @@ static int pci_bios_write(unsigned int s "c" (value), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; } @@ -430,10 +530,13 @@ struct irq_routing_table * __devinit pci DBG("PCI: Fetching IRQ routing table... "); __asm__("push %%es\n\t" + "movw %w8, %%ds\n\t" "push %%ds\n\t" "pop %%es\n\t" - "lcall *(%%esi); cld\n\t" + "lcall *%%ss:(%%esi); cld\n\t" "pop %%es\n\t" + "push %%ss\n\t" + "pop %%ds\n" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -444,7 +547,8 @@ struct irq_routing_table * __devinit pci "1" (0), "D" ((long) &opt), "S" (&pci_indirect), - "m" (opt) + "m" (opt), + "r" (__PCIBIOS_DS) : "memory"); DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map); if (ret & 0xff00) @@ -468,7 +572,10 @@ int pcibios_set_irq_routing(struct pci_d { int ret; - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w5, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -476,7 +583,8 @@ int pcibios_set_irq_routing(struct pci_d : "0" (PCIBIOS_SET_PCI_HW_INT), "b" ((dev->bus->number << 8) | dev->devfn), "c" ((irq << 8) | (pin + 10)), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); return !(ret & 0xff00); } EXPORT_SYMBOL(pcibios_set_irq_routing); diff -urNp linux-2.6.22.1/arch/i386/power/cpu.c linux-2.6.22.1/arch/i386/power/cpu.c --- linux-2.6.22.1/arch/i386/power/cpu.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/i386/power/cpu.c 2007-08-02 11:38:45.000000000 -0400 @@ -64,7 +64,7 @@ static void do_fpu_end(void) static void fix_processor_context(void) { int cpu = smp_processor_id(); - struct tss_struct * t = &per_cpu(init_tss, cpu); + struct tss_struct *t = init_tss + cpu; set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. 
*/ diff -urNp linux-2.6.22.1/arch/ia64/ia32/binfmt_elf32.c linux-2.6.22.1/arch/ia64/ia32/binfmt_elf32.c --- linux-2.6.22.1/arch/ia64/ia32/binfmt_elf32.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/ia64/ia32/binfmt_elf32.c 2007-08-02 11:38:45.000000000 -0400 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_ #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack)) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) + +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) +#endif + /* Ugly but avoids duplication */ #include "../../../fs/binfmt_elf.c" @@ -226,8 +233,20 @@ ia32_setup_arg_pages (struct linux_binpr mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC; else mpnt->vm_flags = VM_STACK_FLAGS; - mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC)? - PAGE_COPY_EXEC: PAGE_COPY; + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { + mpnt->vm_flags &= ~VM_EXEC; + +#ifdef CONFIG_PAX_MPROTECT + if (current->mm->pax_flags & MF_PAX_MPROTECT) + mpnt->vm_flags &= ~VM_MAYEXEC; +#endif + + } +#endif + + mpnt->vm_page_prot = vm_get_page_prot(mpnt->vm_flags); if ((ret = insert_vm_struct(current->mm, mpnt))) { up_write(&current->mm->mmap_sem); kmem_cache_free(vm_area_cachep, mpnt); diff -urNp linux-2.6.22.1/arch/ia64/ia32/ia32priv.h linux-2.6.22.1/arch/ia64/ia32/ia32priv.h --- linux-2.6.22.1/arch/ia64/ia32/ia32priv.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/ia64/ia32/ia32priv.h 2007-08-02 11:38:45.000000000 -0400 @@ -304,7 +304,14 @@ struct old_linux32_dirent { #define ELF_DATA ELFDATA2LSB #define ELF_ARCH EM_386 -#define IA32_STACK_TOP IA32_PAGE_OFFSET +#ifdef CONFIG_PAX_RANDUSTACK +#define __IA32_DELTA_STACK (current->mm->delta_stack) +#else +#define __IA32_DELTA_STACK 0UL +#endif + +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK) + #define IA32_GATE_OFFSET IA32_PAGE_OFFSET #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE diff -urNp linux-2.6.22.1/arch/ia64/kernel/module.c linux-2.6.22.1/arch/ia64/kernel/module.c --- linux-2.6.22.1/arch/ia64/kernel/module.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/ia64/kernel/module.c 2007-08-02 11:38:45.000000000 -0400 @@ -321,7 +321,7 @@ module_alloc (unsigned long size) void module_free (struct module *mod, void *module_region) { - if (mod->arch.init_unw_table && module_region == mod->module_init) { + if (mod->arch.init_unw_table && module_region == mod->module_init_rx) { unw_remove_unwind_table(mod->arch.init_unw_table); mod->arch.init_unw_table = NULL; } @@ -499,15 +499,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd } static inline int +in_init_rx (const struct module *mod, uint64_t addr) +{ + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx; +} + +static inline int +in_init_rw (const struct module *mod, uint64_t addr) +{ + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw; +} + +static inline int in_init (const struct module *mod, uint64_t addr) { - return addr - (uint64_t) mod->module_init < mod->init_size; + return in_init_rx(mod, addr) || in_init_rw(mod, addr); +} + +static inline int +in_core_rx (const struct module *mod, uint64_t addr) +{ + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx; +} + +static inline int +in_core_rw (const struct 
module *mod, uint64_t addr) +{ + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw; } static inline int in_core (const struct module *mod, uint64_t addr) { - return addr - (uint64_t) mod->module_core < mod->core_size; + return in_core_rx(mod, addr) || in_core_rw(mod, addr); } static inline int @@ -691,7 +715,14 @@ do_reloc (struct module *mod, uint8_t r_ break; case RV_BDREL: - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); + if (in_init_rx(mod, val)) + val -= (uint64_t) mod->module_init_rx; + else if (in_init_rw(mod, val)) + val -= (uint64_t) mod->module_init_rw; + else if (in_core_rx(mod, val)) + val -= (uint64_t) mod->module_core_rx; + else if (in_core_rw(mod, val)) + val -= (uint64_t) mod->module_core_rw; break; case RV_LTV: @@ -825,15 +856,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, * addresses have been selected... */ uint64_t gp; - if (mod->core_size > MAX_LTOFF) + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF) /* * This takes advantage of fact that SHF_ARCH_SMALL gets allocated * at the end of the module. */ - gp = mod->core_size - MAX_LTOFF / 2; + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2; else - gp = mod->core_size / 2; - gp = (uint64_t) mod->module_core + ((gp + 7) & -8); + gp = (mod->core_size_rx + mod->core_size_rw) / 2; + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8); mod->arch.gp = gp; DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp); } diff -urNp linux-2.6.22.1/arch/ia64/kernel/ptrace.c linux-2.6.22.1/arch/ia64/kernel/ptrace.c --- linux-2.6.22.1/arch/ia64/kernel/ptrace.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/ia64/kernel/ptrace.c 2007-08-02 11:09:14.000000000 -0400 @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -1447,6 +1448,9 @@ sys_ptrace (long request, pid_t pid, uns if (pid == 1) /* no messing around with init! */ goto out_tsk; + if (gr_handle_ptrace(child, request)) + goto out_tsk; + if (request == PTRACE_ATTACH) { ret = ptrace_attach(child); goto out_tsk; diff -urNp linux-2.6.22.1/arch/ia64/kernel/sys_ia64.c linux-2.6.22.1/arch/ia64/kernel/sys_ia64.c --- linux-2.6.22.1/arch/ia64/kernel/sys_ia64.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/ia64/kernel/sys_ia64.c 2007-08-02 11:38:45.000000000 -0400 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil if (REGION_NUMBER(addr) == RGN_HPAGE) addr = 0; #endif + +#ifdef CONFIG_PAX_RANDMMAP + if ((mm->pax_flags & MF_PAX_RANDMMAP) && addr && filp) + addr = mm->free_area_cache; + else +#endif + if (!addr) addr = mm->free_area_cache; @@ -61,9 +68,9 @@ arch_get_unmapped_area (struct file *fil for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { - if (start_addr != TASK_UNMAPPED_BASE) { + if (start_addr != mm->mmap_base) { /* Start a new search --- just in case we missed some holes. 
*/ - addr = TASK_UNMAPPED_BASE; + addr = mm->mmap_base; goto full_search; } return -ENOMEM; diff -urNp linux-2.6.22.1/arch/ia64/mm/fault.c linux-2.6.22.1/arch/ia64/mm/fault.c --- linux-2.6.22.1/arch/ia64/mm/fault.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/ia64/mm/fault.c 2007-08-02 11:38:45.000000000 -0400 @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -72,6 +73,23 @@ mapped_kernel_page_is_present (unsigned return pte_present(pte); } +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 8; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk("???????? "); + else + printk("%08x ", c); + } + printk("\n"); +} +#endif + void __kprobes ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) { @@ -138,9 +156,23 @@ ia64_do_page_fault (unsigned long addres mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT) | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)); - if ((vma->vm_flags & mask) != mask) + if ((vma->vm_flags & mask) != mask) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) { + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip) + goto bad_area; + + up_read(&mm->mmap_sem); + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12); + do_exit(SIGKILL); + } +#endif + goto bad_area; + } + survive: /* * If for any reason at all we couldn't handle the fault, make diff -urNp linux-2.6.22.1/arch/ia64/mm/init.c linux-2.6.22.1/arch/ia64/mm/init.c --- linux-2.6.22.1/arch/ia64/mm/init.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/ia64/mm/init.c 2007-08-02 11:38:45.000000000 -0400 @@ -20,8 +20,8 @@ #include #include #include +#include -#include #include #include #include @@ -130,8 +130,21 @@ ia64_init_addr_space (void) vma->vm_mm = current->mm; vma->vm_start = current->thread.rbs_bot & PAGE_MASK; vma->vm_end = vma->vm_start + PAGE_SIZE; - vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7]; vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { + vma->vm_flags &= ~VM_EXEC; + +#ifdef CONFIG_PAX_MPROTECT + if (current->mm->pax_flags & MF_PAX_MPROTECT) + vma->vm_flags &= ~VM_MAYEXEC; +#endif + + } +#endif + + vma->vm_page_prot = vm_get_page_prot(VM_DATA_DEFAULT_FLAGS); down_write(&current->mm->mmap_sem); if (insert_vm_struct(current->mm, vma)) { up_write(&current->mm->mmap_sem); diff -urNp linux-2.6.22.1/arch/mips/kernel/binfmt_elfn32.c linux-2.6.22.1/arch/mips/kernel/binfmt_elfn32.c --- linux-2.6.22.1/arch/mips/kernel/binfmt_elfn32.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/mips/kernel/binfmt_elfn32.c 2007-08-02 11:38:45.000000000 -0400 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N #undef ELF_ET_DYN_BASE #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE ((current->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LEN ((current->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LEN ((current->thread.mflags & MF_32BIT_ADDR) ? 
27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #include #include #include diff -urNp linux-2.6.22.1/arch/mips/kernel/binfmt_elfo32.c linux-2.6.22.1/arch/mips/kernel/binfmt_elfo32.c --- linux-2.6.22.1/arch/mips/kernel/binfmt_elfo32.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/mips/kernel/binfmt_elfo32.c 2007-08-02 11:38:45.000000000 -0400 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N #undef ELF_ET_DYN_BASE #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE ((current->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LEN ((current->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LEN ((current->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #include #include #include diff -urNp linux-2.6.22.1/arch/mips/kernel/syscall.c linux-2.6.22.1/arch/mips/kernel/syscall.c --- linux-2.6.22.1/arch/mips/kernel/syscall.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/mips/kernel/syscall.c 2007-08-02 11:38:45.000000000 -0400 @@ -87,6 +87,11 @@ unsigned long arch_get_unmapped_area(str do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; + +#ifdef CONFIG_PAX_RANDMMAP + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP) || !filp) +#endif + if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); @@ -97,7 +102,7 @@ unsigned long arch_get_unmapped_area(str (!vmm || addr + len <= vmm->vm_start)) return addr; } - addr = TASK_UNMAPPED_BASE; + addr = current->mm->mmap_base; if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); else diff -urNp linux-2.6.22.1/arch/mips/mm/fault.c linux-2.6.22.1/arch/mips/mm/fault.c --- linux-2.6.22.1/arch/mips/mm/fault.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/mips/mm/fault.c 2007-08-02 11:38:45.000000000 -0400 @@ -26,6 +26,23 @@ #include #include /* For VMALLOC_END */ +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(void *pc) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk("???????? "); + else + printk("%08x ", c); + } + printk("\n"); +} +#endif + /* * This routine handles page faults. 
It determines the address, * and the problem, and then passes it off to one of the appropriate diff -urNp linux-2.6.22.1/arch/parisc/kernel/module.c linux-2.6.22.1/arch/parisc/kernel/module.c --- linux-2.6.22.1/arch/parisc/kernel/module.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/parisc/kernel/module.c 2007-08-02 11:38:45.000000000 -0400 @@ -73,16 +73,38 @@ /* three functions to determine where in the module core * or init pieces the location is */ +static inline int in_init_rx(struct module *me, void *loc) +{ + return (loc >= me->module_init_rx && + loc < (me->module_init_rx + me->init_size_rx)); +} + +static inline int in_init_rw(struct module *me, void *loc) +{ + return (loc >= me->module_init_rw && + loc < (me->module_init_rw + me->init_size_rw)); +} + static inline int in_init(struct module *me, void *loc) { - return (loc >= me->module_init && - loc <= (me->module_init + me->init_size)); + return in_init_rx(me, loc) || in_init_rw(me, loc); +} + +static inline int in_core_rx(struct module *me, void *loc) +{ + return (loc >= me->module_core_rx && + loc < (me->module_core_rx + me->core_size_rx)); +} + +static inline int in_core_rw(struct module *me, void *loc) +{ + return (loc >= me->module_core_rw && + loc < (me->module_core_rw + me->core_size_rw)); } static inline int in_core(struct module *me, void *loc) { - return (loc >= me->module_core && - loc <= (me->module_core + me->core_size)); + return in_core_rx(me, loc) || in_core_rw(me, loc); } static inline int in_local(struct module *me, void *loc) @@ -296,21 +318,21 @@ int module_frob_arch_sections(CONST Elf_ } /* align things a bit */ - me->core_size = ALIGN(me->core_size, 16); - me->arch.got_offset = me->core_size; - me->core_size += gots * sizeof(struct got_entry); - - me->core_size = ALIGN(me->core_size, 16); - me->arch.fdesc_offset = me->core_size; - me->core_size += fdescs * sizeof(Elf_Fdesc); - - me->core_size = ALIGN(me->core_size, 16); - me->arch.stub_offset = me->core_size; - me->core_size += stubs * sizeof(struct stub_entry); - - me->init_size = ALIGN(me->init_size, 16); - me->arch.init_stub_offset = me->init_size; - me->init_size += init_stubs * sizeof(struct stub_entry); + me->core_size_rw = ALIGN(me->core_size_rw, 16); + me->arch.got_offset = me->core_size_rw; + me->core_size_rw += gots * sizeof(struct got_entry); + + me->core_size_rw = ALIGN(me->core_size_rw, 16); + me->arch.fdesc_offset = me->core_size_rw; + me->core_size_rw += fdescs * sizeof(Elf_Fdesc); + + me->core_size_rx = ALIGN(me->core_size_rx, 16); + me->arch.stub_offset = me->core_size_rx; + me->core_size_rx += stubs * sizeof(struct stub_entry); + + me->init_size_rx = ALIGN(me->init_size_rx, 16); + me->arch.init_stub_offset = me->init_size_rx; + me->init_size_rx += init_stubs * sizeof(struct stub_entry); me->arch.got_max = gots; me->arch.fdesc_max = fdescs; @@ -330,7 +352,7 @@ static Elf64_Word get_got(struct module BUG_ON(value == 0); - got = me->module_core + me->arch.got_offset; + got = me->module_core_rw + me->arch.got_offset; for (i = 0; got[i].addr; i++) if (got[i].addr == value) goto out; @@ -348,7 +370,7 @@ static Elf64_Word get_got(struct module #ifdef CONFIG_64BIT static Elf_Addr get_fdesc(struct module *me, unsigned long value) { - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset; if (!value) { printk(KERN_ERR "%s: zero OPD requested!\n", me->name); @@ -366,7 +388,7 @@ static Elf_Addr get_fdesc(struct module /* Create new one */ fdesc->addr = value; - 
fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; return (Elf_Addr)fdesc; } #endif /* CONFIG_64BIT */ @@ -386,12 +408,12 @@ static Elf_Addr get_stub(struct module * if(init_section) { i = me->arch.init_stub_count++; BUG_ON(me->arch.init_stub_count > me->arch.init_stub_max); - stub = me->module_init + me->arch.init_stub_offset + + stub = me->module_init_rx + me->arch.init_stub_offset + i * sizeof(struct stub_entry); } else { i = me->arch.stub_count++; BUG_ON(me->arch.stub_count > me->arch.stub_max); - stub = me->module_core + me->arch.stub_offset + + stub = me->module_core_rx + me->arch.stub_offset + i * sizeof(struct stub_entry); } @@ -759,7 +781,7 @@ register_unwind_table(struct module *me, table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; end = table + sechdrs[me->arch.unwind_section].sh_size; - gp = (Elf_Addr)me->module_core + me->arch.got_offset; + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", me->arch.unwind_section, table, end, gp); diff -urNp linux-2.6.22.1/arch/parisc/kernel/sys_parisc.c linux-2.6.22.1/arch/parisc/kernel/sys_parisc.c --- linux-2.6.22.1/arch/parisc/kernel/sys_parisc.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/parisc/kernel/sys_parisc.c 2007-08-02 11:38:46.000000000 -0400 @@ -111,7 +111,7 @@ unsigned long arch_get_unmapped_area(str if (flags & MAP_FIXED) return addr; if (!addr) - addr = TASK_UNMAPPED_BASE; + addr = current->mm->mmap_base; if (filp) { addr = get_shared_area(filp->f_mapping, addr, len, pgoff); diff -urNp linux-2.6.22.1/arch/parisc/kernel/traps.c linux-2.6.22.1/arch/parisc/kernel/traps.c --- linux-2.6.22.1/arch/parisc/kernel/traps.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/parisc/kernel/traps.c 2007-08-02 11:38:46.000000000 -0400 @@ -712,9 +712,7 @@ void handle_interruption(int code, struc down_read(&current->mm->mmap_sem); vma = find_vma(current->mm,regs->iaoq[0]); - if (vma && (regs->iaoq[0] >= vma->vm_start) - && (vma->vm_flags & VM_EXEC)) { - + if (vma && (regs->iaoq[0] >= vma->vm_start)) { fault_address = regs->iaoq[0]; fault_space = regs->iasq[0]; diff -urNp linux-2.6.22.1/arch/parisc/mm/fault.c linux-2.6.22.1/arch/parisc/mm/fault.c --- linux-2.6.22.1/arch/parisc/mm/fault.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/parisc/mm/fault.c 2007-08-02 11:38:46.000000000 -0400 @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include #include @@ -53,7 +55,7 @@ DEFINE_PER_CPU(struct exception_data, ex static unsigned long parisc_acctyp(unsigned long code, unsigned int inst) { - if (code == 6 || code == 16) + if (code == 6 || code == 7 || code == 16) return VM_EXEC; switch (inst & 0xf0000000) { @@ -139,6 +141,116 @@ parisc_acctyp(unsigned long code, unsign } #endif +#ifdef CONFIG_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address) + * + * returns 1 when task should be killed + * 2 when rt_sigreturn trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: unpatched PLT emulation */ + unsigned int bl, depwi; + + err = get_user(bl, (unsigned int *)instruction_pointer(regs)); + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4)); + + if (err) + break; + + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) { + unsigned int 
ldw, bv, ldw2, addr = instruction_pointer(regs)-12; + + err = get_user(ldw, (unsigned int *)addr); + err |= get_user(bv, (unsigned int *)(addr+4)); + err |= get_user(ldw2, (unsigned int *)(addr+8)); + + if (err) + break; + + if (ldw == 0x0E801096U && + bv == 0xEAC0C000U && + ldw2 == 0x0E881095U) + { + unsigned int resolver, map; + + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8)); + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12)); + if (err) + break; + + regs->gr[20] = instruction_pointer(regs)+8; + regs->gr[21] = map; + regs->gr[22] = resolver; + regs->iaoq[0] = resolver | 3UL; + regs->iaoq[1] = regs->iaoq[0] + 4; + return 3; + } + } + } while (0); +#endif + +#ifdef CONFIG_PAX_EMUTRAMP + +#ifndef CONFIG_PAX_EMUSIGRT + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) + return 1; +#endif + + do { /* PaX: rt_sigreturn emulation */ + unsigned int ldi1, ldi2, bel, nop; + + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs)); + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4)); + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8)); + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12)); + + if (err) + break; + + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) && + ldi2 == 0x3414015AU && + bel == 0xE4008200U && + nop == 0x08000240U) + { + regs->gr[25] = (ldi1 & 2) >> 1; + regs->gr[20] = __NR_rt_sigreturn; + regs->gr[31] = regs->iaoq[1] + 16; + regs->sr[0] = regs->iasq[1]; + regs->iaoq[0] = 0x100UL; + regs->iaoq[1] = regs->iaoq[0] + 4; + regs->iasq[0] = regs->sr[2]; + regs->iasq[1] = regs->sr[2]; + return 2; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk("???????? "); + else + printk("%08x ", c); + } + printk("\n"); +} +#endif + void do_page_fault(struct pt_regs *regs, unsigned long code, unsigned long address) { @@ -164,8 +276,33 @@ good_area: acc_type = parisc_acctyp(code,regs->iir); - if ((vma->vm_flags & acc_type) != acc_type) + if ((vma->vm_flags & acc_type) != acc_type) { + +#ifdef CONFIG_PAX_PAGEEXEC + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) && + (address & ~3UL) == instruction_pointer(regs)) + { + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 3: + return; +#endif + +#ifdef CONFIG_PAX_EMUTRAMP + case 2: + return; +#endif + + } + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]); + do_exit(SIGKILL); + } +#endif + goto bad_area; + } /* * If for any reason at all we couldn't handle the fault, make diff -urNp linux-2.6.22.1/arch/powerpc/kernel/module_32.c linux-2.6.22.1/arch/powerpc/kernel/module_32.c --- linux-2.6.22.1/arch/powerpc/kernel/module_32.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/powerpc/kernel/module_32.c 2007-08-02 11:38:46.000000000 -0400 @@ -126,7 +126,7 @@ int module_frob_arch_sections(Elf32_Ehdr me->arch.core_plt_section = i; } if (!me->arch.core_plt_section || !me->arch.init_plt_section) { - printk("Module doesn't contain .plt or .init.plt sections.\n"); + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name); return -ENOEXEC; } @@ -167,11 +167,16 @@ static uint32_t do_plt_call(void *locati DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); /* Init, or core PLT? 
*/ - if (location >= mod->module_core - && location < mod->module_core + mod->core_size) + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) || + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw)) entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; - else + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) || + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw)) entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; + else { + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name); + return ~0UL; + } /* Find this entry, or if that fails, the next avail. entry */ while (entry->jump[0]) { diff -urNp linux-2.6.22.1/arch/powerpc/kernel/signal_32.c linux-2.6.22.1/arch/powerpc/kernel/signal_32.c --- linux-2.6.22.1/arch/powerpc/kernel/signal_32.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/powerpc/kernel/signal_32.c 2007-08-02 11:38:46.000000000 -0400 @@ -758,7 +758,7 @@ static int handle_rt_signal(unsigned lon /* Save user registers on the stack */ frame = &rt_sf->uc.uc_mcontext; - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { if (save_user_regs(regs, frame, 0)) goto badframe; regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp; diff -urNp linux-2.6.22.1/arch/powerpc/kernel/signal_64.c linux-2.6.22.1/arch/powerpc/kernel/signal_64.c --- linux-2.6.22.1/arch/powerpc/kernel/signal_64.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/powerpc/kernel/signal_64.c 2007-08-02 11:38:46.000000000 -0400 @@ -400,7 +400,7 @@ static int setup_rt_frame(int signr, str current->thread.fpscr.val = 0; /* Set up to return from userspace. 
*/ - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp; } else { err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); diff -urNp linux-2.6.22.1/arch/powerpc/kernel/vdso.c linux-2.6.22.1/arch/powerpc/kernel/vdso.c --- linux-2.6.22.1/arch/powerpc/kernel/vdso.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/powerpc/kernel/vdso.c 2007-08-02 11:38:46.000000000 -0400 @@ -199,7 +199,7 @@ int arch_setup_additional_pages(struct l vdso_base = VDSO32_MBASE; #endif - current->mm->context.vdso_base = 0; + current->mm->context.vdso_base = ~0UL; /* vDSO has a problem and was disabled, just don't "enable" it for the * process @@ -216,7 +216,7 @@ int arch_setup_additional_pages(struct l */ down_write(&mm->mmap_sem); vdso_base = get_unmapped_area(NULL, vdso_base, - vdso_pages << PAGE_SHIFT, 0, 0); + vdso_pages << PAGE_SHIFT, 0, MAP_PRIVATE | MAP_EXECUTABLE); if (IS_ERR_VALUE(vdso_base)) { rc = vdso_base; goto fail_mmapsem; diff -urNp linux-2.6.22.1/arch/powerpc/mm/fault.c linux-2.6.22.1/arch/powerpc/mm/fault.c --- linux-2.6.22.1/arch/powerpc/mm/fault.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/powerpc/mm/fault.c 2007-08-02 11:38:46.000000000 -0400 @@ -29,6 +29,12 @@ #include #include #include +#include +#include +#include +#include +#include +#include #include #include @@ -62,6 +68,364 @@ static inline int notify_page_fault(stru } #endif +#ifdef CONFIG_PAX_EMUSIGRT +void pax_syscall_close(struct vm_area_struct *vma) +{ + vma->vm_mm->call_syscall = 0UL; +} + +static struct page *pax_syscall_nopage(struct vm_area_struct *vma, unsigned long address, int *type) +{ + struct page *page; + unsigned int *kaddr; + + page = alloc_page(GFP_HIGHUSER); + if (!page) + return NOPAGE_OOM; + + kaddr = kmap(page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x44000002U; /* sc */ + __flush_dcache_icache(kaddr); + kunmap(page); + if (type) + *type = VM_FAULT_MAJOR; + return page; +} + +static struct vm_operations_struct pax_vm_ops = { + .close = pax_syscall_close, + .nopage = pax_syscall_nopage, +}; + +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) +{ + int ret; + + memset(vma, 0, sizeof(*vma)); + vma->vm_mm = current->mm; + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_ops = &pax_vm_ops; + + ret = insert_vm_struct(current->mm, vma); + if (ret) + return ret; + + ++current->mm->total_vm; + return 0; +} +#endif + +#ifdef CONFIG_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (regs->nip = fault address) + * + * returns 1 when task should be killed + * 2 when patched GOT trampoline was detected + * 3 when patched PLT trampoline was detected + * 4 when unpatched PLT trampoline was detected + * 5 when sigreturn trampoline was detected + * 6 when rt_sigreturn trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#if defined(CONFIG_PAX_EMUPLT) || defined(CONFIG_PAX_EMUSIGRT) + int err; +#endif + +#ifdef CONFIG_PAX_EMUPLT + do { /* PaX: patched GOT emulation */ + unsigned int blrl; + + err = get_user(blrl, (unsigned int *)regs->nip); + + if (!err && blrl == 0x4E800021U) { + unsigned long temp = regs->nip; + + regs->nip = regs->link & 0xFFFFFFFCUL; + regs->link = temp + 4UL; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation 
#1 */ + unsigned int b; + + err = get_user(b, (unsigned int *)regs->nip); + + if (!err && (b & 0xFC000003U) == 0x48000000U) { + regs->nip += (((b | 0xFC000000UL) ^ 0x02000000UL) + 0x02000000UL); + return 3; + } + } while (0); + + do { /* PaX: unpatched PLT emulation #1 */ + unsigned int li, b; + + err = get_user(li, (unsigned int *)regs->nip); + err |= get_user(b, (unsigned int *)(regs->nip+4)); + + if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) { + unsigned int rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr; + unsigned long addr = b | 0xFC000000UL; + + addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL); + err = get_user(rlwinm, (unsigned int *)addr); + err |= get_user(add, (unsigned int *)(addr+4)); + err |= get_user(li2, (unsigned int *)(addr+8)); + err |= get_user(addis2, (unsigned int *)(addr+12)); + err |= get_user(mtctr, (unsigned int *)(addr+16)); + err |= get_user(li3, (unsigned int *)(addr+20)); + err |= get_user(addis3, (unsigned int *)(addr+24)); + err |= get_user(bctr, (unsigned int *)(addr+28)); + + if (err) + break; + + if (rlwinm == 0x556C083CU && + add == 0x7D6C5A14U && + (li2 & 0xFFFF0000U) == 0x39800000U && + (addis2 & 0xFFFF0000U) == 0x3D8C0000U && + mtctr == 0x7D8903A6U && + (li3 & 0xFFFF0000U) == 0x39800000U && + (addis3 & 0xFFFF0000U) == 0x3D8C0000U && + bctr == 0x4E800420U) + { + regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16; + regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->ctr += (addis2 & 0xFFFFU) << 16; + regs->nip = regs->ctr; + return 4; + } + } + } while (0); + +#if 0 + do { /* PaX: unpatched PLT emulation #2 */ + unsigned int lis, lwzu, b, bctr; + + err = get_user(lis, (unsigned int *)regs->nip); + err |= get_user(lwzu, (unsigned int *)(regs->nip+4)); + err |= get_user(b, (unsigned int *)(regs->nip+8)); + err |= get_user(bctr, (unsigned int *)(regs->nip+12)); + + if (err) + break; + + if ((lis & 0xFFFF0000U) == 0x39600000U && + (lwzu & 0xU) == 0xU && + (b & 0xFC000003U) == 0x48000000U && + bctr == 0x4E800420U) + { + unsigned int addis, addi, rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr; + unsigned long addr = b | 0xFC000000UL; + + addr = regs->nip + 12 + ((addr ^ 0x02000000UL) + 0x02000000UL); + err = get_user(addis, (unsigned int*)addr); + err |= get_user(addi, (unsigned int*)(addr+4)); + err |= get_user(rlwinm, (unsigned int*)(addr+8)); + err |= get_user(add, (unsigned int*)(addr+12)); + err |= get_user(li2, (unsigned int*)(addr+16)); + err |= get_user(addis2, (unsigned int*)(addr+20)); + err |= get_user(mtctr, (unsigned int*)(addr+24)); + err |= get_user(li3, (unsigned int*)(addr+28)); + err |= get_user(addis3, (unsigned int*)(addr+32)); + err |= get_user(bctr, (unsigned int*)(addr+36)); + + if (err) + break; + + if ((addis & 0xFFFF0000U) == 0x3D6B0000U && + (addi & 0xFFFF0000U) == 0x396B0000U && + rlwinm == 0x556C083CU && + add == 0x7D6C5A14U && + (li2 & 0xFFFF0000U) == 0x39800000U && + (addis2 & 0xFFFF0000U) == 0x3D8C0000U && + mtctr == 0x7D8903A6U && + (li3 & 0xFFFF0000U) == 0x39800000U && + (addis3 & 0xFFFF0000U) == 0x3D8C0000U && + bctr == 0x4E800420U) + { + regs->gpr[PT_R11] = + regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16; + regs->ctr = (((li2 | 
0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->ctr += (addis2 & 0xFFFFU) << 16; + regs->nip = regs->ctr; + return 4; + } + } + } while (0); +#endif + + do { /* PaX: unpatched PLT emulation #3 */ + unsigned int li, b; + + err = get_user(li, (unsigned int *)regs->nip); + err |= get_user(b, (unsigned int *)(regs->nip+4)); + + if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) { + unsigned int addis, lwz, mtctr, bctr; + unsigned long addr = b | 0xFC000000UL; + + addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL); + err = get_user(addis, (unsigned int *)addr); + err |= get_user(lwz, (unsigned int *)(addr+4)); + err |= get_user(mtctr, (unsigned int *)(addr+8)); + err |= get_user(bctr, (unsigned int *)(addr+12)); + + if (err) + break; + + if ((addis & 0xFFFF0000U) == 0x3D6B0000U && + (lwz & 0xFFFF0000U) == 0x816B0000U && + mtctr == 0x7D6903A6U && + bctr == 0x4E800420U) + { + unsigned int r11; + + addr = (addis << 16) + (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + addr += (((lwz | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + + err = get_user(r11, (unsigned int *)addr); + if (err) + break; + + regs->gpr[PT_R11] = r11; + regs->ctr = r11; + regs->nip = r11; + return 4; + } + } + } while (0); +#endif + +#ifdef CONFIG_PAX_EMUSIGRT + do { /* PaX: sigreturn emulation */ + unsigned int li, sc; + + err = get_user(li, (unsigned int *)regs->nip); + err |= get_user(sc, (unsigned int *)(regs->nip+4)); + + if (!err && li == 0x38000000U + __NR_sigreturn && sc == 0x44000002U) { + struct vm_area_struct *vma; + unsigned long call_syscall; + + down_read(¤t->mm->mmap_sem); + call_syscall = current->mm->call_syscall; + up_read(¤t->mm->mmap_sem); + if (likely(call_syscall)) + goto emulate; + + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + + down_write(¤t->mm->mmap_sem); + if (current->mm->call_syscall) { + call_syscall = current->mm->call_syscall; + up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_syscall & ~PAGE_MASK)) { + up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + if (pax_insert_vma(vma, call_syscall)) { + up_write(¤t->mm->mmap_sem); + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + current->mm->call_syscall = call_syscall; + up_write(¤t->mm->mmap_sem); + +emulate: + regs->gpr[PT_R0] = __NR_sigreturn; + regs->nip = call_syscall; + return 5; + } + } while (0); + + do { /* PaX: rt_sigreturn emulation */ + unsigned int li, sc; + + err = get_user(li, (unsigned int *)regs->nip); + err |= get_user(sc, (unsigned int *)(regs->nip+4)); + + if (!err && li == 0x38000000U + __NR_rt_sigreturn && sc == 0x44000002U) { + struct vm_area_struct *vma; + unsigned int call_syscall; + + down_read(¤t->mm->mmap_sem); + call_syscall = current->mm->call_syscall; + up_read(¤t->mm->mmap_sem); + if (likely(call_syscall)) + goto rt_emulate; + + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + + down_write(¤t->mm->mmap_sem); + if (current->mm->call_syscall) { + call_syscall = current->mm->call_syscall; + up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + goto rt_emulate; + } + + call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_syscall & ~PAGE_MASK)) { + up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + if (pax_insert_vma(vma, call_syscall)) { + 
up_write(¤t->mm->mmap_sem); + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + current->mm->call_syscall = call_syscall; + up_write(¤t->mm->mmap_sem); + +rt_emulate: + regs->gpr[PT_R0] = __NR_rt_sigreturn; + regs->nip = call_syscall; + return 6; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk("???????? "); + else + printk("%08x ", c); + } + printk("\n"); +} +#endif + /* * Check whether the instruction at regs->nip is a store using * an update addressing form which will update r1. @@ -157,7 +521,7 @@ int __kprobes do_page_fault(struct pt_re * indicate errors in DSISR but can validly be set in SRR1. */ if (trap == 0x400) - error_code &= 0x48200000; + error_code &= 0x58200000; else is_write = error_code & DSISR_ISSTORE; #else @@ -355,6 +719,37 @@ bad_area: bad_area_nosemaphore: /* User mode accesses cause a SIGSEGV */ if (user_mode(regs)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (mm->pax_flags & MF_PAX_PAGEEXEC) { +#ifdef CONFIG_PPC64 + if (is_exec && (error_code & DSISR_PROTFAULT)) { +#else + if (is_exec && regs->nip == address) { +#endif + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + case 4: + return 0; +#endif + +#ifdef CONFIG_PAX_EMUSIGRT + case 5: + case 6: + return 0; +#endif + + } + + pax_report_fault(regs, (void*)regs->nip, (void*)regs->gpr[PT_R1]); + do_exit(SIGKILL); + } + } +#endif + _exception(SIGSEGV, regs, code, address); return 0; } diff -urNp linux-2.6.22.1/arch/powerpc/mm/mmap.c linux-2.6.22.1/arch/powerpc/mm/mmap.c --- linux-2.6.22.1/arch/powerpc/mm/mmap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/powerpc/mm/mmap.c 2007-08-02 11:38:46.000000000 -0400 @@ -75,10 +75,22 @@ void arch_pick_mmap_layout(struct mm_str */ if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff -urNp linux-2.6.22.1/arch/ppc/mm/fault.c linux-2.6.22.1/arch/ppc/mm/fault.c --- linux-2.6.22.1/arch/ppc/mm/fault.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/ppc/mm/fault.c 2007-08-02 11:38:46.000000000 -0400 @@ -25,6 +25,11 @@ #include #include #include +#include +#include +#include +#include +#include #include #include @@ -48,6 +53,364 @@ unsigned long pte_misses; /* updated by unsigned long pte_errors; /* updated by do_page_fault() */ unsigned int probingmem; +#ifdef CONFIG_PAX_EMUSIGRT +void pax_syscall_close(struct vm_area_struct *vma) +{ + vma->vm_mm->call_syscall = 0UL; +} + +static struct page *pax_syscall_nopage(struct vm_area_struct *vma, unsigned long address, int *type) +{ + struct page *page; + unsigned int *kaddr; + + page = alloc_page(GFP_HIGHUSER); + if (!page) + return NOPAGE_OOM; + + kaddr = kmap(page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x44000002U; /* sc */ + __flush_dcache_icache(kaddr); + kunmap(page); + if (type) + *type = VM_FAULT_MAJOR; + return page; +} + +static struct vm_operations_struct pax_vm_ops = { + .close = 
pax_syscall_close, + .nopage = pax_syscall_nopage, +}; + +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) +{ + int ret; + + memset(vma, 0, sizeof(*vma)); + vma->vm_mm = current->mm; + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_ops = &pax_vm_ops; + + ret = insert_vm_struct(current->mm, vma); + if (ret) + return ret; + + ++current->mm->total_vm; + return 0; +} +#endif + +#ifdef CONFIG_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (regs->nip = fault address) + * + * returns 1 when task should be killed + * 2 when patched GOT trampoline was detected + * 3 when patched PLT trampoline was detected + * 4 when unpatched PLT trampoline was detected + * 5 when sigreturn trampoline was detected + * 6 when rt_sigreturn trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#if defined(CONFIG_PAX_EMUPLT) || defined(CONFIG_PAX_EMUSIGRT) + int err; +#endif + +#ifdef CONFIG_PAX_EMUPLT + do { /* PaX: patched GOT emulation */ + unsigned int blrl; + + err = get_user(blrl, (unsigned int *)regs->nip); + + if (!err && blrl == 0x4E800021U) { + unsigned long temp = regs->nip; + + regs->nip = regs->link & 0xFFFFFFFCUL; + regs->link = temp + 4UL; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #1 */ + unsigned int b; + + err = get_user(b, (unsigned int *)regs->nip); + + if (!err && (b & 0xFC000003U) == 0x48000000U) { + regs->nip += (((b | 0xFC000000UL) ^ 0x02000000UL) + 0x02000000UL); + return 3; + } + } while (0); + + do { /* PaX: unpatched PLT emulation #1 */ + unsigned int li, b; + + err = get_user(li, (unsigned int *)regs->nip); + err |= get_user(b, (unsigned int *)(regs->nip+4)); + + if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) { + unsigned int rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr; + unsigned long addr = b | 0xFC000000UL; + + addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL); + err = get_user(rlwinm, (unsigned int *)addr); + err |= get_user(add, (unsigned int *)(addr+4)); + err |= get_user(li2, (unsigned int *)(addr+8)); + err |= get_user(addis2, (unsigned int *)(addr+12)); + err |= get_user(mtctr, (unsigned int *)(addr+16)); + err |= get_user(li3, (unsigned int *)(addr+20)); + err |= get_user(addis3, (unsigned int *)(addr+24)); + err |= get_user(bctr, (unsigned int *)(addr+28)); + + if (err) + break; + + if (rlwinm == 0x556C083CU && + add == 0x7D6C5A14U && + (li2 & 0xFFFF0000U) == 0x39800000U && + (addis2 & 0xFFFF0000U) == 0x3D8C0000U && + mtctr == 0x7D8903A6U && + (li3 & 0xFFFF0000U) == 0x39800000U && + (addis3 & 0xFFFF0000U) == 0x3D8C0000U && + bctr == 0x4E800420U) + { + regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16; + regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->ctr += (addis2 & 0xFFFFU) << 16; + regs->nip = regs->ctr; + return 4; + } + } + } while (0); + +#if 0 + do { /* PaX: unpatched PLT emulation #2 */ + unsigned int lis, lwzu, b, bctr; + + err = get_user(lis, (unsigned int *)regs->nip); + err |= get_user(lwzu, (unsigned int *)(regs->nip+4)); + err |= get_user(b, (unsigned int *)(regs->nip+8)); + err |= get_user(bctr, (unsigned int *)(regs->nip+12)); + + if (err) + break; + + if ((lis & 0xFFFF0000U) == 0x39600000U && + 
(lwzu & 0xU) == 0xU && + (b & 0xFC000003U) == 0x48000000U && + bctr == 0x4E800420U) + { + unsigned int addis, addi, rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr; + unsigned long addr = b | 0xFC000000UL; + + addr = regs->nip + 12 + ((addr ^ 0x02000000UL) + 0x02000000UL); + err = get_user(addis, (unsigned int*)addr); + err |= get_user(addi, (unsigned int*)(addr+4)); + err |= get_user(rlwinm, (unsigned int*)(addr+8)); + err |= get_user(add, (unsigned int*)(addr+12)); + err |= get_user(li2, (unsigned int*)(addr+16)); + err |= get_user(addis2, (unsigned int*)(addr+20)); + err |= get_user(mtctr, (unsigned int*)(addr+24)); + err |= get_user(li3, (unsigned int*)(addr+28)); + err |= get_user(addis3, (unsigned int*)(addr+32)); + err |= get_user(bctr, (unsigned int*)(addr+36)); + + if (err) + break; + + if ((addis & 0xFFFF0000U) == 0x3D6B0000U && + (addi & 0xFFFF0000U) == 0x396B0000U && + rlwinm == 0x556C083CU && + add == 0x7D6C5A14U && + (li2 & 0xFFFF0000U) == 0x39800000U && + (addis2 & 0xFFFF0000U) == 0x3D8C0000U && + mtctr == 0x7D8903A6U && + (li3 & 0xFFFF0000U) == 0x39800000U && + (addis3 & 0xFFFF0000U) == 0x3D8C0000U && + bctr == 0x4E800420U) + { + regs->gpr[PT_R11] = + regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16; + regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->ctr += (addis2 & 0xFFFFU) << 16; + regs->nip = regs->ctr; + return 4; + } + } + } while (0); +#endif + + do { /* PaX: unpatched PLT emulation #3 */ + unsigned int li, b; + + err = get_user(li, (unsigned int *)regs->nip); + err |= get_user(b, (unsigned int *)(regs->nip+4)); + + if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) { + unsigned int addis, lwz, mtctr, bctr; + unsigned long addr = b | 0xFC000000UL; + + addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL); + err = get_user(addis, (unsigned int *)addr); + err |= get_user(lwz, (unsigned int *)(addr+4)); + err |= get_user(mtctr, (unsigned int *)(addr+8)); + err |= get_user(bctr, (unsigned int *)(addr+12)); + + if (err) + break; + + if ((addis & 0xFFFF0000U) == 0x3D6B0000U && + (lwz & 0xFFFF0000U) == 0x816B0000U && + mtctr == 0x7D6903A6U && + bctr == 0x4E800420U) + { + unsigned int r11; + + addr = (addis << 16) + (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + addr += (((lwz | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + + err = get_user(r11, (unsigned int *)addr); + if (err) + break; + + regs->gpr[PT_R11] = r11; + regs->ctr = r11; + regs->nip = r11; + return 4; + } + } + } while (0); +#endif + +#ifdef CONFIG_PAX_EMUSIGRT + do { /* PaX: sigreturn emulation */ + unsigned int li, sc; + + err = get_user(li, (unsigned int *)regs->nip); + err |= get_user(sc, (unsigned int *)(regs->nip+4)); + + if (!err && li == 0x38000000U + __NR_sigreturn && sc == 0x44000002U) { + struct vm_area_struct *vma; + unsigned long call_syscall; + + down_read(¤t->mm->mmap_sem); + call_syscall = current->mm->call_syscall; + up_read(¤t->mm->mmap_sem); + if (likely(call_syscall)) + goto emulate; + + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + + down_write(¤t->mm->mmap_sem); + if (current->mm->call_syscall) { + call_syscall = current->mm->call_syscall; + up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_syscall & 
~PAGE_MASK)) { + up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + if (pax_insert_vma(vma, call_syscall)) { + up_write(¤t->mm->mmap_sem); + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + current->mm->call_syscall = call_syscall; + up_write(¤t->mm->mmap_sem); + +emulate: + regs->gpr[PT_R0] = __NR_sigreturn; + regs->nip = call_syscall; + return 5; + } + } while (0); + + do { /* PaX: rt_sigreturn emulation */ + unsigned int li, sc; + + err = get_user(li, (unsigned int *)regs->nip); + err |= get_user(sc, (unsigned int *)(regs->nip+4)); + + if (!err && li == 0x38000000U + __NR_rt_sigreturn && sc == 0x44000002U) { + struct vm_area_struct *vma; + unsigned int call_syscall; + + down_read(¤t->mm->mmap_sem); + call_syscall = current->mm->call_syscall; + up_read(¤t->mm->mmap_sem); + if (likely(call_syscall)) + goto rt_emulate; + + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + + down_write(¤t->mm->mmap_sem); + if (current->mm->call_syscall) { + call_syscall = current->mm->call_syscall; + up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + goto rt_emulate; + } + + call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_syscall & ~PAGE_MASK)) { + up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + if (pax_insert_vma(vma, call_syscall)) { + up_write(¤t->mm->mmap_sem); + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + current->mm->call_syscall = call_syscall; + up_write(¤t->mm->mmap_sem); + +rt_emulate: + regs->gpr[PT_R0] = __NR_rt_sigreturn; + regs->nip = call_syscall; + return 6; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk("???????? "); + else + printk("%08x ", c); + } + printk("\n"); +} +#endif + /* * Check whether the instruction at regs->nip is a store using * an update addressing form which will update r1. @@ -108,7 +471,7 @@ int do_page_fault(struct pt_regs *regs, * indicate errors in DSISR but can validly be set in SRR1. */ if (TRAP(regs) == 0x400) - error_code &= 0x48200000; + error_code &= 0x58200000; else is_write = error_code & 0x02000000; #endif /* CONFIG_4xx || CONFIG_BOOKE */ @@ -203,15 +566,14 @@ good_area: pte_t *ptep; pmd_t *pmdp; -#if 0 +#if 1 /* It would be nice to actually enforce the VM execute permission on CPUs which can do so, but far too much stuff in userspace doesn't get the permissions right, so we let any page be executed for now. */ if (! (vma->vm_flags & VM_EXEC)) goto bad_area; -#endif - +#else /* Since 4xx/Book-E supports per-page execute permission, * we lazily flush dcache to icache. 
*/ ptep = NULL; @@ -234,6 +596,7 @@ good_area: pte_unmap_unlock(ptep, ptl); } #endif +#endif /* a read */ } else { /* protection fault */ @@ -279,6 +642,33 @@ bad_area: /* User mode accesses cause a SIGSEGV */ if (user_mode(regs)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (mm->pax_flags & MF_PAX_PAGEEXEC) { + if ((TRAP(regs) == 0x400) && (regs->nip == address)) { + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + case 4: + return 0; +#endif + +#ifdef CONFIG_PAX_EMUSIGRT + case 5: + case 6: + return 0; +#endif + + } + + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[1]); + do_exit(SIGKILL); + } + } +#endif + _exception(SIGSEGV, regs, code, address); return 0; } diff -urNp linux-2.6.22.1/arch/s390/kernel/module.c linux-2.6.22.1/arch/s390/kernel/module.c --- linux-2.6.22.1/arch/s390/kernel/module.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/s390/kernel/module.c 2007-08-02 11:38:46.000000000 -0400 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, /* Increase core size by size of got & plt and set start offsets for got and plt. */ - me->core_size = ALIGN(me->core_size, 4); - me->arch.got_offset = me->core_size; - me->core_size += me->arch.got_size; - me->arch.plt_offset = me->core_size; - me->core_size += me->arch.plt_size; + me->core_size_rw = ALIGN(me->core_size_rw, 4); + me->arch.got_offset = me->core_size_rw; + me->core_size_rw += me->arch.got_size; + me->arch.plt_offset = me->core_size_rx; + me->core_size_rx += me->arch.plt_size; return 0; } @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base if (info->got_initialized == 0) { Elf_Addr *gotent; - gotent = me->module_core + me->arch.got_offset + + gotent = me->module_core_rw + me->arch.got_offset + info->got_offset; *gotent = val; info->got_initialized = 1; @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base else if (r_type == R_390_GOTENT || r_type == R_390_GOTPLTENT) *(unsigned int *) loc = - (val + (Elf_Addr) me->module_core - loc) >> 1; + (val + (Elf_Addr) me->module_core_rw - loc) >> 1; else if (r_type == R_390_GOT64 || r_type == R_390_GOTPLT64) *(unsigned long *) loc = val; @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ if (info->plt_initialized == 0) { unsigned int *ip; - ip = me->module_core + me->arch.plt_offset + + ip = me->module_core_rx + me->arch.plt_offset + info->plt_offset; #ifndef CONFIG_64BIT ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ @@ -316,7 +316,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base val = me->arch.plt_offset - me->arch.got_offset + info->plt_offset + rela->r_addend; else - val = (Elf_Addr) me->module_core + + val = (Elf_Addr) me->module_core_rx + me->arch.plt_offset + info->plt_offset + rela->r_addend - loc; if (r_type == R_390_PLT16DBL) @@ -336,7 +336,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base case R_390_GOTOFF32: /* 32 bit offset to GOT. */ case R_390_GOTOFF64: /* 64 bit offset to GOT. */ val = val + rela->r_addend - - ((Elf_Addr) me->module_core + me->arch.got_offset); + ((Elf_Addr) me->module_core_rw + me->arch.got_offset); if (r_type == R_390_GOTOFF16) *(unsigned short *) loc = val; else if (r_type == R_390_GOTOFF32) @@ -346,7 +346,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base break; case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. 
*/ - val = (Elf_Addr) me->module_core + me->arch.got_offset + + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset + rela->r_addend - loc; if (r_type == R_390_GOTPC) *(unsigned int *) loc = val; diff -urNp linux-2.6.22.1/arch/sparc/kernel/ptrace.c linux-2.6.22.1/arch/sparc/kernel/ptrace.c --- linux-2.6.22.1/arch/sparc/kernel/ptrace.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/sparc/kernel/ptrace.c 2007-08-02 11:09:14.000000000 -0400 @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -303,6 +304,11 @@ asmlinkage void do_ptrace(struct pt_regs goto out_tsk; } + if (gr_handle_ptrace(child, request)) { + pt_error_return(regs, EPERM); + goto out_tsk; + } + if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH) || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) { if (ptrace_attach(child)) { diff -urNp linux-2.6.22.1/arch/sparc/kernel/sys_sparc.c linux-2.6.22.1/arch/sparc/kernel/sys_sparc.c --- linux-2.6.22.1/arch/sparc/kernel/sys_sparc.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/sparc/kernel/sys_sparc.c 2007-08-02 11:38:46.000000000 -0400 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str if (ARCH_SUN4C_SUN4 && len > 0x20000000) return -ENOMEM; if (!addr) - addr = TASK_UNMAPPED_BASE; + addr = current->mm->mmap_base; if (flags & MAP_SHARED) addr = COLOUR_ALIGN(addr); diff -urNp linux-2.6.22.1/arch/sparc/Makefile linux-2.6.22.1/arch/sparc/Makefile --- linux-2.6.22.1/arch/sparc/Makefile 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/sparc/Makefile 2007-08-02 11:09:14.000000000 -0400 @@ -36,7 +36,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc # Renaming is done to avoid confusing pattern matching rules in 2.5.45 (multy-) INIT_Y := $(patsubst %/, %/built-in.o, $(init-y)) CORE_Y := $(core-y) -CORE_Y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ +CORE_Y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ CORE_Y := $(patsubst %/, %/built-in.o, $(CORE_Y)) DRIVERS_Y := $(patsubst %/, %/built-in.o, $(drivers-y)) NET_Y := $(patsubst %/, %/built-in.o, $(net-y)) diff -urNp linux-2.6.22.1/arch/sparc/mm/fault.c linux-2.6.22.1/arch/sparc/mm/fault.c --- linux-2.6.22.1/arch/sparc/mm/fault.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/sparc/mm/fault.c 2007-08-02 11:38:46.000000000 -0400 @@ -21,6 +21,10 @@ #include #include #include +#include +#include +#include +#include #include #include @@ -216,6 +220,252 @@ static unsigned long compute_si_addr(str return safe_compute_effective_address(regs, insn); } +#ifdef CONFIG_PAX_PAGEEXEC +void pax_emuplt_close(struct vm_area_struct *vma) +{ + vma->vm_mm->call_dl_resolve = 0UL; +} + +static struct page *pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int *type) +{ + struct page *page; + unsigned int *kaddr; + + page = alloc_page(GFP_HIGHUSER); + if (!page) + return NOPAGE_OOM; + + kaddr = kmap(page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x9DE3BFA8U; /* save */ + flush_dcache_page(page); + kunmap(page); + if (type) + *type = VM_FAULT_MAJOR; + + return page; +} + +static struct vm_operations_struct pax_vm_ops = { + .close = pax_emuplt_close, + .nopage = pax_emuplt_nopage, +}; + +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) +{ + int ret; + + memset(vma, 0, sizeof(*vma)); + vma->vm_mm = current->mm; + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + 
vma->vm_ops = &pax_vm_ops; + + ret = insert_vm_struct(current->mm, vma); + if (ret) + return ret; + + ++current->mm->total_vm; + return 0; +} + +/* + * PaX: decide what to do with offenders (regs->pc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: patched PLT emulation #1 */ + unsigned int sethi1, sethi2, jmpl; + + err = get_user(sethi1, (unsigned int *)regs->pc); + err |= get_user(sethi2, (unsigned int *)(regs->pc+4)); + err |= get_user(jmpl, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U) + { + unsigned int addr; + + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; + addr = regs->u_regs[UREG_G1]; + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } while (0); + + { /* PaX: patched PLT emulation #2 */ + unsigned int ba; + + err = get_user(ba, (unsigned int *)regs->pc); + + if (!err && (ba & 0xFFC00000U) == 0x30800000U) { + unsigned int addr; + + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } + + do { /* PaX: patched PLT emulation #3 */ + unsigned int sethi, jmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->pc); + err |= get_user(jmpl, (unsigned int *)(regs->pc+4)); + err |= get_user(nop, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U && + nop == 0x01000000U) + { + unsigned int addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 1 */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int *)regs->pc); + err |= get_user(ba, (unsigned int *)(regs->pc+4)); + err |= get_user(nop, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && + nop == 0x01000000U) + { + unsigned int addr, save, call; + + if ((ba & 0xFFC00000U) == 0x30800000U) + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); + else + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); + + err = get_user(save, (unsigned int *)addr); + err |= get_user(call, (unsigned int *)(addr+4)); + err |= get_user(nop, (unsigned int *)(addr+8)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + struct vm_area_struct *vma; + unsigned long call_dl_resolve; + + down_read(¤t->mm->mmap_sem); + call_dl_resolve = current->mm->call_dl_resolve; + up_read(¤t->mm->mmap_sem); + if (likely(call_dl_resolve)) + goto emulate; + + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + + down_write(¤t->mm->mmap_sem); + if (current->mm->call_dl_resolve) { + call_dl_resolve = current->mm->call_dl_resolve; + up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, 
MAP_PRIVATE); + if (!vma || (call_dl_resolve & ~PAGE_MASK)) { + up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + if (pax_insert_vma(vma, call_dl_resolve)) { + up_write(¤t->mm->mmap_sem); + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + current->mm->call_dl_resolve = call_dl_resolve; + up_write(¤t->mm->mmap_sem); + +emulate: + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->pc = call_dl_resolve; + regs->npc = addr+4; + return 3; + } + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 2 */ + unsigned int save, call, nop; + + err = get_user(save, (unsigned int *)(regs->pc-4)); + err |= get_user(call, (unsigned int *)regs->pc); + err |= get_user(nop, (unsigned int *)(regs->pc+4)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2); + + regs->u_regs[UREG_RETPC] = regs->pc; + regs->pc = dl_resolve; + regs->npc = dl_resolve+4; + return 3; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk("???????? "); + else + printk("%08x ", c); + } + printk("\n"); +} +#endif + asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, unsigned long address) { @@ -279,6 +529,24 @@ good_area: if(!(vma->vm_flags & VM_WRITE)) goto bad_area; } else { + +#ifdef CONFIG_PAX_PAGEEXEC + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) { + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + return; +#endif + + } + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]); + do_exit(SIGKILL); + } +#endif + /* Allow reads even for write-only mappings */ if(!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; diff -urNp linux-2.6.22.1/arch/sparc/mm/init.c linux-2.6.22.1/arch/sparc/mm/init.c --- linux-2.6.22.1/arch/sparc/mm/init.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/sparc/mm/init.c 2007-08-02 11:38:46.000000000 -0400 @@ -333,17 +333,17 @@ void __init paging_init(void) /* Initialize the protection map with non-constant, MMU dependent values. 
*/ protection_map[0] = PAGE_NONE; - protection_map[1] = PAGE_READONLY; - protection_map[2] = PAGE_COPY; - protection_map[3] = PAGE_COPY; + protection_map[1] = PAGE_READONLY_NOEXEC; + protection_map[2] = PAGE_COPY_NOEXEC; + protection_map[3] = PAGE_COPY_NOEXEC; protection_map[4] = PAGE_READONLY; protection_map[5] = PAGE_READONLY; protection_map[6] = PAGE_COPY; protection_map[7] = PAGE_COPY; protection_map[8] = PAGE_NONE; - protection_map[9] = PAGE_READONLY; - protection_map[10] = PAGE_SHARED; - protection_map[11] = PAGE_SHARED; + protection_map[9] = PAGE_READONLY_NOEXEC; + protection_map[10] = PAGE_SHARED_NOEXEC; + protection_map[11] = PAGE_SHARED_NOEXEC; protection_map[12] = PAGE_READONLY; protection_map[13] = PAGE_READONLY; protection_map[14] = PAGE_SHARED; diff -urNp linux-2.6.22.1/arch/sparc/mm/srmmu.c linux-2.6.22.1/arch/sparc/mm/srmmu.c --- linux-2.6.22.1/arch/sparc/mm/srmmu.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/sparc/mm/srmmu.c 2007-08-02 11:38:46.000000000 -0400 @@ -2160,6 +2160,13 @@ void __init ld_mmu_srmmu(void) BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED)); BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); + +#ifdef CONFIG_PAX_PAGEEXEC + BTFIXUPSET_INT(page_shared_noexec, pgprot_val(SRMMU_PAGE_SHARED_NOEXEC)); + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC)); + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC)); +#endif + BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); diff -urNp linux-2.6.22.1/arch/sparc64/kernel/ptrace.c linux-2.6.22.1/arch/sparc64/kernel/ptrace.c --- linux-2.6.22.1/arch/sparc64/kernel/ptrace.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/sparc64/kernel/ptrace.c 2007-08-02 11:09:14.000000000 -0400 @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -216,6 +217,11 @@ asmlinkage void do_ptrace(struct pt_regs goto out_tsk; } + if (gr_handle_ptrace(child, (long)request)) { + pt_error_return(regs, EPERM); + goto out_tsk; + } + if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH) || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) { if (ptrace_attach(child)) { diff -urNp linux-2.6.22.1/arch/sparc64/kernel/sys_sparc.c linux-2.6.22.1/arch/sparc64/kernel/sys_sparc.c --- linux-2.6.22.1/arch/sparc64/kernel/sys_sparc.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/sparc64/kernel/sys_sparc.c 2007-08-02 11:38:46.000000000 -0400 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str /* We do not accept a shared mapping if it would violate * cache aliasing constraints. 
*/ - if ((flags & MAP_SHARED) && + if ((filp || (flags & MAP_SHARED)) && ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str if (filp || (flags & MAP_SHARED)) do_color_align = 1; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || !filp) +#endif + if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); @@ -152,9 +156,9 @@ unsigned long arch_get_unmapped_area(str } if (len > mm->cached_hole_size) { - start_addr = addr = mm->free_area_cache; + start_addr = addr = mm->free_area_cache; } else { - start_addr = addr = TASK_UNMAPPED_BASE; + start_addr = addr = mm->mmap_base; mm->cached_hole_size = 0; } @@ -174,8 +178,8 @@ full_search: vma = find_vma(mm, VA_EXCLUDE_END); } if (unlikely(task_size < addr)) { - if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = addr = TASK_UNMAPPED_BASE; + if (start_addr != mm->mmap_base) { + start_addr = addr = mm->mmap_base; mm->cached_hole_size = 0; goto full_search; } @@ -215,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi /* We do not accept a shared mapping if it would violate * cache aliasing constraints. */ - if ((flags & MAP_SHARED) && + if ((filp || (flags & MAP_SHARED)) && ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; @@ -378,6 +382,12 @@ void arch_pick_mmap_layout(struct mm_str current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY || sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { @@ -392,6 +402,12 @@ void arch_pick_mmap_layout(struct mm_str gap = (task_size / 6 * 5); mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff -urNp linux-2.6.22.1/arch/sparc64/mm/fault.c linux-2.6.22.1/arch/sparc64/mm/fault.c --- linux-2.6.22.1/arch/sparc64/mm/fault.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/sparc64/mm/fault.c 2007-08-02 11:38:46.000000000 -0400 @@ -20,6 +20,10 @@ #include #include #include +#include +#include +#include +#include #include #include @@ -273,6 +277,369 @@ cannot_handle: unhandled_fault (address, current, regs); } +#ifdef CONFIG_PAX_PAGEEXEC +#ifdef CONFIG_PAX_EMUPLT +static void pax_emuplt_close(struct vm_area_struct *vma) +{ + vma->vm_mm->call_dl_resolve = 0UL; +} + +static struct page *pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int *type) +{ + struct page *page; + unsigned int *kaddr; + + page = alloc_page(GFP_HIGHUSER); + if (!page) + return NOPAGE_OOM; + + kaddr = kmap(page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x9DE3BFA8U; /* save */ + flush_dcache_page(page); + kunmap(page); + if (type) + *type = VM_FAULT_MAJOR; + return page; +} + +static struct vm_operations_struct pax_vm_ops = { + .close = pax_emuplt_close, + .nopage = pax_emuplt_nopage, +}; + +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) +{ + int ret; + + memset(vma, 0, sizeof(*vma)); + vma->vm_mm = current->mm; + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = 
vm_get_page_prot(vma->vm_flags); + vma->vm_ops = &pax_vm_ops; + + ret = insert_vm_struct(current->mm, vma); + if (ret) + return ret; + + ++current->mm->total_vm; + return 0; +} +#endif + +/* + * PaX: decide what to do with offenders (regs->tpc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: patched PLT emulation #1 */ + unsigned int sethi1, sethi2, jmpl; + + err = get_user(sethi1, (unsigned int *)regs->tpc); + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4)); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; + addr = regs->u_regs[UREG_G1]; + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + { /* PaX: patched PLT emulation #2 */ + unsigned int ba; + + err = get_user(ba, (unsigned int *)regs->tpc); + + if (!err && (ba & 0xFFC00000U) == 0x30800000U) { + unsigned long addr; + + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } + + do { /* PaX: patched PLT emulation #3 */ + unsigned int sethi, jmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4)); + err |= get_user(nop, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U && + nop == 0x01000000U) + { + unsigned long addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #4 */ + unsigned int mov1, call, mov2; + + err = get_user(mov1, (unsigned int *)regs->tpc); + err |= get_user(call, (unsigned int *)(regs->tpc+4)); + err |= get_user(mov2, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if (mov1 == 0x8210000FU && + (call & 0xC0000000U) == 0x40000000U && + mov2 == 0x9E100001U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC]; + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #5 */ + unsigned int sethi1, sethi2, or1, or2, sllx, jmpl, nop; + + err = get_user(sethi1, (unsigned int *)regs->tpc); + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4)); + err |= get_user(or1, (unsigned int *)(regs->tpc+8)); + err |= get_user(or2, (unsigned int *)(regs->tpc+12)); + err |= get_user(sllx, (unsigned int *)(regs->tpc+16)); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20)); + err |= get_user(nop, (unsigned int *)(regs->tpc+24)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x0B000000U && + (or1 & 0xFFFFE000U) == 0x82106000U && + (or2 & 0xFFFFE000U) == 0x8A116000U && + sllx == 0x83287020 && + jmpl == 0x81C04005U && + nop == 0x01000000U) + { + unsigned long addr; + + 
regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); + regs->u_regs[UREG_G1] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #6 */ + unsigned int sethi1, sethi2, sllx, or, jmpl, nop; + + err = get_user(sethi1, (unsigned int *)regs->tpc); + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4)); + err |= get_user(sllx, (unsigned int *)(regs->tpc+8)); + err |= get_user(or, (unsigned int *)(regs->tpc+12)); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+16)); + err |= get_user(nop, (unsigned int *)(regs->tpc+20)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x0B000000U && + sllx == 0x83287020 && + (or & 0xFFFFE000U) == 0x8A116000U && + jmpl == 0x81C04005U && + nop == 0x01000000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU); + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #7 */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(ba, (unsigned int *)(regs->tpc+4)); + err |= get_user(nop, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (ba & 0xFFF00000U) == 0x30600000U && + nop == 0x01000000U) + { + unsigned long addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 1 */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(ba, (unsigned int *)(regs->tpc+4)); + err |= get_user(nop, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && + nop == 0x01000000U) + { + unsigned long addr; + unsigned int save, call; + + if ((ba & 0xFFC00000U) == 0x30800000U) + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); + else + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); + + err = get_user(save, (unsigned int *)addr); + err |= get_user(call, (unsigned int *)(addr+4)); + err |= get_user(nop, (unsigned int *)(addr+8)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + struct vm_area_struct *vma; + unsigned long call_dl_resolve; + + down_read(¤t->mm->mmap_sem); + call_dl_resolve = current->mm->call_dl_resolve; + up_read(¤t->mm->mmap_sem); + if (likely(call_dl_resolve)) + goto emulate; + + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + + down_write(¤t->mm->mmap_sem); + if (current->mm->call_dl_resolve) { + call_dl_resolve = current->mm->call_dl_resolve; + up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_dl_resolve & ~PAGE_MASK)) { + 
up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + if (pax_insert_vma(vma, call_dl_resolve)) { + up_write(¤t->mm->mmap_sem); + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + current->mm->call_dl_resolve = call_dl_resolve; + up_write(¤t->mm->mmap_sem); + +emulate: + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->tpc = call_dl_resolve; + regs->tnpc = addr+4; + return 3; + } + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 2 */ + unsigned int save, call, nop; + + err = get_user(save, (unsigned int *)(regs->tpc-4)); + err |= get_user(call, (unsigned int *)regs->tpc); + err |= get_user(nop, (unsigned int *)(regs->tpc+4)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); + + regs->u_regs[UREG_RETPC] = regs->tpc; + regs->tpc = dl_resolve; + regs->tnpc = dl_resolve+4; + return 3; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk("???????? "); + else + printk("%08x ", c); + } + printk("\n"); +} +#endif + asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) { struct mm_struct *mm = current->mm; @@ -314,8 +681,10 @@ asmlinkage void __kprobes do_sparc64_fau goto intr_or_no_mm; if (test_thread_flag(TIF_32BIT)) { - if (!(regs->tstate & TSTATE_PRIV)) + if (!(regs->tstate & TSTATE_PRIV)) { regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } address &= 0xffffffff; } @@ -332,6 +701,29 @@ asmlinkage void __kprobes do_sparc64_fau if (!vma) goto bad_area; +#ifdef CONFIG_PAX_PAGEEXEC + /* PaX: detect ITLB misses on non-exec pages */ + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address && + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB)) + { + if (address != regs->tpc) + goto good_area; + + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + return; +#endif + + } + pax_report_fault(regs, (void*)regs->tpc, (void*)(regs->u_regs[UREG_FP] + STACK_BIAS)); + do_exit(SIGKILL); + } +#endif + /* Pure DTLB misses do not tell us whether the fault causing * load/store/atomic was a write or not, it only says that there * was no match. So in such a case we (carefully) read the diff -urNp linux-2.6.22.1/arch/sparc64/mm/Makefile linux-2.6.22.1/arch/sparc64/mm/Makefile --- linux-2.6.22.1/arch/sparc64/mm/Makefile 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/sparc64/mm/Makefile 2007-08-02 11:38:46.000000000 -0400 @@ -3,7 +3,7 @@ # EXTRA_AFLAGS := -ansi -EXTRA_CFLAGS := -Werror +#EXTRA_CFLAGS := -Werror obj-y := ultra.o tlb.o tsb.o fault.o init.o generic.o diff -urNp linux-2.6.22.1/arch/v850/kernel/module.c linux-2.6.22.1/arch/v850/kernel/module.c --- linux-2.6.22.1/arch/v850/kernel/module.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/v850/kernel/module.c 2007-08-02 11:38:46.000000000 -0400 @@ -150,8 +150,8 @@ static uint32_t do_plt_call (void *locat tramp[1] = ((val >> 16) & 0xffff) + 0x610000; /* ...; jmp r1 */ /* Init, or core PLT? 
*/ - if (location >= mod->module_core - && location < mod->module_core + mod->core_size) + if (location >= mod->module_core_rx + && location < mod->module_core_rx + mod->core_size_rx) entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; else entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; diff -urNp linux-2.6.22.1/arch/x86_64/ia32/ia32_binfmt.c linux-2.6.22.1/arch/x86_64/ia32/ia32_binfmt.c --- linux-2.6.22.1/arch/x86_64/ia32/ia32_binfmt.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/x86_64/ia32/ia32_binfmt.c 2007-08-02 11:38:46.000000000 -0400 @@ -143,6 +143,13 @@ struct elf_prpsinfo //#include #include +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x08048000UL + +#define PAX_DELTA_MMAP_LEN 16 +#define PAX_DELTA_STACK_LEN 16 +#endif + typedef struct user_i387_ia32_struct elf_fpregset_t; typedef struct user32_fxsr_struct elf_fpxregset_t; @@ -317,8 +324,20 @@ int ia32_setup_arg_pages(struct linux_bi mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC; else mpnt->vm_flags = VM_STACK_FLAGS; - mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC) ? - PAGE_COPY_EXEC : PAGE_COPY; + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { + mpnt->vm_flags &= ~VM_EXEC; + +#ifdef CONFIG_PAX_MPROTECT + if (mm->pax_flags & MF_PAX_MPROTECT) + mpnt->vm_flags &= ~VM_MAYEXEC; +#endif + + } +#endif + + mpnt->vm_page_prot = vm_get_page_prot(mpnt->vm_flags); if ((ret = insert_vm_struct(mm, mpnt))) { up_write(&mm->mmap_sem); kmem_cache_free(vm_area_cachep, mpnt); @@ -329,15 +348,18 @@ int ia32_setup_arg_pages(struct linux_bi for (i = 0 ; i < MAX_ARG_PAGES ; i++) { struct page *page = bprm->page[i]; + int retval; if (page) { bprm->page[i] = NULL; - install_arg_page(mpnt, page, stack_base); + retval = install_arg_page(mpnt, page, stack_base); + if (!ret) + ret = retval; } stack_base += PAGE_SIZE; } up_write(&mm->mmap_sem); - - return 0; + + return ret; } EXPORT_SYMBOL(ia32_setup_arg_pages); diff -urNp linux-2.6.22.1/arch/x86_64/ia32/mmap32.c linux-2.6.22.1/arch/x86_64/ia32/mmap32.c --- linux-2.6.22.1/arch/x86_64/ia32/mmap32.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/x86_64/ia32/mmap32.c 2007-08-02 11:38:46.000000000 -0400 @@ -69,10 +69,22 @@ void ia32_pick_mmap_layout(struct mm_str (current->personality & ADDR_COMPAT_LAYOUT) || current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) { mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(mm); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff -urNp linux-2.6.22.1/arch/x86_64/kernel/hpet.c linux-2.6.22.1/arch/x86_64/kernel/hpet.c --- linux-2.6.22.1/arch/x86_64/kernel/hpet.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/x86_64/kernel/hpet.c 2007-08-02 11:38:46.000000000 -0400 @@ -65,7 +65,7 @@ static __init int late_hpet_init(void) hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE); timer = &hpet->hpet_timers[2]; for (i = 2; i < ntimer; timer++, i++) - hd.hd_irq[i] = (timer->hpet_config & + hd.hd_irq[i] = (readl(&timer->hpet_config) & Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT; diff -urNp linux-2.6.22.1/arch/x86_64/kernel/ioport.c 
linux-2.6.22.1/arch/x86_64/kernel/ioport.c --- linux-2.6.22.1/arch/x86_64/kernel/ioport.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/x86_64/kernel/ioport.c 2007-08-02 11:09:14.000000000 -0400 @@ -41,8 +41,16 @@ asmlinkage long sys_ioperm(unsigned long if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) return -EINVAL; + +#ifdef CONFIG_GRKERNSEC_IO + if (turn_on) { + gr_handle_ioperm(); + return -EPERM; + } +#else if (turn_on && !capable(CAP_SYS_RAWIO)) return -EPERM; +#endif /* * If it's the first ioperm() call in this thread's lifetime, set the @@ -111,8 +119,13 @@ asmlinkage long sys_iopl(unsigned int le return -EINVAL; /* Trying to gain more privileges? */ if (level > old) { +#ifdef CONFIG_GRKERNSEC_IO + gr_handle_iopl(); + return -EPERM; +#else if (!capable(CAP_SYS_RAWIO)) return -EPERM; +#endif } regs->eflags = (regs->eflags &~ X86_EFLAGS_IOPL) | (level << 12); return 0; diff -urNp linux-2.6.22.1/arch/x86_64/kernel/process.c linux-2.6.22.1/arch/x86_64/kernel/process.c --- linux-2.6.22.1/arch/x86_64/kernel/process.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/x86_64/kernel/process.c 2007-08-02 11:38:46.000000000 -0400 @@ -883,10 +883,3 @@ int dump_task_regs(struct task_struct *t return 1; } - -unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() % 8192; - return sp & ~0xf; -} diff -urNp linux-2.6.22.1/arch/x86_64/kernel/setup64.c linux-2.6.22.1/arch/x86_64/kernel/setup64.c --- linux-2.6.22.1/arch/x86_64/kernel/setup64.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/x86_64/kernel/setup64.c 2007-08-02 11:38:46.000000000 -0400 @@ -37,7 +37,7 @@ struct desc_ptr idt_descr = { 256 * 16 - char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned"))); unsigned long __supported_pte_mask __read_mostly = ~0UL; -static int do_not_nx __cpuinitdata = 0; +EXPORT_SYMBOL(__supported_pte_mask); /* noexec=on|off Control non executable mappings for 64bit processes. @@ -51,16 +51,14 @@ static int __init nonx_setup(char *str) return -EINVAL; if (!strncmp(str, "on", 2)) { __supported_pte_mask |= _PAGE_NX; - do_not_nx = 0; } else if (!strncmp(str, "off", 3)) { - do_not_nx = 1; __supported_pte_mask &= ~_PAGE_NX; } return 0; } early_param("noexec", nonx_setup); -int force_personality32 = 0; +int force_personality32; /* noexec32=on|off Control non executable heap for 32bit processes. 
@@ -177,7 +175,7 @@ void __cpuinit check_efer(void) unsigned long efer; rdmsrl(MSR_EFER, efer); - if (!(efer & EFER_NX) || do_not_nx) { + if (!(efer & EFER_NX)) { __supported_pte_mask &= ~_PAGE_NX; } } diff -urNp linux-2.6.22.1/arch/x86_64/kernel/signal.c linux-2.6.22.1/arch/x86_64/kernel/signal.c --- linux-2.6.22.1/arch/x86_64/kernel/signal.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/x86_64/kernel/signal.c 2007-08-02 11:38:46.000000000 -0400 @@ -253,8 +253,8 @@ static int setup_rt_frame(int sig, struc err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); if (sizeof(*set) == 16) { - __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); - __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); + err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); + err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); } else err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); diff -urNp linux-2.6.22.1/arch/x86_64/kernel/sys_x86_64.c linux-2.6.22.1/arch/x86_64/kernel/sys_x86_64.c --- linux-2.6.22.1/arch/x86_64/kernel/sys_x86_64.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/x86_64/kernel/sys_x86_64.c 2007-08-02 11:38:46.000000000 -0400 @@ -64,8 +64,8 @@ out: return error; } -static void find_start_end(unsigned long flags, unsigned long *begin, - unsigned long *end) +static void find_start_end(struct mm_struct *mm, unsigned long flags, + unsigned long *begin, unsigned long *end) { if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) { /* This is usually used needed to map code in small @@ -78,7 +78,7 @@ static void find_start_end(unsigned long *begin = 0x40000000; *end = 0x80000000; } else { - *begin = TASK_UNMAPPED_BASE; + *begin = mm->mmap_base; *end = TASK_SIZE; } } @@ -95,11 +95,15 @@ arch_get_unmapped_area(struct file *filp if (flags & MAP_FIXED) return addr; - find_start_end(flags, &begin, &end); + find_start_end(mm, flags, &begin, &end); if (len > end) return -ENOMEM; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || !filp) +#endif + if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); diff -urNp linux-2.6.22.1/arch/x86_64/mm/fault.c linux-2.6.22.1/arch/x86_64/mm/fault.c --- linux-2.6.22.1/arch/x86_64/mm/fault.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/x86_64/mm/fault.c 2007-08-02 11:38:46.000000000 -0400 @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -301,6 +302,33 @@ static int vmalloc_fault(unsigned long a return 0; } +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(void *pc, void *sp) +{ + long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 20; i++) { + unsigned char c; + if (get_user(c, (unsigned char __user *)pc+i)) + printk("?? "); + else + printk("%02x ", c); + } + printk("\n"); + + printk(KERN_ERR "PAX: bytes at SP-8: "); + for (i = -1; i < 10; i++) { + unsigned long c; + if (get_user(c, (unsigned long __user *)sp+i)) + printk("???????????????? 
"); + else + printk("%016lx ", c); + } + printk("\n"); +} +#endif + int page_fault_trace = 0; int exception_trace = 1; @@ -430,6 +458,8 @@ asmlinkage void __kprobes do_page_fault( good_area: info.si_code = SEGV_ACCERR; write = 0; + if ((error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC)) + goto bad_area; switch (error_code & (PF_PROT|PF_WRITE)) { default: /* 3: write, present */ /* fall through */ @@ -502,7 +532,14 @@ bad_area_nosemaphore: tsk->comm, tsk->pid, tsk->xid, address, regs->rip, regs->rsp, error_code); } - + +#ifdef CONFIG_PAX_PAGEEXEC + if (mm && (mm->pax_flags & MF_PAX_PAGEEXEC) && (error_code & 16)) { + pax_report_fault(regs, (void*)regs->rip, (void*)regs->rsp); + do_exit(SIGKILL); + } +#endif + tsk->thread.cr2 = address; /* Kernel addresses are always protection faults */ tsk->thread.error_code = error_code | (address >= TASK_SIZE); diff -urNp linux-2.6.22.1/arch/x86_64/mm/mmap.c linux-2.6.22.1/arch/x86_64/mm/mmap.c --- linux-2.6.22.1/arch/x86_64/mm/mmap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/arch/x86_64/mm/mmap.c 2007-08-02 11:38:46.000000000 -0400 @@ -23,6 +23,12 @@ void arch_pick_mmap_layout(struct mm_str unsigned rnd = get_random_int() & 0xfffffff; mm->mmap_base += ((unsigned long)rnd) << PAGE_SHIFT; } + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } diff -urNp linux-2.6.22.1/crypto/lrw.c linux-2.6.22.1/crypto/lrw.c --- linux-2.6.22.1/crypto/lrw.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/crypto/lrw.c 2007-08-02 11:38:46.000000000 -0400 @@ -54,7 +54,7 @@ static int setkey(struct crypto_tfm *par struct priv *ctx = crypto_tfm_ctx(parent); struct crypto_cipher *child = ctx->child; int err, i; - be128 tmp = { 0 }; + be128 tmp = { 0, 0 }; int bsize = crypto_cipher_blocksize(child); crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); diff -urNp linux-2.6.22.1/Documentation/dontdiff linux-2.6.22.1/Documentation/dontdiff --- linux-2.6.22.1/Documentation/dontdiff 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/Documentation/dontdiff 2007-08-02 11:38:45.000000000 -0400 @@ -177,10 +177,13 @@ version.h* vmlinux vmlinux-* vmlinux.aout +vmlinux.bin.all vmlinux.lds +vmlinux.relocs vsyscall.lds wanxlfw.inc uImage unifdef +utsrelease.h zImage* zconf.hash.c diff -urNp linux-2.6.22.1/drivers/acpi/blacklist.c linux-2.6.22.1/drivers/acpi/blacklist.c --- linux-2.6.22.1/drivers/acpi/blacklist.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/acpi/blacklist.c 2007-08-02 11:38:46.000000000 -0400 @@ -70,7 +70,7 @@ static struct acpi_blacklist_item acpi_b {"ASUS\0\0", "P2B-S ", 0, ACPI_SIG_DSDT, all_versions, "Bogus PCI routing", 1}, - {""} + {"", "", 0, 0, 0, all_versions, 0} }; #if CONFIG_ACPI_BLACKLIST_YEAR diff -urNp linux-2.6.22.1/drivers/acpi/glue.c linux-2.6.22.1/drivers/acpi/glue.c --- linux-2.6.22.1/drivers/acpi/glue.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/acpi/glue.c 2007-08-02 11:38:46.000000000 -0400 @@ -16,7 +16,7 @@ #if ACPI_GLUE_DEBUG #define DBG(x...) printk(PREFIX x) #else -#define DBG(x...) +#define DBG(x...) 
do {} while (0) #endif static LIST_HEAD(bus_type_list); static DECLARE_RWSEM(bus_type_sem); diff -urNp linux-2.6.22.1/drivers/acpi/processor_core.c linux-2.6.22.1/drivers/acpi/processor_core.c --- linux-2.6.22.1/drivers/acpi/processor_core.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/acpi/processor_core.c 2007-08-02 11:38:46.000000000 -0400 @@ -635,7 +635,7 @@ static int __cpuinit acpi_processor_star return 0; } - BUG_ON((pr->id >= NR_CPUS) || (pr->id < 0)); + BUG_ON(pr->id >= NR_CPUS); /* * Buggy BIOS check diff -urNp linux-2.6.22.1/drivers/acpi/processor_idle.c linux-2.6.22.1/drivers/acpi/processor_idle.c --- linux-2.6.22.1/drivers/acpi/processor_idle.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/acpi/processor_idle.c 2007-08-02 11:38:46.000000000 -0400 @@ -163,7 +163,7 @@ static struct dmi_system_id __cpuinitdat DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, (void *)2}, - {}, + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL}, }; static inline u32 ticks_elapsed(u32 t1, u32 t2) diff -urNp linux-2.6.22.1/drivers/acpi/sleep/main.c linux-2.6.22.1/drivers/acpi/sleep/main.c --- linux-2.6.22.1/drivers/acpi/sleep/main.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/acpi/sleep/main.c 2007-08-02 11:38:46.000000000 -0400 @@ -241,7 +241,7 @@ static struct dmi_system_id __initdata a .ident = "Toshiba Satellite 4030cdt", .matches = {DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),}, }, - {}, + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL}, }; int __init acpi_sleep_init(void) diff -urNp linux-2.6.22.1/drivers/acpi/tables/tbfadt.c linux-2.6.22.1/drivers/acpi/tables/tbfadt.c --- linux-2.6.22.1/drivers/acpi/tables/tbfadt.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/acpi/tables/tbfadt.c 2007-08-02 11:38:46.000000000 -0400 @@ -48,7 +48,7 @@ ACPI_MODULE_NAME("tbfadt") /* Local prototypes */ -static void inline +static inline void acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, u8 bit_width, u64 address); @@ -122,7 +122,7 @@ static struct acpi_fadt_info fadt_info_t * ******************************************************************************/ -static void inline +static inline void acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, u8 bit_width, u64 address) { diff -urNp linux-2.6.22.1/drivers/ata/ata_piix.c linux-2.6.22.1/drivers/ata/ata_piix.c --- linux-2.6.22.1/drivers/ata/ata_piix.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/ata/ata_piix.c 2007-08-02 11:38:46.000000000 -0400 @@ -244,7 +244,7 @@ static const struct pci_device_id piix_p /* SATA Controller IDE (ICH9M) */ { 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, - { } /* terminate list */ + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; static struct pci_driver piix_pci_driver = { @@ -580,7 +580,7 @@ static const struct ich_laptop ich_lapto { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ /* end marker */ - { 0, } + { 0, 0, 0 } }; /** diff -urNp linux-2.6.22.1/drivers/ata/libata-core.c linux-2.6.22.1/drivers/ata/libata-core.c --- linux-2.6.22.1/drivers/ata/libata-core.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/ata/libata-core.c 2007-08-02 11:38:46.000000000 -0400 @@ -469,7 +469,7 @@ static const struct ata_xfer_ent { { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 }, { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 }, { ATA_SHIFT_UDMA, 
ATA_BITS_UDMA, XFER_UDMA_0 }, - { -1, }, + { -1, 0, 0 }, }; /** @@ -2559,7 +2559,7 @@ static const struct ata_timing ata_timin /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */ - { 0xFF } + { 0xFF, 0, 0, 0, 0, 0, 0, 0, 0 } }; #define ENOUGH(v,unit) (((v)-1)/(unit)+1) @@ -3804,7 +3804,7 @@ static const struct ata_blacklist_entry /* Devices with NCQ limits */ /* End Marker */ - { } + { NULL, NULL, 0 } }; unsigned long ata_device_blacklisted(const struct ata_device *dev) diff -urNp linux-2.6.22.1/drivers/char/agp/frontend.c linux-2.6.22.1/drivers/char/agp/frontend.c --- linux-2.6.22.1/drivers/char/agp/frontend.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/char/agp/frontend.c 2007-08-02 11:38:46.000000000 -0400 @@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct ag if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) return -EFAULT; - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment)) + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv)) return -EFAULT; client = agp_find_client_by_pid(reserve.pid); diff -urNp linux-2.6.22.1/drivers/char/agp/intel-agp.c linux-2.6.22.1/drivers/char/agp/intel-agp.c --- linux-2.6.22.1/drivers/char/agp/intel-agp.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/char/agp/intel-agp.c 2007-08-02 11:38:46.000000000 -0400 @@ -2059,7 +2059,7 @@ static struct pci_device_id agp_intel_pc ID(PCI_DEVICE_ID_INTEL_G33_HB), ID(PCI_DEVICE_ID_INTEL_Q35_HB), ID(PCI_DEVICE_ID_INTEL_Q33_HB), - { } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, agp_intel_pci_table); diff -urNp linux-2.6.22.1/drivers/char/drm/drm_drawable.c linux-2.6.22.1/drivers/char/drm/drm_drawable.c --- linux-2.6.22.1/drivers/char/drm/drm_drawable.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/char/drm/drm_drawable.c 2007-08-02 11:38:46.000000000 -0400 @@ -245,7 +245,7 @@ int drm_update_drawable_info(DRM_IOCTL_A idx = id / (8 * sizeof(*bitfield)); shift = id % (8 * sizeof(*bitfield)); - if (idx < 0 || idx >= bitfield_length || + if (idx >= bitfield_length || !(bitfield[idx] & (1 << shift))) { DRM_ERROR("No such drawable %d\n", update.handle); return DRM_ERR(EINVAL); @@ -330,7 +330,7 @@ drm_drawable_info_t *drm_get_drawable_in idx = id / (8 * sizeof(*bitfield)); shift = id % (8 * sizeof(*bitfield)); - if (idx < 0 || idx >= dev->drw_bitfield_length || + if (idx >= dev->drw_bitfield_length || !(bitfield[idx] & (1 << shift))) { DRM_DEBUG("No such drawable %d\n", id); return NULL; diff -urNp linux-2.6.22.1/drivers/char/drm/drm_pciids.h linux-2.6.22.1/drivers/char/drm/drm_pciids.h --- linux-2.6.22.1/drivers/char/drm/drm_pciids.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/char/drm/drm_pciids.h 2007-08-02 11:38:46.000000000 -0400 @@ -251,7 +251,7 @@ {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0, 0, 0} + {0, 0, 0, 0, 0, 0, 0 } #define i830_PCI_IDS \ {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ diff -urNp linux-2.6.22.1/drivers/char/hpet.c linux-2.6.22.1/drivers/char/hpet.c --- linux-2.6.22.1/drivers/char/hpet.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/char/hpet.c 2007-08-02 11:38:46.000000000 -0400 @@ -1008,7 +1008,7 @@ static struct acpi_driver hpet_acpi_driv }, }; -static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops }; +static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops, {NULL, NULL}, NULL, NULL }; 
static int __init hpet_init(void) { diff -urNp linux-2.6.22.1/drivers/char/keyboard.c linux-2.6.22.1/drivers/char/keyboard.c --- linux-2.6.22.1/drivers/char/keyboard.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/char/keyboard.c 2007-08-02 11:38:46.000000000 -0400 @@ -595,6 +595,16 @@ static void k_spec(struct vc_data *vc, u kbd->kbdmode == VC_MEDIUMRAW) && value != KVAL(K_SAK)) return; /* SAK is allowed even in raw mode */ + +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + { + void *func = fn_handler[value]; + if (func == fn_show_state || func == fn_show_ptregs || + func == fn_show_mem) + return; + } +#endif + fn_handler[value](vc); } @@ -1334,7 +1344,7 @@ static const struct input_device_id kbd_ .evbit = { BIT(EV_SND) }, }, - { }, /* Terminating entry */ + { 0 }, /* Terminating entry */ }; MODULE_DEVICE_TABLE(input, kbd_ids); diff -urNp linux-2.6.22.1/drivers/char/mem.c linux-2.6.22.1/drivers/char/mem.c --- linux-2.6.22.1/drivers/char/mem.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/char/mem.c 2007-08-02 11:38:46.000000000 -0400 @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -34,6 +35,10 @@ # include #endif +#ifdef CONFIG_GRKERNSEC +extern struct file_operations grsec_fops; +#endif + /* * Architectures vary in how they handle caching for addresses * outside of main memory. @@ -173,6 +178,11 @@ static ssize_t write_mem(struct file * f if (!valid_phys_addr_range(p, count)) return -EFAULT; +#ifdef CONFIG_GRKERNSEC_KMEM + gr_handle_mem_write(); + return -EPERM; +#endif + written = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED @@ -274,6 +284,11 @@ static int mmap_mem(struct file * file, if (!private_mapping_ok(vma)) return -ENOSYS; +#ifdef CONFIG_GRKERNSEC_KMEM + if (gr_handle_mem_mmap(vma->vm_pgoff << PAGE_SHIFT, vma)) + return -EPERM; +#endif + vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, size, vma->vm_page_prot); @@ -505,6 +520,11 @@ static ssize_t write_kmem(struct file * ssize_t written; char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ +#ifdef CONFIG_GRKERNSEC_KMEM + gr_handle_kmem_write(); + return -EPERM; +#endif + if (p < (unsigned long) high_memory) { wrote = count; @@ -628,6 +648,10 @@ static inline size_t read_zero_pagealign struct vm_area_struct * vma; unsigned long addr=(unsigned long)buf; +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *vma_m; +#endif + mm = current->mm; /* Oops, this was forgotten before. -ben */ down_read(&mm->mmap_sem); @@ -644,8 +668,14 @@ static inline size_t read_zero_pagealign if (count > size) count = size; +#ifdef CONFIG_PAX_SEGMEXEC + vma_m = pax_find_mirror_vma(vma); + if (vma_m) + zap_page_range(vma_m, addr + SEGMEXEC_TASK_SIZE, count, NULL); +#endif + zap_page_range(vma, addr, count, NULL); - if (zeromap_page_range(vma, addr, count, PAGE_COPY)) + if (zeromap_page_range(vma, addr, count, vma->vm_page_prot)) break; size -= count; @@ -798,6 +828,16 @@ static loff_t memory_lseek(struct file * static int open_port(struct inode * inode, struct file * filp) { +#ifdef CONFIG_GRKERNSEC_KMEM + gr_handle_open_port(); + return -EPERM; +#endif + + return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; +} + +static int open_mem(struct inode * inode, struct file * filp) +{ return capable(CAP_SYS_RAWIO) ? 
0 : -EPERM; } @@ -805,7 +845,6 @@ static int open_port(struct inode * inod #define full_lseek null_lseek #define write_zero write_null #define read_full read_zero -#define open_mem open_port #define open_kmem open_mem #define open_oldmem open_mem @@ -938,6 +977,11 @@ static int memory_open(struct inode * in filp->f_op = &oldmem_fops; break; #endif +#ifdef CONFIG_GRKERNSEC + case 13: + filp->f_op = &grsec_fops; + break; +#endif default: return -ENXIO; } @@ -970,6 +1014,9 @@ static const struct { #ifdef CONFIG_CRASH_DUMP {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops}, #endif +#ifdef CONFIG_GRKERNSEC + {13,"grsec", S_IRUSR | S_IWUGO, &grsec_fops}, +#endif }; static struct class *mem_class; diff -urNp linux-2.6.22.1/drivers/char/nvram.c linux-2.6.22.1/drivers/char/nvram.c --- linux-2.6.22.1/drivers/char/nvram.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/char/nvram.c 2007-08-02 11:38:46.000000000 -0400 @@ -449,7 +449,10 @@ static const struct file_operations nvra static struct miscdevice nvram_dev = { NVRAM_MINOR, "nvram", - &nvram_fops + &nvram_fops, + {NULL, NULL}, + NULL, + NULL }; static int __init diff -urNp linux-2.6.22.1/drivers/char/random.c linux-2.6.22.1/drivers/char/random.c --- linux-2.6.22.1/drivers/char/random.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/char/random.c 2007-08-02 11:40:03.000000000 -0400 @@ -248,8 +248,13 @@ /* * Configuration information */ +#ifdef CONFIG_GRKERNSEC_RANDNET +#define INPUT_POOL_WORDS 512 +#define OUTPUT_POOL_WORDS 128 +#else #define INPUT_POOL_WORDS 128 #define OUTPUT_POOL_WORDS 32 +#endif #define SEC_XFER_SIZE 512 /* @@ -286,10 +291,17 @@ static struct poolinfo { int poolwords; int tap1, tap2, tap3, tap4, tap5; } poolinfo_table[] = { +#ifdef CONFIG_GRKERNSEC_RANDNET + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */ + { 512, 411, 308, 208, 104, 1 }, + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */ + { 128, 103, 76, 51, 25, 1 }, +#else /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */ { 128, 103, 76, 51, 25, 1 }, /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */ { 32, 26, 20, 14, 7, 1 }, +#endif #if 0 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */ { 2048, 1638, 1231, 819, 411, 1 }, diff -urNp linux-2.6.22.1/drivers/char/vt_ioctl.c linux-2.6.22.1/drivers/char/vt_ioctl.c --- linux-2.6.22.1/drivers/char/vt_ioctl.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/char/vt_ioctl.c 2007-08-02 11:09:15.000000000 -0400 @@ -95,6 +95,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __ case KDSKBENT: if (!perm) return -EPERM; + +#ifdef CONFIG_GRKERNSEC + if (!capable(CAP_SYS_TTY_CONFIG)) + return -EPERM; +#endif + if (!i && v == K_NOSUCHMAP) { /* deallocate map */ key_map = key_maps[s]; @@ -235,6 +241,13 @@ do_kdgkb_ioctl(int cmd, struct kbsentry goto reterr; } +#ifdef CONFIG_GRKERNSEC + if (!capable(CAP_SYS_TTY_CONFIG)) { + ret = -EPERM; + goto reterr; + } +#endif + q = func_table[i]; first_free = funcbufptr + (funcbufsize - funcbufleft); for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++) diff -urNp linux-2.6.22.1/drivers/edac/edac_mc.h linux-2.6.22.1/drivers/edac/edac_mc.h --- linux-2.6.22.1/drivers/edac/edac_mc.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/edac/edac_mc.h 2007-08-02 11:38:46.000000000 -0400 @@ -71,11 +71,11 @@ extern int edac_debug_level; #else /* !CONFIG_EDAC_DEBUG */ -#define debugf0( ... ) -#define debugf1( ... ) -#define debugf2( ... ) -#define debugf3( ... ) -#define debugf4( ... ) +#define debugf0( ... 
) do {} while (0) +#define debugf1( ... ) do {} while (0) +#define debugf2( ... ) do {} while (0) +#define debugf3( ... ) do {} while (0) +#define debugf4( ... ) do {} while (0) #endif /* !CONFIG_EDAC_DEBUG */ diff -urNp linux-2.6.22.1/drivers/hwmon/fscpos.c linux-2.6.22.1/drivers/hwmon/fscpos.c --- linux-2.6.22.1/drivers/hwmon/fscpos.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/hwmon/fscpos.c 2007-08-02 11:38:46.000000000 -0400 @@ -231,7 +231,6 @@ static ssize_t set_pwm(struct i2c_client unsigned long v = simple_strtoul(buf, NULL, 10); /* Range: 0..255 */ - if (v < 0) v = 0; if (v > 255) v = 255; mutex_lock(&data->update_lock); diff -urNp linux-2.6.22.1/drivers/hwmon/k8temp.c linux-2.6.22.1/drivers/hwmon/k8temp.c --- linux-2.6.22.1/drivers/hwmon/k8temp.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/hwmon/k8temp.c 2007-08-02 11:38:46.000000000 -0400 @@ -130,7 +130,7 @@ static DEVICE_ATTR(name, S_IRUGO, show_n static struct pci_device_id k8temp_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, - { 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, }; MODULE_DEVICE_TABLE(pci, k8temp_ids); diff -urNp linux-2.6.22.1/drivers/hwmon/sis5595.c linux-2.6.22.1/drivers/hwmon/sis5595.c --- linux-2.6.22.1/drivers/hwmon/sis5595.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/hwmon/sis5595.c 2007-08-02 11:38:46.000000000 -0400 @@ -755,7 +755,7 @@ static struct sis5595_data *sis5595_upda static struct pci_device_id sis5595_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, sis5595_pci_ids); diff -urNp linux-2.6.22.1/drivers/hwmon/via686a.c linux-2.6.22.1/drivers/hwmon/via686a.c --- linux-2.6.22.1/drivers/hwmon/via686a.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/hwmon/via686a.c 2007-08-02 11:38:46.000000000 -0400 @@ -813,7 +813,7 @@ static struct via686a_data *via686a_upda static struct pci_device_id via686a_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, via686a_pci_ids); diff -urNp linux-2.6.22.1/drivers/hwmon/vt8231.c linux-2.6.22.1/drivers/hwmon/vt8231.c --- linux-2.6.22.1/drivers/hwmon/vt8231.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/hwmon/vt8231.c 2007-08-02 11:38:46.000000000 -0400 @@ -666,7 +666,7 @@ static struct i2c_driver vt8231_driver = static struct pci_device_id vt8231_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, vt8231_pci_ids); diff -urNp linux-2.6.22.1/drivers/hwmon/w83791d.c linux-2.6.22.1/drivers/hwmon/w83791d.c --- linux-2.6.22.1/drivers/hwmon/w83791d.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/hwmon/w83791d.c 2007-08-02 11:38:46.000000000 -0400 @@ -289,8 +289,8 @@ static int w83791d_attach_adapter(struct static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind); static int w83791d_detach_client(struct i2c_client *client); -static int w83791d_read(struct i2c_client *client, u8 register); -static int w83791d_write(struct i2c_client *client, u8 register, u8 value); +static int w83791d_read(struct i2c_client *client, u8 reg); +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value); static struct w83791d_data *w83791d_update_device(struct device *dev); #ifdef DEBUG diff -urNp linux-2.6.22.1/drivers/i2c/busses/i2c-i801.c linux-2.6.22.1/drivers/i2c/busses/i2c-i801.c --- 
linux-2.6.22.1/drivers/i2c/busses/i2c-i801.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/i2c/busses/i2c-i801.c 2007-08-02 11:38:46.000000000 -0400 @@ -460,7 +460,7 @@ static struct pci_device_id i801_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE (pci, i801_ids); diff -urNp linux-2.6.22.1/drivers/i2c/busses/i2c-i810.c linux-2.6.22.1/drivers/i2c/busses/i2c-i810.c --- linux-2.6.22.1/drivers/i2c/busses/i2c-i810.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/i2c/busses/i2c-i810.c 2007-08-02 11:38:46.000000000 -0400 @@ -198,7 +198,7 @@ static struct pci_device_id i810_ids[] _ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810E_IG) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG) }, - { 0, }, + { 0, 0, 0, 0, 0, 0, 0 }, }; MODULE_DEVICE_TABLE (pci, i810_ids); diff -urNp linux-2.6.22.1/drivers/i2c/busses/i2c-piix4.c linux-2.6.22.1/drivers/i2c/busses/i2c-piix4.c --- linux-2.6.22.1/drivers/i2c/busses/i2c-piix4.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/i2c/busses/i2c-piix4.c 2007-08-02 11:38:46.000000000 -0400 @@ -113,7 +113,7 @@ static struct dmi_system_id __devinitdat .ident = "IBM", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, }, - { }, + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL }, }; static int __devinit piix4_setup(struct pci_dev *PIIX4_dev, @@ -411,7 +411,7 @@ static struct pci_device_id piix4_ids[] .driver_data = 3 }, { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3), .driver_data = 0 }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE (pci, piix4_ids); diff -urNp linux-2.6.22.1/drivers/i2c/busses/i2c-sis630.c linux-2.6.22.1/drivers/i2c/busses/i2c-sis630.c --- linux-2.6.22.1/drivers/i2c/busses/i2c-sis630.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/i2c/busses/i2c-sis630.c 2007-08-02 11:38:46.000000000 -0400 @@ -465,7 +465,7 @@ static struct i2c_adapter sis630_adapter static struct pci_device_id sis630_ids[] __devinitdata = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) }, - { 0, } + { PCI_DEVICE(0, 0) } }; MODULE_DEVICE_TABLE (pci, sis630_ids); diff -urNp linux-2.6.22.1/drivers/i2c/busses/i2c-sis96x.c linux-2.6.22.1/drivers/i2c/busses/i2c-sis96x.c --- linux-2.6.22.1/drivers/i2c/busses/i2c-sis96x.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/i2c/busses/i2c-sis96x.c 2007-08-02 11:38:46.000000000 -0400 @@ -255,7 +255,7 @@ static struct i2c_adapter sis96x_adapter static struct pci_device_id sis96x_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) }, - { 0, } + { PCI_DEVICE(0, 0) } }; MODULE_DEVICE_TABLE (pci, sis96x_ids); diff -urNp linux-2.6.22.1/drivers/ide/ide-cd.c linux-2.6.22.1/drivers/ide/ide-cd.c --- linux-2.6.22.1/drivers/ide/ide-cd.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/ide/ide-cd.c 2007-08-02 11:38:46.000000000 -0400 @@ -457,8 +457,6 @@ void cdrom_analyze_sense_data(ide_drive_ sector &= ~(bio_sectors -1); valid = (sector - failed_command->sector) << 9; - if (valid < 0) - valid = 0; if (sector < get_capacity(info->disk) && drive->probed_capacity - sector < 4 * 75) { set_capacity(info->disk, sector); diff -urNp 
linux-2.6.22.1/drivers/ieee1394/dv1394.c linux-2.6.22.1/drivers/ieee1394/dv1394.c --- linux-2.6.22.1/drivers/ieee1394/dv1394.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/ieee1394/dv1394.c 2007-08-02 11:38:46.000000000 -0400 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c based upon DIF section and sequence */ -static void inline +static inline void frame_put_packet (struct frame *f, struct packet *p) { int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */ @@ -918,7 +918,7 @@ static int do_dv1394_init(struct video_c /* default SYT offset is 3 cycles */ init->syt_offset = 3; - if ( (init->channel > 63) || (init->channel < 0) ) + if (init->channel > 63) init->channel = 63; chan_mask = (u64)1 << init->channel; @@ -2173,7 +2173,7 @@ static struct ieee1394_device_id dv1394_ .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, .version = AVC_SW_VERSION_ENTRY & 0xffffff }, - { } + { 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table); diff -urNp linux-2.6.22.1/drivers/ieee1394/eth1394.c linux-2.6.22.1/drivers/ieee1394/eth1394.c --- linux-2.6.22.1/drivers/ieee1394/eth1394.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/ieee1394/eth1394.c 2007-08-02 11:38:46.000000000 -0400 @@ -449,7 +449,7 @@ static struct ieee1394_device_id eth1394 .specifier_id = ETHER1394_GASP_SPECIFIER_ID, .version = ETHER1394_GASP_VERSION, }, - {} + { 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table); diff -urNp linux-2.6.22.1/drivers/ieee1394/hosts.c linux-2.6.22.1/drivers/ieee1394/hosts.c --- linux-2.6.22.1/drivers/ieee1394/hosts.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/ieee1394/hosts.c 2007-08-02 11:38:46.000000000 -0400 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso } static struct hpsb_host_driver dummy_driver = { + .name = "dummy", .transmit_packet = dummy_transmit_packet, .devctl = dummy_devctl, .isoctl = dummy_isoctl diff -urNp linux-2.6.22.1/drivers/ieee1394/ohci1394.c linux-2.6.22.1/drivers/ieee1394/ohci1394.c --- linux-2.6.22.1/drivers/ieee1394/ohci1394.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/ieee1394/ohci1394.c 2007-08-02 11:38:46.000000000 -0400 @@ -160,9 +160,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_ printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args) /* Module Parameters */ -static int phys_dma = 1; +static int phys_dma; module_param(phys_dma, int, 0444); -MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1)."); +MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 0)."); static void dma_trm_tasklet(unsigned long data); static void dma_trm_reset(struct dma_trm_ctx *d); @@ -3632,7 +3632,7 @@ static struct pci_device_id ohci1394_pci .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, - { 0, }, + { 0, 0, 0, 0, 0, 0, 0 }, }; MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl); diff -urNp linux-2.6.22.1/drivers/ieee1394/raw1394.c linux-2.6.22.1/drivers/ieee1394/raw1394.c --- linux-2.6.22.1/drivers/ieee1394/raw1394.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/ieee1394/raw1394.c 2007-08-02 11:38:46.000000000 -0400 @@ -3013,7 +3013,7 @@ static struct ieee1394_device_id raw1394 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff}, - {} + { 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table); diff -urNp linux-2.6.22.1/drivers/ieee1394/sbp2.c 
linux-2.6.22.1/drivers/ieee1394/sbp2.c --- linux-2.6.22.1/drivers/ieee1394/sbp2.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/ieee1394/sbp2.c 2007-08-02 11:38:46.000000000 -0400 @@ -273,7 +273,7 @@ static struct ieee1394_device_id sbp2_id .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, .version = SBP2_SW_VERSION_ENTRY & 0xffffff}, - {} + { 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table); @@ -2136,7 +2136,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME); MODULE_LICENSE("GPL"); -static int sbp2_module_init(void) +static int __init sbp2_module_init(void) { int ret; diff -urNp linux-2.6.22.1/drivers/ieee1394/video1394.c linux-2.6.22.1/drivers/ieee1394/video1394.c --- linux-2.6.22.1/drivers/ieee1394/video1394.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/ieee1394/video1394.c 2007-08-02 11:38:46.000000000 -0400 @@ -893,7 +893,7 @@ static long video1394_ioctl(struct file if (unlikely(d == NULL)) return -EFAULT; - if (unlikely((v.buffer<0) || (v.buffer>=d->num_desc - 1))) { + if (unlikely(v.buffer>=d->num_desc - 1)) { PRINT(KERN_ERR, ohci->host->id, "Buffer %d out of range",v.buffer); return -EINVAL; @@ -959,7 +959,7 @@ static long video1394_ioctl(struct file if (unlikely(d == NULL)) return -EFAULT; - if (unlikely((v.buffer<0) || (v.buffer>d->num_desc - 1))) { + if (unlikely(v.buffer>d->num_desc - 1)) { PRINT(KERN_ERR, ohci->host->id, "Buffer %d out of range",v.buffer); return -EINVAL; @@ -1030,7 +1030,7 @@ static long video1394_ioctl(struct file d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel); if (d == NULL) return -EFAULT; - if ((v.buffer<0) || (v.buffer>=d->num_desc - 1)) { + if (v.buffer>=d->num_desc - 1) { PRINT(KERN_ERR, ohci->host->id, "Buffer %d out of range",v.buffer); return -EINVAL; @@ -1137,7 +1137,7 @@ static long video1394_ioctl(struct file d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel); if (d == NULL) return -EFAULT; - if ((v.buffer<0) || (v.buffer>=d->num_desc-1)) { + if (v.buffer>=d->num_desc-1) { PRINT(KERN_ERR, ohci->host->id, "Buffer %d out of range",v.buffer); return -EINVAL; @@ -1309,7 +1309,7 @@ static struct ieee1394_device_id video13 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff }, - { } + { 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(ieee1394, video1394_id_table); diff -urNp linux-2.6.22.1/drivers/input/keyboard/atkbd.c linux-2.6.22.1/drivers/input/keyboard/atkbd.c --- linux-2.6.22.1/drivers/input/keyboard/atkbd.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/input/keyboard/atkbd.c 2007-08-02 11:38:46.000000000 -0400 @@ -1075,7 +1075,7 @@ static struct serio_device_id atkbd_seri .id = SERIO_ANY, .extra = SERIO_ANY, }, - { 0 } + { 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(serio, atkbd_serio_ids); diff -urNp linux-2.6.22.1/drivers/input/mouse/lifebook.c linux-2.6.22.1/drivers/input/mouse/lifebook.c --- linux-2.6.22.1/drivers/input/mouse/lifebook.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/input/mouse/lifebook.c 2007-08-02 11:38:46.000000000 -0400 @@ -102,7 +102,7 @@ static struct dmi_system_id lifebook_dmi DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B142"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL} }; static psmouse_ret_t lifebook_process_byte(struct psmouse *psmouse) diff -urNp linux-2.6.22.1/drivers/input/mouse/psmouse-base.c 
linux-2.6.22.1/drivers/input/mouse/psmouse-base.c --- linux-2.6.22.1/drivers/input/mouse/psmouse-base.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/input/mouse/psmouse-base.c 2007-08-02 11:38:46.000000000 -0400 @@ -1296,7 +1296,7 @@ static struct serio_device_id psmouse_se .id = SERIO_ANY, .extra = SERIO_ANY, }, - { 0 } + { 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(serio, psmouse_serio_ids); diff -urNp linux-2.6.22.1/drivers/input/mouse/synaptics.c linux-2.6.22.1/drivers/input/mouse/synaptics.c --- linux-2.6.22.1/drivers/input/mouse/synaptics.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/input/mouse/synaptics.c 2007-08-02 11:38:46.000000000 -0400 @@ -417,7 +417,7 @@ static void synaptics_process_packet(str break; case 2: if (SYN_MODEL_PEN(priv->model_id)) - ; /* Nothing, treat a pen as a single finger */ + break; /* Nothing, treat a pen as a single finger */ break; case 4 ... 15: if (SYN_CAP_PALMDETECT(priv->capabilities)) @@ -624,7 +624,7 @@ static struct dmi_system_id toshiba_dmi_ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL } }; #endif diff -urNp linux-2.6.22.1/drivers/input/mousedev.c linux-2.6.22.1/drivers/input/mousedev.c --- linux-2.6.22.1/drivers/input/mousedev.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/input/mousedev.c 2007-08-02 11:38:46.000000000 -0400 @@ -815,7 +815,7 @@ static struct input_handler mousedev_han #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX static struct miscdevice psaux_mouse = { - PSMOUSE_MINOR, "psaux", &mousedev_fops + PSMOUSE_MINOR, "psaux", &mousedev_fops, {NULL, NULL}, NULL, NULL }; static int psaux_registered; #endif diff -urNp linux-2.6.22.1/drivers/input/serio/i8042-x86ia64io.h linux-2.6.22.1/drivers/input/serio/i8042-x86ia64io.h --- linux-2.6.22.1/drivers/input/serio/i8042-x86ia64io.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/input/serio/i8042-x86ia64io.h 2007-08-02 11:38:46.000000000 -0400 @@ -110,7 +110,7 @@ static struct dmi_system_id __initdata i DMI_MATCH(DMI_PRODUCT_VERSION, "5a"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL } }; /* @@ -252,7 +252,7 @@ static struct dmi_system_id __initdata i DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL } }; diff -urNp linux-2.6.22.1/drivers/input/serio/serio_raw.c linux-2.6.22.1/drivers/input/serio/serio_raw.c --- linux-2.6.22.1/drivers/input/serio/serio_raw.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/input/serio/serio_raw.c 2007-08-02 11:38:47.000000000 -0400 @@ -369,7 +369,7 @@ static struct serio_device_id serio_raw_ .id = SERIO_ANY, .extra = SERIO_ANY, }, - { 0 } + { 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(serio, serio_raw_serio_ids); diff -urNp linux-2.6.22.1/drivers/kvm/kvm_main.c linux-2.6.22.1/drivers/kvm/kvm_main.c --- linux-2.6.22.1/drivers/kvm/kvm_main.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/kvm/kvm_main.c 2007-08-02 11:38:47.000000000 -0400 @@ -60,19 +60,19 @@ static struct kvm_stats_debugfs_item { int offset; struct dentry *dentry; } debugfs_entries[] = { - { "pf_fixed", STAT_OFFSET(pf_fixed) }, - { "pf_guest", STAT_OFFSET(pf_guest) }, - { "tlb_flush", STAT_OFFSET(tlb_flush) }, - { "invlpg", STAT_OFFSET(invlpg) }, - { "exits", STAT_OFFSET(exits) }, - { "io_exits", STAT_OFFSET(io_exits) }, - { "mmio_exits", STAT_OFFSET(mmio_exits) }, - { "signal_exits", STAT_OFFSET(signal_exits) }, - { "irq_window", STAT_OFFSET(irq_window_exits) }, - { "halt_exits", 
STAT_OFFSET(halt_exits) }, - { "request_irq", STAT_OFFSET(request_irq_exits) }, - { "irq_exits", STAT_OFFSET(irq_exits) }, - { NULL } + { "pf_fixed", STAT_OFFSET(pf_fixed), NULL }, + { "pf_guest", STAT_OFFSET(pf_guest), NULL }, + { "tlb_flush", STAT_OFFSET(tlb_flush), NULL }, + { "invlpg", STAT_OFFSET(invlpg), NULL }, + { "exits", STAT_OFFSET(exits), NULL }, + { "io_exits", STAT_OFFSET(io_exits), NULL }, + { "mmio_exits", STAT_OFFSET(mmio_exits), NULL }, + { "signal_exits", STAT_OFFSET(signal_exits), NULL }, + { "irq_window", STAT_OFFSET(irq_window_exits), NULL }, + { "halt_exits", STAT_OFFSET(halt_exits), NULL }, + { "request_irq", STAT_OFFSET(request_irq_exits), NULL }, + { "irq_exits", STAT_OFFSET(irq_exits), NULL }, + { NULL, NULL, NULL } }; static struct dentry *debugfs_dir; @@ -2193,7 +2193,7 @@ static int kvm_vcpu_ioctl_translate(stru static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { - if (irq->irq < 0 || irq->irq >= 256) + if (irq->irq >= 256) return -EINVAL; vcpu_load(vcpu); @@ -2851,6 +2851,9 @@ static struct miscdevice kvm_dev = { KVM_MINOR, "kvm", &kvm_chardev_ops, + {NULL, NULL}, + NULL, + NULL }; static int kvm_reboot(struct notifier_block *notifier, unsigned long val, diff -urNp linux-2.6.22.1/drivers/kvm/vmx.c linux-2.6.22.1/drivers/kvm/vmx.c --- linux-2.6.22.1/drivers/kvm/vmx.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/kvm/vmx.c 2007-08-02 11:38:47.000000000 -0400 @@ -2013,7 +2013,7 @@ again: vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0; - asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); + asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__KERNEL_DS)); if (fail) { kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; diff -urNp linux-2.6.22.1/drivers/md/bitmap.c linux-2.6.22.1/drivers/md/bitmap.c --- linux-2.6.22.1/drivers/md/bitmap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/md/bitmap.c 2007-08-02 11:38:47.000000000 -0400 @@ -57,7 +57,7 @@ # if DEBUG > 0 # define PRINTK(x...) printk(KERN_DEBUG x) # else -# define PRINTK(x...) +# define PRINTK(x...) 
do {} while (0) # endif #endif diff -urNp linux-2.6.22.1/drivers/mtd/devices/doc2001.c linux-2.6.22.1/drivers/mtd/devices/doc2001.c --- linux-2.6.22.1/drivers/mtd/devices/doc2001.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/mtd/devices/doc2001.c 2007-08-02 11:09:15.000000000 -0400 @@ -398,6 +398,8 @@ static int doc_read (struct mtd_info *mt /* Don't allow read past end of device */ if (from >= this->totlen) return -EINVAL; + if (!len) + return -EINVAL; /* Don't allow a single read to cross a 512-byte block boundary */ if (from + len > ((from | 0x1ff) + 1)) diff -urNp linux-2.6.22.1/drivers/net/eepro100.c linux-2.6.22.1/drivers/net/eepro100.c --- linux-2.6.22.1/drivers/net/eepro100.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/net/eepro100.c 2007-08-02 11:38:47.000000000 -0400 @@ -47,7 +47,7 @@ static int rxdmacount /* = 0 */; # define rx_align(skb) skb_reserve((skb), 2) # define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed)) #else -# define rx_align(skb) +# define rx_align(skb) do {} while (0) # define RxFD_ALIGNMENT #endif @@ -2339,33 +2339,33 @@ static void __devexit eepro100_remove_on } static struct pci_device_id eepro100_pci_tbl[] = { - { PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, }, - { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, }, - { 0,} + { PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1038, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl); diff -urNp linux-2.6.22.1/drivers/net/pcnet32.c linux-2.6.22.1/drivers/net/pcnet32.c --- linux-2.6.22.1/drivers/net/pcnet32.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/net/pcnet32.c 2007-08-02 11:38:47.000000000 -0400 @@ -82,7 +82,7 @@ static int cards_found; /* * VLB I/O addresses */ -static unsigned int pcnet32_portlist[] __initdata = +static unsigned int pcnet32_portlist[] __devinitdata = { 0x300, 0x320, 0x340, 0x360, 0 }; static int pcnet32_debug = 0; diff -urNp linux-2.6.22.1/drivers/net/tg3.h linux-2.6.22.1/drivers/net/tg3.h --- linux-2.6.22.1/drivers/net/tg3.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/net/tg3.h 2007-08-02 11:38:47.000000000 -0400 @@ -127,6 +127,7 @@ #define CHIPREV_ID_5750_A0 0x4000 #define CHIPREV_ID_5750_A1 0x4001 #define CHIPREV_ID_5750_A3 0x4003 +#define CHIPREV_ID_5750_C1 0x4201 #define CHIPREV_ID_5750_C2 0x4202 #define CHIPREV_ID_5752_A0_HW 0x5000 #define CHIPREV_ID_5752_A0 0x6000 diff -urNp linux-2.6.22/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.22/drivers/pci/hotplug/cpqphp_nvram.c --- linux-2.6.22/drivers/pci/hotplug/cpqphp_nvram.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22/drivers/pci/hotplug/cpqphp_nvram.c 2007-07-10 14:56:30.000000000 -0400 @@ -425,9 +425,13 @@ static u32 store_HRT (void __iomem *rom_ void compaq_nvram_init (void __iomem *rom_start) { + +#ifndef CONFIG_PAX_KERNEXEC if (rom_start) { compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR); } +#endif + dbg("int15 entry = %p\n", compaq_int15_entry_point); /* initialize our int15 lock */ diff -urNp linux-2.6.22.1/drivers/pci/pcie/aer/aerdrv.c linux-2.6.22.1/drivers/pci/pcie/aer/aerdrv.c --- linux-2.6.22.1/drivers/pci/pcie/aer/aerdrv.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/pci/pcie/aer/aerdrv.c 2007-08-02 11:38:47.000000000 -0400 @@ -58,7 +58,7 @@ static struct pcie_port_service_id aer_i .port_type = PCIE_RC_PORT, .service_type = PCIE_PORT_SERVICE_AER, }, - { /* end: all zeroes */ } + { 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; static struct pci_error_handlers aer_error_handlers = { diff -urNp linux-2.6.22.1/drivers/pci/pcie/aer/aerdrv_core.c linux-2.6.22.1/drivers/pci/pcie/aer/aerdrv_core.c --- linux-2.6.22.1/drivers/pci/pcie/aer/aerdrv_core.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/pci/pcie/aer/aerdrv_core.c 2007-08-02 11:38:47.000000000 -0400 @@ -647,7 +647,7 @@ static void aer_isr_one_error(struct pci struct aer_err_source *e_src) { struct 
device *s_device; - struct aer_err_info e_info = {0, 0, 0,}; + struct aer_err_info e_info = {0, 0, 0, {0, 0, 0, 0}}; int i; u16 id; diff -urNp linux-2.6.22.1/drivers/pci/pcie/portdrv_pci.c linux-2.6.22.1/drivers/pci/pcie/portdrv_pci.c --- linux-2.6.22.1/drivers/pci/pcie/portdrv_pci.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/pci/pcie/portdrv_pci.c 2007-08-02 11:38:47.000000000 -0400 @@ -265,7 +265,7 @@ static void pcie_portdrv_err_resume(stru static const struct pci_device_id port_pci_ids[] = { { /* handle any PCI-Express port */ PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0), - }, { /* end: all zeroes */ } + }, { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, port_pci_ids); diff -urNp linux-2.6.22.1/drivers/pci/proc.c linux-2.6.22.1/drivers/pci/proc.c --- linux-2.6.22.1/drivers/pci/proc.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/pci/proc.c 2007-08-02 11:09:15.000000000 -0400 @@ -466,7 +466,15 @@ static int __init pci_proc_init(void) { struct proc_dir_entry *entry; struct pci_dev *dev = NULL; +#ifdef CONFIG_GRKERNSEC_PROC_ADD +#ifdef CONFIG_GRKERNSEC_PROC_USER + proc_bus_pci_dir = proc_mkdir_mode("pci", S_IRUSR | S_IXUSR, proc_bus); +#elif CONFIG_GRKERNSEC_PROC_USERGROUP + proc_bus_pci_dir = proc_mkdir_mode("pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, proc_bus); +#endif +#else proc_bus_pci_dir = proc_mkdir("pci", proc_bus); +#endif entry = create_proc_entry("devices", 0, proc_bus_pci_dir); if (entry) entry->proc_fops = &proc_bus_pci_dev_operations; diff -urNp linux-2.6.22.1/drivers/pcmcia/ti113x.h linux-2.6.22.1/drivers/pcmcia/ti113x.h --- linux-2.6.22.1/drivers/pcmcia/ti113x.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/pcmcia/ti113x.h 2007-08-02 11:38:47.000000000 -0400 @@ -897,7 +897,7 @@ static struct pci_device_id ene_tune_tbl DEVID(PCI_VENDOR_ID_MOTOROLA, 0x3410, 0xECC0, PCI_ANY_ID, ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE), - {} + { 0, 0, 0, 0, 0, 0, 0 } }; static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus) diff -urNp linux-2.6.22.1/drivers/pcmcia/yenta_socket.c linux-2.6.22.1/drivers/pcmcia/yenta_socket.c --- linux-2.6.22.1/drivers/pcmcia/yenta_socket.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/pcmcia/yenta_socket.c 2007-08-02 11:38:47.000000000 -0400 @@ -1358,7 +1358,7 @@ static struct pci_device_id yenta_table /* match any cardbus bridge */ CB_ID(PCI_ANY_ID, PCI_ANY_ID, DEFAULT), - { /* all zeroes */ } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, yenta_table); diff -urNp linux-2.6.22.1/drivers/pnp/pnpbios/bioscalls.c linux-2.6.22.1/drivers/pnp/pnpbios/bioscalls.c --- linux-2.6.22.1/drivers/pnp/pnpbios/bioscalls.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/pnp/pnpbios/bioscalls.c 2007-08-02 11:38:47.000000000 -0400 @@ -65,7 +65,7 @@ set_base(gdt[(selname) >> 3], (u32)(addr set_limit(gdt[(selname) >> 3], size); \ } while(0) -static struct desc_struct bad_bios_desc = { 0, 0x00409200 }; +static struct desc_struct bad_bios_desc = { 0, 0x00409300 }; /* * At some point we want to use this stack frame pointer to unwind @@ -93,6 +93,10 @@ static inline u16 call_pnp_bios(u16 func struct desc_struct save_desc_40; int cpu; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + /* * PnP BIOSes are generally not terribly re-entrant. * Also, don't rely on them to save everything correctly. @@ -107,6 +111,10 @@ static inline u16 call_pnp_bios(u16 func /* On some boxes IRQ's during PnP BIOS calls are deadly. 
*/ spin_lock_irqsave(&pnp_bios_lock, flags); +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + /* The lock prevents us bouncing CPU here */ if (ts1_size) Q2_SET_SEL(smp_processor_id(), PNP_TS1, ts1_base, ts1_size); @@ -142,9 +150,14 @@ static inline u16 call_pnp_bios(u16 func "i" (0) : "memory" ); - spin_unlock_irqrestore(&pnp_bios_lock, flags); get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40; + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + + spin_unlock_irqrestore(&pnp_bios_lock, flags); put_cpu(); /* If we get here and this is set then the PnP BIOS faulted on us. */ @@ -515,15 +528,25 @@ static int pnp_bios_write_escd(char *dat * Initialization */ -void pnpbios_calls_init(union pnp_bios_install_struct *header) +void __init pnpbios_calls_init(union pnp_bios_install_struct *header) { int i; + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + spin_lock_init(&pnp_bios_lock); pnp_bios_callpoint.offset = header->fields.pm16offset; pnp_bios_callpoint.segment = PNP_CS16; set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + for (i = 0; i < NR_CPUS; i++) { struct desc_struct *gdt = get_cpu_gdt_table(i); if (!gdt) @@ -532,4 +555,9 @@ void pnpbios_calls_init(union pnp_bios_i set_base(gdt[GDT_ENTRY_PNPBIOS_CS16], __va(header->fields.pm16cseg)); set_base(gdt[GDT_ENTRY_PNPBIOS_DS], __va(header->fields.pm16dseg)); } + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + } diff -urNp linux-2.6.22.1/drivers/pnp/quirks.c linux-2.6.22.1/drivers/pnp/quirks.c --- linux-2.6.22.1/drivers/pnp/quirks.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/pnp/quirks.c 2007-08-02 11:38:47.000000000 -0400 @@ -231,7 +231,7 @@ static struct pnp_fixup pnp_fixups[] = { { "CTL0044", quirk_sb16audio_resources }, { "CTL0045", quirk_sb16audio_resources }, { "SMCf010", quirk_smc_enable }, - { "" } + { "", NULL } }; void pnp_fixup_device(struct pnp_dev *dev) diff -urNp linux-2.6.22.1/drivers/pnp/resource.c linux-2.6.22.1/drivers/pnp/resource.c --- linux-2.6.22.1/drivers/pnp/resource.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/pnp/resource.c 2007-08-02 11:38:47.000000000 -0400 @@ -364,7 +364,7 @@ int pnp_check_irq(struct pnp_dev * dev, return 1; /* check if the resource is valid */ - if (*irq < 0 || *irq > 15) + if (*irq > 15) return 0; /* check if the resource is reserved */ @@ -430,7 +430,7 @@ int pnp_check_dma(struct pnp_dev * dev, return 1; /* check if the resource is valid */ - if (*dma < 0 || *dma == 4 || *dma > 7) + if (*dma == 4 || *dma > 7) return 0; /* check if the resource is reserved */ diff -urNp linux-2.6.22.1/drivers/scsi/scsi_lib.c linux-2.6.22.1/drivers/scsi/scsi_lib.c --- linux-2.6.22.1/drivers/scsi/scsi_lib.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/scsi/scsi_lib.c 2007-08-02 11:38:47.000000000 -0400 @@ -44,7 +44,7 @@ struct scsi_host_sg_pool { #error SCSI_MAX_PHYS_SEGMENTS is too small #endif -#define SP(x) { x, "sgpool-" #x } +#define SP(x) { x, "sgpool-" #x, NULL, NULL } static struct scsi_host_sg_pool scsi_sg_pools[] = { SP(8), SP(16), diff -urNp linux-2.6.22.1/drivers/scsi/scsi_logging.h linux-2.6.22.1/drivers/scsi/scsi_logging.h --- linux-2.6.22.1/drivers/scsi/scsi_logging.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/scsi/scsi_logging.h 2007-08-02 11:38:47.000000000 -0400 @@ -51,7 +51,7 @@ do { \ } while (0); \ } while (0) #else -#define 
SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) +#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do {} while (0) #endif /* CONFIG_SCSI_LOGGING */ /* diff -urNp linux-2.6.22.1/drivers/serial/8250_pci.c linux-2.6.22.1/drivers/serial/8250_pci.c --- linux-2.6.22.1/drivers/serial/8250_pci.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/serial/8250_pci.c 2007-08-02 11:38:47.000000000 -0400 @@ -2417,7 +2417,7 @@ static struct pci_device_id serial_pci_t PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, pbn_default }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; static struct pci_driver serial_pci_driver = { diff -urNp linux-2.6.22.1/drivers/usb/class/cdc-acm.c linux-2.6.22.1/drivers/usb/class/cdc-acm.c --- linux-2.6.22.1/drivers/usb/class/cdc-acm.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/usb/class/cdc-acm.c 2007-08-02 11:38:47.000000000 -0400 @@ -1188,7 +1188,7 @@ static struct usb_device_id acm_ids[] = USB_CDC_ACM_PROTO_AT_CDMA) }, /* NOTE: COMM/ACM/0xff is likely MSFT RNDIS ... NOT a modem!! */ - { } + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE (usb, acm_ids); diff -urNp linux-2.6.22.1/drivers/usb/class/usblp.c linux-2.6.22.1/drivers/usb/class/usblp.c --- linux-2.6.22.1/drivers/usb/class/usblp.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/usb/class/usblp.c 2007-08-02 11:38:47.000000000 -0400 @@ -219,7 +219,7 @@ static const struct quirk_printer_struct { 0x0409, 0xf1be, USBLP_QUIRK_BIDIR }, /* NEC Picty800 (HP OEM) */ { 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut */ { 0x04b8, 0x0202, USBLP_QUIRK_BAD_CLASS }, /* Seiko Epson Receipt Printer M129C */ - { 0, 0 } + { 0, 0, 0 } }; static int usblp_select_alts(struct usblp *usblp); @@ -1234,7 +1234,7 @@ static struct usb_device_id usblp_ids [] { USB_INTERFACE_INFO(7, 1, 2) }, { USB_INTERFACE_INFO(7, 1, 3) }, { USB_DEVICE(0x04b8, 0x0202) }, /* Seiko Epson Receipt Printer M129C */ - { } /* Terminating entry */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, usblp_ids); diff -urNp linux-2.6.22.1/drivers/usb/core/hub.c linux-2.6.22.1/drivers/usb/core/hub.c --- linux-2.6.22.1/drivers/usb/core/hub.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/usb/core/hub.c 2007-08-02 11:38:47.000000000 -0400 @@ -2833,7 +2833,7 @@ static struct usb_device_id hub_id_table .bDeviceClass = USB_CLASS_HUB}, { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, .bInterfaceClass = USB_CLASS_HUB}, - { } /* Terminating entry */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, hub_id_table); diff -urNp linux-2.6.22.1/drivers/usb/host/ehci-pci.c linux-2.6.22.1/drivers/usb/host/ehci-pci.c --- linux-2.6.22.1/drivers/usb/host/ehci-pci.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/usb/host/ehci-pci.c 2007-08-02 11:38:47.000000000 -0400 @@ -377,7 +377,7 @@ static const struct pci_device_id pci_id PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0), .driver_data = (unsigned long) &ehci_pci_hc_driver, }, - { /* end: all zeroes */ } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, pci_ids); diff -urNp linux-2.6.22.1/drivers/usb/host/uhci-hcd.c linux-2.6.22.1/drivers/usb/host/uhci-hcd.c --- linux-2.6.22.1/drivers/usb/host/uhci-hcd.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/usb/host/uhci-hcd.c 2007-08-02 11:38:47.000000000 -0400 @@ -895,7 +895,7 @@ static const struct pci_device_id uhci_p /* handle any USB UHCI controller */ 
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0), .driver_data = (unsigned long) &uhci_driver, - }, { /* end: all zeroes */ } + }, { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, uhci_pci_ids); diff -urNp linux-2.6.22.1/drivers/usb/storage/debug.h linux-2.6.22.1/drivers/usb/storage/debug.h --- linux-2.6.22.1/drivers/usb/storage/debug.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/usb/storage/debug.h 2007-08-02 11:38:47.000000000 -0400 @@ -56,9 +56,9 @@ void usb_stor_show_sense( unsigned char #define US_DEBUGPX(x...) printk( x ) #define US_DEBUG(x) x #else -#define US_DEBUGP(x...) -#define US_DEBUGPX(x...) -#define US_DEBUG(x) +#define US_DEBUGP(x...) do {} while (0) +#define US_DEBUGPX(x...) do {} while (0) +#define US_DEBUG(x) do {} while (0) #endif #endif diff -urNp linux-2.6.22.1/drivers/usb/storage/usb.c linux-2.6.22.1/drivers/usb/storage/usb.c --- linux-2.6.22.1/drivers/usb/storage/usb.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/usb/storage/usb.c 2007-08-02 11:38:47.000000000 -0400 @@ -141,7 +141,7 @@ static struct usb_device_id storage_usb_ #undef UNUSUAL_DEV #undef USUAL_DEV /* Terminating entry */ - { } + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE (usb, storage_usb_ids); @@ -181,7 +181,7 @@ static struct us_unusual_dev us_unusual_ # undef USUAL_DEV /* Terminating entry */ - { NULL } + { NULL, NULL, 0, 0, NULL } }; diff -urNp linux-2.6.22.1/drivers/video/fbcmap.c linux-2.6.22.1/drivers/video/fbcmap.c --- linux-2.6.22.1/drivers/video/fbcmap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/video/fbcmap.c 2007-08-02 11:38:47.000000000 -0400 @@ -251,8 +251,7 @@ int fb_set_user_cmap(struct fb_cmap_user int rc, size = cmap->len * sizeof(u16); struct fb_cmap umap; - if (cmap->start < 0 || (!info->fbops->fb_setcolreg && - !info->fbops->fb_setcmap)) + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) return -EINVAL; memset(&umap, 0, sizeof(struct fb_cmap)); diff -urNp linux-2.6.22.1/drivers/video/fbmem.c linux-2.6.22.1/drivers/video/fbmem.c --- linux-2.6.22.1/drivers/video/fbmem.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/video/fbmem.c 2007-08-02 11:38:47.000000000 -0400 @@ -392,7 +392,7 @@ static void fb_do_show_logo(struct fb_in image->dx += image->width + 8; } } else if (rotate == FB_ROTATE_UD) { - for (x = 0; x < num && image->dx >= 0; x++) { + for (x = 0; x < num && (__s32)image->dx >= 0; x++) { info->fbops->fb_imageblit(info, image); image->dx -= image->width + 8; } @@ -404,7 +404,7 @@ static void fb_do_show_logo(struct fb_in image->dy += image->height + 8; } } else if (rotate == FB_ROTATE_CCW) { - for (x = 0; x < num && image->dy >= 0; x++) { + for (x = 0; x < num && (__s32)image->dy >= 0; x++) { info->fbops->fb_imageblit(info, image); image->dy -= image->height + 8; } @@ -973,9 +973,9 @@ fb_ioctl(struct inode *inode, struct fil case FBIOPUT_CON2FBMAP: if (copy_from_user(&con2fb, argp, sizeof(con2fb))) return - EFAULT; - if (con2fb.console < 0 || con2fb.console > MAX_NR_CONSOLES) + if (con2fb.console > MAX_NR_CONSOLES) return -EINVAL; - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX) + if (con2fb.framebuffer >= FB_MAX) return -EINVAL; #ifdef CONFIG_KMOD if (!registered_fb[con2fb.framebuffer]) diff -urNp linux-2.6.22.1/drivers/video/fbmon.c linux-2.6.22.1/drivers/video/fbmon.c --- linux-2.6.22.1/drivers/video/fbmon.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/video/fbmon.c 2007-08-02 11:38:47.000000000 -0400 @@ -45,7 +45,7 @@ #ifdef DEBUG #define 
DPRINTK(fmt, args...) printk(fmt,## args) #else -#define DPRINTK(fmt, args...) +#define DPRINTK(fmt, args...) do {} while (0) #endif #define FBMON_FIX_HEADER 1 diff -urNp linux-2.6.22.1/drivers/video/i810/i810_accel.c linux-2.6.22.1/drivers/video/i810/i810_accel.c --- linux-2.6.22.1/drivers/video/i810/i810_accel.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/video/i810/i810_accel.c 2007-08-02 11:38:47.000000000 -0400 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct } } printk("ringbuffer lockup!!!\n"); + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space); i810_report_error(mmio); par->dev_flags |= LOCKUP; info->pixmap.scan_align = 1; diff -urNp linux-2.6.22.1/drivers/video/i810/i810_main.c linux-2.6.22.1/drivers/video/i810/i810_main.c --- linux-2.6.22.1/drivers/video/i810/i810_main.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/video/i810/i810_main.c 2007-08-02 11:38:47.000000000 -0400 @@ -120,7 +120,7 @@ static struct pci_device_id i810fb_pci_t PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, - { 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, }; static struct pci_driver i810fb_driver = { @@ -1509,7 +1509,7 @@ static int i810fb_cursor(struct fb_info int size = ((cursor->image.width + 7) >> 3) * cursor->image.height; int i; - u8 *data = kmalloc(64 * 8, GFP_ATOMIC); + u8 *data = kmalloc(64 * 8, GFP_KERNEL); if (data == NULL) return -ENOMEM; diff -urNp linux-2.6.22.1/drivers/video/modedb.c linux-2.6.22.1/drivers/video/modedb.c --- linux-2.6.22.1/drivers/video/modedb.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/drivers/video/modedb.c 2007-08-02 11:38:47.000000000 -0400 @@ -37,228 +37,228 @@ static const struct fb_videomode modedb[ { /* 640x400 @ 70 Hz, 31.5 kHz hsync */ NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 640x480 @ 60 Hz, 31.5 kHz hsync */ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 800x600 @ 56 Hz, 35.15 kHz hsync */ NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */ NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8, - 0, FB_VMODE_INTERLACED + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN }, { /* 640x400 @ 85 Hz, 37.86 kHz hsync */ NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3, - FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 640x480 @ 72 Hz, 36.5 kHz hsync */ NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 640x480 @ 75 Hz, 37.50 kHz hsync */ NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 800x600 @ 60 Hz, 37.8 kHz hsync */ NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 640x480 @ 85 Hz, 43.27 kHz hsync */ NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */ NULL, 69, 1152, 864, 15384, 96, 16, 110, 1, 
216, 10, - 0, FB_VMODE_INTERLACED + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN }, { /* 800x600 @ 72 Hz, 48.0 kHz hsync */ NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1024x768 @ 60 Hz, 48.4 kHz hsync */ NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 640x480 @ 100 Hz, 53.01 kHz hsync */ NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1152x864 @ 60 Hz, 53.5 kHz hsync */ NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 800x600 @ 85 Hz, 55.84 kHz hsync */ NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1024x768 @ 70 Hz, 56.5 kHz hsync */ NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */ NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12, - 0, FB_VMODE_INTERLACED + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN }, { /* 800x600 @ 100 Hz, 64.02 kHz hsync */ NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1024x768 @ 76 Hz, 62.5 kHz hsync */ NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1152x864 @ 70 Hz, 62.4 kHz hsync */ NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1280x1024 @ 61 Hz, 64.2 kHz hsync */ NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1400x1050 @ 60Hz, 63.9 kHz hsync */ NULL, 68, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/ NULL, 75, 1400, 1050, 9271, 120, 56, 13, 0, 112, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1400x1050 @ 60 Hz, ? 
kHz +hsync +vsync*/ NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1024x768 @ 85 Hz, 70.24 kHz hsync */ NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1152x864 @ 78 Hz, 70.8 kHz hsync */ NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1280x1024 @ 70 Hz, 74.59 kHz hsync */ NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1600x1200 @ 60Hz, 75.00 kHz hsync */ NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1152x864 @ 84 Hz, 76.0 kHz hsync */ NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1280x1024 @ 74 Hz, 78.85 kHz hsync */ NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1024x768 @ 100Hz, 80.21 kHz hsync */ NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1280x1024 @ 76 Hz, 81.13 kHz hsync */ NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1600x1200 @ 70 Hz, 87.50 kHz hsync */ NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1152x864 @ 100 Hz, 89.62 kHz hsync */ NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1280x1024 @ 85 Hz, 91.15 kHz hsync */ NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1600x1200 @ 75 Hz, 93.75 kHz hsync */ NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1680x1050 @ 60 Hz, 65.191 kHz hsync */ NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1600x1200 @ 85 Hz, 105.77 kHz hsync */ NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1280x1024 @ 100 Hz, 107.16 kHz hsync */ NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1800x1440 @ 64Hz, 96.15 kHz hsync */ NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1800x1440 @ 70Hz, 104.52 kHz hsync */ 
NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 512x384 @ 78 Hz, 31.50 kHz hsync */ NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 512x384 @ 85 Hz, 34.38 kHz hsync */ NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */ NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1, - 0, FB_VMODE_DOUBLE + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN }, { /* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */ NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1, - 0, FB_VMODE_DOUBLE + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN }, { /* 320x240 @ 72 Hz, 36.5 kHz hsync */ NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2, - 0, FB_VMODE_DOUBLE + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN }, { /* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */ NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1, - 0, FB_VMODE_DOUBLE + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN }, { /* 400x300 @ 60 Hz, 37.8 kHz hsync */ NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2, - 0, FB_VMODE_DOUBLE + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN }, { /* 400x300 @ 72 Hz, 48.0 kHz hsync */ NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3, - 0, FB_VMODE_DOUBLE + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN }, { /* 480x300 @ 56 Hz, 35.2 kHz hsync, 8:5 aspect ratio */ NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1, - 0, FB_VMODE_DOUBLE + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN }, { /* 480x300 @ 60 Hz, 37.8 kHz hsync */ NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2, - 0, FB_VMODE_DOUBLE + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN }, { /* 480x300 @ 63 Hz, 39.6 kHz hsync */ NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2, - 0, FB_VMODE_DOUBLE + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN }, { /* 480x300 @ 72 Hz, 48.0 kHz hsync */ NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, - 0, FB_VMODE_DOUBLE + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN }, { /* 1920x1200 @ 60 Hz, 74.5 Khz hsync */ NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, - FB_VMODE_NONINTERLACED + FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */ NULL, 60, 1152, 768, 15386, 158, 26, 29, 3, 136, 6, - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, { /* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */ NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5, - 0, FB_VMODE_NONINTERLACED + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN }, }; diff -urNp linux-2.6.22/drivers/video/vesafb.c linux-2.6.22/drivers/video/vesafb.c --- linux-2.6.22/drivers/video/vesafb.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22/drivers/video/vesafb.c 2007-07-10 14:56:30.000000000 -0400 @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -224,6 +225,7 @@ static int __init vesafb_probe(struct pl unsigned int size_vmode; unsigned int size_remap; unsigned int size_total; + void *pmi_code = NULL; if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB) return -ENODEV; @@ -266,10 +268,6 @@ static int __init vesafb_probe(struct pl size_remap = size_total; vesafb_fix.smem_len = size_remap; -#ifndef __i386__ - screen_info.vesapm_seg = 0; -#endif 
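The modedb[] hunks ending above spell out the final fb_videomode field (FB_MODE_IS_UNKNOWN) instead of leaving it implicit, just as the earlier device-ID tables replace "{ }" terminators with fully zeroed entries. Nothing changes at run time, since omitted aggregate members are zero-initialized anyway; writing every member out presumably keeps the tables quiet under stricter warning options such as -Wmissing-field-initializers. A minimal illustration with a made-up ID table:

#include <stdio.h>

struct fake_id { unsigned vendor, device, flags; };

/* Both tables end in an all-zero terminator; the second form only spells the
   zeroes out, which is what the patch does for the real ID tables. */
static const struct fake_id table_short[] = { { 0x10ec, 0x8139, 1 }, { } };
static const struct fake_id table_long[]  = { { 0x10ec, 0x8139, 1 }, { 0, 0, 0 } };

int main(void)
{
    printf("%u %u\n", table_short[1].vendor, table_long[1].vendor);   /* prints "0 0" */
    return 0;
}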
- if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) { printk(KERN_WARNING "vesafb: cannot reserve video memory at 0x%lx\n", @@ -302,9 +300,21 @@ static int __init vesafb_probe(struct pl printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n", vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages); +#ifdef __i386__ + +#ifdef CONFIG_PAX_KERNEXEC + pmi_code = module_alloc_exec(screen_info.vesapm_size); + if (!pmi_code) +#else + if (0) +#endif + +#endif + screen_info.vesapm_seg = 0; + if (screen_info.vesapm_seg) { - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n", - screen_info.vesapm_seg,screen_info.vesapm_off); + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n", + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size); } if (screen_info.vesapm_seg < 0xc000) @@ -312,9 +322,29 @@ static int __init vesafb_probe(struct pl if (ypan || pmi_setpal) { unsigned short *pmi_base; - pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off); - pmi_start = (void*)((char*)pmi_base + pmi_base[1]); - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]); + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off); + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); + memcpy(pmi_code, pmi_base, screen_info.vesapm_size); + pax_close_kernel(cr0); +#else + pmi_code = pmi_base; +#endif + + pmi_start = (void*)((char*)pmi_code + pmi_base[1]); + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]); + +#ifdef CONFIG_PAX_KERNEXEC + pmi_start -= __KERNEL_TEXT_OFFSET; + pmi_pal -= __KERNEL_TEXT_OFFSET; +#endif + printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal); if (pmi_base[3]) { printk(KERN_INFO "vesafb: pmi: ports = "); @@ -456,6 +486,11 @@ static int __init vesafb_probe(struct pl info->node, info->fix.id); return 0; err: + +#ifdef CONFIG_PAX_KERNEXEC + module_free_exec(NULL, pmi_code); +#endif + if (info->screen_base) iounmap(info->screen_base); framebuffer_release(info); diff -urNp linux-2.6.22.1/fs/binfmt_aout.c linux-2.6.22.1/fs/binfmt_aout.c --- linux-2.6.22.1/fs/binfmt_aout.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/binfmt_aout.c 2007-08-02 11:38:47.000000000 -0400 @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -123,10 +124,12 @@ static int aout_core_dump(long signr, st /* If the size of the dump file exceeds the rlimit, then see what would happen if we wrote the stack, but not the data area. */ #ifdef __sparc__ + gr_learn_resource(current, RLIMIT_CORE, dump.u_dsize+dump.u_ssize, 1); if ((dump.u_dsize+dump.u_ssize) > current->signal->rlim[RLIMIT_CORE].rlim_cur) dump.u_dsize = 0; #else + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE, 1); if ((dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE > current->signal->rlim[RLIMIT_CORE].rlim_cur) dump.u_dsize = 0; @@ -134,10 +137,12 @@ static int aout_core_dump(long signr, st /* Make sure we have enough room to write the stack and data areas. 
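With CONFIG_PAX_KERNEXEC the vesafb hunks above stop running the VESA protected-mode interface from its BIOS-provided location: the PMI blob is copied into memory obtained from module_alloc_exec(), pmi_start/pmi_pal are recomputed against that copy (and adjusted by __KERNEL_TEXT_OFFSET), and the error path frees it with module_free_exec(). The hunk does not state the motivation, but presumably the original low-memory mapping is no longer executable once KERNEXEC is active. A rough userspace model of the copy-and-fix-up step, with malloc() standing in for module_alloc_exec() and a fabricated PMI table; as in the original code, pmi_base[1] and pmi_base[2] hold the byte offsets of the two entry points:

#include <stdlib.h>
#include <string.h>

struct pmi {
    void (*set_start)(void);      /* "set display start" entry point */
    void (*set_pal)(void);        /* "set palette" entry point       */
};

/* Copy the PMI blob somewhere executable and point the entry pointers at the
   copy instead of the original; malloc() is only a stand-in for the patch's
   module_alloc_exec(). */
static int relocate_pmi(const unsigned short *pmi_base, size_t size, struct pmi *out)
{
    char *copy = malloc(size);

    if (!copy)
        return -1;
    memcpy(copy, pmi_base, size);
    out->set_start = (void (*)(void))(copy + pmi_base[1]);
    out->set_pal   = (void (*)(void))(copy + pmi_base[2]);
    return 0;
}

int main(void)
{
    unsigned short fake[16] = { 0, 8, 16 };   /* fabricated table: offsets 8 and 16 */
    struct pmi p;

    return relocate_pmi(fake, sizeof(fake), &p) ? 1 : 0;
}

Casting a data pointer to a function pointer is non-standard C but mirrors what the kernel code itself does; the pointers are never called in this sketch.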
*/ #ifdef __sparc__ + gr_learn_resource(current, RLIMIT_CORE, dump.u_ssize, 1); if ((dump.u_ssize) > current->signal->rlim[RLIMIT_CORE].rlim_cur) dump.u_ssize = 0; #else + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize+1) * PAGE_SIZE, 1); if ((dump.u_ssize+1) * PAGE_SIZE > current->signal->rlim[RLIMIT_CORE].rlim_cur) dump.u_ssize = 0; @@ -294,6 +299,8 @@ static int load_aout_binary(struct linux rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; if (rlim >= RLIM_INFINITY) rlim = ~0; + + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1); if (ex.a_data + ex.a_bss > rlim) return -ENOMEM; @@ -326,6 +333,28 @@ static int load_aout_binary(struct linux current->mm->mmap = NULL; compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; + +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) + current->mm->pax_flags = 0UL; +#endif + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) { + current->mm->pax_flags |= MF_PAX_PAGEEXEC; + +#ifdef CONFIG_PAX_EMUTRAMP + if (N_FLAGS(ex) & F_PAX_EMUTRAMP) + current->mm->pax_flags |= MF_PAX_EMUTRAMP; +#endif + +#ifdef CONFIG_PAX_MPROTECT + if (!(N_FLAGS(ex) & F_PAX_MPROTECT)) + current->mm->pax_flags |= MF_PAX_MPROTECT; +#endif + + } +#endif + #ifdef __sparc__ if (N_MAGIC(ex) == NMAGIC) { loff_t pos = fd_offset; @@ -421,7 +450,7 @@ static int load_aout_binary(struct linux down_write(¤t->mm->mmap_sem); error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data, - PROT_READ | PROT_WRITE | PROT_EXEC, + PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, fd_offset + ex.a_text); up_write(¤t->mm->mmap_sem); diff -urNp linux-2.6.22.1/fs/binfmt_elf.c linux-2.6.22.1/fs/binfmt_elf.c --- linux-2.6.22.1/fs/binfmt_elf.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/binfmt_elf.c 2007-08-02 11:38:47.000000000 -0400 @@ -39,10 +39,21 @@ #include #include #include +#include + #include #include #include +#ifdef CONFIG_PAX_SEGMEXEC +#include +#endif + +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); +EXPORT_SYMBOL(pax_set_initial_flags_func); +#endif + static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs); static int load_elf_library(struct file *); static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int); @@ -84,6 +90,8 @@ static struct linux_binfmt elf_format = static int set_brk(unsigned long start, unsigned long end) { + unsigned long e = end; + start = ELF_PAGEALIGN(start); end = ELF_PAGEALIGN(end); if (end > start) { @@ -94,7 +102,7 @@ static int set_brk(unsigned long start, if (BAD_ADDR(addr)) return addr; } - current->mm->start_brk = current->mm->brk = end; + current->mm->start_brk = current->mm->brk = e; return 0; } @@ -315,10 +323,9 @@ static unsigned long load_elf_interp(str { struct elf_phdr *elf_phdata; struct elf_phdr *eppnt; - unsigned long load_addr = 0; - int load_addr_set = 0; + unsigned long load_addr = 0, min_addr, max_addr, task_size = TASK_SIZE; unsigned long last_bss = 0, elf_bss = 0; - unsigned long error = ~0UL; + unsigned long error = -EINVAL; int retval, i, size; /* First of all, some simple consistency checks */ @@ -357,66 +364,86 @@ static unsigned long load_elf_interp(str goto out_close; } +#ifdef CONFIG_PAX_SEGMEXEC + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) + task_size = SEGMEXEC_TASK_SIZE; +#endif + eppnt = elf_phdata; + min_addr = task_size; + max_addr = 0; + error = -ENOMEM; + for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) { - if (eppnt->p_type == PT_LOAD) { 
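The binfmt_aout hunks above, like many later ones (fs/buffer.c, fs/fcntl.c, fs/exec.c, the ELF core-dump writer), insert a gr_learn_resource(task, limit, value, gt) call directly in front of each comparison against the corresponding rlimit; the hook belongs to grsecurity's learning support and its body is not part of this excerpt. The pattern is simply "report the value about to be checked, then perform the unchanged check", which the hypothetical reduction below models:

#include <stdio.h>

/* Hypothetical stand-in for gr_learn_resource(): remember the largest value
   ever checked against a given limit so a policy could be generated later. */
static unsigned long observed_max[16];

static void learn_resource_stub(int limit, unsigned long value)
{
    if (limit >= 0 && limit < 16 && value > observed_max[limit])
        observed_max[limit] = value;
}

static int within_limit(int limit, unsigned long value, unsigned long rlim_cur)
{
    learn_resource_stub(limit, value);    /* learning hook sees every request */
    return value <= rlim_cur;             /* the original check is unchanged  */
}

int main(void)
{
    printf("allowed: %d\n", within_limit(4 /* RLIMIT_CORE on Linux */, 1UL << 20, 0));
    printf("max seen: %lu\n", observed_max[4]);
    return 0;
}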
- int elf_type = MAP_PRIVATE | MAP_DENYWRITE; - int elf_prot = 0; - unsigned long vaddr = 0; - unsigned long k, map_addr; - - if (eppnt->p_flags & PF_R) - elf_prot = PROT_READ; - if (eppnt->p_flags & PF_W) - elf_prot |= PROT_WRITE; - if (eppnt->p_flags & PF_X) - elf_prot |= PROT_EXEC; - vaddr = eppnt->p_vaddr; - if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) - elf_type |= MAP_FIXED; - - map_addr = elf_map(interpreter, load_addr + vaddr, - eppnt, elf_prot, elf_type); - error = map_addr; - if (BAD_ADDR(map_addr)) - goto out_close; - - if (!load_addr_set && - interp_elf_ex->e_type == ET_DYN) { - load_addr = map_addr - ELF_PAGESTART(vaddr); - load_addr_set = 1; - } + if (eppnt->p_type != PT_LOAD) + continue; - /* - * Check to see if the section's size will overflow the - * allowed task size. Note that p_filesz must always be - * <= p_memsize so it's only necessary to check p_memsz. - */ - k = load_addr + eppnt->p_vaddr; - if (BAD_ADDR(k) || - eppnt->p_filesz > eppnt->p_memsz || - eppnt->p_memsz > TASK_SIZE || - TASK_SIZE - eppnt->p_memsz < k) { - error = -ENOMEM; - goto out_close; - } + /* + * Check to see if the section's size will overflow the + * allowed task size. Note that p_filesz must always be + * <= p_memsize so it is only necessary to check p_memsz. + */ + if (eppnt->p_filesz > eppnt->p_memsz || eppnt->p_vaddr >= eppnt->p_vaddr + eppnt->p_memsz) + goto out_close; - /* - * Find the end of the file mapping for this phdr, and - * keep track of the largest address we see for this. - */ - k = load_addr + eppnt->p_vaddr + eppnt->p_filesz; - if (k > elf_bss) - elf_bss = k; + if (min_addr > ELF_PAGESTART(eppnt->p_vaddr)) + min_addr = ELF_PAGESTART(eppnt->p_vaddr); + if (max_addr < ELF_PAGEALIGN(eppnt->p_vaddr + eppnt->p_memsz)) + max_addr = ELF_PAGEALIGN(eppnt->p_vaddr + eppnt->p_memsz); + } + if (min_addr >= max_addr || max_addr > task_size) + goto out_close; - /* - * Do the same thing for the memory mapping - between - * elf_bss and last_bss is the bss section. - */ - k = load_addr + eppnt->p_memsz + eppnt->p_vaddr; - if (k > last_bss) - last_bss = k; - } + if (interp_elf_ex->e_type == ET_DYN) { + load_addr = get_unmapped_area(interpreter, 0, max_addr - min_addr, 0, MAP_PRIVATE | MAP_EXECUTABLE); + + if (load_addr >= task_size) + goto out_close; + + load_addr -= min_addr; + } + + eppnt = elf_phdata; + for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) { + int elf_type = MAP_PRIVATE | MAP_DENYWRITE | MAP_FIXED; + int elf_prot = 0; + unsigned long vaddr = 0; + unsigned long k, map_addr; + + if (eppnt->p_type != PT_LOAD) + continue; + + if (eppnt->p_flags & PF_R) + elf_prot = PROT_READ; + if (eppnt->p_flags & PF_W) + elf_prot |= PROT_WRITE; + if (eppnt->p_flags & PF_X) + elf_prot |= PROT_EXEC; + vaddr = eppnt->p_vaddr; + + map_addr = elf_map(interpreter, load_addr + vaddr, + eppnt, elf_prot, elf_type); + error = map_addr; + if (BAD_ADDR(map_addr)) + goto out_close; + + k = load_addr + eppnt->p_vaddr; + + /* + * Find the end of the file mapping for this phdr, and + * keep track of the largest address we see for this. + */ + k = load_addr + eppnt->p_vaddr + eppnt->p_filesz; + if (k > elf_bss) + elf_bss = k; + + /* + * Do the same thing for the memory mapping - between + * elf_bss and last_bss is the bss section. 
+ */ + k = load_addr + eppnt->p_memsz + eppnt->p_vaddr; + if (k > last_bss) + last_bss = k; } /* @@ -444,6 +471,8 @@ static unsigned long load_elf_interp(str *interp_load_addr = load_addr; error = ((unsigned long)interp_elf_ex->e_entry) + load_addr; + if (BAD_ADDR(error)) + error = -EFAULT; out_close: kfree(elf_phdata); @@ -454,7 +483,7 @@ out: static unsigned long load_aout_interp(struct exec *interp_ex, struct file *interpreter) { - unsigned long text_data, elf_entry = ~0UL; + unsigned long text_data, elf_entry = -EINVAL; char __user * addr; loff_t offset; @@ -497,6 +526,177 @@ out: return elf_entry; } +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE) +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata) +{ + unsigned long pax_flags = 0UL; + +#ifdef CONFIG_PAX_PAGEEXEC + if (elf_phdata->p_flags & PF_PAGEEXEC) + pax_flags |= MF_PAX_PAGEEXEC; +#endif + +#ifdef CONFIG_PAX_SEGMEXEC + if (elf_phdata->p_flags & PF_SEGMEXEC) + pax_flags |= MF_PAX_SEGMEXEC; +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { + if (nx_enabled) + pax_flags &= ~MF_PAX_SEGMEXEC; + else + pax_flags &= ~MF_PAX_PAGEEXEC; + } +#endif + +#ifdef CONFIG_PAX_EMUTRAMP + if (elf_phdata->p_flags & PF_EMUTRAMP) + pax_flags |= MF_PAX_EMUTRAMP; +#endif + +#ifdef CONFIG_PAX_MPROTECT + if (elf_phdata->p_flags & PF_MPROTECT) + pax_flags |= MF_PAX_MPROTECT; +#endif + +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP)) + pax_flags |= MF_PAX_RANDMMAP; +#endif + + return pax_flags; +} +#endif + +#ifdef CONFIG_PAX_PT_PAX_FLAGS +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata) +{ + unsigned long pax_flags = 0UL; + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC)) + pax_flags |= MF_PAX_PAGEEXEC; +#endif + +#ifdef CONFIG_PAX_SEGMEXEC + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC)) + pax_flags |= MF_PAX_SEGMEXEC; +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { + if (nx_enabled) + pax_flags &= ~MF_PAX_SEGMEXEC; + else + pax_flags &= ~MF_PAX_PAGEEXEC; + } +#endif + +#ifdef CONFIG_PAX_EMUTRAMP + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP)) + pax_flags |= MF_PAX_EMUTRAMP; +#endif + +#ifdef CONFIG_PAX_MPROTECT + if (!(elf_phdata->p_flags & PF_NOMPROTECT)) + pax_flags |= MF_PAX_MPROTECT; +#endif + +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP)) + pax_flags |= MF_PAX_RANDMMAP; +#endif + + return pax_flags; +} +#endif + +#ifdef CONFIG_PAX_EI_PAX +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex) +{ + unsigned long pax_flags = 0UL; + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC)) + pax_flags |= MF_PAX_PAGEEXEC; +#endif + +#ifdef CONFIG_PAX_SEGMEXEC + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC)) + pax_flags |= MF_PAX_SEGMEXEC; +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { + if (nx_enabled) + pax_flags &= ~MF_PAX_SEGMEXEC; + else + pax_flags &= ~MF_PAX_PAGEEXEC; + } +#endif + +#ifdef CONFIG_PAX_EMUTRAMP + if ((pax_flags & 
(MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP)) + pax_flags |= MF_PAX_EMUTRAMP; +#endif + +#ifdef CONFIG_PAX_MPROTECT + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT)) + pax_flags |= MF_PAX_MPROTECT; +#endif + +#ifdef CONFIG_PAX_ASLR + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP)) + pax_flags |= MF_PAX_RANDMMAP; +#endif + + return pax_flags; +} +#endif + +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata) +{ + unsigned long pax_flags = 0UL; + +#ifdef CONFIG_PAX_PT_PAX_FLAGS + unsigned long i; +#endif + +#ifdef CONFIG_PAX_EI_PAX + pax_flags = pax_parse_ei_pax(elf_ex); +#endif + +#ifdef CONFIG_PAX_PT_PAX_FLAGS + for (i = 0UL; i < elf_ex->e_phnum; i++) + if (elf_phdata[i].p_type == PT_PAX_FLAGS) { + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) || + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) || + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) || + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) || + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP))) + return -EINVAL; + +#ifdef CONFIG_PAX_SOFTMODE + if (pax_softmode) + pax_flags = pax_parse_softmode(&elf_phdata[i]); + else +#endif + + pax_flags = pax_parse_hardmode(&elf_phdata[i]); + break; + } +#endif + + if (0 > pax_check_flags(&pax_flags)) + return -EINVAL; + + current->mm->pax_flags = pax_flags; + return 0; +} +#endif + /* * These are the functions used to load ELF style executables and shared * libraries. There is no binary dependent code anywhere else. 
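pax_parse_elf_flags() above derives the process's PaX flags first from the EI_PAX byte of the ELF ident (pax_parse_ei_pax) and then, if a PT_PAX_FLAGS program header exists, from its p_flags in soft or hard mode, failing with -EINVAL whenever a feature bit and its NO- counterpart are both set. A simplified userspace model of the header scan and the contradiction check; the flag bits below are illustrative stand-ins rather than the patch's PF_ values, and the PT_PAX_FLAGS constant is assumed:

#include <stdio.h>

/* Illustrative bits; the real PF_ and PF_NO pairs are defined by the patch. */
enum { F_PAGEEXEC = 1 << 0, F_NOPAGEEXEC = 1 << 1,
       F_MPROTECT = 1 << 2, F_NOMPROTECT = 1 << 3 };

#define PT_PAX_FLAGS 0x65041580u   /* assumed value of the PaX program header type */

struct phdr { unsigned type, flags; };

/* Return the PT_PAX_FLAGS flags word, -1 on a contradictory header (mirroring
   the patch's -EINVAL path), or 0 if no such header is present. */
static long parse_pax_phdr(const struct phdr *p, int n)
{
    for (int i = 0; i < n; i++) {
        if (p[i].type != PT_PAX_FLAGS)
            continue;
        if (((p[i].flags & F_PAGEEXEC) && (p[i].flags & F_NOPAGEEXEC)) ||
            ((p[i].flags & F_MPROTECT) && (p[i].flags & F_NOMPROTECT)))
            return -1;
        return p[i].flags;
    }
    return 0;
}

int main(void)
{
    struct phdr good = { PT_PAX_FLAGS, F_PAGEEXEC | F_MPROTECT };
    struct phdr bad  = { PT_PAX_FLAGS, F_PAGEEXEC | F_NOPAGEEXEC };

    printf("%ld %ld\n", parse_pax_phdr(&good, 1), parse_pax_phdr(&bad, 1));  /* 5 -1 */
    return 0;
}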
@@ -534,7 +734,7 @@ static int load_elf_binary(struct linux_ char * elf_interpreter = NULL; unsigned int interpreter_type = INTERPRETER_NONE; unsigned char ibcs2_interpreter = 0; - unsigned long error; + unsigned long error = 0; struct elf_phdr *elf_ppnt, *elf_phdata; unsigned long elf_bss, elf_brk; int elf_exec_fileno; @@ -546,12 +746,12 @@ static int load_elf_binary(struct linux_ char passed_fileno[6]; struct files_struct *files; int executable_stack = EXSTACK_DEFAULT; - unsigned long def_flags = 0; struct { struct elfhdr elf_ex; struct elfhdr interp_elf_ex; struct exec interp_ex; } *loc; + unsigned long task_size = TASK_SIZE; loc = kmalloc(sizeof(*loc), GFP_KERNEL); if (!loc) { @@ -782,14 +982,89 @@ static int load_elf_binary(struct linux_ current->mm->end_code = 0; current->mm->mmap = NULL; current->flags &= ~PF_FORKNOEXEC; - current->mm->def_flags = def_flags; + +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) + current->mm->pax_flags = 0UL; +#endif + +#ifdef CONFIG_PAX_DLRESOLVE + current->mm->call_dl_resolve = 0UL; +#endif + +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT) + current->mm->call_syscall = 0UL; +#endif + +#ifdef CONFIG_PAX_ASLR + current->mm->delta_mmap = 0UL; + current->mm->delta_stack = 0UL; +#endif + + current->mm->def_flags = 0; + +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) { + send_sig(SIGKILL, current, 0); + goto out_free_dentry; + } +#endif + +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS + pax_set_initial_flags(bprm); +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS) + if (pax_set_initial_flags_func) + (pax_set_initial_flags_func)(bprm); +#endif + +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) { + current->mm->context.user_cs_limit = PAGE_SIZE; + current->mm->def_flags |= VM_PAGEEXEC; + } +#endif + +#ifdef CONFIG_PAX_SEGMEXEC + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE; + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE; + task_size = SEGMEXEC_TASK_SIZE; + } +#endif + +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC) + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu()); + put_cpu_no_resched(); + } +#endif + +#ifdef CONFIG_PAX_ASLR + if (current->mm->pax_flags & MF_PAX_RANDMMAP) { + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT; + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT; + } +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) + executable_stack = EXSTACK_DEFAULT; +#endif /* Do this immediately, since STACK_TOP as used in setup_arg_pages may depend on the personality. 
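When MF_PAX_RANDMMAP is set, the load_elf_binary() changes above compute two per-exec deltas, delta_mmap and delta_stack, by masking pax_get_random_long() to PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN bits and shifting by PAGE_SHIFT; a similar delta is folded into load_bias for ET_DYN binaries a little further down. The arithmetic is easy to check in isolation; rand() and the bit width below are placeholders for the patch's entropy source and per-architecture constants:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT      12
#define DELTA_MMAP_LEN  16        /* placeholder; the patch picks this per-arch */

int main(void)
{
    /* Keep DELTA_MMAP_LEN random bits, then turn pages into bytes: the result
       is a page-aligned offset below 2^(DELTA_MMAP_LEN + PAGE_SHIFT). */
    unsigned long delta = ((unsigned long)rand() & ((1UL << DELTA_MMAP_LEN) - 1))
                          << PAGE_SHIFT;

    printf("delta = %#lx (max %#lx)\n",
           delta, ((1UL << DELTA_MMAP_LEN) - 1) << PAGE_SHIFT);
    return 0;
}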
*/ SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter); + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (!(current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))) +#endif + if (elf_read_implies_exec(loc->elf_ex, executable_stack)) current->personality |= READ_IMPLIES_EXEC; +#ifdef CONFIG_PAX_ASLR + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) current->flags |= PF_RANDOMIZE; arch_pick_mmap_layout(current->mm); @@ -865,6 +1140,20 @@ static int load_elf_binary(struct linux_ * might try to exec. This is because the brk will * follow the loader, and is not movable. */ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); + +#ifdef CONFIG_PAX_RANDMMAP + /* PaX: randomize base address at the default exe base if requested */ + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) { +#ifdef CONFIG_SPARC64 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1); +#else + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT; +#endif + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias); + elf_flags |= MAP_FIXED; + } +#endif + } error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, @@ -897,9 +1186,9 @@ static int load_elf_binary(struct linux_ * allowed task size. Note that p_filesz must always be * <= p_memsz so it is only necessary to check p_memsz. */ - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz || - elf_ppnt->p_memsz > TASK_SIZE || - TASK_SIZE - elf_ppnt->p_memsz < k) { + if (k >= task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz || + elf_ppnt->p_memsz > task_size || + task_size - elf_ppnt->p_memsz < k) { /* set_brk can never work. Avoid overflows. */ send_sig(SIGKILL, current, 0); retval = -EINVAL; @@ -927,6 +1216,11 @@ static int load_elf_binary(struct linux_ start_data += load_bias; end_data += load_bias; +#ifdef CONFIG_PAX_RANDMMAP + if (current->mm->pax_flags & MF_PAX_RANDMMAP) + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4); +#endif + /* Calling set_brk effectively mmaps the pages that we need * for the bss and break sections. We must do this before * mapping in the interpreter, to make sure it doesn't wind @@ -938,9 +1232,11 @@ static int load_elf_binary(struct linux_ goto out_free_dentry; } if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { - send_sig(SIGSEGV, current, 0); - retval = -EFAULT; /* Nobody gets to see this, but.. */ - goto out_free_dentry; + /* + * This bss-zeroing can fail if the ELF + * file specifies odd protections. So + * we don't check the return value + */ } if (elf_interpreter) { @@ -1173,8 +1469,10 @@ static int dump_seek(struct file *file, unsigned long n = off; if (n > PAGE_SIZE) n = PAGE_SIZE; - if (!dump_write(file, buf, n)) + if (!dump_write(file, buf, n)) { + free_page((unsigned long)buf); return 0; + } off -= n; } free_page((unsigned long)buf); @@ -1189,7 +1487,7 @@ static int dump_seek(struct file *file, * * I think we should skip something. But I am not sure how. H.J. */ -static int maydump(struct vm_area_struct *vma) +static int maydump(struct vm_area_struct *vma, long signr) { /* The vma can be set up to tell us the answer directly. 
*/ if (vma->vm_flags & VM_ALWAYSDUMP) @@ -1204,7 +1502,7 @@ static int maydump(struct vm_area_struct return vma->vm_file->f_path.dentry->d_inode->i_nlink == 0; /* If it hasn't been written to, don't write it out */ - if (!vma->anon_vma) + if (signr != SIGKILL && !vma->anon_vma) return 0; return 1; @@ -1261,8 +1559,11 @@ static int writenote(struct memelfnote * #undef DUMP_WRITE #define DUMP_WRITE(addr, nr) \ + do { \ + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \ if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \ - goto end_coredump; + goto end_coredump; \ + } while (0); #define DUMP_SEEK(off) \ if (!dump_seek(file, (off))) \ goto end_coredump; @@ -1654,7 +1955,7 @@ static int elf_core_dump(long signr, str phdr.p_offset = offset; phdr.p_vaddr = vma->vm_start; phdr.p_paddr = 0; - phdr.p_filesz = maydump(vma) ? sz : 0; + phdr.p_filesz = maydump(vma, signr) ? sz : 0; phdr.p_memsz = sz; offset += phdr.p_filesz; phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0; @@ -1698,7 +1999,7 @@ static int elf_core_dump(long signr, str vma = next_vma(vma, gate_vma)) { unsigned long addr; - if (!maydump(vma)) + if (!maydump(vma, signr)) continue; for (addr = vma->vm_start; @@ -1721,6 +2022,7 @@ static int elf_core_dump(long signr, str flush_cache_page(vma, addr, page_to_pfn(page)); kaddr = kmap(page); + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1); if ((size += PAGE_SIZE) > limit || !dump_write(file, kaddr, PAGE_SIZE)) { diff -urNp linux-2.6.22.1/fs/binfmt_flat.c linux-2.6.22.1/fs/binfmt_flat.c --- linux-2.6.22.1/fs/binfmt_flat.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/binfmt_flat.c 2007-08-02 11:38:47.000000000 -0400 @@ -559,7 +559,9 @@ static int load_flat_file(struct linux_b realdatastart = (unsigned long) -ENOMEM; printk("Unable to allocate RAM for process data, errno %d\n", (int)-realdatastart); + down_write(¤t->mm->mmap_sem); do_munmap(current->mm, textpos, text_len); + up_write(¤t->mm->mmap_sem); ret = realdatastart; goto err; } @@ -581,8 +583,10 @@ static int load_flat_file(struct linux_b } if (result >= (unsigned long)-4096) { printk("Unable to read data+bss, errno %d\n", (int)-result); + down_write(¤t->mm->mmap_sem); do_munmap(current->mm, textpos, text_len); do_munmap(current->mm, realdatastart, data_len + extra); + up_write(¤t->mm->mmap_sem); ret = result; goto err; } @@ -655,8 +659,10 @@ static int load_flat_file(struct linux_b } if (result >= (unsigned long)-4096) { printk("Unable to read code+data+bss, errno %d\n",(int)-result); + down_write(¤t->mm->mmap_sem); do_munmap(current->mm, textpos, text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long)); + up_write(¤t->mm->mmap_sem); ret = result; goto err; } diff -urNp linux-2.6.22.1/fs/binfmt_misc.c linux-2.6.22.1/fs/binfmt_misc.c --- linux-2.6.22.1/fs/binfmt_misc.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/binfmt_misc.c 2007-08-02 11:38:47.000000000 -0400 @@ -113,9 +113,11 @@ static int load_misc_binary(struct linux struct files_struct *files = NULL; retval = -ENOEXEC; - if (!enabled) + if (!enabled || bprm->misc) goto _ret; + bprm->misc++; + /* to keep locking time low, we copy the interpreter string */ read_lock(&entries_lock); fmt = check_file(bprm); @@ -718,7 +720,7 @@ static int bm_fill_super(struct super_bl static struct tree_descr bm_files[] = { [2] = {"status", &bm_status_operations, S_IWUSR|S_IRUGO}, [3] = {"register", &bm_register_operations, S_IWUSR}, - /* last one */ {""} + /* last one */ {"", NULL, 0} }; int err = simple_fill_super(sb, 
0x42494e4d, bm_files); if (!err) diff -urNp linux-2.6.22.1/fs/block_dev.c linux-2.6.22.1/fs/block_dev.c --- linux-2.6.22.1/fs/block_dev.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/block_dev.c 2007-08-02 11:38:47.000000000 -0400 @@ -949,7 +949,7 @@ static int bd_claim_by_kobject(struct bl struct kobject *kobj) { int res; - struct bd_holder *bo, *found; + struct bd_holder *bo, *found = NULL; if (!kobj) return -EINVAL; diff -urNp linux-2.6.22.1/fs/buffer.c linux-2.6.22.1/fs/buffer.c --- linux-2.6.22.1/fs/buffer.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/buffer.c 2007-08-02 11:09:15.000000000 -0400 @@ -41,6 +41,7 @@ #include #include #include +#include static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); @@ -2002,6 +2003,7 @@ static int __generic_cont_expand(struct err = -EFBIG; limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long) size, 1); if (limit != RLIM_INFINITY && size > (loff_t)limit) { send_sig(SIGXFSZ, current, 0); goto out; diff -urNp linux-2.6.22.1/fs/cifs/cifs_uniupr.h linux-2.6.22.1/fs/cifs/cifs_uniupr.h --- linux-2.6.22.1/fs/cifs/cifs_uniupr.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/cifs/cifs_uniupr.h 2007-08-02 11:38:47.000000000 -0400 @@ -132,7 +132,7 @@ const struct UniCaseRange CifsUniUpperRa {0x0490, 0x04cc, UniCaseRangeU0490}, {0x1e00, 0x1ffc, UniCaseRangeU1e00}, {0xff40, 0xff5a, UniCaseRangeUff40}, - {0} + {0, 0, NULL} }; #endif diff -urNp linux-2.6.22.1/fs/cifs/dir.c linux-2.6.22.1/fs/cifs/dir.c --- linux-2.6.22.1/fs/cifs/dir.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/cifs/dir.c 2007-08-02 11:38:47.000000000 -0400 @@ -397,7 +397,7 @@ int cifs_mknod(struct inode *inode, stru /* BB Do not bother to decode buf since no local inode yet to put timestamps in, but we can reuse it safely */ - int bytes_written; + unsigned int bytes_written; struct win_dev *pdev; pdev = (struct win_dev *)buf; if (S_ISCHR(mode)) { diff -urNp linux-2.6.22.1/fs/cifs/inode.c linux-2.6.22.1/fs/cifs/inode.c --- linux-2.6.22.1/fs/cifs/inode.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/cifs/inode.c 2007-08-02 11:38:47.000000000 -0400 @@ -1461,7 +1461,7 @@ int cifs_setattr(struct dentry *direntry atomic_dec(&open_file->wrtPending); cFYI(1,("SetFSize for attrs rc = %d", rc)); if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { - int bytes_written; + unsigned int bytes_written; rc = CIFSSMBWrite(xid, pTcon, nfid, 0, attrs->ia_size, &bytes_written, NULL, NULL, @@ -1494,7 +1494,7 @@ int cifs_setattr(struct dentry *direntry cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc==0) { - int bytes_written; + unsigned int bytes_written; rc = CIFSSMBWrite(xid, pTcon, netfid, 0, attrs->ia_size, diff -urNp linux-2.6.22.1/fs/compat.c linux-2.6.22.1/fs/compat.c --- linux-2.6.22.1/fs/compat.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/compat.c 2007-08-02 11:09:15.000000000 -0400 @@ -50,6 +50,7 @@ #include #include #include +#include #include #include @@ -1364,6 +1365,11 @@ int compat_do_execve(char * filename, struct file *file; int retval; int i; +#ifdef CONFIG_GRKERNSEC + struct file *old_exec_file; + struct acl_subject_label *old_acl; + struct rlimit old_rlim[RLIM_NLIMITS]; +#endif retval = -ENOMEM; bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); @@ -1381,6 +1387,15 @@ int compat_do_execve(char * filename, bprm->file = file; bprm->filename = filename; bprm->interp = filename; + + gr_learn_resource(current, RLIMIT_NPROC, 
atomic_read(¤t->user->processes), 1); + retval = -EAGAIN; + if (gr_handle_nproc()) + goto out_file; + retval = -EACCES; + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) + goto out_file; + bprm->mm = mm_alloc(); retval = -ENOMEM; if (!bprm->mm) @@ -1419,10 +1434,39 @@ int compat_do_execve(char * filename, if (retval < 0) goto out; + if (!gr_tpe_allow(file)) { + retval = -EACCES; + goto out; + } + + if (gr_check_crash_exec(file)) { + retval = -EACCES; + goto out; + } + + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt); + + gr_handle_exec_args(bprm, (char __user * __user *)argv); + +#ifdef CONFIG_GRKERNSEC + old_acl = current->acl; + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim)); + old_exec_file = current->exec_file; + get_file(file); + current->exec_file = file; +#endif + + gr_set_proc_label(file->f_dentry, file->f_vfsmnt); + retval = search_binary_handler(bprm, regs); if (retval >= 0) { free_arg_pages(bprm); +#ifdef CONFIG_GRKERNSEC + if (old_exec_file) + fput(old_exec_file); +#endif + /* execve success */ security_bprm_free(bprm); acct_update_integrals(current); @@ -1430,6 +1474,13 @@ int compat_do_execve(char * filename, return retval; } +#ifdef CONFIG_GRKERNSEC + current->acl = old_acl; + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim)); + fput(current->exec_file); + current->exec_file = old_exec_file; +#endif + out: /* Something went wrong, return the inode and free the argument pages*/ for (i = 0 ; i < MAX_ARG_PAGES ; i++) { diff -urNp linux-2.6.22.1/fs/debugfs/inode.c linux-2.6.22.1/fs/debugfs/inode.c --- linux-2.6.22.1/fs/debugfs/inode.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/debugfs/inode.c 2007-08-02 11:38:47.000000000 -0400 @@ -125,7 +125,7 @@ static inline int debugfs_positive(struc static int debug_fill_super(struct super_block *sb, void *data, int silent) { - static struct tree_descr debug_files[] = {{""}}; + static struct tree_descr debug_files[] = {{"", NULL, 0}}; return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files); } diff -urNp linux-2.6.22.1/fs/exec.c linux-2.6.22.1/fs/exec.c --- linux-2.6.22.1/fs/exec.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/exec.c 2007-08-02 11:44:13.000000000 -0400 @@ -52,6 +52,8 @@ #include #include #include +#include +#include #include #include @@ -309,7 +320,7 @@ EXPORT_SYMBOL(copy_strings_kernel); * * vma->vm_mm->mmap_sem is held for writing. 
*/ -void install_arg_page(struct vm_area_struct *vma, +int install_arg_page(struct vm_area_struct *vma, struct page *page, unsigned long address) { struct mm_struct *mm = vma->vm_mm; @@ -327,6 +338,12 @@ void install_arg_page(struct vm_area_str pte_unmap_unlock(pte, ptl); goto out; } + +#ifdef CONFIG_PAX_SEGMEXEC + if (pax_find_mirror_vma(vma)) + BUG_ON(TestSetPageLocked(page)); +#endif + inc_mm_counter(mm, anon_rss); lru_cache_add_active(page); set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte( @@ -335,10 +352,42 @@ void install_arg_page(struct vm_area_str pte_unmap_unlock(pte, ptl); /* no need for flush_tlb */ - return; + return 0; out: __free_page(page); force_sig(SIGKILL, current); + return -ENOMEM; +} + +static int install_arg_page_mirror(struct vm_area_struct *vma, + struct page *page, unsigned long address) +{ + struct mm_struct *mm = vma->vm_mm; + pte_t *pte; + spinlock_t *ptl; + + page_cache_get(page); + pte = get_locked_pte(mm, address, &ptl); + if (!pte) + goto out; + if (!pte_none(*pte)) { + pte_unmap_unlock(pte, ptl); + goto out; + } + inc_mm_counter(mm, anon_rss); + set_pte_at(mm, address, pte, mk_pte(page, vma->vm_page_prot)); + page_add_anon_rmap(page, vma, address); + pte_unmap_unlock(pte, ptl); + + /* no need for flush_tlb */ + unlock_page(page); + return 0; +out: + unlock_page(page); + page_cache_release(page); + __free_page(page); + force_sig(SIGKILL, current); + return -ENOMEM; } #define EXTRA_STACK_VM_PAGES 20 /* random */ @@ -353,6 +402,10 @@ int setup_arg_pages(struct linux_binprm int i, ret; long arg_size; +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *mpnt_m = NULL; +#endif + #ifdef CONFIG_STACK_GROWSUP /* Move the argument and environment strings to the bottom of the * stack space. @@ -434,7 +478,20 @@ int setup_arg_pages(struct linux_binprm else mpnt->vm_flags = VM_STACK_FLAGS; mpnt->vm_flags |= mm->def_flags; - mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7]; + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { + mpnt->vm_flags &= ~VM_EXEC; + +#ifdef CONFIG_PAX_MPROTECT + if (mm->pax_flags & MF_PAX_MPROTECT) + mpnt->vm_flags &= ~VM_MAYEXEC; +#endif + + } +#endif + + mpnt->vm_page_prot = vm_get_page_prot(mpnt->vm_flags); if ((ret = insert_vm_struct(mm, mpnt))) { up_write(&mm->mmap_sem); kmem_cache_free(vm_area_cachep, mpnt); @@ -444,17 +498,30 @@ int setup_arg_pages(struct linux_binprm mm->stack_vm = mm->total_vm; } - for (i = 0 ; i < MAX_ARG_PAGES ; i++) { + for (i = 0 ; i < MAX_ARG_PAGES ; i++, stack_base += PAGE_SIZE) { struct page *page = bprm->page[i]; - if (page) { - bprm->page[i] = NULL; - install_arg_page(mpnt, page, stack_base); - } - stack_base += PAGE_SIZE; + int retval; + if (!page) + continue; + + bprm->page[i] = NULL; + retval = install_arg_page(mpnt, page, stack_base); + if (!ret) + ret = retval; + +#ifdef CONFIG_PAX_SEGMEXEC + if (!mpnt_m || retval) + continue; + + retval = install_arg_page_mirror(mpnt_m, page, stack_base + SEGMEXEC_TASK_SIZE); + if (!ret) + ret = retval; +#endif + } up_write(&mm->mmap_sem); - - return 0; + + return ret; } EXPORT_SYMBOL(setup_arg_pages); @@ -491,7 +565,7 @@ struct file *open_exec(const char *name) file = ERR_PTR(-EACCES); if (!(nd.mnt->mnt_flags & MNT_NOEXEC) && S_ISREG(inode->i_mode)) { - int err = vfs_permission(&nd, MAY_EXEC); + err = vfs_permission(&nd, MAY_EXEC); file = ERR_PTR(err); if (!err) { file = nameidata_to_filp(&nd, O_RDONLY); @@ -1158,6 +1232,11 @@ int do_execve(char * filename, struct file *file; 
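Under CONFIG_PAX_SEGMEXEC the setup_arg_pages()/install_arg_page() rework above installs each argument page twice: once at its normal stack address and, when the stack vma has a SEGMEXEC mirror (mpnt_m), once more at the same address plus SEGMEXEC_TASK_SIZE via install_arg_page_mirror(); install_arg_page() now also returns an error instead of being void. The sketch below models only the address arithmetic of the mirroring; map_page_stub() and the constants are placeholders for the real pte set-up and the architecture's segment split:

#include <stdio.h>

#define PAGE_SIZE_STUB       4096UL
#define SEGMEXEC_SPLIT_STUB  (1UL << 30)   /* placeholder for SEGMEXEC_TASK_SIZE */

/* Stand-in for installing one page at one user address. */
static void map_page_stub(unsigned long addr, int page_nr)
{
    printf("page %d -> %#lx\n", page_nr, addr);
}

static void install_with_mirror(unsigned long stack_base, int npages, int mirrored)
{
    for (int i = 0; i < npages; i++, stack_base += PAGE_SIZE_STUB) {
        map_page_stub(stack_base, i);                            /* normal mapping */
        if (mirrored)
            map_page_stub(stack_base + SEGMEXEC_SPLIT_STUB, i);  /* mirrored view  */
    }
}

int main(void)
{
    install_with_mirror(0x7fff0000UL, 2, 1);
    return 0;
}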
int retval; int i; +#ifdef CONFIG_GRKERNSEC + struct file *old_exec_file; + struct acl_subject_label *old_acl; + struct rlimit old_rlim[RLIM_NLIMITS]; +#endif retval = -ENOMEM; bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); @@ -1169,10 +1248,29 @@ int do_execve(char * filename, if (IS_ERR(file)) goto out_kfree; + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(¤t->user->processes), 1); + + if (gr_handle_nproc()) { + allow_write_access(file); + fput(file); + return -EAGAIN; + } + + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) { + allow_write_access(file); + fput(file); + return -EACCES; + } + sched_exec(); bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *); +#ifdef CONFIG_PAX_RANDUSTACK + if (randomize_va_space) + bprm->p -= (pax_get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK; +#endif + bprm->file = file; bprm->filename = filename; bprm->interp = filename; @@ -1214,8 +1312,38 @@ int do_execve(char * filename, if (retval < 0) goto out; + if (!gr_tpe_allow(file)) { + retval = -EACCES; + goto out; + } + + if (gr_check_crash_exec(file)) { + retval = -EACCES; + goto out; + } + + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt); + + gr_handle_exec_args(bprm, argv); + +#ifdef CONFIG_GRKERNSEC + old_acl = current->acl; + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim)); + old_exec_file = current->exec_file; + get_file(file); + current->exec_file = file; +#endif + + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt); + if (retval < 0) + goto out_fail; + retval = search_binary_handler(bprm,regs); if (retval >= 0) { +#ifdef CONFIG_GRKERNSEC + if (old_exec_file) + fput(old_exec_file); +#endif free_arg_pages(bprm); /* execve success */ @@ -1225,6 +1353,14 @@ int do_execve(char * filename, return retval; } +out_fail: +#ifdef CONFIG_GRKERNSEC + current->acl = old_acl; + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim)); + fput(current->exec_file); + current->exec_file = old_exec_file; +#endif + out: /* Something went wrong, return the inode and free the argument pages*/ for (i = 0 ; i < MAX_ARG_PAGES ; i++) { @@ -1388,6 +1524,114 @@ out: return ispipe; } +int pax_check_flags(unsigned long *flags) +{ + int retval = 0; + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC) + if (*flags & MF_PAX_SEGMEXEC) + { + *flags &= ~MF_PAX_SEGMEXEC; + retval = -EINVAL; + } +#endif + + if ((*flags & MF_PAX_PAGEEXEC) + +#ifdef CONFIG_PAX_PAGEEXEC + && (*flags & MF_PAX_SEGMEXEC) +#endif + + ) + { + *flags &= ~MF_PAX_PAGEEXEC; + retval = -EINVAL; + } + + if ((*flags & MF_PAX_MPROTECT) + +#ifdef CONFIG_PAX_MPROTECT + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) +#endif + + ) + { + *flags &= ~MF_PAX_MPROTECT; + retval = -EINVAL; + } + + if ((*flags & MF_PAX_EMUTRAMP) + +#ifdef CONFIG_PAX_EMUTRAMP + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) +#endif + + ) + { + *flags &= ~MF_PAX_EMUTRAMP; + retval = -EINVAL; + } + + return retval; +} + +EXPORT_SYMBOL(pax_check_flags); + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = current->mm; + char *buffer_exec = (char *)__get_free_page(GFP_ATOMIC); + char *buffer_fault = (char *)__get_free_page(GFP_ATOMIC); + char *path_exec = NULL; + char *path_fault = NULL; + unsigned long start = 0UL, end = 0UL, offset = 0UL; + + if (buffer_exec && buffer_fault) { + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL; + + down_read(&mm->mmap_sem); + vma = mm->mmap; + while 
(vma && (!vma_exec || !vma_fault)) { + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file) + vma_exec = vma; + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end) + vma_fault = vma; + vma = vma->vm_next; + } + if (vma_exec) { + path_exec = d_path(vma_exec->vm_file->f_path.dentry, vma_exec->vm_file->f_path.mnt, buffer_exec, PAGE_SIZE); + if (IS_ERR(path_exec)) + path_exec = ""; + } + if (vma_fault) { + start = vma_fault->vm_start; + end = vma_fault->vm_end; + offset = vma_fault->vm_pgoff << PAGE_SHIFT; + if (vma_fault->vm_file) { + path_fault = d_path(vma_fault->vm_file->f_path.dentry, vma_fault->vm_file->f_path.mnt, buffer_fault, PAGE_SIZE); + if (IS_ERR(path_fault)) + path_fault = ""; + } else + path_fault = ""; + } + up_read(&mm->mmap_sem); + } + if (tsk->signal->curr_ip) + printk(KERN_ERR "PAX: From %u.%u.%u.%u: execution attempt in: %s, %08lx-%08lx %08lx\n", NIPQUAD(tsk->signal->curr_ip), path_fault, start, end, offset); + else + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset); + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, " + "PC: %p, SP: %p\n", path_exec, tsk->comm, tsk->pid, + tsk->uid, tsk->euid, pc, sp); + free_page((unsigned long)buffer_exec); + free_page((unsigned long)buffer_fault); + pax_report_insns(pc, sp); + do_coredump(SIGKILL, SIGKILL, regs); +} +#endif + static void zap_process(struct task_struct *start) { struct task_struct *t; @@ -1530,6 +1774,10 @@ int do_coredump(long signr, int exit_cod */ clear_thread_flag(TIF_SIGPENDING); + if (signr == SIGKILL || signr == SIGILL) + gr_handle_brute_attach(current); + + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1); if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump) goto fail_unlock; diff -urNp linux-2.6.22.1/fs/ext2/balloc.c linux-2.6.22.1/fs/ext2/balloc.c --- linux-2.6.22.1/fs/ext2/balloc.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/ext2/balloc.c 2007-08-02 11:09:15.000000000 -0400 @@ -114,7 +114,7 @@ static int reserve_blocks(struct super_b if (free_blocks < count) count = free_blocks; - if (free_blocks < root_blocks + count && !capable(CAP_SYS_RESOURCE) && + if (free_blocks < root_blocks + count && !capable_nolog(CAP_SYS_RESOURCE) && sbi->s_resuid != current->fsuid && (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { /* diff -urNp linux-2.6.22.1/fs/ext3/xattr.c linux-2.6.22.1/fs/ext3/xattr.c diff -urNp linux-2.6.22.1/fs/fcntl.c linux-2.6.22.1/fs/fcntl.c --- linux-2.6.22.1/fs/fcntl.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/fcntl.c 2007-08-02 11:09:15.000000000 -0400 @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -64,6 +65,7 @@ static int locate_fd(struct files_struct struct fdtable *fdt; error = -EINVAL; + gr_learn_resource(current, RLIMIT_NOFILE, orig_start, 0); if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur) goto out; @@ -82,6 +84,7 @@ repeat: fdt->max_fds, start); error = -EMFILE; + gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0); if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur) goto out; if (!vx_files_avail(1)) @@ -140,6 +143,8 @@ asmlinkage long sys_dup2(unsigned int ol struct files_struct * files = current->files; struct fdtable *fdt; + gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0); + spin_lock(&files->file_lock); if (!(file = fcheck(oldfd))) goto out_unlock; @@ -458,7 +463,8 @@ static inline int sigio_perm(struct task return (((fown->euid == 0) || (fown->euid == p->suid) || 
(fown->euid == p->uid) || (fown->uid == p->suid) || (fown->uid == p->uid)) && - !security_file_send_sigiotask(p, fown, sig)); + !security_file_send_sigiotask(p, fown, sig) && + !gr_check_protected_task(p) && !gr_pid_is_chrooted(p)); } static void send_sigio_to_task(struct task_struct *p, diff -urNp linux-2.6.22.1/fs/fuse/control.c linux-2.6.22.1/fs/fuse/control.c --- linux-2.6.22.1/fs/fuse/control.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/fuse/control.c 2007-08-02 11:38:47.000000000 -0400 @@ -159,7 +159,7 @@ void fuse_ctl_remove_conn(struct fuse_co static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent) { - struct tree_descr empty_descr = {""}; + struct tree_descr empty_descr = {"", NULL, 0}; struct fuse_conn *fc; int err; diff -urNp linux-2.6.22.1/fs/Kconfig linux-2.6.22.1/fs/Kconfig --- linux-2.6.22.1/fs/Kconfig 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/Kconfig 2007-08-02 11:09:15.000000000 -0400 @@ -912,7 +912,7 @@ config PROC_FS config PROC_KCORE bool "/proc/kcore support" if !ARM - depends on PROC_FS && MMU + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD config PROC_VMCORE bool "/proc/vmcore support (EXPERIMENTAL)" diff -urNp linux-2.6.22.1/fs/namei.c linux-2.6.22.1/fs/namei.c --- linux-2.6.22.1/fs/namei.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/namei.c 2007-08-02 11:09:15.000000000 -0400 @@ -31,6 +31,7 @@ #include #include +#include #include #include @@ -636,6 +637,13 @@ static inline int do_follow_link(struct err = security_inode_follow_link(path->dentry, nd); if (err) goto loop; + + if (gr_handle_follow_link(path->dentry->d_parent->d_inode, + path->dentry->d_inode, path->dentry, nd->mnt)) { + err = -EACCES; + goto loop; + } + current->link_count++; current->total_link_count++; nd->depth++; @@ -981,11 +989,18 @@ return_reval: break; } return_base: + if (!gr_acl_handle_hidden_file(nd->dentry, nd->mnt)) { + path_release(nd); + return -ENOENT; + } return 0; out_dput: dput_path(&next, nd); break; } + if (!gr_acl_handle_hidden_file(nd->dentry, nd->mnt)) + err = -ENOENT; + path_release(nd); return_err: return err; @@ -1616,9 +1631,17 @@ static int open_namei_create(struct name int error; struct dentry *dir = nd->dentry; + if (!gr_acl_handle_creat(path->dentry, nd->dentry, nd->mnt, flag, mode)) { + error = -EACCES; + goto out_unlock_dput; + } + if (!IS_POSIXACL(dir->d_inode)) mode &= ~current->fs->umask; error = vfs_create(dir->d_inode, path->dentry, mode, nd); + if (!error) + gr_handle_create(path->dentry, nd->mnt); +out_unlock_dput: mutex_unlock(&dir->d_inode->i_mutex); dput(nd->dentry); nd->dentry = path->dentry; @@ -1669,6 +1692,17 @@ int open_namei(int dfd, const char *path nd, flag); if (error) return error; + + if (gr_handle_rawio(nd->dentry->d_inode)) { + error = -EPERM; + goto exit; + } + + if (!gr_acl_handle_open(nd->dentry, nd->mnt, flag)) { + error = -EACCES; + goto exit; + } + goto ok; } @@ -1718,6 +1752,23 @@ do_last: /* * It already exists. 
*/ + + if (gr_handle_rawio(path.dentry->d_inode)) { + mutex_unlock(&dir->d_inode->i_mutex); + error = -EPERM; + goto exit_dput; + } + if (!gr_acl_handle_open(path.dentry, nd->mnt, flag)) { + mutex_unlock(&dir->d_inode->i_mutex); + error = -EACCES; + goto exit_dput; + } + if (gr_handle_fifo(path.dentry, nd->mnt, dir, flag, acc_mode)) { + mutex_unlock(&dir->d_inode->i_mutex); + error = -EACCES; + goto exit_dput; + } + mutex_unlock(&dir->d_inode->i_mutex); audit_inode(pathname, path.dentry->d_inode); @@ -1773,6 +1824,13 @@ do_link: error = security_inode_follow_link(path.dentry, nd); if (error) goto exit_dput; + + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode, + path.dentry, nd->mnt)) { + error = -EACCES; + goto exit_dput; + } + error = __do_follow_link(&path, nd); if (error) { /* Does someone understand code flow here? Or it is only @@ -1901,6 +1959,22 @@ asmlinkage long sys_mknodat(int dfd, con if (!IS_POSIXACL(nd.dentry->d_inode)) mode &= ~current->fs->umask; if (!IS_ERR(dentry)) { + if (gr_handle_chroot_mknod(dentry, nd.mnt, mode)) { + error = -EPERM; + dput(dentry); + mutex_unlock(&nd.dentry->d_inode->i_mutex); + path_release(&nd); + goto out; + } + + if (!gr_acl_handle_mknod(dentry, nd.dentry, nd.mnt, mode)) { + error = -EACCES; + dput(dentry); + mutex_unlock(&nd.dentry->d_inode->i_mutex); + path_release(&nd); + goto out; + } + switch (mode & S_IFMT) { case 0: case S_IFREG: error = vfs_create(nd.dentry->d_inode,dentry,mode,&nd); @@ -1918,6 +1992,10 @@ asmlinkage long sys_mknodat(int dfd, con default: error = -EINVAL; } + + if (!error) + gr_handle_create(dentry, nd.mnt); + dput(dentry); } mutex_unlock(&nd.dentry->d_inode->i_mutex); @@ -1975,9 +2053,18 @@ asmlinkage long sys_mkdirat(int dfd, con if (IS_ERR(dentry)) goto out_unlock; + if (!gr_acl_handle_mkdir(dentry, nd.dentry, nd.mnt)) { + error = -EACCES; + goto out_unlock_dput; + } + if (!IS_POSIXACL(nd.dentry->d_inode)) mode &= ~current->fs->umask; error = vfs_mkdir(nd.dentry->d_inode, dentry, mode, &nd); + + if (!error) + gr_handle_create(dentry, nd.mnt); +out_unlock_dput: dput(dentry); out_unlock: mutex_unlock(&nd.dentry->d_inode->i_mutex); @@ -2059,6 +2146,8 @@ static long do_rmdir(int dfd, const char char * name; struct dentry *dentry; struct nameidata nd; + ino_t saved_ino = 0; + dev_t saved_dev = 0; name = getname(pathname); if(IS_ERR(name)) @@ -2084,7 +2173,22 @@ static long do_rmdir(int dfd, const char error = PTR_ERR(dentry); if (IS_ERR(dentry)) goto exit2; + + if (dentry->d_inode != NULL) { + if (dentry->d_inode->i_nlink <= 1) { + saved_ino = dentry->d_inode->i_ino; + saved_dev = dentry->d_inode->i_sb->s_dev; + } + + if (!gr_acl_handle_rmdir(dentry, nd.mnt)) { + error = -EACCES; + goto dput_exit2; + } + } error = vfs_rmdir(nd.dentry->d_inode, dentry, &nd); + if (!error && (saved_dev || saved_ino)) + gr_handle_delete(saved_ino, saved_dev); +dput_exit2: dput(dentry); exit2: mutex_unlock(&nd.dentry->d_inode->i_mutex); @@ -2143,6 +2247,8 @@ static long do_unlinkat(int dfd, const c struct dentry *dentry; struct nameidata nd; struct inode *inode = NULL; + ino_t saved_ino = 0; + dev_t saved_dev = 0; name = getname(pathname); if(IS_ERR(name)) @@ -2158,13 +2264,26 @@ static long do_unlinkat(int dfd, const c dentry = lookup_hash(&nd); error = PTR_ERR(dentry); if (!IS_ERR(dentry)) { + error = 0; /* Why not before? 
Because we want correct error value */ if (nd.last.name[nd.last.len]) goto slashes; inode = dentry->d_inode; - if (inode) + if (inode) { + if (inode->i_nlink <= 1) { + saved_ino = inode->i_ino; + saved_dev = inode->i_sb->s_dev; + } + + if (!gr_acl_handle_unlink(dentry, nd.mnt)) + error = -EACCES; + atomic_inc(&inode->i_count); - error = vfs_unlink(nd.dentry->d_inode, dentry, &nd); + } + if (!error) + error = vfs_unlink(nd.dentry->d_inode, dentry, &nd); + if (!error && (saved_ino || saved_dev)) + gr_handle_delete(saved_ino, saved_dev); exit2: dput(dentry); } @@ -2245,7 +2364,16 @@ asmlinkage long sys_symlinkat(const char if (IS_ERR(dentry)) goto out_unlock; + if (!gr_acl_handle_symlink(dentry, nd.dentry, nd.mnt, from)) { + error = -EACCES; + goto out_dput_unlock; + } + error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO, &nd); + + if (!error) + gr_handle_create(dentry, nd.mnt); +out_dput_unlock: dput(dentry); out_unlock: mutex_unlock(&nd.dentry->d_inode->i_mutex); @@ -2340,7 +2468,25 @@ asmlinkage long sys_linkat(int olddfd, c error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto out_unlock; + + if (gr_handle_hardlink(old_nd.dentry, old_nd.mnt, + old_nd.dentry->d_inode, + old_nd.dentry->d_inode->i_mode, to)) { + error = -EACCES; + goto out_unlock_dput; + } + + if (!gr_acl_handle_link(new_dentry, nd.dentry, nd.mnt, + old_nd.dentry, old_nd.mnt, to)) { + error = -EACCES; + goto out_unlock_dput; + } + error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry, &nd); + + if (!error) + gr_handle_create(new_dentry, nd.mnt); +out_unlock_dput: dput(new_dentry); out_unlock: mutex_unlock(&nd.dentry->d_inode->i_mutex); @@ -2566,8 +2712,16 @@ static int do_rename(int olddfd, const c if (new_dentry == trap) goto exit5; - error = vfs_rename(old_dir->d_inode, old_dentry, + error = gr_acl_handle_rename(new_dentry, newnd.dentry, newnd.mnt, + old_dentry, old_dir->d_inode, oldnd.mnt, + newname); + + if (!error) + error = vfs_rename(old_dir->d_inode, old_dentry, new_dir->d_inode, new_dentry); + if (!error) + gr_handle_rename(old_dir->d_inode, newnd.dentry->d_inode, old_dentry, + new_dentry, oldnd.mnt, new_dentry->d_inode ? 
1 : 0); exit5: dput(new_dentry); exit4: diff -urNp linux-2.6.22.1/fs/namespace.c linux-2.6.22.1/fs/namespace.c --- linux-2.6.22.1/fs/namespace.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/namespace.c 2007-08-02 11:09:15.000000000 -0400 @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include "pnode.h" @@ -602,6 +603,8 @@ static int do_umount(struct vfsmount *mn DQUOT_OFF(sb); retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); unlock_kernel(); + + gr_log_remount(mnt->mnt_devname, retval); } up_write(&sb->s_umount); return retval; @@ -622,6 +625,9 @@ static int do_umount(struct vfsmount *mn security_sb_umount_busy(mnt); up_write(&namespace_sem); release_mounts(&umount_list); + + gr_log_unmount(mnt->mnt_devname, retval); + return retval; } @@ -1427,6 +1433,11 @@ long do_mount(char *dev_name, char *dir_ if (retval) goto dput_out; + if (gr_handle_chroot_mount(nd.dentry, nd.mnt, dev_name)) { + retval = -EPERM; + goto dput_out; + } + if (flags & MS_REMOUNT) retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags, data_page, tag); @@ -1441,6 +1452,9 @@ long do_mount(char *dev_name, char *dir_ dev_name, data_page); dput_out: path_release(&nd); + + gr_log_mount(dev_name, dir_name, retval); + return retval; } @@ -1678,6 +1692,9 @@ asmlinkage long sys_pivot_root(const cha if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (gr_handle_chroot_pivot()) + return -EPERM; + lock_kernel(); error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, diff -urNp linux-2.6.22.1/fs/nfs/callback_xdr.c linux-2.6.22.1/fs/nfs/callback_xdr.c --- linux-2.6.22.1/fs/nfs/callback_xdr.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/nfs/callback_xdr.c 2007-08-02 11:38:47.000000000 -0400 @@ -139,7 +139,7 @@ static __be32 decode_compound_hdr_arg(st if (unlikely(status != 0)) return status; /* We do not like overly long tags! 
*/ - if (hdr->taglen > CB_OP_TAGLEN_MAXSZ-12 || hdr->taglen < 0) { + if (hdr->taglen > CB_OP_TAGLEN_MAXSZ-12) { printk("NFSv4 CALLBACK %s: client sent tag of length %u\n", __FUNCTION__, hdr->taglen); return htonl(NFS4ERR_RESOURCE); diff -urNp linux-2.6.22.1/fs/nfs/nfs4proc.c linux-2.6.22.1/fs/nfs/nfs4proc.c --- linux-2.6.22.1/fs/nfs/nfs4proc.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/nfs/nfs4proc.c 2007-08-02 11:38:47.000000000 -0400 @@ -493,7 +493,7 @@ static int _nfs4_do_open_reclaim(struct static int nfs4_do_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry) { struct nfs_server *server = NFS_SERVER(state->inode); - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = _nfs4_do_open_reclaim(ctx, state); @@ -538,7 +538,7 @@ static int _nfs4_open_delegation_recall( int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; struct nfs_server *server = NFS_SERVER(state->inode); int err; do { @@ -843,7 +843,7 @@ static int _nfs4_open_expired(struct nfs static inline int nfs4_do_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry) { struct nfs_server *server = NFS_SERVER(state->inode); - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { @@ -1090,7 +1090,7 @@ static int nfs4_do_setattr(struct inode struct iattr *sattr, struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(inode); - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(server, @@ -1354,7 +1354,7 @@ static int _nfs4_server_capabilities(str int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(server, @@ -1387,7 +1387,7 @@ static int _nfs4_lookup_root(struct nfs_ static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(server, @@ -1476,7 +1476,7 @@ static int _nfs4_proc_getattr(struct nfs static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(server, @@ -1568,7 +1568,7 @@ static int nfs4_proc_lookupfh(struct nfs struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(server, @@ -1612,7 +1612,7 @@ static int _nfs4_proc_lookup(struct inod static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(NFS_SERVER(dir), @@ -1668,7 +1668,7 @@ static int _nfs4_proc_access(struct inod static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(NFS_SERVER(inode), 
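The callback_xdr.c hunk above drops the "hdr->taglen < 0" half of the length test: taglen is an unsigned field, so that comparison can never be true and only the upper-bound check does any work. The nfs4proc.c hunks replace the empty-brace initializer { }, a GNU extension, with an explicit {0, 0}; both forms zero the nfs4_exception structure. A minimal user-space sketch of the signedness point, using a made-up tag_hdr type and bound rather than the kernel's structures:

#include <stdio.h>

/* stand-in for the NFSv4 callback header; the kernel's taglen field is
 * unsigned as well, which is what makes the "< 0" test dead code */
struct tag_hdr {
	unsigned int taglen;
};

#define TAGLEN_MAX 40	/* placeholder bound, not CB_OP_TAGLEN_MAXSZ-12 */

static int tag_too_long(const struct tag_hdr *hdr)
{
	/* (hdr->taglen < 0) would always be false for an unsigned field,
	 * so only the upper bound is checked, as in the patched code */
	return hdr->taglen > TAGLEN_MAX;
}

int main(void)
{
	struct tag_hdr h = { 50 };

	printf("too long: %d\n", tag_too_long(&h));	/* prints 1 */
	return 0;
}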
@@ -1723,7 +1723,7 @@ static int _nfs4_proc_readlink(struct in static int nfs4_proc_readlink(struct inode *inode, struct page *page, unsigned int pgbase, unsigned int pglen) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(NFS_SERVER(inode), @@ -1813,7 +1813,7 @@ static int _nfs4_proc_remove(struct inod static int nfs4_proc_remove(struct inode *dir, struct qstr *name) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(NFS_SERVER(dir), @@ -1907,7 +1907,7 @@ static int _nfs4_proc_rename(struct inod static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, struct inode *new_dir, struct qstr *new_name) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(NFS_SERVER(old_dir), @@ -1954,7 +1954,7 @@ static int _nfs4_proc_link(struct inode static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(NFS_SERVER(inode), @@ -2011,7 +2011,7 @@ static int _nfs4_proc_symlink(struct ino static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page, unsigned int len, struct iattr *sattr) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(NFS_SERVER(dir), @@ -2064,7 +2064,7 @@ static int _nfs4_proc_mkdir(struct inode static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(NFS_SERVER(dir), @@ -2110,7 +2110,7 @@ static int _nfs4_proc_readdir(struct den static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, u64 cookie, struct page *page, unsigned int count, int plus) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(NFS_SERVER(state->inode), @@ -2180,7 +2180,7 @@ static int _nfs4_proc_mknod(struct inode static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr, dev_t rdev) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(NFS_SERVER(dir), @@ -2209,7 +2209,7 @@ static int _nfs4_proc_statfs(struct nfs_ static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(server, @@ -2237,7 +2237,7 @@ static int _nfs4_do_fsinfo(struct nfs_se static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { @@ -2280,7 +2280,7 @@ static int _nfs4_proc_pathconf(struct nf static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_pathconf *pathconf) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { @@ -2599,7 +2599,7 @@ out_free: static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) { - struct nfs4_exception exception = { }; + struct 
nfs4_exception exception = {0, 0}; ssize_t ret; do { ret = __nfs4_get_acl_uncached(inode, buf, buflen); @@ -2653,7 +2653,7 @@ static int __nfs4_proc_set_acl(struct in static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = nfs4_handle_exception(NFS_SERVER(inode), @@ -2950,7 +2950,7 @@ static int _nfs4_proc_delegreturn(struct int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid) { struct nfs_server *server = NFS_SERVER(inode); - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { err = _nfs4_proc_delegreturn(inode, cred, stateid); @@ -3025,7 +3025,7 @@ out: static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { @@ -3354,7 +3354,7 @@ static int _nfs4_do_setlk(struct nfs4_st static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) { struct nfs_server *server = NFS_SERVER(state->inode); - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { @@ -3372,7 +3372,7 @@ static int nfs4_lock_reclaim(struct nfs4 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) { struct nfs_server *server = NFS_SERVER(state->inode); - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; err = nfs4_set_lock_state(state, request); @@ -3433,7 +3433,7 @@ out: static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) { - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; do { @@ -3483,7 +3483,7 @@ nfs4_proc_lock(struct file *filp, int cm int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) { struct nfs_server *server = NFS_SERVER(state->inode); - struct nfs4_exception exception = { }; + struct nfs4_exception exception = {0, 0}; int err; err = nfs4_set_lock_state(state, fl); diff -urNp linux-2.6.22.1/fs/nfsd/nfs4state.c linux-2.6.22.1/fs/nfsd/nfs4state.c --- linux-2.6.22.1/fs/nfsd/nfs4state.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/nfsd/nfs4state.c 2007-08-02 11:38:47.000000000 -0400 @@ -1243,7 +1243,7 @@ static int access_valid(u32 x) static int deny_valid(u32 x) { - return (x >= 0 && x < 5); + return (x < 5); } static void diff -urNp linux-2.6.22.1/fs/nls/nls_base.c linux-2.6.22.1/fs/nls/nls_base.c --- linux-2.6.22.1/fs/nls/nls_base.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/nls/nls_base.c 2007-08-02 11:38:47.000000000 -0400 @@ -42,7 +42,7 @@ static struct utf8_table utf8_table[] = {0xF8, 0xF0, 3*6, 0x1FFFFF, 0x10000, /* 4 byte sequence */}, {0xFC, 0xF8, 4*6, 0x3FFFFFF, 0x200000, /* 5 byte sequence */}, {0xFE, 0xFC, 5*6, 0x7FFFFFFF, 0x4000000, /* 6 byte sequence */}, - {0, /* end of table */} + {0, 0, 0, 0, 0, /* end of table */} }; int diff -urNp linux-2.6.22.1/fs/ntfs/file.c linux-2.6.22.1/fs/ntfs/file.c --- linux-2.6.22.1/fs/ntfs/file.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/ntfs/file.c 2007-08-02 11:38:47.000000000 -0400 @@ -2295,6 +2295,6 @@ const struct inode_operations ntfs_file_ #endif /* NTFS_RW */ }; -const struct file_operations ntfs_empty_file_ops = {}; +const struct file_operations ntfs_empty_file_ops; -const struct inode_operations ntfs_empty_inode_ops = {}; +const struct 
inode_operations ntfs_empty_inode_ops; diff -urNp linux-2.6.22.1/fs/open.c linux-2.6.22.1/fs/open.c --- linux-2.6.22.1/fs/open.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/open.c 2007-08-02 11:09:15.000000000 -0400 @@ -26,6 +26,7 @@ #include #include #include +#include int vfs_statfs(struct dentry *dentry, struct kstatfs *buf) { @@ -203,6 +204,9 @@ int do_truncate(struct dentry *dentry, l if (length < 0) return -EINVAL; + if (filp && !gr_acl_handle_truncate(dentry, filp->f_vfsmnt)) + return -EACCES; + newattrs.ia_size = length; newattrs.ia_valid = ATTR_SIZE | time_attrs; if (filp) { @@ -400,6 +404,9 @@ asmlinkage long sys_faccessat(int dfd, c if(IS_RDONLY(nd.dentry->d_inode) || MNT_IS_RDONLY(nd.mnt)) res = -EROFS; + if (!res && !gr_acl_handle_access(nd.dentry, nd.mnt, mode)) + res = -EACCES; + out_path_release: path_release(&nd); out: @@ -429,6 +436,8 @@ asmlinkage long sys_chdir(const char __u if (error) goto dput_and_out; + gr_log_chdir(nd.dentry, nd.mnt); + set_fs_pwd(current->fs, nd.mnt, nd.dentry); dput_and_out: @@ -459,6 +468,13 @@ asmlinkage long sys_fchdir(unsigned int goto out_putf; error = file_permission(file, MAY_EXEC); + + if (!error && !gr_chroot_fchdir(dentry, mnt)) + error = -EPERM; + + if (!error) + gr_log_chdir(dentry, mnt); + if (!error) set_fs_pwd(current->fs, mnt, dentry); out_putf: @@ -484,8 +500,16 @@ asmlinkage long sys_chroot(const char __ if (!capable(CAP_SYS_CHROOT)) goto dput_and_out; + if (gr_handle_chroot_chroot(nd.dentry, nd.mnt)) + goto dput_and_out; + set_fs_root(current->fs, nd.mnt, nd.dentry); set_fs_altroot(); + + gr_handle_chroot_caps(current); + + gr_handle_chroot_chdir(nd.dentry, nd.mnt); + error = 0; dput_and_out: path_release(&nd); @@ -516,9 +540,22 @@ asmlinkage long sys_fchmod(unsigned int err = -EPERM; if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) goto out_putf; + + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) { + err = -EACCES; + goto out_putf; + } + mutex_lock(&inode->i_mutex); if (mode == (mode_t) -1) mode = inode->i_mode; + + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) { + err = -EPERM; + mutex_unlock(&inode->i_mutex); + goto out_putf; + } + newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; err = notify_change(dentry, &newattrs); @@ -551,9 +588,21 @@ asmlinkage long sys_fchmodat(int dfd, co if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) goto dput_and_out; + if (!gr_acl_handle_chmod(nd.dentry, nd.mnt, mode)) { + error = -EACCES; + goto dput_and_out; + }; + mutex_lock(&inode->i_mutex); if (mode == (mode_t) -1) mode = inode->i_mode; + + if (gr_handle_chroot_chmod(nd.dentry, nd.mnt, mode)) { + error = -EACCES; + mutex_unlock(&inode->i_mutex); + goto dput_and_out; + } + newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; error = notify_change(nd.dentry, &newattrs); @@ -587,6 +636,12 @@ static int chown_common(struct dentry * error = -EPERM; if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) goto out; + + if (!gr_acl_handle_chown(dentry, mnt)) { + error = -EACCES; + goto out; + } + newattrs.ia_valid = ATTR_CTIME; if (user != (uid_t) -1) { newattrs.ia_valid |= ATTR_UID; @@ -873,6 +928,7 @@ repeat: * N.B. For clone tasks sharing a files structure, this test * will limit the total number of files that can be opened. 
*/ + gr_learn_resource(current, RLIMIT_NOFILE, fd, 0); if (fd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur) goto out; diff -urNp linux-2.6.22.1/fs/partitions/efi.c linux-2.6.22.1/fs/partitions/efi.c --- linux-2.6.22.1/fs/partitions/efi.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/partitions/efi.c 2007-08-02 11:38:47.000000000 -0400 @@ -99,7 +99,7 @@ #ifdef EFI_DEBUG #define Dprintk(x...) printk(KERN_DEBUG x) #else -#define Dprintk(x...) +#define Dprintk(x...) do {} while (0) #endif /* This allows a kernel command line option 'gpt' to override diff -urNp linux-2.6.22.1/fs/pipe.c linux-2.6.22.1/fs/pipe.c --- linux-2.6.22.1/fs/pipe.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/pipe.c 2007-08-02 11:09:15.000000000 -0400 @@ -828,7 +828,7 @@ void free_pipe_info(struct inode *inode) inode->i_pipe = NULL; } -static struct vfsmount *pipe_mnt __read_mostly; +struct vfsmount *pipe_mnt __read_mostly; static int pipefs_delete_dentry(struct dentry *dentry) { /* diff -urNp linux-2.6.22.1/fs/proc/array.c linux-2.6.22.1/fs/proc/array.c --- linux-2.6.22.1/fs/proc/array.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/proc/array.c 2007-08-02 11:38:47.000000000 -0400 @@ -291,6 +291,21 @@ static inline char *task_cap(struct task (unsigned)vx_info_mbcap(vxi, p->cap_effective)); } +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) +static inline char *task_pax(struct task_struct *p, char *buffer) +{ + if (p->mm) + return buffer + sprintf(buffer, "PaX:\t%c%c%c%c%c\n", + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p', + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e', + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm', + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r', + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's'); + else + return buffer + sprintf(buffer, "PaX:\t-----\n"); +} +#endif + int proc_pid_status(struct task_struct *task, char * buffer) { char * orig = buffer; @@ -309,9 +324,20 @@ int proc_pid_status(struct task_struct * #if defined(CONFIG_S390) buffer = task_show_regs(task, buffer); #endif + +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) + buffer = task_pax(task, buffer); +#endif + return buffer - orig; } +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ + (_mm->pax_flags & MF_PAX_RANDMMAP || \ + _mm->pax_flags & MF_PAX_SEGMEXEC)) +#endif + static int do_task_stat(struct task_struct *task, char * buffer, int whole) { unsigned long vsize, eip, esp, wchan = ~0UL; @@ -398,6 +424,19 @@ static int do_task_stat(struct task_stru stime = task->stime; } +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP + if (PAX_RAND_FLAGS(mm)) { + eip = 0; + esp = 0; + wchan = 0; + } +#endif +#ifdef CONFIG_GRKERNSEC_HIDESYM + wchan = 0; + eip =0; + esp =0; +#endif + /* scale priority and nice values from timeslices to -20..20 */ /* to make it look like a "normal" Unix priority/nice value */ priority = task_prio(task); @@ -437,9 +476,15 @@ static int do_task_stat(struct task_stru vsize, mm ? get_mm_rss(mm) : 0, rsslim, +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP + PAX_RAND_FLAGS(mm) ? 1 : (mm ? mm->start_code : 0), + PAX_RAND_FLAGS(mm) ? 1 : (mm ? mm->end_code : 0), + PAX_RAND_FLAGS(mm) ? 0 : (mm ? mm->start_stack : 0), +#else mm ? mm->start_code : 0, mm ? mm->end_code : 0, mm ? mm->start_stack : 0, +#endif esp, eip, /* The signal information here is obsolete. 
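The do_task_stat() changes above blank the fields of /proc/<pid>/stat that would give away another task's layout: with CONFIG_GRKERNSEC_PROC_MEMMAP the eip/esp/wchan values and the code and stack addresses are zeroed or replaced with placeholders whenever PAX_RAND_FLAGS() holds (the mm exists, is not the caller's own, and has RANDMMAP or SEGMEXEC set), and with CONFIG_GRKERNSEC_HIDESYM they are cleared unconditionally. A compile-and-run sketch of that decision, with placeholder flag values and a plain struct standing in for mm_struct:

#include <stdio.h>

/* placeholder bit values; the real MF_PAX_* constants are defined elsewhere
 * in the patch and differ from these */
#define MF_PAX_RANDMMAP	0x01UL
#define MF_PAX_SEGMEXEC	0x02UL

struct fake_mm {
	unsigned long pax_flags;
	unsigned long start_code, end_code, start_stack;
};

/* mirrors PAX_RAND_FLAGS(): hide details of other tasks whose address
 * space is randomised or SEGMEXEC-split */
static int pax_rand_flags(const struct fake_mm *mm, const struct fake_mm *own)
{
	return mm != NULL && mm != own &&
	       (mm->pax_flags & (MF_PAX_RANDMMAP | MF_PAX_SEGMEXEC)) != 0;
}

int main(void)
{
	struct fake_mm own   = { 0, 0x400000, 0x401000, 0x7fff0000 };
	struct fake_mm other = { MF_PAX_RANDMMAP, 0x400000, 0x401000, 0x7fff0000 };
	unsigned long eip = 0xdeadbeef, esp = 0xfeedface, wchan = 0x1234;

	if (pax_rand_flags(&other, &own)) {	/* PROC_MEMMAP path */
		eip = 0;
		esp = 0;
		wchan = 0;
	}
	/* the stat line then reports 1 for start_code/end_code and 0 for
	 * start_stack instead of the real addresses */
	printf("eip=%lx esp=%lx wchan=%lx start_code=%lu\n",
	       eip, esp, wchan,
	       pax_rand_flags(&other, &own) ? 1UL : other.start_code);
	return 0;
}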
@@ -486,3 +531,14 @@ int proc_pid_statm(struct task_struct *t return sprintf(buffer,"%d %d %d %d %d %d %d\n", size, resident, shared, text, lib, data, 0); } + +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR +int proc_pid_ipaddr(struct task_struct *task, char * buffer) +{ + int len; + + len = sprintf(buffer, "%u.%u.%u.%u\n", NIPQUAD(task->signal->curr_ip)); + return len; +} +#endif + diff -urNp linux-2.6.22.1/fs/proc/base.c linux-2.6.22.1/fs/proc/base.c --- linux-2.6.22.1/fs/proc/base.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/proc/base.c 2007-08-02 11:38:47.000000000 -0400 @@ -73,6 +73,7 @@ #include #include #include +#include #include "internal.h" @@ -123,7 +124,7 @@ struct pid_entry { NULL, &proc_info_file_operations, \ { .proc_read = &proc_##OTYPE } ) -int maps_protect; +int maps_protect = 1; EXPORT_SYMBOL(maps_protect); static struct fs_struct *get_fs_struct(struct task_struct *task) @@ -197,7 +198,7 @@ static int proc_root_link(struct inode * (task->parent == current && \ (task->ptrace & PT_PTRACED) && \ (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \ - security_ptrace(current,task) == 0)) + security_ptrace(current,task) == 0 && !gr_handle_proc_ptrace(task))) static int proc_pid_environ(struct task_struct *task, char * buffer) { @@ -258,9 +259,9 @@ static int proc_pid_auxv(struct task_str struct mm_struct *mm = get_task_mm(task); if (mm) { unsigned int nwords = 0; - do + do { nwords += 2; - while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ + } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ res = nwords * sizeof(mm->saved_auxv[0]); if (res > PAGE_SIZE) res = PAGE_SIZE; @@ -333,6 +334,8 @@ static int proc_fd_access_allowed(struct task = get_proc_task(inode); if (task) { allowed = ptrace_may_attach(task); + if (allowed != 0) + allowed = !gr_acl_handle_procpidmem(task); put_task_struct(task); } return allowed; @@ -523,7 +526,7 @@ static ssize_t mem_read(struct file * fi if (!task) goto out_no_task; - if (!MAY_PTRACE(task) || !ptrace_may_attach(task)) + if (!MAY_PTRACE(task) || !ptrace_may_attach(task) || gr_acl_handle_procpidmem(task)) goto out; ret = -ENOMEM; @@ -593,7 +596,7 @@ static ssize_t mem_write(struct file * f if (!task) goto out_no_task; - if (!MAY_PTRACE(task) || !ptrace_may_attach(task)) + if (!MAY_PTRACE(task) || !ptrace_may_attach(task) || gr_acl_handle_procpidmem(task)) goto out; copied = -ENOMEM; @@ -1050,7 +1053,11 @@ static struct inode *proc_pid_make_inode inode->i_gid = 0; if (task_dumpable(task)) { inode->i_uid = task->euid; +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; +#else inode->i_gid = task->egid; +#endif } /* procfs is xid tagged */ inode->i_tag = (tag_t)vx_task_xid(task); @@ -1063,17 +1070,45 @@ static int pid_getattr(struct vfsmount * { struct inode *inode = dentry->d_inode; struct task_struct *task; +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + struct task_struct *tmp = current; +#endif + generic_fillattr(inode, stat); rcu_read_lock(); stat->uid = 0; stat->gid = 0; task = pid_task(proc_pid(inode), PIDTYPE_PID); - if (task) { + + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) { + rcu_read_unlock(); + return -ENOENT; + } + + + if (task +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + && (!tmp->uid || (tmp->uid == task->uid) +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP + || in_group_p(CONFIG_GRKERNSEC_PROC_GID) +#endif + ) +#endif + ) { if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || 
+#ifdef CONFIG_GRKERNSEC_PROC_USER + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) || +#elif CONFIG_GRKERNSEC_PROC_USERGROUP + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) || +#endif task_dumpable(task)) { stat->uid = task->euid; +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP + stat->gid = CONFIG_GRKERNSEC_PROC_GID; +#else stat->gid = task->egid; +#endif } } rcu_read_unlock(); @@ -1101,6 +1136,12 @@ static int pid_revalidate(struct dentry { struct inode *inode = dentry->d_inode; struct task_struct *task = get_proc_task(inode); + + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) { + put_task_struct(task); + goto drop; + } + if (task) { unsigned pid = name_to_int(dentry); if (pid != ~0U && pid != vx_map_pid(task->pid)) { @@ -1151,9 +1194,18 @@ static int pid_revalidate(struct dentry } if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || +#ifdef CONFIG_GRKERNSEC_PROC_USER + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) || +#elif CONFIG_GRKERNSEC_PROC_USERGROUP + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) || +#endif task_dumpable(task)) { inode->i_uid = task->euid; +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; +#else inode->i_gid = task->egid; +#endif } else { inode->i_uid = 0; inode->i_gid = 0; @@ -1371,6 +1435,9 @@ static struct dentry *proc_lookupfd_comm if (fd == ~0U) goto out; + if (gr_acl_handle_procpidmem(task)) + goto out; + result = instantiate(dir, dentry, task, &fd); out: put_task_struct(task); @@ -1407,6 +1461,8 @@ static int proc_readfd_common(struct fil goto out; filp->f_pos++; default: + if (gr_acl_handle_procpidmem(p)) + goto out; files = get_files_struct(p); if (!files) goto out; @@ -1595,6 +1651,9 @@ static struct dentry *proc_pident_lookup !memcmp(dentry->d_name.name, "ninfo", 5))) goto out; + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) + goto out; + /* * Yes, it does not scale. And it should not. Don't add * new entries into /proc// without very good reasons. 
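The pid_getattr() and pid_revalidate() hunks above determine whose ownership information a caller may see on /proc/<pid>: tasks that are hidden or chrooted relative to the caller are reported as nonexistent, while real uid/gid values are filled in only for root, for the task's own user, and, under CONFIG_GRKERNSEC_PROC_USERGROUP, for members of CONFIG_GRKERNSEC_PROC_GID; everyone else sees 0/0. A stand-alone sketch of that predicate, with plain integers in place of kernel credentials:

#include <stdio.h>

enum proc_mode { PROC_OPEN, PROC_USER, PROC_USERGROUP };	/* which option is built in */

static int caller_may_see(enum proc_mode mode, unsigned int caller_uid,
			  int caller_in_proc_gid, unsigned int task_uid)
{
	if (mode == PROC_OPEN)					/* neither restriction enabled */
		return 1;
	if (caller_uid == 0 || caller_uid == task_uid)		/* root and the owner */
		return 1;
	if (mode == PROC_USERGROUP && caller_in_proc_gid)	/* the configured gid */
		return 1;
	return 0;
}

int main(void)
{
	/* uid 1000 looking at a task owned by uid 1001 under PROC_USER: denied */
	printf("%d\n", caller_may_see(PROC_USER, 1000, 0, 1001));
	/* same caller, but a member of the proc group under PROC_USERGROUP: allowed */
	printf("%d\n", caller_may_see(PROC_USERGROUP, 1000, 1, 1001));
	return 0;
}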
@@ -1640,6 +1699,9 @@ static int proc_pident_readdir(struct fi if (!task) goto out_no_task; + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) + goto out; + ret = 0; pid = task->pid; i = filp->f_pos; @@ -1910,6 +1972,9 @@ static struct dentry *proc_base_lookup(s if (p > last) goto out; + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) + goto out; + error = proc_base_instantiate(dir, dentry, task, p); out: @@ -2006,6 +2071,9 @@ static const struct pid_entry tgid_base_ #ifdef CONFIG_TASK_IO_ACCOUNTING INF("io", S_IRUGO, pid_io_accounting), #endif +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR + INF("ipaddr", S_IRUSR, pid_ipaddr), +#endif }; static int proc_tgid_base_readdir(struct file * filp, @@ -2109,7 +2177,14 @@ static struct dentry *proc_pid_instantia if (!inode) goto out; +#ifdef CONFIG_GRKERNSEC_PROC_USER + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR; +#elif CONFIG_GRKERNSEC_PROC_USERGROUP + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP; +#else inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO; +#endif inode->i_op = &proc_tgid_base_inode_operations; inode->i_fop = &proc_tgid_base_operations; inode->i_flags|=S_IMMUTABLE; @@ -2150,7 +2225,11 @@ struct dentry *proc_pid_lookup(struct in if (!task) goto out; + if (gr_check_hidden_task(task)) + goto out_put_task; + result = proc_pid_instantiate(dir, dentry, task, NULL); +out_put_task: put_task_struct(task); out: return result; @@ -2208,6 +2287,9 @@ int proc_pid_readdir(struct file * filp, { unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY; struct task_struct *reaper = get_proc_task_real(filp->f_path.dentry->d_inode); +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + struct task_struct *tmp = current; +#endif struct task_struct *task; int tgid; @@ -2225,6 +2307,18 @@ int proc_pid_readdir(struct file * filp, task; put_task_struct(task), task = next_tgid(tgid + 1)) { tgid = task->pid; + + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task) +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + || (tmp->uid && (task->uid != tmp->uid) +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID) +#endif + ) +#endif + ) + continue; + filp->f_pos = tgid + TGID_OFFSET; if (!vx_proc_task_visible(task)) continue; diff -urNp linux-2.6.22.1/fs/proc/inode.c linux-2.6.22.1/fs/proc/inode.c --- linux-2.6.22.1/fs/proc/inode.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/proc/inode.c 2007-08-02 11:09:15.000000000 -0400 @@ -158,7 +158,11 @@ struct inode *proc_get_inode(struct supe if (de->mode) { inode->i_mode = de->mode; inode->i_uid = de->uid; +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; +#else inode->i_gid = de->gid; +#endif } if (de->vx_flags) PROC_I(inode)->vx_flags = de->vx_flags; diff -urNp linux-2.6.22.1/fs/proc/internal.h linux-2.6.22.1/fs/proc/internal.h --- linux-2.6.22.1/fs/proc/internal.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/proc/internal.h 2007-08-02 11:09:15.000000000 -0400 @@ -45,6 +45,9 @@ extern int proc_tid_stat(struct task_str extern int proc_tgid_stat(struct task_struct *, char *); extern int proc_pid_status(struct task_struct *, char *); extern int proc_pid_statm(struct task_struct *, char *); +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR +extern int proc_pid_ipaddr(struct task_struct*,char*); +#endif extern const struct file_operations proc_maps_operations; extern const struct file_operations proc_numa_maps_operations; 
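With CONFIG_GRKERNSEC_PROC_IPADDR the tgid directory gains an ipaddr entry, created with mode S_IRUSR and backed by the proc_pid_ipaddr() helper shown earlier, which formats the task's signal->curr_ip as a dotted quad. A small hypothetical user-space reader, assuming nothing beyond what the hunks above add:

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], line[64];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/proc/%s/ipaddr", argv[1]);
	f = fopen(path, "r");
	if (!f) {
		perror(path);	/* ENOENT on kernels built without the option */
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("pid %s last seen from %s", argv[1], line);
	fclose(f);
	return 0;
}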
diff -urNp linux-2.6.22.1/fs/proc/proc_misc.c linux-2.6.22.1/fs/proc/proc_misc.c --- linux-2.6.22.1/fs/proc/proc_misc.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/proc/proc_misc.c 2007-08-02 11:09:15.000000000 -0400 @@ -657,6 +657,8 @@ void create_seq_entry(char *name, mode_t void __init proc_misc_init(void) { + int gr_mode = 0; + static struct { char *name; int (*read_proc)(char*,char**,off_t,int,int*,void*); @@ -672,7 +674,9 @@ void __init proc_misc_init(void) {"stram", stram_read_proc}, #endif {"filesystems", filesystems_read_proc}, +#ifndef CONFIG_GRKERNSEC_PROC_ADD {"cmdline", cmdline_read_proc}, +#endif {"locks", locks_read_proc}, {"execdomains", execdomains_read_proc}, {NULL,} @@ -680,6 +684,15 @@ void __init proc_misc_init(void) for (p = simple_ones; p->name; p++) create_proc_read_entry(p->name, 0, NULL, p->read_proc, NULL); +#ifdef CONFIG_GRKERNSEC_PROC_USER + gr_mode = S_IRUSR; +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + gr_mode = S_IRUSR | S_IRGRP; +#endif +#ifdef CONFIG_GRKERNSEC_PROC_ADD + create_proc_read_entry("cmdline", gr_mode, NULL, &cmdline_read_proc, NULL); +#endif + proc_symlink("mounts", NULL, "self/mounts"); /* And now for trickier ones */ @@ -691,7 +704,11 @@ void __init proc_misc_init(void) entry->proc_fops = &proc_kmsg_operations; } #endif +#ifdef CONFIG_GRKERNSEC_PROC_ADD + create_seq_entry("devices", gr_mode, &proc_devinfo_operations); +#else create_seq_entry("devices", 0, &proc_devinfo_operations); +#endif create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations); #ifdef CONFIG_BLOCK create_seq_entry("partitions", 0, &proc_partitions_operations); @@ -699,7 +716,11 @@ void __init proc_misc_init(void) create_seq_entry("stat", 0, &proc_stat_operations); create_seq_entry("interrupts", 0, &proc_interrupts_operations); #ifdef CONFIG_SLAB +#ifdef CONFIG_GRKERNSEC_PROC_ADD + create_seq_entry("slabinfo",S_IWUSR|gr_mode,&proc_slabinfo_operations); +#else create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations); +#endif #ifdef CONFIG_DEBUG_SLAB_LEAK create_seq_entry("slab_allocators", 0 ,&proc_slabstats_operations); #endif @@ -716,7 +737,7 @@ void __init proc_misc_init(void) #ifdef CONFIG_SCHEDSTATS create_seq_entry("schedstat", 0, &proc_schedstat_operations); #endif -#ifdef CONFIG_PROC_KCORE +#if defined(CONFIG_PROC_KCORE) && !defined(CONFIG_GRKERNSEC_PROC_ADD) proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL); if (proc_root_kcore) { proc_root_kcore->proc_fops = &proc_kcore_operations; diff -urNp linux-2.6.22.1/fs/proc/proc_sysctl.c linux-2.6.22.1/fs/proc/proc_sysctl.c --- linux-2.6.22.1/fs/proc/proc_sysctl.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/proc/proc_sysctl.c 2007-08-02 11:09:15.000000000 -0400 @@ -7,6 +7,8 @@ #include #include "internal.h" +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op); + static struct dentry_operations proc_sys_dentry_operations; static const struct file_operations proc_sys_file_operations; static struct inode_operations proc_sys_inode_operations; @@ -151,6 +153,9 @@ static struct dentry *proc_sys_lookup(st if (!table) goto out; + if (gr_handle_sysctl(table, 001)) + goto out; + err = ERR_PTR(-ENOMEM); inode = proc_sys_make_inode(dir, table); if (!inode) @@ -358,6 +363,9 @@ static int proc_sys_readdir(struct file if (pos < filp->f_pos) continue; + if (gr_handle_sysctl(table, 0)) + continue; + if (proc_sys_fill_cache(filp, dirent, filldir, table) < 0) goto out; filp->f_pos = pos + 1; @@ -420,6 +428,30 @@ out: return error; } +/* Eric Biederman is to blame 
*/ +static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +{ + int error = 0; + struct ctl_table_header *head; + struct ctl_table *table; + + table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head); + /* Has the sysctl entry disappeared on us? */ + if (!table) + goto out; + + if (gr_handle_sysctl(table, 001)) { + error = -ENOENT; + goto out; + } + +out: + sysctl_head_finish(head); + + generic_fillattr(dentry->d_inode, stat); + + return error; +} static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; @@ -448,6 +480,7 @@ static struct inode_operations proc_sys_ .lookup = proc_sys_lookup, .permission = proc_sys_permission, .setattr = proc_sys_setattr, + .getattr = proc_sys_getattr, }; static int proc_sys_revalidate(struct dentry *dentry, struct nameidata *nd) diff -urNp linux-2.6.22.1/fs/proc/root.c linux-2.6.22.1/fs/proc/root.c --- linux-2.6.22.1/fs/proc/root.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/proc/root.c 2007-08-02 11:09:15.000000000 -0400 @@ -64,7 +64,13 @@ void __init proc_root_init(void) return; } proc_misc_init(); +#ifdef CONFIG_GRKERNSEC_PROC_USER + proc_net = proc_mkdir_mode("net", S_IRUSR | S_IXUSR, NULL); +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + proc_net = proc_mkdir_mode("net", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); +#else proc_net = proc_mkdir("net", NULL); +#endif proc_net_stat = proc_mkdir("net/stat", NULL); #ifdef CONFIG_SYSVIPC @@ -78,7 +84,15 @@ void __init proc_root_init(void) #ifdef CONFIG_PROC_DEVICETREE proc_device_tree_init(); #endif +#ifdef CONFIG_GRKERNSEC_PROC_ADD +#ifdef CONFIG_GRKERNSEC_PROC_USER + proc_bus = proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL); +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + proc_bus = proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); +#endif +#else proc_bus = proc_mkdir("bus", NULL); +#endif proc_vx_init(); proc_sys_init(); } diff -urNp linux-2.6.22.1/fs/proc/task_mmu.c linux-2.6.22.1/fs/proc/task_mmu.c --- linux-2.6.22.1/fs/proc/task_mmu.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/proc/task_mmu.c 2007-08-02 11:38:47.000000000 -0400 @@ -44,15 +44,27 @@ char *task_mem(struct mm_struct *mm, cha "VmStk:\t%8lu kB\n" "VmExe:\t%8lu kB\n" "VmLib:\t%8lu kB\n" - "VmPTE:\t%8lu kB\n", - hiwater_vm << (PAGE_SHIFT-10), + "VmPTE:\t%8lu kB\n" + +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT + "CsBase:\t%8lx\nCsLim:\t%8lx\n" +#endif + + ,hiwater_vm << (PAGE_SHIFT-10), (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10), mm->locked_vm << (PAGE_SHIFT-10), hiwater_rss << (PAGE_SHIFT-10), total_rss << (PAGE_SHIFT-10), data << (PAGE_SHIFT-10), mm->stack_vm << (PAGE_SHIFT-10), text, lib, - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10); + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10 + +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT + , mm->context.user_cs_base, mm->context.user_cs_limit +#endif + + ); + return buffer; } @@ -131,6 +143,12 @@ struct pmd_walker { unsigned long, void *); }; +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ + (_mm->pax_flags & MF_PAX_RANDMMAP || \ + _mm->pax_flags & MF_PAX_SEGMEXEC)) +#endif + static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss) { struct proc_maps_private *priv = m->private; @@ -153,13 +171,22 @@ static int show_map_internal(struct seq_ } seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n", +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP + PAX_RAND_FLAGS(mm) ? 
0UL : vma->vm_start, + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end, +#else vma->vm_start, vma->vm_end, +#endif flags & VM_READ ? 'r' : '-', flags & VM_WRITE ? 'w' : '-', flags & VM_EXEC ? 'x' : '-', flags & VM_MAYSHARE ? 's' : 'p', +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_pgoff << PAGE_SHIFT, +#else vma->vm_pgoff << PAGE_SHIFT, +#endif MAJOR(dev), MINOR(dev), ino, &len); /* @@ -173,11 +200,11 @@ static int show_map_internal(struct seq_ const char *name = arch_vma_name(vma); if (!name) { if (mm) { - if (vma->vm_start <= mm->start_brk && - vma->vm_end >= mm->brk) { + if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { name = "[heap]"; - } else if (vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack) { + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) || + (vma->vm_start <= mm->start_stack && + vma->vm_end >= mm->start_stack)) { name = "[stack]"; } } else { @@ -191,7 +218,27 @@ static int show_map_internal(struct seq_ } seq_putc(m, '\n'); - if (mss) + + if (mss) { +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP + if (PAX_RAND_FLAGS(mm)) + seq_printf(m, + "Size: %8lu kB\n" + "Rss: %8lu kB\n" + "Shared_Clean: %8lu kB\n" + "Shared_Dirty: %8lu kB\n" + "Private_Clean: %8lu kB\n" + "Private_Dirty: %8lu kB\n", + "Referenced: %8lu kB\n", + 0UL, + 0UL, + 0UL, + 0UL, + 0UL, + 0UL, + 0UL); + else +#endif seq_printf(m, "Size: %8lu kB\n" "Rss: %8lu kB\n" @@ -207,6 +254,7 @@ static int show_map_internal(struct seq_ mss->private_clean >> 10, mss->private_dirty >> 10, mss->referenced >> 10); + } if (m->count < m->size) /* vma is copied successfully */ m->version = (vma != get_gate_vma(task))? vma->vm_start: 0; diff -urNp linux-2.6.22.1/fs/readdir.c linux-2.6.22.1/fs/readdir.c --- linux-2.6.22.1/fs/readdir.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/readdir.c 2007-08-02 11:09:15.000000000 -0400 @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include @@ -64,6 +66,7 @@ struct old_linux_dirent { struct readdir_callback { struct old_linux_dirent __user * dirent; + struct file * file; int result; }; @@ -79,6 +82,10 @@ static int fillonedir(void * __buf, cons d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) return -EOVERFLOW; + + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) + return 0; + buf->result++; dirent = buf->dirent; if (!access_ok(VERIFY_WRITE, dirent, @@ -110,6 +117,7 @@ asmlinkage long old_readdir(unsigned int buf.result = 0; buf.dirent = dirent; + buf.file = file; error = vfs_readdir(file, fillonedir, &buf); if (error >= 0) @@ -136,6 +144,7 @@ struct linux_dirent { struct getdents_callback { struct linux_dirent __user * current_dir; struct linux_dirent __user * previous; + struct file * file; int count; int error; }; @@ -154,6 +163,10 @@ static int filldir(void * __buf, const c d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) return -EOVERFLOW; + + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) + return 0; + dirent = buf->previous; if (dirent) { if (__put_user(offset, &dirent->d_off)) @@ -200,6 +213,7 @@ asmlinkage long sys_getdents(unsigned in buf.previous = NULL; buf.count = count; buf.error = 0; + buf.file = file; error = vfs_readdir(file, filldir, &buf); if (error < 0) @@ -222,6 +236,7 @@ out: struct getdents_callback64 { struct linux_dirent64 __user * current_dir; struct linux_dirent64 __user * previous; + struct file *file; int count; int error; }; @@ -236,6 +251,10 @@ static int filldir64(void * __buf, const buf->error = -EINVAL; /* only used if we fail.. 
*/ if (reclen > buf->count) return -EINVAL; + + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) + return 0; + dirent = buf->previous; if (dirent) { if (__put_user(offset, &dirent->d_off)) @@ -282,6 +301,7 @@ asmlinkage long sys_getdents64(unsigned buf.current_dir = dirent; buf.previous = NULL; + buf.file = file; buf.count = count; buf.error = 0; diff -urNp linux-2.6.22.1/fs/udf/balloc.c linux-2.6.22.1/fs/udf/balloc.c --- linux-2.6.22.1/fs/udf/balloc.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/udf/balloc.c 2007-08-02 11:38:47.000000000 -0400 @@ -153,8 +153,7 @@ static void udf_bitmap_free_blocks(struc unsigned long overflow; mutex_lock(&sbi->s_alloc_mutex); - if (bloc.logicalBlockNum < 0 || - (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) + if (bloc.logicalBlockNum + count > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) { udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count, @@ -227,7 +226,7 @@ static int udf_bitmap_prealloc_blocks(st struct buffer_head *bh; mutex_lock(&sbi->s_alloc_mutex); - if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) + if (first_block >= UDF_SB_PARTLEN(sb, partition)) goto out; if (first_block + block_count > UDF_SB_PARTLEN(sb, partition)) @@ -294,7 +293,7 @@ static int udf_bitmap_new_block(struct s mutex_lock(&sbi->s_alloc_mutex); repeat: - if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) + if (goal >= UDF_SB_PARTLEN(sb, partition)) goal = 0; nr_groups = bitmap->s_nr_groups; @@ -434,8 +433,7 @@ static void udf_table_free_blocks(struct int i; mutex_lock(&sbi->s_alloc_mutex); - if (bloc.logicalBlockNum < 0 || - (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) + if (bloc.logicalBlockNum + count > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) { udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count, @@ -679,7 +677,7 @@ static int udf_table_prealloc_blocks(str struct extent_position epos; int8_t etype = -1; - if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) + if (first_block >= UDF_SB_PARTLEN(sb, partition)) return 0; if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) @@ -758,7 +756,7 @@ static int udf_table_new_block(struct su return newblock; mutex_lock(&sbi->s_alloc_mutex); - if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) + if (goal >= UDF_SB_PARTLEN(sb, partition)) goal = 0; /* We search for the closest matching block to goal. 
If we find a exact hit, diff -urNp linux-2.6.22.1/fs/udf/inode.c linux-2.6.22.1/fs/udf/inode.c --- linux-2.6.22.1/fs/udf/inode.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/udf/inode.c 2007-08-02 11:38:47.000000000 -0400 @@ -311,9 +311,6 @@ static int udf_get_block(struct inode *i lock_kernel(); - if (block < 0) - goto abort_negative; - if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1) { UDF_I_NEXT_ALLOC_BLOCK(inode) ++; @@ -334,10 +331,6 @@ static int udf_get_block(struct inode *i abort: unlock_kernel(); return err; - -abort_negative: - udf_warning(inode->i_sb, "udf_get_block", "block < 0"); - goto abort; } static struct buffer_head * diff -urNp linux-2.6.22.1/fs/ufs/inode.c linux-2.6.22.1/fs/ufs/inode.c --- linux-2.6.22.1/fs/ufs/inode.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/ufs/inode.c 2007-08-02 11:38:47.000000000 -0400 @@ -55,9 +55,7 @@ static int ufs_block_to_path(struct inod UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks); - if (i_block < 0) { - ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0"); - } else if (i_block < direct_blocks) { + if (i_block < direct_blocks) { offsets[n++] = i_block; } else if ((i_block -= direct_blocks) < indirect_blocks) { offsets[n++] = UFS_IND_BLOCK; @@ -439,8 +437,6 @@ int ufs_getfrag_block(struct inode *inod lock_kernel(); UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment); - if (fragment < 0) - goto abort_negative; if (fragment > ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb) << uspi->s_fpbshift)) @@ -503,10 +499,6 @@ abort: unlock_kernel(); return err; -abort_negative: - ufs_warning(sb, "ufs_get_block", "block < 0"); - goto abort; - abort_too_big: ufs_warning(sb, "ufs_get_block", "block > big"); goto abort; diff -urNp linux-2.6.22.1/fs/utimes.c linux-2.6.22.1/fs/utimes.c --- linux-2.6.22.1/fs/utimes.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/utimes.c 2007-08-02 11:09:15.000000000 -0400 @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -47,6 +48,7 @@ long do_utimes(int dfd, char __user *fil int error; struct nameidata nd; struct dentry *dentry; + struct vfsmount *mnt; struct inode *inode; struct iattr newattrs; struct file *f = NULL; @@ -65,6 +67,7 @@ long do_utimes(int dfd, char __user *fil if (!f) goto out; dentry = f->f_path.dentry; + mnt = f->f_path.mnt; } else { error = __user_walk_fd(dfd, filename, (flags & AT_SYMLINK_NOFOLLOW) ? 
0 : LOOKUP_FOLLOW, &nd); if (error) @@ -78,6 +81,7 @@ long do_utimes(int dfd, char __user *fil if (error) goto dput_and_out; dentry = nd.dentry; + mnt = nd.mnt; } inode = dentry->d_inode; @@ -117,6 +121,12 @@ long do_utimes(int dfd, char __user *fil } } } + + if (!gr_acl_handle_utime(dentry, mnt)) { + error = -EACCES; + goto dput_and_out; + } + mutex_lock(&inode->i_mutex); error = notify_change(dentry, &newattrs); mutex_unlock(&inode->i_mutex); diff -urNp linux-2.6.22.1/fs/xfs/xfs_bmap.c linux-2.6.22.1/fs/xfs/xfs_bmap.c --- linux-2.6.22.1/fs/xfs/xfs_bmap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/fs/xfs/xfs_bmap.c 2007-08-02 11:38:47.000000000 -0400 @@ -365,7 +365,7 @@ xfs_bmap_validate_ret( int nmap, int ret_nmap); #else -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0) #endif /* DEBUG */ #if defined(XFS_RW_TRACE) diff -urNp linux-2.6.22.1/grsecurity/gracl_alloc.c linux-2.6.22.1/grsecurity/gracl_alloc.c --- linux-2.6.22.1/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/gracl_alloc.c 2007-08-02 11:09:15.000000000 -0400 @@ -0,0 +1,91 @@ +#include +#include +#include +#include +#include +#include + +static unsigned long alloc_stack_next = 1; +static unsigned long alloc_stack_size = 1; +static void **alloc_stack; + +static __inline__ int +alloc_pop(void) +{ + if (alloc_stack_next == 1) + return 0; + + kfree(alloc_stack[alloc_stack_next - 2]); + + alloc_stack_next--; + + return 1; +} + +static __inline__ void +alloc_push(void *buf) +{ + if (alloc_stack_next >= alloc_stack_size) + BUG(); + + alloc_stack[alloc_stack_next - 1] = buf; + + alloc_stack_next++; + + return; +} + +void * +acl_alloc(unsigned long len) +{ + void *ret; + + if (len > PAGE_SIZE) + BUG(); + + ret = kmalloc(len, GFP_KERNEL); + + if (ret) + alloc_push(ret); + + return ret; +} + +void +acl_free_all(void) +{ + if (gr_acl_is_enabled() || !alloc_stack) + return; + + while (alloc_pop()) ; + + if (alloc_stack) { + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE) + kfree(alloc_stack); + else + vfree(alloc_stack); + } + + alloc_stack = NULL; + alloc_stack_size = 1; + alloc_stack_next = 1; + + return; +} + +int +acl_alloc_stack_init(unsigned long size) +{ + if ((size * sizeof (void *)) <= PAGE_SIZE) + alloc_stack = + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL); + else + alloc_stack = (void **) vmalloc(size * sizeof (void *)); + + alloc_stack_size = size; + + if (!alloc_stack) + return 0; + else + return 1; +} diff -urNp linux-2.6.22.1/grsecurity/gracl.c linux-2.6.22.1/grsecurity/gracl.c --- linux-2.6.22.1/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/gracl.c 2007-08-03 10:51:44.000000000 -0400 @@ -0,0 +1,3675 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static struct acl_role_db acl_role_set; +static struct name_db name_set; +static struct inodev_db inodev_set; + +/* for keeping track of userspace pointers used for subjects, so we + can share references in the kernel as well +*/ + +static struct dentry *real_root; +static struct vfsmount *real_root_mnt; + +static struct acl_subj_map_db subj_map_set; + +static struct acl_role_label *default_role; + +static u16 acl_sp_role_value; + +extern char 
*gr_shared_page[4]; +static DECLARE_MUTEX(gr_dev_sem); +rwlock_t gr_inode_lock = RW_LOCK_UNLOCKED; + +struct gr_arg *gr_usermode; + +static unsigned int gr_status = GR_STATUS_INIT; + +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum); +extern void gr_clear_learn_entries(void); + +#ifdef CONFIG_GRKERNSEC_RESLOG +extern void gr_log_resource(const struct task_struct *task, + const int res, const unsigned long wanted, const int gt); +#endif + +unsigned char *gr_system_salt; +unsigned char *gr_system_sum; + +static struct sprole_pw **acl_special_roles = NULL; +static __u16 num_sprole_pws = 0; + +static struct acl_role_label *kernel_role = NULL; + +static unsigned int gr_auth_attempts = 0; +static unsigned long gr_auth_expires = 0UL; + +extern struct vfsmount *sock_mnt; +extern struct vfsmount *pipe_mnt; +extern struct vfsmount *shm_mnt; +static struct acl_object_label *fakefs_obj; + +extern int gr_init_uidset(void); +extern void gr_free_uidset(void); +extern void gr_remove_uid(uid_t uid); +extern int gr_find_uid(uid_t uid); + +__inline__ int +gr_acl_is_enabled(void) +{ + return (gr_status & GR_READY); +} + +char gr_roletype_to_char(void) +{ + switch (current->role->roletype & + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP | + GR_ROLE_SPECIAL)) { + case GR_ROLE_DEFAULT: + return 'D'; + case GR_ROLE_USER: + return 'U'; + case GR_ROLE_GROUP: + return 'G'; + case GR_ROLE_SPECIAL: + return 'S'; + } + + return 'X'; +} + +__inline__ int +gr_acl_tpe_check(void) +{ + if (unlikely(!(gr_status & GR_READY))) + return 0; + if (current->role->roletype & GR_ROLE_TPE) + return 1; + else + return 0; +} + +int +gr_handle_rawio(const struct inode *inode) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + if (inode && S_ISBLK(inode->i_mode) && + grsec_enable_chroot_caps && proc_is_chrooted(current) && + !capable(CAP_SYS_RAWIO)) + return 1; +#endif + return 0; +} + +static int +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb) +{ + int i; + unsigned long *l1; + unsigned long *l2; + unsigned char *c1; + unsigned char *c2; + int num_longs; + + if (likely(lena != lenb)) + return 0; + + l1 = (unsigned long *)a; + l2 = (unsigned long *)b; + + num_longs = lena / sizeof(unsigned long); + + for (i = num_longs; i--; l1++, l2++) { + if (unlikely(*l1 != *l2)) + return 0; + } + + c1 = (unsigned char *) l1; + c2 = (unsigned char *) l2; + + i = lena - (num_longs * sizeof(unsigned long)); + + for (; i--; c1++, c2++) { + if (unlikely(*c1 != *c2)) + return 0; + } + + return 1; +} + +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt, + struct dentry *root, struct vfsmount *rootmnt, + char *buffer, int buflen) +{ + char * end = buffer+buflen; + char * retval; + int namelen; + + *--end = '\0'; + buflen--; + + if (buflen < 1) + goto Elong; + /* Get '/' right */ + retval = end-1; + *retval = '/'; + + for (;;) { + struct dentry * parent; + + if (dentry == root && vfsmnt == rootmnt) + break; + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { + /* Global root? 
*/ + spin_lock(&vfsmount_lock); + if (vfsmnt->mnt_parent == vfsmnt) { + spin_unlock(&vfsmount_lock); + goto global_root; + } + dentry = vfsmnt->mnt_mountpoint; + vfsmnt = vfsmnt->mnt_parent; + spin_unlock(&vfsmount_lock); + continue; + } + parent = dentry->d_parent; + prefetch(parent); + namelen = dentry->d_name.len; + buflen -= namelen + 1; + if (buflen < 0) + goto Elong; + end -= namelen; + memcpy(end, dentry->d_name.name, namelen); + *--end = '/'; + retval = end; + dentry = parent; + } + + return retval; + +global_root: + namelen = dentry->d_name.len; + buflen -= namelen; + if (buflen < 0) + goto Elong; + retval -= namelen-1; /* hit the slash */ + memcpy(retval, dentry->d_name.name, namelen); + return retval; +Elong: + return ERR_PTR(-ENAMETOOLONG); +} + +static char * +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt, + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen) +{ + char *retval; + + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen); + if (unlikely(IS_ERR(retval))) + retval = strcpy(buf, ""); + else if (unlikely(retval[1] == '/' && retval[2] == '\0')) + retval[1] = '\0'; + + return retval; +} + +static char * +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, + char *buf, int buflen) +{ + char *res; + + /* we can use real_root, real_root_mnt, because this is only called + by the RBAC system */ + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen); + + return res; +} + +static char * +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, + char *buf, int buflen) +{ + char *res; + struct dentry *root; + struct vfsmount *rootmnt; + struct task_struct *reaper = child_reaper(current); + + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */ + read_lock(&reaper->fs->lock); + root = dget(reaper->fs->root); + rootmnt = mntget(reaper->fs->rootmnt); + read_unlock(&reaper->fs->lock); + + spin_lock(&dcache_lock); + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen); + spin_unlock(&dcache_lock); + + dput(root); + mntput(rootmnt); + return res; +} + +static char * +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt) +{ + char *ret; + spin_lock(&dcache_lock); + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), + PAGE_SIZE); + spin_unlock(&dcache_lock); + return ret; +} + +char * +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), + PAGE_SIZE); +} + +char * +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), + PAGE_SIZE); +} + +char * +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()), + PAGE_SIZE); +} + +char * +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()), + PAGE_SIZE); +} + +char * +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()), + PAGE_SIZE); +} + +__inline__ __u32 +to_gr_audit(const __u32 reqmode) +{ + /* masks off auditable permission flags, 
then shifts them to create + auditing flags, and adds the special case of append auditing if + we're requesting write */ + return (((reqmode & GR_AUDIT_READ) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0)); +} + +struct acl_subject_label * +lookup_subject_map(const struct acl_subject_label *userp) +{ + unsigned int index = shash(userp, subj_map_set.s_size); + struct subject_map *match; + + match = subj_map_set.s_hash[index]; + + while (match && match->user != userp) + match = match->next; + + if (match != NULL) + return match->kernel; + else + return NULL; +} + +static void +insert_subj_map_entry(struct subject_map *subjmap) +{ + unsigned int index = shash(subjmap->user, subj_map_set.s_size); + struct subject_map **curr; + + subjmap->prev = NULL; + + curr = &subj_map_set.s_hash[index]; + if (*curr != NULL) + (*curr)->prev = subjmap; + + subjmap->next = *curr; + *curr = subjmap; + + return; +} + +static struct acl_role_label * +lookup_acl_role_label(const struct task_struct *task, const uid_t uid, + const gid_t gid) +{ + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size); + struct acl_role_label *match; + struct role_allowed_ip *ipp; + unsigned int x; + + match = acl_role_set.r_hash[index]; + + while (match) { + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) { + for (x = 0; x < match->domain_child_num; x++) { + if (match->domain_children[x] == uid) + goto found; + } + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER) + break; + match = match->next; + } +found: + if (match == NULL) { + try_group: + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size); + match = acl_role_set.r_hash[index]; + + while (match) { + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) { + for (x = 0; x < match->domain_child_num; x++) { + if (match->domain_children[x] == gid) + goto found2; + } + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP) + break; + match = match->next; + } +found2: + if (match == NULL) + match = default_role; + if (match->allowed_ips == NULL) + return match; + else { + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { + if (likely + ((ntohl(task->signal->curr_ip) & ipp->netmask) == + (ntohl(ipp->addr) & ipp->netmask))) + return match; + } + match = default_role; + } + } else if (match->allowed_ips == NULL) { + return match; + } else { + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { + if (likely + ((ntohl(task->signal->curr_ip) & ipp->netmask) == + (ntohl(ipp->addr) & ipp->netmask))) + return match; + } + goto try_group; + } + + return match; +} + +struct acl_subject_label * +lookup_acl_subj_label(const ino_t ino, const dev_t dev, + const struct acl_role_label *role) +{ + unsigned int index = fhash(ino, dev, role->subj_hash_size); + struct acl_subject_label *match; + + match = role->subj_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + (match->mode & GR_DELETED))) { + match = match->next; + } + + if (match && !(match->mode & GR_DELETED)) + return match; + else + return NULL; +} + +static struct acl_object_label * +lookup_acl_obj_label(const ino_t ino, const dev_t dev, + const struct acl_subject_label *subj) +{ + unsigned int index = fhash(ino, dev, subj->obj_hash_size); + struct acl_object_label *match; + + match = subj->obj_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + (match->mode & GR_DELETED))) { + match = match->next; + } + + if (match && !(match->mode & 
GR_DELETED)) + return match; + else + return NULL; +} + +static struct acl_object_label * +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev, + const struct acl_subject_label *subj) +{ + unsigned int index = fhash(ino, dev, subj->obj_hash_size); + struct acl_object_label *match; + + match = subj->obj_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + !(match->mode & GR_DELETED))) { + match = match->next; + } + + if (match && (match->mode & GR_DELETED)) + return match; + + match = subj->obj_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + (match->mode & GR_DELETED))) { + match = match->next; + } + + if (match && !(match->mode & GR_DELETED)) + return match; + else + return NULL; +} + +static struct name_entry * +lookup_name_entry(const char *name) +{ + unsigned int len = strlen(name); + unsigned int key = full_name_hash(name, len); + unsigned int index = key % name_set.n_size; + struct name_entry *match; + + match = name_set.n_hash[index]; + + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len))) + match = match->next; + + return match; +} + +static struct inodev_entry * +lookup_inodev_entry(const ino_t ino, const dev_t dev) +{ + unsigned int index = fhash(ino, dev, inodev_set.i_size); + struct inodev_entry *match; + + match = inodev_set.i_hash[index]; + + while (match && (match->nentry->inode != ino || match->nentry->device != dev)) + match = match->next; + + return match; +} + +static void +insert_inodev_entry(struct inodev_entry *entry) +{ + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device, + inodev_set.i_size); + struct inodev_entry **curr; + + entry->prev = NULL; + + curr = &inodev_set.i_hash[index]; + if (*curr != NULL) + (*curr)->prev = entry; + + entry->next = *curr; + *curr = entry; + + return; +} + +static void +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid) +{ + unsigned int index = + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size); + struct acl_role_label **curr; + + role->prev = NULL; + + curr = &acl_role_set.r_hash[index]; + if (*curr != NULL) + (*curr)->prev = role; + + role->next = *curr; + *curr = role; + + return; +} + +static void +insert_acl_role_label(struct acl_role_label *role) +{ + int i; + + if (role->roletype & GR_ROLE_DOMAIN) { + for (i = 0; i < role->domain_child_num; i++) + __insert_acl_role_label(role, role->domain_children[i]); + } else + __insert_acl_role_label(role, role->uidgid); +} + +static int +insert_name_entry(char *name, const ino_t inode, const dev_t device) +{ + struct name_entry **curr, *nentry; + struct inodev_entry *ientry; + unsigned int len = strlen(name); + unsigned int key = full_name_hash(name, len); + unsigned int index = key % name_set.n_size; + + curr = &name_set.n_hash[index]; + + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len))) + curr = &((*curr)->next); + + if (*curr != NULL) + return 1; + + nentry = acl_alloc(sizeof (struct name_entry)); + if (nentry == NULL) + return 0; + ientry = acl_alloc(sizeof (struct inodev_entry)); + if (ientry == NULL) + return 0; + ientry->nentry = nentry; + + nentry->key = key; + nentry->name = name; + nentry->inode = inode; + nentry->device = device; + nentry->len = len; + + nentry->prev = NULL; + curr = &name_set.n_hash[index]; + if (*curr != NULL) + (*curr)->prev = nentry; + nentry->next = *curr; + *curr = nentry; + + /* insert us into the table searchable by inode/dev */ + 
insert_inodev_entry(ientry); + + return 1; +} + +static void +insert_acl_obj_label(struct acl_object_label *obj, + struct acl_subject_label *subj) +{ + unsigned int index = + fhash(obj->inode, obj->device, subj->obj_hash_size); + struct acl_object_label **curr; + + + obj->prev = NULL; + + curr = &subj->obj_hash[index]; + if (*curr != NULL) + (*curr)->prev = obj; + + obj->next = *curr; + *curr = obj; + + return; +} + +static void +insert_acl_subj_label(struct acl_subject_label *obj, + struct acl_role_label *role) +{ + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size); + struct acl_subject_label **curr; + + obj->prev = NULL; + + curr = &role->subj_hash[index]; + if (*curr != NULL) + (*curr)->prev = obj; + + obj->next = *curr; + *curr = obj; + + return; +} + +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */ + +static void * +create_table(__u32 * len, int elementsize) +{ + unsigned int table_sizes[] = { + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381, + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143, + 4194301, 8388593, 16777213, 33554393, 67108859, 134217689, + 268435399, 536870909, 1073741789, 2147483647 + }; + void *newtable = NULL; + unsigned int pwr = 0; + + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) && + table_sizes[pwr] <= *len) + pwr++; + + if (table_sizes[pwr] <= *len) + return newtable; + + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE) + newtable = + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL); + else + newtable = vmalloc(table_sizes[pwr] * elementsize); + + *len = table_sizes[pwr]; + + return newtable; +} + +static int +init_variables(const struct gr_arg *arg) +{ + struct task_struct *reaper = child_reaper(current); + unsigned int stacksize; + + subj_map_set.s_size = arg->role_db.num_subjects; + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children; + name_set.n_size = arg->role_db.num_objects; + inodev_set.i_size = arg->role_db.num_objects; + + if (!subj_map_set.s_size || !acl_role_set.r_size || + !name_set.n_size || !inodev_set.i_size) + return 1; + + if (!gr_init_uidset()) + return 1; + + /* set up the stack that holds allocation info */ + + stacksize = arg->role_db.num_pointers + 5; + + if (!acl_alloc_stack_init(stacksize)) + return 1; + + /* grab reference for the real root dentry and vfsmount */ + read_lock(&reaper->fs->lock); + real_root_mnt = mntget(reaper->fs->rootmnt); + real_root = dget(reaper->fs->root); + read_unlock(&reaper->fs->lock); + + fakefs_obj = acl_alloc(sizeof(struct acl_object_label)); + if (fakefs_obj == NULL) + return 1; + fakefs_obj->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC; + + subj_map_set.s_hash = + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *)); + acl_role_set.r_hash = + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *)); + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *)); + inodev_set.i_hash = + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *)); + + if (!subj_map_set.s_hash || !acl_role_set.r_hash || + !name_set.n_hash || !inodev_set.i_hash) + return 1; + + memset(subj_map_set.s_hash, 0, + sizeof(struct subject_map *) * subj_map_set.s_size); + memset(acl_role_set.r_hash, 0, + sizeof (struct acl_role_label *) * acl_role_set.r_size); + memset(name_set.n_hash, 0, + sizeof (struct name_entry *) * name_set.n_size); + memset(inodev_set.i_hash, 0, + sizeof (struct inodev_entry *) * inodev_set.i_size); + 
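/*
 * Editor's illustration (not part of the patch): create_table() above sizes
 * each chained hash table with the smallest prime larger than the expected
 * element count, so the load factor (lambda = elements / buckets) stays at or
 * just below 1 and the expected chain length is about one node.  A minimal
 * userspace sketch of that sizing rule follows; the names are hypothetical
 * and the prime list is a truncated copy of the one in create_table().
 */
#include <stdio.h>

static unsigned int pick_bucket_count(unsigned int nelem)
{
	/* same idea as create_table(): first prime strictly greater than nelem */
	static const unsigned int primes[] = {
		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191
	};
	unsigned int i;

	for (i = 0; i < sizeof(primes) / sizeof(primes[0]); i++)
		if (primes[i] > nelem)
			return primes[i];
	return 0;	/* caller must treat 0 as "request too large" */
}

int main(void)
{
	unsigned int n = 1500;
	unsigned int buckets = pick_bucket_count(n);

	/* with 1500 entries in 2039 buckets, chains stay short on average */
	printf("%u elements -> %u buckets (load %.2f)\n",
	       n, buckets, (double)n / buckets);
	return 0;
}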
+ return 0; +} + +/* free information not needed after startup + currently contains user->kernel pointer mappings for subjects +*/ + +static void +free_init_variables(void) +{ + __u32 i; + + if (subj_map_set.s_hash) { + for (i = 0; i < subj_map_set.s_size; i++) { + if (subj_map_set.s_hash[i]) { + kfree(subj_map_set.s_hash[i]); + subj_map_set.s_hash[i] = NULL; + } + } + + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <= + PAGE_SIZE) + kfree(subj_map_set.s_hash); + else + vfree(subj_map_set.s_hash); + } + + return; +} + +static void +free_variables(void) +{ + struct acl_subject_label *s; + struct acl_role_label *r; + struct task_struct *task, *task2; + unsigned int i, x; + + gr_clear_learn_entries(); + + read_lock(&tasklist_lock); + do_each_thread(task2, task) { + task->acl_sp_role = 0; + task->acl_role_id = 0; + task->acl = NULL; + task->role = NULL; + } while_each_thread(task2, task); + read_unlock(&tasklist_lock); + + /* release the reference to the real root dentry and vfsmount */ + if (real_root) + dput(real_root); + real_root = NULL; + if (real_root_mnt) + mntput(real_root_mnt); + real_root_mnt = NULL; + + /* free all object hash tables */ + + FOR_EACH_ROLE_START(r, i) + if (r->subj_hash == NULL) + break; + FOR_EACH_SUBJECT_START(r, s, x) + if (s->obj_hash == NULL) + break; + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) + kfree(s->obj_hash); + else + vfree(s->obj_hash); + FOR_EACH_SUBJECT_END(s, x) + FOR_EACH_NESTED_SUBJECT_START(r, s) + if (s->obj_hash == NULL) + break; + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) + kfree(s->obj_hash); + else + vfree(s->obj_hash); + FOR_EACH_NESTED_SUBJECT_END(s) + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE) + kfree(r->subj_hash); + else + vfree(r->subj_hash); + r->subj_hash = NULL; + FOR_EACH_ROLE_END(r,i) + + acl_free_all(); + + if (acl_role_set.r_hash) { + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <= + PAGE_SIZE) + kfree(acl_role_set.r_hash); + else + vfree(acl_role_set.r_hash); + } + if (name_set.n_hash) { + if ((name_set.n_size * sizeof (struct name_entry *)) <= + PAGE_SIZE) + kfree(name_set.n_hash); + else + vfree(name_set.n_hash); + } + + if (inodev_set.i_hash) { + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <= + PAGE_SIZE) + kfree(inodev_set.i_hash); + else + vfree(inodev_set.i_hash); + } + + gr_free_uidset(); + + memset(&name_set, 0, sizeof (struct name_db)); + memset(&inodev_set, 0, sizeof (struct inodev_db)); + memset(&acl_role_set, 0, sizeof (struct acl_role_db)); + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db)); + + default_role = NULL; + + return; +} + +static __u32 +count_user_objs(struct acl_object_label *userp) +{ + struct acl_object_label o_tmp; + __u32 num = 0; + + while (userp) { + if (copy_from_user(&o_tmp, userp, + sizeof (struct acl_object_label))) + break; + + userp = o_tmp.prev; + num++; + } + + return num; +} + +static struct acl_subject_label * +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role); + +static int +copy_user_glob(struct acl_object_label *obj) +{ + struct acl_object_label *g_tmp, **guser; + unsigned int len; + char *tmp; + + if (obj->globbed == NULL) + return 0; + + guser = &obj->globbed; + while (*guser) { + g_tmp = (struct acl_object_label *) + acl_alloc(sizeof (struct acl_object_label)); + if (g_tmp == NULL) + return -ENOMEM; + + if (copy_from_user(g_tmp, *guser, + sizeof (struct acl_object_label))) + return -EFAULT; + + len = 
strnlen_user(g_tmp->filename, PATH_MAX); + + if (!len || len >= PATH_MAX) + return -EINVAL; + + if ((tmp = (char *) acl_alloc(len)) == NULL) + return -ENOMEM; + + if (copy_from_user(tmp, g_tmp->filename, len)) + return -EFAULT; + + g_tmp->filename = tmp; + + *guser = g_tmp; + guser = &(g_tmp->next); + } + + return 0; +} + +static int +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj, + struct acl_role_label *role) +{ + struct acl_object_label *o_tmp; + unsigned int len; + int ret; + char *tmp; + + while (userp) { + if ((o_tmp = (struct acl_object_label *) + acl_alloc(sizeof (struct acl_object_label))) == NULL) + return -ENOMEM; + + if (copy_from_user(o_tmp, userp, + sizeof (struct acl_object_label))) + return -EFAULT; + + userp = o_tmp->prev; + + len = strnlen_user(o_tmp->filename, PATH_MAX); + + if (!len || len >= PATH_MAX) + return -EINVAL; + + if ((tmp = (char *) acl_alloc(len)) == NULL) + return -ENOMEM; + + if (copy_from_user(tmp, o_tmp->filename, len)) + return -EFAULT; + + o_tmp->filename = tmp; + + insert_acl_obj_label(o_tmp, subj); + if (!insert_name_entry(o_tmp->filename, o_tmp->inode, + o_tmp->device)) + return -ENOMEM; + + ret = copy_user_glob(o_tmp); + if (ret) + return ret; + + if (o_tmp->nested) { + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role); + if (IS_ERR(o_tmp->nested)) + return PTR_ERR(o_tmp->nested); + + /* insert into nested subject list */ + o_tmp->nested->next = role->hash->first; + role->hash->first = o_tmp->nested; + } + } + + return 0; +} + +static __u32 +count_user_subjs(struct acl_subject_label *userp) +{ + struct acl_subject_label s_tmp; + __u32 num = 0; + + while (userp) { + if (copy_from_user(&s_tmp, userp, + sizeof (struct acl_subject_label))) + break; + + userp = s_tmp.prev; + /* do not count nested subjects against this count, since + they are not included in the hash table, but are + attached to objects. 
We have already counted + the subjects in userspace for the allocation + stack + */ + if (!(s_tmp.mode & GR_NESTED)) + num++; + } + + return num; +} + +static int +copy_user_allowedips(struct acl_role_label *rolep) +{ + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast; + + ruserip = rolep->allowed_ips; + + while (ruserip) { + rlast = rtmp; + + if ((rtmp = (struct role_allowed_ip *) + acl_alloc(sizeof (struct role_allowed_ip))) == NULL) + return -ENOMEM; + + if (copy_from_user(rtmp, ruserip, + sizeof (struct role_allowed_ip))) + return -EFAULT; + + ruserip = rtmp->prev; + + if (!rlast) { + rtmp->prev = NULL; + rolep->allowed_ips = rtmp; + } else { + rlast->next = rtmp; + rtmp->prev = rlast; + } + + if (!ruserip) + rtmp->next = NULL; + } + + return 0; +} + +static int +copy_user_transitions(struct acl_role_label *rolep) +{ + struct role_transition *rusertp, *rtmp = NULL, *rlast; + + unsigned int len; + char *tmp; + + rusertp = rolep->transitions; + + while (rusertp) { + rlast = rtmp; + + if ((rtmp = (struct role_transition *) + acl_alloc(sizeof (struct role_transition))) == NULL) + return -ENOMEM; + + if (copy_from_user(rtmp, rusertp, + sizeof (struct role_transition))) + return -EFAULT; + + rusertp = rtmp->prev; + + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN); + + if (!len || len >= GR_SPROLE_LEN) + return -EINVAL; + + if ((tmp = (char *) acl_alloc(len)) == NULL) + return -ENOMEM; + + if (copy_from_user(tmp, rtmp->rolename, len)) + return -EFAULT; + + rtmp->rolename = tmp; + + if (!rlast) { + rtmp->prev = NULL; + rolep->transitions = rtmp; + } else { + rlast->next = rtmp; + rtmp->prev = rlast; + } + + if (!rusertp) + rtmp->next = NULL; + } + + return 0; +} + +static struct acl_subject_label * +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role) +{ + struct acl_subject_label *s_tmp = NULL, *s_tmp2; + unsigned int len; + char *tmp; + __u32 num_objs; + struct acl_ip_label **i_tmp, *i_utmp2; + struct gr_hash_struct ghash; + struct subject_map *subjmap; + unsigned int i_num; + int err; + + s_tmp = lookup_subject_map(userp); + + /* we've already copied this subject into the kernel, just return + the reference to it, and don't copy it over again + */ + if (s_tmp) + return(s_tmp); + + if ((s_tmp = (struct acl_subject_label *) + acl_alloc(sizeof (struct acl_subject_label))) == NULL) + return ERR_PTR(-ENOMEM); + + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL); + if (subjmap == NULL) + return ERR_PTR(-ENOMEM); + + subjmap->user = userp; + subjmap->kernel = s_tmp; + insert_subj_map_entry(subjmap); + + if (copy_from_user(s_tmp, userp, + sizeof (struct acl_subject_label))) + return ERR_PTR(-EFAULT); + + len = strnlen_user(s_tmp->filename, PATH_MAX); + + if (!len || len >= PATH_MAX) + return ERR_PTR(-EINVAL); + + if ((tmp = (char *) acl_alloc(len)) == NULL) + return ERR_PTR(-ENOMEM); + + if (copy_from_user(tmp, s_tmp->filename, len)) + return ERR_PTR(-EFAULT); + + s_tmp->filename = tmp; + + if (!strcmp(s_tmp->filename, "/")) + role->root_label = s_tmp; + + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct))) + return ERR_PTR(-EFAULT); + + /* copy user and group transition tables */ + + if (s_tmp->user_trans_num) { + uid_t *uidlist; + + uidlist = (uid_t *)acl_alloc(s_tmp->user_trans_num * sizeof(uid_t)); + if (uidlist == NULL) + return ERR_PTR(-ENOMEM); + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t))) + return ERR_PTR(-EFAULT); + + s_tmp->user_transitions = uidlist; + } 
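/*
 * Editor's illustration (not part of the patch): copy_user_allowedips() and
 * copy_user_transitions() above both follow the same pattern -- walk a
 * userspace singly linked list through its ->prev pointers, copy each node
 * into kernel memory obtained from acl_alloc(), and rebuild the links on the
 * kernel side.  A hedged userspace model of that copy loop follows; the
 * demo_* names are hypothetical stand-ins for the kernel helpers.
 */
#include <stdlib.h>
#include <string.h>

struct demo_node {
	int value;
	struct demo_node *prev;	/* link used while the list lives in "userspace" */
	struct demo_node *next;	/* link rebuilt on the copied side */
};

/* stand-in for copy_from_user(): here just a memcpy that cannot fault */
static int demo_copy(struct demo_node *dst, const struct demo_node *src)
{
	memcpy(dst, src, sizeof(*dst));
	return 0;
}

struct demo_node *copy_chain(const struct demo_node *user_head)
{
	struct demo_node *head = NULL, *last = NULL, *tmp;

	while (user_head) {
		tmp = malloc(sizeof(*tmp));	/* acl_alloc() in the patch */
		if (!tmp || demo_copy(tmp, user_head))
			return NULL;		/* the patch returns -ENOMEM/-EFAULT */

		user_head = tmp->prev;		/* advance via the copied prev pointer */

		if (!last) {			/* first node becomes the new head */
			tmp->prev = NULL;
			head = tmp;
		} else {			/* later nodes hang off the previous one */
			last->next = tmp;
			tmp->prev = last;
		}
		last = tmp;

		if (!user_head)
			tmp->next = NULL;	/* terminate the rebuilt list */
	}
	return head;
}

int main(void)
{
	/* a two-node "userspace" list linked through ->prev, as the patch expects */
	struct demo_node n2 = { 2, NULL, NULL };
	struct demo_node n1 = { 1, &n2, NULL };
	struct demo_node *copy = copy_chain(&n1);

	return copy && copy->value == 1 && copy->next->value == 2 ? 0 : 1;
}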
+ + if (s_tmp->group_trans_num) { + gid_t *gidlist; + + gidlist = (gid_t *)acl_alloc(s_tmp->group_trans_num * sizeof(gid_t)); + if (gidlist == NULL) + return ERR_PTR(-ENOMEM); + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t))) + return ERR_PTR(-EFAULT); + + s_tmp->group_transitions = gidlist; + } + + /* set up object hash table */ + num_objs = count_user_objs(ghash.first); + + s_tmp->obj_hash_size = num_objs; + s_tmp->obj_hash = + (struct acl_object_label **) + create_table(&(s_tmp->obj_hash_size), sizeof(void *)); + + if (!s_tmp->obj_hash) + return ERR_PTR(-ENOMEM); + + memset(s_tmp->obj_hash, 0, + s_tmp->obj_hash_size * + sizeof (struct acl_object_label *)); + + /* add in objects */ + err = copy_user_objs(ghash.first, s_tmp, role); + + if (err) + return ERR_PTR(err); + + /* set pointer for parent subject */ + if (s_tmp->parent_subject) { + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role); + + if (IS_ERR(s_tmp2)) + return s_tmp2; + + s_tmp->parent_subject = s_tmp2; + } + + /* add in ip acls */ + + if (!s_tmp->ip_num) { + s_tmp->ips = NULL; + goto insert; + } + + i_tmp = + (struct acl_ip_label **) acl_alloc(s_tmp->ip_num * + sizeof (struct + acl_ip_label *)); + + if (!i_tmp) + return ERR_PTR(-ENOMEM); + + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) { + *(i_tmp + i_num) = + (struct acl_ip_label *) + acl_alloc(sizeof (struct acl_ip_label)); + if (!*(i_tmp + i_num)) + return ERR_PTR(-ENOMEM); + + if (copy_from_user + (&i_utmp2, s_tmp->ips + i_num, + sizeof (struct acl_ip_label *))) + return ERR_PTR(-EFAULT); + + if (copy_from_user + (*(i_tmp + i_num), i_utmp2, + sizeof (struct acl_ip_label))) + return ERR_PTR(-EFAULT); + + if ((*(i_tmp + i_num))->iface == NULL) + continue; + + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ); + if (!len || len >= IFNAMSIZ) + return ERR_PTR(-EINVAL); + tmp = acl_alloc(len); + if (tmp == NULL) + return ERR_PTR(-ENOMEM); + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len)) + return ERR_PTR(-EFAULT); + (*(i_tmp + i_num))->iface = tmp; + } + + s_tmp->ips = i_tmp; + +insert: + if (!insert_name_entry(s_tmp->filename, s_tmp->inode, + s_tmp->device)) + return ERR_PTR(-ENOMEM); + + return s_tmp; +} + +static int +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role) +{ + struct acl_subject_label s_pre; + struct acl_subject_label * ret; + int err; + + while (userp) { + if (copy_from_user(&s_pre, userp, + sizeof (struct acl_subject_label))) + return -EFAULT; + + /* do not add nested subjects here, add + while parsing objects + */ + + if (s_pre.mode & GR_NESTED) { + userp = s_pre.prev; + continue; + } + + ret = do_copy_user_subj(userp, role); + + err = PTR_ERR(ret); + if (IS_ERR(ret)) + return err; + + insert_acl_subj_label(ret, role); + + userp = s_pre.prev; + } + + return 0; +} + +static int +copy_user_acl(struct gr_arg *arg) +{ + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2; + struct sprole_pw *sptmp; + struct gr_hash_struct *ghash; + uid_t *domainlist; + unsigned int r_num; + unsigned int len; + char *tmp; + int err = 0; + __u16 i; + __u32 num_subjs; + + /* we need a default and kernel role */ + if (arg->role_db.num_roles < 2) + return -EINVAL; + + /* copy special role authentication info from userspace */ + + num_sprole_pws = arg->num_sprole_pws; + acl_special_roles = (struct sprole_pw **) acl_alloc(num_sprole_pws * sizeof(struct sprole_pw *)); + + if (!acl_special_roles) { + err = -ENOMEM; + goto cleanup; + } + + for (i = 0; i < num_sprole_pws; i++) { + sptmp = 
(struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw)); + if (!sptmp) { + err = -ENOMEM; + goto cleanup; + } + if (copy_from_user(sptmp, arg->sprole_pws + i, + sizeof (struct sprole_pw))) { + err = -EFAULT; + goto cleanup; + } + + len = + strnlen_user(sptmp->rolename, GR_SPROLE_LEN); + + if (!len || len >= GR_SPROLE_LEN) { + err = -EINVAL; + goto cleanup; + } + + if ((tmp = (char *) acl_alloc(len)) == NULL) { + err = -ENOMEM; + goto cleanup; + } + + if (copy_from_user(tmp, sptmp->rolename, len)) { + err = -EFAULT; + goto cleanup; + } + +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG + printk(KERN_ALERT "Copying special role %s\n", tmp); +#endif + sptmp->rolename = tmp; + acl_special_roles[i] = sptmp; + } + + r_utmp = (struct acl_role_label **) arg->role_db.r_table; + + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) { + r_tmp = acl_alloc(sizeof (struct acl_role_label)); + + if (!r_tmp) { + err = -ENOMEM; + goto cleanup; + } + + if (copy_from_user(&r_utmp2, r_utmp + r_num, + sizeof (struct acl_role_label *))) { + err = -EFAULT; + goto cleanup; + } + + if (copy_from_user(r_tmp, r_utmp2, + sizeof (struct acl_role_label))) { + err = -EFAULT; + goto cleanup; + } + + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN); + + if (!len || len >= PATH_MAX) { + err = -EINVAL; + goto cleanup; + } + + if ((tmp = (char *) acl_alloc(len)) == NULL) { + err = -ENOMEM; + goto cleanup; + } + if (copy_from_user(tmp, r_tmp->rolename, len)) { + err = -EFAULT; + goto cleanup; + } + r_tmp->rolename = tmp; + + if (!strcmp(r_tmp->rolename, "default") + && (r_tmp->roletype & GR_ROLE_DEFAULT)) { + default_role = r_tmp; + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) { + kernel_role = r_tmp; + } + + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) { + err = -ENOMEM; + goto cleanup; + } + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) { + err = -EFAULT; + goto cleanup; + } + + r_tmp->hash = ghash; + + num_subjs = count_user_subjs(r_tmp->hash->first); + + r_tmp->subj_hash_size = num_subjs; + r_tmp->subj_hash = + (struct acl_subject_label **) + create_table(&(r_tmp->subj_hash_size), sizeof(void *)); + + if (!r_tmp->subj_hash) { + err = -ENOMEM; + goto cleanup; + } + + err = copy_user_allowedips(r_tmp); + if (err) + goto cleanup; + + /* copy domain info */ + if (r_tmp->domain_children != NULL) { + domainlist = acl_alloc(r_tmp->domain_child_num * sizeof(uid_t)); + if (domainlist == NULL) { + err = -ENOMEM; + goto cleanup; + } + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) { + err = -EFAULT; + goto cleanup; + } + r_tmp->domain_children = domainlist; + } + + err = copy_user_transitions(r_tmp); + if (err) + goto cleanup; + + memset(r_tmp->subj_hash, 0, + r_tmp->subj_hash_size * + sizeof (struct acl_subject_label *)); + + err = copy_user_subjs(r_tmp->hash->first, r_tmp); + + if (err) + goto cleanup; + + /* set nested subject list to null */ + r_tmp->hash->first = NULL; + + insert_acl_role_label(r_tmp); + } + + goto return_err; + cleanup: + free_variables(); + return_err: + return err; + +} + +static int +gracl_init(struct gr_arg *args) +{ + int error = 0; + + memcpy(gr_system_salt, args->salt, GR_SALT_LEN); + memcpy(gr_system_sum, args->sum, GR_SHA_LEN); + + if (init_variables(args)) { + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION); + error = -ENOMEM; + free_variables(); + goto out; + } + + error = copy_user_acl(args); + free_init_variables(); + if (error) { + free_variables(); + goto out; + } 
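/*
 * Editor's illustration (not part of the patch): the error paths above can
 * bail out with a single free_variables() call because every policy-load
 * allocation goes through acl_alloc() (grsecurity/gracl_alloc.c earlier in
 * this patch), which records each pointer on a stack; acl_free_all() later
 * pops and frees everything in one sweep.  A minimal userspace model of that
 * pattern follows; the tracked_* names are hypothetical.
 */
#include <stdlib.h>

#define TRACK_MAX 128

static void *track_stack[TRACK_MAX];
static unsigned int track_top;

/* allocate and remember the pointer so cleanup never needs to know about it */
static void *tracked_alloc(size_t len)
{
	void *p = malloc(len);

	if (p && track_top < TRACK_MAX) {
		track_stack[track_top++] = p;
	} else if (p) {
		free(p);	/* stack full: refuse rather than lose track of it */
		p = NULL;
	}
	return p;
}

/* one call releases every tracked allocation, newest first */
static void tracked_free_all(void)
{
	while (track_top)
		free(track_stack[--track_top]);
}

int main(void)
{
	void *a = tracked_alloc(64);
	void *b = tracked_alloc(256);

	(void)a; (void)b;
	tracked_free_all();	/* mirrors acl_free_all() on the error path */
	return 0;
}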
+ + if ((error = gr_set_acls(0))) { + free_variables(); + goto out; + } + + gr_status |= GR_READY; + out: + return error; +} + +/* derived from glibc fnmatch() 0: match, 1: no match*/ + +static int +glob_match(const char *p, const char *n) +{ + char c; + + while ((c = *p++) != '\0') { + switch (c) { + case '?': + if (*n == '\0') + return 1; + else if (*n == '/') + return 1; + break; + case '\\': + if (*n != c) + return 1; + break; + case '*': + for (c = *p++; c == '?' || c == '*'; c = *p++) { + if (*n == '/') + return 1; + else if (c == '?') { + if (*n == '\0') + return 1; + else + ++n; + } + } + if (c == '\0') { + return 0; + } else { + const char *endp; + + if ((endp = strchr(n, '/')) == NULL) + endp = n + strlen(n); + + if (c == '[') { + for (--p; n < endp; ++n) + if (!glob_match(p, n)) + return 0; + } else if (c == '/') { + while (*n != '\0' && *n != '/') + ++n; + if (*n == '/' && !glob_match(p, n + 1)) + return 0; + } else { + for (--p; n < endp; ++n) + if (*n == c && !glob_match(p, n)) + return 0; + } + + return 1; + } + case '[': + { + int not; + char cold; + + if (*n == '\0' || *n == '/') + return 1; + + not = (*p == '!' || *p == '^'); + if (not) + ++p; + + c = *p++; + for (;;) { + unsigned char fn = (unsigned char)*n; + + if (c == '\0') + return 1; + else { + if (c == fn) + goto matched; + cold = c; + c = *p++; + + if (c == '-' && *p != ']') { + unsigned char cend = *p++; + + if (cend == '\0') + return 1; + + if (cold <= fn && fn <= cend) + goto matched; + + c = *p++; + } + } + + if (c == ']') + break; + } + if (!not) + return 1; + break; + matched: + while (c != ']') { + if (c == '\0') + return 1; + + c = *p++; + } + if (not) + return 1; + } + break; + default: + if (c != *n) + return 1; + } + + ++n; + } + + if (*n == '\0') + return 0; + + if (*n == '/') + return 0; + + return 1; +} + +static struct acl_object_label * +chk_glob_label(struct acl_object_label *globbed, + struct dentry *dentry, struct vfsmount *mnt, char **path) +{ + struct acl_object_label *tmp; + + if (*path == NULL) + *path = gr_to_filename_nolock(dentry, mnt); + + tmp = globbed; + + while (tmp) { + if (!glob_match(tmp->filename, *path)) + return tmp; + tmp = tmp->next; + } + + return NULL; +} + +static struct acl_object_label * +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, + const ino_t curr_ino, const dev_t curr_dev, + const struct acl_subject_label *subj, char **path) +{ + struct acl_subject_label *tmpsubj; + struct acl_object_label *retval; + struct acl_object_label *retval2; + + tmpsubj = (struct acl_subject_label *) subj; + read_lock(&gr_inode_lock); + do { + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj); + if (retval) { + if (retval->globbed) { + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry, + (struct vfsmount *)orig_mnt, path); + if (retval2) + retval = retval2; + } + break; + } + } while ((tmpsubj = tmpsubj->parent_subject)); + read_unlock(&gr_inode_lock); + + return retval; +} + +static __inline__ struct acl_object_label * +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, + const struct dentry *curr_dentry, + const struct acl_subject_label *subj, char **path) +{ + return __full_lookup(orig_dentry, orig_mnt, + curr_dentry->d_inode->i_ino, + curr_dentry->d_inode->i_sb->s_dev, subj, path); +} + +static struct acl_object_label * +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_subject_label *subj, char *path) +{ + struct dentry *dentry = (struct dentry *) 
l_dentry; + struct vfsmount *mnt = (struct vfsmount *) l_mnt; + struct acl_object_label *retval; + + spin_lock(&dcache_lock); + + if (unlikely(mnt == shm_mnt || mnt == pipe_mnt || mnt == sock_mnt || + /* ignore Eric Biederman */ + IS_PRIVATE(l_dentry->d_inode))) { + retval = fakefs_obj; + goto out; + } + + for (;;) { + if (dentry == real_root && mnt == real_root_mnt) + break; + + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { + if (mnt->mnt_parent == mnt) + break; + + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path); + if (retval != NULL) + goto out; + + dentry = mnt->mnt_mountpoint; + mnt = mnt->mnt_parent; + continue; + } + + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path); + if (retval != NULL) + goto out; + + dentry = dentry->d_parent; + } + + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path); + + if (retval == NULL) + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path); +out: + spin_unlock(&dcache_lock); + return retval; +} + +static __inline__ struct acl_object_label * +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_subject_label *subj) +{ + char *path = NULL; + return __chk_obj_label(l_dentry, l_mnt, subj, path); +} + +static __inline__ struct acl_object_label * +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_subject_label *subj, char *path) +{ + return __chk_obj_label(l_dentry, l_mnt, subj, path); +} + +static struct acl_subject_label * +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_role_label *role) +{ + struct dentry *dentry = (struct dentry *) l_dentry; + struct vfsmount *mnt = (struct vfsmount *) l_mnt; + struct acl_subject_label *retval; + + spin_lock(&dcache_lock); + + for (;;) { + if (dentry == real_root && mnt == real_root_mnt) + break; + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { + if (mnt->mnt_parent == mnt) + break; + + read_lock(&gr_inode_lock); + retval = + lookup_acl_subj_label(dentry->d_inode->i_ino, + dentry->d_inode->i_sb->s_dev, role); + read_unlock(&gr_inode_lock); + if (retval != NULL) + goto out; + + dentry = mnt->mnt_mountpoint; + mnt = mnt->mnt_parent; + continue; + } + + read_lock(&gr_inode_lock); + retval = lookup_acl_subj_label(dentry->d_inode->i_ino, + dentry->d_inode->i_sb->s_dev, role); + read_unlock(&gr_inode_lock); + if (retval != NULL) + goto out; + + dentry = dentry->d_parent; + } + + read_lock(&gr_inode_lock); + retval = lookup_acl_subj_label(dentry->d_inode->i_ino, + dentry->d_inode->i_sb->s_dev, role); + read_unlock(&gr_inode_lock); + + if (unlikely(retval == NULL)) { + read_lock(&gr_inode_lock); + retval = lookup_acl_subj_label(real_root->d_inode->i_ino, + real_root->d_inode->i_sb->s_dev, role); + read_unlock(&gr_inode_lock); + } +out: + spin_unlock(&dcache_lock); + + return retval; +} + +static void +gr_log_learn(const struct task_struct *task, const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode) +{ + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, + task->uid, task->gid, task->exec_file ? 
gr_to_filename1(task->exec_file->f_dentry, + task->exec_file->f_vfsmnt) : task->acl->filename, task->acl->filename, + 1, 1, gr_to_filename(dentry, mnt), (unsigned long) mode, NIPQUAD(task->signal->curr_ip)); + + return; +} + +static void +gr_log_learn_sysctl(const struct task_struct *task, const char *path, const __u32 mode) +{ + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, + task->uid, task->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_dentry, + task->exec_file->f_vfsmnt) : task->acl->filename, task->acl->filename, + 1, 1, path, (unsigned long) mode, NIPQUAD(task->signal->curr_ip)); + + return; +} + +static void +gr_log_learn_id_change(const struct task_struct *task, const char type, const unsigned int real, + const unsigned int effective, const unsigned int fs) +{ + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype, + task->uid, task->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_dentry, + task->exec_file->f_vfsmnt) : task->acl->filename, task->acl->filename, + type, real, effective, fs, NIPQUAD(task->signal->curr_ip)); + + return; +} + +__u32 +gr_check_link(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const struct dentry * old_dentry, const struct vfsmount * old_mnt) +{ + struct acl_object_label *obj; + __u32 oldmode, newmode; + __u32 needmode; + + if (unlikely(!(gr_status & GR_READY))) + return (GR_CREATE | GR_LINK); + + obj = chk_obj_label(old_dentry, old_mnt, current->acl); + oldmode = obj->mode; + + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) + oldmode |= (GR_CREATE | GR_LINK); + + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS; + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) + needmode |= GR_SETID | GR_AUDIT_SETID; + + newmode = + gr_check_create(new_dentry, parent_dentry, parent_mnt, + oldmode | needmode); + + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | + GR_SETID | GR_READ | GR_FIND | GR_DELETE | + GR_INHERIT | GR_AUDIT_INHERIT); + + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID)) + goto bad; + + if ((oldmode & needmode) != needmode) + goto bad; + + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS); + if ((newmode & needmode) != needmode) + goto bad; + + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK)) + return newmode; +bad: + needmode = oldmode; + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) + needmode |= GR_SETID; + + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) { + gr_log_learn(current, old_dentry, old_mnt, needmode); + return (GR_CREATE | GR_LINK); + } else if (newmode & GR_SUPPRESS) + return GR_SUPPRESS; + else + return 0; +} + +__u32 +gr_search_file(const struct dentry * dentry, const __u32 mode, + const struct vfsmount * mnt) +{ + __u32 retval = mode; + struct acl_subject_label *curracl; + struct acl_object_label *currobj; + + if (unlikely(!(gr_status & GR_READY))) + return (mode & ~GR_AUDITS); + + curracl = current->acl; + + currobj = chk_obj_label(dentry, mnt, curracl); + retval = currobj->mode & mode; + + if (unlikely + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE) + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) { + __u32 new_mode = mode; + + new_mode &= ~(GR_AUDITS | GR_SUPPRESS); + + retval = new_mode; + + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN) + new_mode |= GR_INHERIT; + + if (!(mode & GR_NOLEARN)) + gr_log_learn(current, 
dentry, mnt, new_mode); + } + + return retval; +} + +__u32 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent, + const struct vfsmount * mnt, const __u32 mode) +{ + struct name_entry *match; + struct acl_object_label *matchpo; + struct acl_subject_label *curracl; + char *path; + __u32 retval; + + if (unlikely(!(gr_status & GR_READY))) + return (mode & ~GR_AUDITS); + + preempt_disable(); + path = gr_to_filename_rbac(new_dentry, mnt); + match = lookup_name_entry(path); + + if (!match) + goto check_parent; + + curracl = current->acl; + + read_lock(&gr_inode_lock); + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl); + read_unlock(&gr_inode_lock); + + if (matchpo) { + if ((matchpo->mode & mode) != + (mode & ~(GR_AUDITS | GR_SUPPRESS)) + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) { + __u32 new_mode = mode; + + new_mode &= ~(GR_AUDITS | GR_SUPPRESS); + + gr_log_learn(current, new_dentry, mnt, new_mode); + + preempt_enable(); + return new_mode; + } + preempt_enable(); + return (matchpo->mode & mode); + } + + check_parent: + curracl = current->acl; + + matchpo = chk_obj_create_label(parent, mnt, curracl, path); + retval = matchpo->mode & mode; + + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))) + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) { + __u32 new_mode = mode; + + new_mode &= ~(GR_AUDITS | GR_SUPPRESS); + + gr_log_learn(current, new_dentry, mnt, new_mode); + preempt_enable(); + return new_mode; + } + + preempt_enable(); + return retval; +} + +int +gr_check_hidden_task(const struct task_struct *task) +{ + if (unlikely(!(gr_status & GR_READY))) + return 0; + + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW)) + return 1; + + return 0; +} + +int +gr_check_protected_task(const struct task_struct *task) +{ + if (unlikely(!(gr_status & GR_READY) || !task)) + return 0; + + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && + task->acl != current->acl) + return 1; + + return 0; +} + +void +gr_copy_label(struct task_struct *tsk) +{ + tsk->signal->used_accept = 0; + tsk->acl_sp_role = 0; + tsk->acl_role_id = current->acl_role_id; + tsk->acl = current->acl; + tsk->role = current->role; + tsk->signal->curr_ip = current->signal->curr_ip; + if (current->exec_file) + get_file(current->exec_file); + tsk->exec_file = current->exec_file; + tsk->is_writable = current->is_writable; + if (unlikely(current->signal->used_accept)) + current->signal->curr_ip = 0; + + return; +} + +static void +gr_set_proc_res(struct task_struct *task) +{ + struct acl_subject_label *proc; + unsigned short i; + + proc = task->acl; + + if (proc->mode & (GR_LEARN | GR_INHERITLEARN)) + return; + + for (i = 0; i < (GR_NLIMITS - 1); i++) { + if (!(proc->resmask & (1 << i))) + continue; + + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur; + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max; + } + + return; +} + +int +gr_check_user_change(int real, int effective, int fs) +{ + unsigned int i; + __u16 num; + uid_t *uidlist; + int curuid; + int realok = 0; + int effectiveok = 0; + int fsok = 0; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) + gr_log_learn_id_change(current, 'u', real, effective, fs); + + num = current->acl->user_trans_num; + uidlist = current->acl->user_transitions; + + if (uidlist == NULL) + return 0; + + if (real == -1) + realok = 1; + if (effective == -1) + effectiveok = 1; + if (fs == -1) + fsok = 1; + + if 
(current->acl->user_trans_type & GR_ID_ALLOW) { + for (i = 0; i < num; i++) { + curuid = (int)uidlist[i]; + if (real == curuid) + realok = 1; + if (effective == curuid) + effectiveok = 1; + if (fs == curuid) + fsok = 1; + } + } else if (current->acl->user_trans_type & GR_ID_DENY) { + for (i = 0; i < num; i++) { + curuid = (int)uidlist[i]; + if (real == curuid) + break; + if (effective == curuid) + break; + if (fs == curuid) + break; + } + /* not in deny list */ + if (i == num) { + realok = 1; + effectiveok = 1; + fsok = 1; + } + } + + if (realok && effectiveok && fsok) + return 0; + else { + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real); + return 1; + } +} + +int +gr_check_group_change(int real, int effective, int fs) +{ + unsigned int i; + __u16 num; + gid_t *gidlist; + int curgid; + int realok = 0; + int effectiveok = 0; + int fsok = 0; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) + gr_log_learn_id_change(current, 'g', real, effective, fs); + + num = current->acl->group_trans_num; + gidlist = current->acl->group_transitions; + + if (gidlist == NULL) + return 0; + + if (real == -1) + realok = 1; + if (effective == -1) + effectiveok = 1; + if (fs == -1) + fsok = 1; + + if (current->acl->group_trans_type & GR_ID_ALLOW) { + for (i = 0; i < num; i++) { + curgid = (int)gidlist[i]; + if (real == curgid) + realok = 1; + if (effective == curgid) + effectiveok = 1; + if (fs == curgid) + fsok = 1; + } + } else if (current->acl->group_trans_type & GR_ID_DENY) { + for (i = 0; i < num; i++) { + curgid = (int)gidlist[i]; + if (real == curgid) + break; + if (effective == curgid) + break; + if (fs == curgid) + break; + } + /* not in deny list */ + if (i == num) { + realok = 1; + effectiveok = 1; + fsok = 1; + } + } + + if (realok && effectiveok && fsok) + return 0; + else { + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 
0 : fs) : effective) : real); + return 1; + } +} + +void +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid) +{ + struct acl_role_label *role = task->role; + struct acl_subject_label *subj = NULL; + struct acl_object_label *obj; + struct file *filp; + + if (unlikely(!(gr_status & GR_READY))) + return; + + filp = task->exec_file; + + /* kernel process, we'll give them the kernel role */ + if (unlikely(!filp)) { + task->role = kernel_role; + task->acl = kernel_role->root_label; + return; + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) + role = lookup_acl_role_label(task, uid, gid); + + /* perform subject lookup in possibly new role + we can use this result below in the case where role == task->role + */ + subj = chk_subj_label(filp->f_dentry, filp->f_vfsmnt, role); + + /* if we changed uid/gid, but result in the same role + and are using inheritance, don't lose the inherited subject + if current subject is other than what normal lookup + would result in, we arrived via inheritance, don't + lose subject + */ + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) && + (subj == task->acl))) + task->acl = subj; + + task->role = role; + + task->is_writable = 0; + + /* ignore additional mmap checks for processes that are writable + by the default ACL */ + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); +#endif + + gr_set_proc_res(task); + + return; +} + +int +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt) +{ + struct task_struct *task = current; + struct acl_subject_label *newacl; + struct acl_object_label *obj; + __u32 retmode; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + newacl = chk_subj_label(dentry, mnt, task->role); + + task_lock(task); + if (((task->ptrace & PT_PTRACED) && !(task->acl->mode & + GR_POVERRIDE) && (task->acl != newacl) && + !(task->role->roletype & GR_ROLE_GOD) && + !gr_search_file(dentry, GR_PTRACERD, mnt) && + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) || + (atomic_read(&task->fs->count) > 1 || + atomic_read(&task->files->count) > 1 || + atomic_read(&task->sighand->count) > 1)) { + task_unlock(task); + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt); + return -EACCES; + } + task_unlock(task); + + obj = chk_obj_label(dentry, mnt, task->acl); + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT); + + if (!(task->acl->mode & GR_INHERITLEARN) && + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) { + if (obj->nested) + task->acl = obj->nested; + else + task->acl = newacl; + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT) + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt); + + task->is_writable = 0; + + /* ignore additional mmap checks for processes that are writable + by the default ACL */ + obj = chk_obj_label(dentry, mnt, default_role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + obj = chk_obj_label(dentry, mnt, task->role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + + gr_set_proc_res(task); + +#ifdef 
CONFIG_GRKERNSEC_ACL_DEBUG + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); +#endif + return 0; +} + +static void +do_handle_delete(const ino_t ino, const dev_t dev) +{ + struct acl_object_label *matchpo; + struct acl_subject_label *matchps; + struct acl_subject_label *subj; + struct acl_role_label *role; + unsigned int i, x; + + FOR_EACH_ROLE_START(role, i) + FOR_EACH_SUBJECT_START(role, subj, x) + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL) + matchpo->mode |= GR_DELETED; + FOR_EACH_SUBJECT_END(subj,x) + FOR_EACH_NESTED_SUBJECT_START(role, subj) + if (subj->inode == ino && subj->device == dev) + subj->mode |= GR_DELETED; + FOR_EACH_NESTED_SUBJECT_END(subj) + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL) + matchps->mode |= GR_DELETED; + FOR_EACH_ROLE_END(role,i) + + return; +} + +void +gr_handle_delete(const ino_t ino, const dev_t dev) +{ + if (unlikely(!(gr_status & GR_READY))) + return; + + write_lock(&gr_inode_lock); + if (unlikely((unsigned long)lookup_inodev_entry(ino, dev))) + do_handle_delete(ino, dev); + write_unlock(&gr_inode_lock); + + return; +} + +static void +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice, + const ino_t newinode, const dev_t newdevice, + struct acl_subject_label *subj) +{ + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size); + struct acl_object_label *match; + + match = subj->obj_hash[index]; + + while (match && (match->inode != oldinode || + match->device != olddevice || + !(match->mode & GR_DELETED))) + match = match->next; + + if (match && (match->inode == oldinode) + && (match->device == olddevice) + && (match->mode & GR_DELETED)) { + if (match->prev == NULL) { + subj->obj_hash[index] = match->next; + if (match->next != NULL) + match->next->prev = NULL; + } else { + match->prev->next = match->next; + if (match->next != NULL) + match->next->prev = match->prev; + } + match->prev = NULL; + match->next = NULL; + match->inode = newinode; + match->device = newdevice; + match->mode &= ~GR_DELETED; + + insert_acl_obj_label(match, subj); + } + + return; +} + +static void +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice, + const ino_t newinode, const dev_t newdevice, + struct acl_role_label *role) +{ + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size); + struct acl_subject_label *match; + + match = role->subj_hash[index]; + + while (match && (match->inode != oldinode || + match->device != olddevice || + !(match->mode & GR_DELETED))) + match = match->next; + + if (match && (match->inode == oldinode) + && (match->device == olddevice) + && (match->mode & GR_DELETED)) { + if (match->prev == NULL) { + role->subj_hash[index] = match->next; + if (match->next != NULL) + match->next->prev = NULL; + } else { + match->prev->next = match->next; + if (match->next != NULL) + match->next->prev = match->prev; + } + match->prev = NULL; + match->next = NULL; + match->inode = newinode; + match->device = newdevice; + match->mode &= ~GR_DELETED; + + insert_acl_subj_label(match, role); + } + + return; +} + +static void +update_inodev_entry(const ino_t oldinode, const dev_t olddevice, + const ino_t newinode, const dev_t newdevice) +{ + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size); + struct inodev_entry *match; + + match = inodev_set.i_hash[index]; + + while (match && (match->nentry->inode != oldinode || + match->nentry->device != olddevice)) + match = match->next; + + 
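/*
 * Editor's illustration (not part of the patch): update_acl_obj_label(),
 * update_acl_subj_label() and update_inodev_entry() nearby all perform the
 * same move when a tracked file reappears under a new inode/device pair --
 * find the node in its old hash bucket, unlink it from that doubly linked
 * chain, rewrite its key, and push it onto the head of the bucket the new key
 * hashes to.  A compact userspace sketch of that move follows; the demo_*
 * types are hypothetical (the real code keys on (ino_t, dev_t) via fhash()).
 */
#include <stddef.h>

struct demo_entry {
	unsigned long key;
	struct demo_entry *prev, *next;
};

#define DEMO_BUCKETS 7	/* stand-in for the prime bucket counts used above */

static unsigned int demo_hash(unsigned long key)
{
	return key % DEMO_BUCKETS;
}

void demo_rekey(struct demo_entry *table[DEMO_BUCKETS],
		struct demo_entry *match, unsigned long newkey)
{
	unsigned int index = demo_hash(match->key);

	/* unlink from the old bucket, fixing up the bucket head if needed */
	if (match->prev == NULL) {
		table[index] = match->next;
		if (match->next)
			match->next->prev = NULL;
	} else {
		match->prev->next = match->next;
		if (match->next)
			match->next->prev = match->prev;
	}

	/* rewrite the key and reinsert at the head of the new bucket */
	match->key = newkey;
	index = demo_hash(newkey);
	match->prev = NULL;
	match->next = table[index];
	if (table[index])
		table[index]->prev = match;
	table[index] = match;
}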
if (match && (match->nentry->inode == oldinode) + && (match->nentry->device == olddevice)) { + if (match->prev == NULL) { + inodev_set.i_hash[index] = match->next; + if (match->next != NULL) + match->next->prev = NULL; + } else { + match->prev->next = match->next; + if (match->next != NULL) + match->next->prev = match->prev; + } + match->prev = NULL; + match->next = NULL; + match->nentry->inode = newinode; + match->nentry->device = newdevice; + + insert_inodev_entry(match); + } + + return; +} + +static void +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry, + const struct vfsmount *mnt) +{ + struct acl_subject_label *subj; + struct acl_role_label *role; + unsigned int i, x; + + FOR_EACH_ROLE_START(role, i) + update_acl_subj_label(matchn->inode, matchn->device, + dentry->d_inode->i_ino, + dentry->d_inode->i_sb->s_dev, role); + + FOR_EACH_NESTED_SUBJECT_START(role, subj) + if ((subj->inode == dentry->d_inode->i_ino) && + (subj->device == dentry->d_inode->i_sb->s_dev)) { + subj->inode = dentry->d_inode->i_ino; + subj->device = dentry->d_inode->i_sb->s_dev; + } + FOR_EACH_NESTED_SUBJECT_END(subj) + FOR_EACH_SUBJECT_START(role, subj, x) + update_acl_obj_label(matchn->inode, matchn->device, + dentry->d_inode->i_ino, + dentry->d_inode->i_sb->s_dev, subj); + FOR_EACH_SUBJECT_END(subj,x) + FOR_EACH_ROLE_END(role,i) + + update_inodev_entry(matchn->inode, matchn->device, + dentry->d_inode->i_ino, dentry->d_inode->i_sb->s_dev); + + return; +} + +void +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) +{ + struct name_entry *matchn; + + if (unlikely(!(gr_status & GR_READY))) + return; + + preempt_disable(); + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt)); + + if (unlikely((unsigned long)matchn)) { + write_lock(&gr_inode_lock); + do_handle_create(matchn, dentry, mnt); + write_unlock(&gr_inode_lock); + } + preempt_enable(); + + return; +} + +void +gr_handle_rename(struct inode *old_dir, struct inode *new_dir, + struct dentry *old_dentry, + struct dentry *new_dentry, + struct vfsmount *mnt, const __u8 replace) +{ + struct name_entry *matchn; + + if (unlikely(!(gr_status & GR_READY))) + return; + + preempt_disable(); + matchn = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt)); + + /* we wouldn't have to check d_inode if it weren't for + NFS silly-renaming + */ + + write_lock(&gr_inode_lock); + if (unlikely(replace && new_dentry->d_inode)) { + if (unlikely(lookup_inodev_entry(new_dentry->d_inode->i_ino, + new_dentry->d_inode->i_sb->s_dev) && + (old_dentry->d_inode->i_nlink <= 1))) + do_handle_delete(new_dentry->d_inode->i_ino, + new_dentry->d_inode->i_sb->s_dev); + } + + if (unlikely(lookup_inodev_entry(old_dentry->d_inode->i_ino, + old_dentry->d_inode->i_sb->s_dev) && + (old_dentry->d_inode->i_nlink <= 1))) + do_handle_delete(old_dentry->d_inode->i_ino, + old_dentry->d_inode->i_sb->s_dev); + + if (unlikely((unsigned long)matchn)) + do_handle_create(matchn, old_dentry, mnt); + + write_unlock(&gr_inode_lock); + preempt_enable(); + + return; +} + +static int +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt, + unsigned char **sum) +{ + struct acl_role_label *r; + struct role_allowed_ip *ipp; + struct role_transition *trans; + unsigned int i; + int found = 0; + + /* check transition table */ + + for (trans = current->role->transitions; trans; trans = trans->next) { + if (!strcmp(rolename, trans->rolename)) { + found = 1; + break; + } + } + + if (!found) + return 0; + + /* handle special roles that do not 
require authentication + and check ip */ + + FOR_EACH_ROLE_START(r, i) + if (!strcmp(rolename, r->rolename) && + (r->roletype & GR_ROLE_SPECIAL)) { + found = 0; + if (r->allowed_ips != NULL) { + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) { + if ((ntohl(current->signal->curr_ip) & ipp->netmask) == + (ntohl(ipp->addr) & ipp->netmask)) + found = 1; + } + } else + found = 2; + if (!found) + return 0; + + if (((mode == SPROLE) && (r->roletype & GR_ROLE_NOPW)) || + ((mode == SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) { + *salt = NULL; + *sum = NULL; + return 1; + } + } + FOR_EACH_ROLE_END(r,i) + + for (i = 0; i < num_sprole_pws; i++) { + if (!strcmp(rolename, acl_special_roles[i]->rolename)) { + *salt = acl_special_roles[i]->salt; + *sum = acl_special_roles[i]->sum; + return 1; + } + } + + return 0; +} + +static void +assign_special_role(char *rolename) +{ + struct acl_object_label *obj; + struct acl_role_label *r; + struct acl_role_label *assigned = NULL; + struct task_struct *tsk; + struct file *filp; + unsigned int i; + + FOR_EACH_ROLE_START(r, i) + if (!strcmp(rolename, r->rolename) && + (r->roletype & GR_ROLE_SPECIAL)) + assigned = r; + FOR_EACH_ROLE_END(r,i) + + if (!assigned) + return; + + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + + tsk = current->parent; + if (tsk == NULL) + goto out_unlock; + + filp = tsk->exec_file; + if (filp == NULL) + goto out_unlock; + + tsk->is_writable = 0; + + tsk->acl_sp_role = 1; + tsk->acl_role_id = ++acl_sp_role_value; + tsk->role = assigned; + tsk->acl = chk_subj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role); + + /* ignore additional mmap checks for processes that are writable + by the default ACL */ + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + tsk->is_writable = 1; + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + tsk->is_writable = 1; + +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid); +#endif + +out_unlock: + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + return; +} + +int gr_check_secure_terminal(struct task_struct *task) +{ + struct task_struct *p, *p2, *p3; + struct files_struct *files; + struct fdtable *fdt; + struct file *our_file = NULL, *file; + int i; + + if (task->signal->tty == NULL) + return 1; + + files = get_files_struct(task); + if (files != NULL) { + rcu_read_lock(); + fdt = files_fdtable(files); + for (i=0; i < fdt->max_fds; i++) { + file = fcheck_files(files, i); + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) { + get_file(file); + our_file = file; + } + } + rcu_read_unlock(); + put_files_struct(files); + } + + if (our_file == NULL) + return 1; + + read_lock(&tasklist_lock); + do_each_thread(p2, p) { + files = get_files_struct(p); + if (files == NULL || + (p->signal && p->signal->tty == task->signal->tty)) { + if (files != NULL) + put_files_struct(files); + continue; + } + rcu_read_lock(); + fdt = files_fdtable(files); + for (i=0; i < fdt->max_fds; i++) { + file = fcheck_files(files, i); + if (file && S_ISCHR(file->f_dentry->d_inode->i_mode) && + file->f_dentry->d_inode->i_rdev == our_file->f_dentry->d_inode->i_rdev) { + p3 = task; + while (p3->pid > 0) { + if (p3 == p) + break; + p3 = p3->parent; + } + if (p3 == p) + break; + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, 
p); + gr_handle_alertkill(p); + rcu_read_unlock(); + put_files_struct(files); + read_unlock(&tasklist_lock); + fput(our_file); + return 0; + } + } + rcu_read_unlock(); + put_files_struct(files); + } while_each_thread(p2, p); + read_unlock(&tasklist_lock); + + fput(our_file); + return 1; +} + +ssize_t +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos) +{ + struct gr_arg_wrapper uwrap; + unsigned char *sprole_salt; + unsigned char *sprole_sum; + int error = sizeof (struct gr_arg_wrapper); + int error2 = 0; + + down(&gr_dev_sem); + + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) { + error = -EPERM; + goto out; + } + + if (count != sizeof (struct gr_arg_wrapper)) { + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper)); + error = -EINVAL; + goto out; + } + + + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) { + gr_auth_expires = 0; + gr_auth_attempts = 0; + } + + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) { + error = -EFAULT; + goto out; + } + + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) { + error = -EINVAL; + goto out; + } + + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) { + error = -EFAULT; + goto out; + } + + if (gr_usermode->mode != SPROLE && gr_usermode->mode != SPROLEPAM && + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && + time_after(gr_auth_expires, get_seconds())) { + error = -EBUSY; + goto out; + } + + /* if non-root trying to do anything other than use a special role, + do not attempt authentication, do not count towards authentication + locking + */ + + if (gr_usermode->mode != SPROLE && gr_usermode->mode != STATUS && + gr_usermode->mode != UNSPROLE && gr_usermode->mode != SPROLEPAM && + current->uid) { + error = -EPERM; + goto out; + } + + /* ensure pw and special role name are null terminated */ + + gr_usermode->pw[GR_PW_LEN - 1] = '\0'; + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0'; + + /* Okay. + * We have our enough of the argument structure..(we have yet + * to copy_from_user the tables themselves) . Copy the tables + * only if we need them, i.e. for loading operations. 
*/ + + switch (gr_usermode->mode) { + case STATUS: + if (gr_status & GR_READY) { + error = 1; + if (!gr_check_secure_terminal(current)) + error = 3; + } else + error = 2; + goto out; + case SHUTDOWN: + if ((gr_status & GR_READY) + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { + gr_status &= ~GR_READY; + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG); + free_variables(); + memset(gr_usermode, 0, sizeof (struct gr_arg)); + memset(gr_system_salt, 0, GR_SALT_LEN); + memset(gr_system_sum, 0, GR_SHA_LEN); + } else if (gr_status & GR_READY) { + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG); + error = -EPERM; + } else { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG); + error = -EAGAIN; + } + break; + case ENABLE: + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode))) + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION); + else { + if (gr_status & GR_READY) + error = -EAGAIN; + else + error = error2; + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION); + } + break; + case RELOAD: + if (!(gr_status & GR_READY)) { + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION); + error = -EAGAIN; + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { + lock_kernel(); + gr_status &= ~GR_READY; + free_variables(); + if (!(error2 = gracl_init(gr_usermode))) { + unlock_kernel(); + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION); + } else { + unlock_kernel(); + error = error2; + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); + } + } else { + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); + error = -EPERM; + } + break; + case SEGVMOD: + if (unlikely(!(gr_status & GR_READY))) { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG); + error = -EAGAIN; + break; + } + + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG); + if (gr_usermode->segv_device && gr_usermode->segv_inode) { + struct acl_subject_label *segvacl; + segvacl = + lookup_acl_subj_label(gr_usermode->segv_inode, + gr_usermode->segv_device, + current->role); + if (segvacl) { + segvacl->crashes = 0; + segvacl->expires = 0; + } + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) { + gr_remove_uid(gr_usermode->segv_uid); + } + } else { + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG); + error = -EPERM; + } + break; + case SPROLE: + case SPROLEPAM: + if (unlikely(!(gr_status & GR_READY))) { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG); + error = -EAGAIN; + break; + } + + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) { + current->role->expires = 0; + current->role->auth_attempts = 0; + } + + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && + time_after(current->role->expires, get_seconds())) { + error = -EBUSY; + goto out; + } + + if (lookup_special_role_auth + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum) + && ((!sprole_salt && !sprole_sum) + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) { + char *p = ""; + assign_special_role(gr_usermode->sp_role); + read_lock(&tasklist_lock); + if (current->parent) + p = current->parent->role->rolename; + read_unlock(&tasklist_lock); + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG, + p, acl_sp_role_value); + } else { + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role); + error = -EPERM; + if(!(current->role->auth_attempts++)) + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; + + goto 
out; + } + break; + case UNSPROLE: + if (unlikely(!(gr_status & GR_READY))) { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG); + error = -EAGAIN; + break; + } + + if (current->role->roletype & GR_ROLE_SPECIAL) { + char *p = ""; + int i = 0; + + read_lock(&tasklist_lock); + if (current->parent) { + p = current->parent->role->rolename; + i = current->parent->acl_role_id; + } + read_unlock(&tasklist_lock); + + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i); + gr_set_acls(1); + } else { + gr_log_str(GR_DONT_AUDIT, GR_UNSPROLEF_ACL_MSG, current->role->rolename); + error = -EPERM; + goto out; + } + break; + default: + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode); + error = -EINVAL; + break; + } + + if (error != -EPERM) + goto out; + + if(!(gr_auth_attempts++)) + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; + + out: + up(&gr_dev_sem); + return error; +} + +int +gr_set_acls(const int type) +{ + struct acl_object_label *obj; + struct task_struct *task, *task2; + struct file *filp; + struct acl_role_label *role = current->role; + __u16 acl_role_id = current->acl_role_id; + + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + do_each_thread(task2, task) { + /* check to see if we're called from the exit handler, + if so, only replace ACLs that have inherited the admin + ACL */ + + if (type && (task->role != role || + task->acl_role_id != acl_role_id)) + continue; + + task->acl_role_id = 0; + task->acl_sp_role = 0; + + if ((filp = task->exec_file)) { + task->role = lookup_acl_role_label(task, task->uid, task->gid); + + task->acl = + chk_subj_label(filp->f_dentry, filp->f_vfsmnt, + task->role); + if (task->acl) { + struct acl_subject_label *curr; + curr = task->acl; + + task->is_writable = 0; + /* ignore additional mmap checks for processes that are writable + by the default ACL */ + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + + gr_set_proc_res(task); + +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); +#endif + } else { + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid); + return 1; + } + } else { + // it's a kernel process + task->role = kernel_role; + task->acl = kernel_role->root_label; +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN + task->acl->mode &= ~GR_PROCFIND; +#endif + } + } while_each_thread(task2, task); + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + return 0; +} + +void +gr_learn_resource(const struct task_struct *task, + const int res, const unsigned long wanted, const int gt) +{ + struct acl_subject_label *acl; + + if (unlikely((gr_status & GR_READY) && + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) + goto skip_reslog; + +#ifdef CONFIG_GRKERNSEC_RESLOG + gr_log_resource(task, res, wanted, gt); +#endif + skip_reslog: + + if (unlikely(!(gr_status & GR_READY) || !wanted)) + return; + + acl = task->acl; + + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) || + !(acl->resmask & (1 << (unsigned short) res)))) + return; + + if (wanted >= acl->res[res].rlim_cur) { + unsigned long res_add; + + res_add = wanted; + switch 
(res) { + case RLIMIT_CPU: + res_add += GR_RLIM_CPU_BUMP; + break; + case RLIMIT_FSIZE: + res_add += GR_RLIM_FSIZE_BUMP; + break; + case RLIMIT_DATA: + res_add += GR_RLIM_DATA_BUMP; + break; + case RLIMIT_STACK: + res_add += GR_RLIM_STACK_BUMP; + break; + case RLIMIT_CORE: + res_add += GR_RLIM_CORE_BUMP; + break; + case RLIMIT_RSS: + res_add += GR_RLIM_RSS_BUMP; + break; + case RLIMIT_NPROC: + res_add += GR_RLIM_NPROC_BUMP; + break; + case RLIMIT_NOFILE: + res_add += GR_RLIM_NOFILE_BUMP; + break; + case RLIMIT_MEMLOCK: + res_add += GR_RLIM_MEMLOCK_BUMP; + break; + case RLIMIT_AS: + res_add += GR_RLIM_AS_BUMP; + break; + case RLIMIT_LOCKS: + res_add += GR_RLIM_LOCKS_BUMP; + break; + } + + acl->res[res].rlim_cur = res_add; + + if (wanted > acl->res[res].rlim_max) + acl->res[res].rlim_max = res_add; + + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, + task->role->roletype, acl->filename, + acl->res[res].rlim_cur, acl->res[res].rlim_max, + "", (unsigned long) res); + } + + return; +} + +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS +void +pax_set_initial_flags(struct linux_binprm *bprm) +{ + struct task_struct *task = current; + struct acl_subject_label *proc; + unsigned long flags; + + if (unlikely(!(gr_status & GR_READY))) + return; + + flags = pax_get_flags(task); + + proc = task->acl; + + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC) + flags &= ~MF_PAX_PAGEEXEC; + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC) + flags &= ~MF_PAX_SEGMEXEC; + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP) + flags &= ~MF_PAX_RANDMMAP; + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP) + flags &= ~MF_PAX_EMUTRAMP; + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT) + flags &= ~MF_PAX_MPROTECT; + + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC) + flags |= MF_PAX_PAGEEXEC; + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC) + flags |= MF_PAX_SEGMEXEC; + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP) + flags |= MF_PAX_RANDMMAP; + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP) + flags |= MF_PAX_EMUTRAMP; + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT) + flags |= MF_PAX_MPROTECT; + + pax_set_flags(task, flags); + + return; +} +#endif + +#ifdef CONFIG_SYSCTL +/* Eric Biederman likes breaking userland ABI and every inode-based security + system to save 35kb of memory */ + +/* we modify the passed in filename, but adjust it back before returning */ +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len) +{ + struct name_entry *nmatch; + char *p, *lastp = NULL; + struct acl_object_label *obj = NULL, *tmp; + struct acl_subject_label *tmpsubj; + int done = 0; + char c = '\0'; + + read_lock(&gr_inode_lock); + + p = name + len - 1; + do { + nmatch = lookup_name_entry(name); + if (lastp != NULL) + *lastp = c; + + if (nmatch == NULL) + goto next_component; + tmpsubj = current->acl; + do { + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj); + if (obj != NULL) { + tmp = obj->globbed; + while (tmp) { + if (!glob_match(tmp->filename, name)) { + obj = tmp; + goto found_obj; + } + tmp = tmp->next; + } + goto found_obj; + } + } while ((tmpsubj = tmpsubj->parent_subject)); +next_component: + /* end case */ + if (p == name) + break; + + while (*p != '/') + p--; + if (p == name) + lastp = p + 1; + else { + lastp = p; + p--; + } + c = *lastp; + *lastp = '\0'; + } while (1); +found_obj: + read_unlock(&gr_inode_lock); + /* obj returned will always be non-null */ + return obj; +} + +/* returns 0 when allowing, non-zero on error + op of 0 is used for readdir, so we don't log the names of hidden files +*/ +__u32 
+gr_handle_sysctl(const struct ctl_table *table, const int op) +{ + ctl_table *tmp; + struct nameidata nd; + const char *proc_sys = "/proc/sys"; + char *path; + struct acl_object_label *obj; + unsigned short len = 0, pos = 0, depth = 0, i; + __u32 err = 0; + __u32 mode = 0; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + /* for now, ignore operations on non-sysctl entries if it's not a + readdir*/ + if (table->child != NULL && op != 0) + return 0; + + mode |= GR_FIND; + /* it's only a read if it's an entry, read on dirs is for readdir */ + if (op & 004) + mode |= GR_READ; + if (op & 002) + mode |= GR_WRITE; + + preempt_disable(); + + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id()); + + /* it's only a read/write if it's an actual entry, not a dir + (which are opened for readdir) + */ + + /* convert the requested sysctl entry into a pathname */ + + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) { + len += strlen(tmp->procname); + len++; + depth++; + } + + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) { + /* deny */ + goto out; + } + + memset(path, 0, PAGE_SIZE); + + memcpy(path, proc_sys, strlen(proc_sys)); + + pos += strlen(proc_sys); + + for (; depth > 0; depth--) { + path[pos] = '/'; + pos++; + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) { + if (depth == i) { + memcpy(path + pos, tmp->procname, + strlen(tmp->procname)); + pos += strlen(tmp->procname); + } + i++; + } + } + + obj = gr_lookup_by_name(path, pos); + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS); + + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) && + ((err & mode) != mode))) { + __u32 new_mode = mode; + + new_mode &= ~(GR_AUDITS | GR_SUPPRESS); + + err = 0; + gr_log_learn_sysctl(current, path, new_mode); + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) { + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path); + err = -ENOENT; + } else if (!(err & GR_FIND)) { + err = -ENOENT; + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) { + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied", + path, (mode & GR_READ) ? " reading" : "", + (mode & GR_WRITE) ? " writing" : ""); + err = -EACCES; + } else if ((err & mode) != mode) { + err = -EACCES; + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) { + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful", + path, (mode & GR_READ) ? " reading" : "", + (mode & GR_WRITE) ? 
" writing" : ""); + err = 0; + } else + err = 0; + + out: + preempt_enable(); + + return err; +} +#endif + +int +gr_handle_proc_ptrace(struct task_struct *task) +{ + struct file *filp; + struct task_struct *tmp = task; + struct task_struct *curtemp = current; + __u32 retmode; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + filp = task->exec_file; + + while (tmp->pid > 0) { + if (tmp == curtemp) + break; + tmp = tmp->parent; + } + + if (!filp || (tmp->pid == 0 && !(current->acl->mode & GR_RELAXPTRACE))) { + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + return 1; + } + + retmode = gr_search_file(filp->f_dentry, GR_NOPTRACE, filp->f_vfsmnt); + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + + if (retmode & GR_NOPTRACE) + return 1; + + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD) + && (current->acl != task->acl || (current->acl != current->role->root_label + && current->pid != task->pid))) + return 1; + + return 0; +} + +int +gr_handle_ptrace(struct task_struct *task, const long request) +{ + struct task_struct *tmp = task; + struct task_struct *curtemp = current; + __u32 retmode; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + read_lock(&tasklist_lock); + while (tmp->pid > 0) { + if (tmp == curtemp) + break; + tmp = tmp->parent; + } + + if (tmp->pid == 0 && !(current->acl->mode & GR_RELAXPTRACE)) { + read_unlock(&tasklist_lock); + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); + return 1; + } + read_unlock(&tasklist_lock); + + read_lock(&grsec_exec_file_lock); + if (unlikely(!task->exec_file)) { + read_unlock(&grsec_exec_file_lock); + return 0; + } + + retmode = gr_search_file(task->exec_file->f_dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_vfsmnt); + read_unlock(&grsec_exec_file_lock); + + if (retmode & GR_NOPTRACE) { + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); + return 1; + } + + if (retmode & GR_PTRACERD) { + switch (request) { + case PTRACE_POKETEXT: + case PTRACE_POKEDATA: + case PTRACE_POKEUSR: +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64) + case PTRACE_SETREGS: + case PTRACE_SETFPREGS: +#endif +#ifdef CONFIG_X86 + case PTRACE_SETFPXREGS: +#endif +#ifdef CONFIG_ALTIVEC + case PTRACE_SETVRREGS: +#endif + return 1; + default: + return 0; + } + } else if (!(current->acl->mode & GR_POVERRIDE) && + !(current->role->roletype & GR_ROLE_GOD) && + (current->acl != task->acl)) { + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); + return 1; + } + + return 0; +} + +static int is_writable_mmap(const struct file *filp) +{ + struct task_struct *task = current; + struct acl_object_label *obj, *obj2; + + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) && + !task->is_writable && S_ISREG(filp->f_dentry->d_inode->i_mode)) { + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label); + obj2 = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, + task->role->root_label); + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_dentry, filp->f_vfsmnt); + return 1; + } + } + return 0; +} + +int +gr_acl_handle_mmap(const struct file *file, const unsigned long prot) +{ + __u32 mode; + + if (unlikely(!file || !(prot & PROT_EXEC))) + return 1; + + if (is_writable_mmap(file)) + return 0; + + mode = + 
gr_search_file(file->f_dentry, + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, + file->f_vfsmnt); + + if (!gr_tpe_allow(file)) + return 0; + + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_dentry, file->f_vfsmnt); + return 0; + } else if (unlikely(!(mode & GR_EXEC))) { + return 0; + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_dentry, file->f_vfsmnt); + return 1; + } + + return 1; +} + +int +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) +{ + __u32 mode; + + if (unlikely(!file || !(prot & PROT_EXEC))) + return 1; + + if (is_writable_mmap(file)) + return 0; + + mode = + gr_search_file(file->f_dentry, + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, + file->f_vfsmnt); + + if (!gr_tpe_allow(file)) + return 0; + + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_dentry, file->f_vfsmnt); + return 0; + } else if (unlikely(!(mode & GR_EXEC))) { + return 0; + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_dentry, file->f_vfsmnt); + return 1; + } + + return 1; +} + +void +gr_acl_handle_psacct(struct task_struct *task, const long code) +{ + unsigned long runtime; + unsigned long cputime; + unsigned int wday, cday; + __u8 whr, chr; + __u8 wmin, cmin; + __u8 wsec, csec; + + if (unlikely(!(gr_status & GR_READY) || !task->acl || + !(task->acl->mode & GR_PROCACCT))) + return; + + runtime = xtime.tv_sec - task->start_time.tv_sec; + wday = runtime / (3600 * 24); + runtime -= wday * (3600 * 24); + whr = runtime / 3600; + runtime -= whr * 3600; + wmin = runtime / 60; + runtime -= wmin * 60; + wsec = runtime; + + cputime = (task->utime + task->stime) / HZ; + cday = cputime / (3600 * 24); + cputime -= cday * (3600 * 24); + chr = cputime / 3600; + cputime -= chr * 3600; + cmin = cputime / 60; + cputime -= cmin * 60; + csec = cputime; + + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code); + + return; +} + +void gr_set_kernel_label(struct task_struct *task) +{ + if (gr_status & GR_READY) { + task->role = kernel_role; + task->acl = kernel_role->root_label; + } + return; +} + +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino) +{ + struct task_struct *task = current; + struct dentry *dentry = file->f_dentry; + struct vfsmount *mnt = file->f_vfsmnt; + struct acl_object_label *obj, *tmp; + struct acl_subject_label *subj; + unsigned int bufsize; + int is_not_root; + char *path; + + if (unlikely(!(gr_status & GR_READY))) + return 1; + + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)) + return 1; + + /* ignore Eric Biederman */ + if (IS_PRIVATE(dentry->d_inode)) + return 1; + + subj = task->acl; + do { + obj = lookup_acl_obj_label(ino, dentry->d_inode->i_sb->s_dev, subj); + if (obj != NULL) + return (obj->mode & GR_FIND) ? 1 : 0; + } while ((subj = subj->parent_subject)); + + obj = chk_obj_label(dentry, mnt, task->acl); + if (obj->globbed == NULL) + return (obj->mode & GR_FIND) ? 1 : 0; + + is_not_root = ((obj->filename[0] == '/') && + (obj->filename[1] == '\0')) ? 
0 : 1; + bufsize = PAGE_SIZE - namelen - is_not_root; + + /* check bufsize > PAGE_SIZE || bufsize == 0 */ + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1))) + return 1; + + preempt_disable(); + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), + bufsize); + + bufsize = strlen(path); + + /* if base is "/", don't append an additional slash */ + if (is_not_root) + *(path + bufsize) = '/'; + memcpy(path + bufsize + is_not_root, name, namelen); + *(path + bufsize + namelen + is_not_root) = '\0'; + + tmp = obj->globbed; + while (tmp) { + if (!glob_match(tmp->filename, path)) { + preempt_enable(); + return (tmp->mode & GR_FIND) ? 1 : 0; + } + tmp = tmp->next; + } + preempt_enable(); + return (obj->mode & GR_FIND) ? 1 : 0; +} + +EXPORT_SYMBOL(gr_learn_resource); +EXPORT_SYMBOL(gr_set_kernel_label); +#ifdef CONFIG_SECURITY +EXPORT_SYMBOL(gr_check_user_change); +EXPORT_SYMBOL(gr_check_group_change); +#endif + diff -urNp linux-2.6.22.1/grsecurity/gracl_cap.c linux-2.6.22.1/grsecurity/gracl_cap.c --- linux-2.6.22.1/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/gracl_cap.c 2007-08-02 12:24:21.000000000 -0400 @@ -0,0 +1,112 @@ +#include +#include +#include +#include +#include +#include +#include + +static const char *captab_log[] = { + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL" +}; + +EXPORT_SYMBOL(gr_task_is_capable); +EXPORT_SYMBOL(gr_is_capable_nolog); + +int +gr_task_is_capable(struct task_struct *task, const int cap) +{ + struct acl_subject_label *curracl; + __u32 cap_drop = 0, cap_mask = 0; + + if (!gr_acl_is_enabled()) + return 1; + + curracl = task->acl; + + cap_drop = curracl->cap_lower; + cap_mask = curracl->cap_mask; + + while ((curracl = curracl->parent_subject)) { + if (!(cap_mask & (1 << cap)) && (curracl->cap_mask & (1 << cap))) + cap_drop |= curracl->cap_lower & (1 << cap); + cap_mask |= curracl->cap_mask; + } + + if (!cap_raised(cap_drop, cap)) + return 1; + + curracl = task->acl; + + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) + && cap_raised(task->cap_effective, cap)) { + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, + task->role->roletype, task->uid, + task->gid, task->exec_file ? 
+ gr_to_filename(task->exec_file->f_dentry, + task->exec_file->f_vfsmnt) : curracl->filename, + curracl->filename, 0UL, + 0UL, "", (unsigned long) cap, NIPQUAD(task->signal->curr_ip)); + return 1; + } + + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(task->cap_effective, cap)) + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]); + return 0; +} + +int +gr_is_capable_nolog(const int cap) +{ + struct acl_subject_label *curracl; + __u32 cap_drop = 0, cap_mask = 0; + + if (!gr_acl_is_enabled()) + return 1; + + curracl = current->acl; + + cap_drop = curracl->cap_lower; + cap_mask = curracl->cap_mask; + + while ((curracl = curracl->parent_subject)) { + cap_drop |= curracl->cap_lower & (cap_mask & ~curracl->cap_mask); + cap_mask |= curracl->cap_mask; + } + + if (!cap_raised(cap_drop, cap)) + return 1; + + return 0; +} + diff -urNp linux-2.6.22.1/grsecurity/gracl_fs.c linux-2.6.22.1/grsecurity/gracl_fs.c --- linux-2.6.22.1/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/gracl_fs.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,423 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +__u32 +gr_acl_handle_hidden_file(const struct dentry * dentry, + const struct vfsmount * mnt) +{ + __u32 mode; + + if (unlikely(!dentry->d_inode)) + return GR_FIND; + + mode = + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt); + + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); + return mode; + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); + return 0; + } else if (unlikely(!(mode & GR_FIND))) + return 0; + + return GR_FIND; +} + +__u32 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, + const int fmode) +{ + __u32 reqmode = GR_FIND; + __u32 mode; + + if (unlikely(!dentry->d_inode)) + return reqmode; + + if (unlikely(fmode & O_APPEND)) + reqmode |= GR_APPEND; + else if (unlikely(fmode & FMODE_WRITE)) + reqmode |= GR_WRITE; + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY))) + reqmode |= GR_READ; + + mode = + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, + mnt); + + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : reqmode & + GR_APPEND ? " appending" : ""); + return reqmode; + } else + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) + { + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : reqmode & + GR_APPEND ? 
" appending" : ""); + return 0; + } else if (unlikely((mode & reqmode) != reqmode)) + return 0; + + return reqmode; +} + +__u32 +gr_acl_handle_creat(const struct dentry * dentry, + const struct dentry * p_dentry, + const struct vfsmount * p_mnt, const int fmode, + const int imode) +{ + __u32 reqmode = GR_WRITE | GR_CREATE; + __u32 mode; + + if (unlikely(fmode & O_APPEND)) + reqmode |= GR_APPEND; + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY))) + reqmode |= GR_READ; + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID)))) + reqmode |= GR_SETID; + + mode = + gr_check_create(dentry, p_dentry, p_mnt, + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); + + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : reqmode & + GR_APPEND ? " appending" : ""); + return reqmode; + } else + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) + { + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : reqmode & + GR_APPEND ? " appending" : ""); + return 0; + } else if (unlikely((mode & reqmode) != reqmode)) + return 0; + + return reqmode; +} + +__u32 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt, + const int fmode) +{ + __u32 mode, reqmode = GR_FIND; + + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode)) + reqmode |= GR_EXEC; + if (fmode & S_IWOTH) + reqmode |= GR_WRITE; + if (fmode & S_IROTH) + reqmode |= GR_READ; + + mode = + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, + mnt); + + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : "", + reqmode & GR_EXEC ? " executing" : ""); + return reqmode; + } else + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) + { + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : "", + reqmode & GR_EXEC ? 
" executing" : ""); + return 0; + } else if (unlikely((mode & reqmode) != reqmode)) + return 0; + + return reqmode; +} + +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt) +{ + __u32 mode; + + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt); + + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt); + return mode; + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt); + return 0; + } else if (unlikely((mode & (reqmode)) != (reqmode))) + return 0; + + return (reqmode); +} + +__u32 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG); +} + +__u32 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG); +} + +__u32 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG); +} + +__u32 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG); +} + +__u32 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt, + mode_t mode) +{ + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode))) + return 1; + + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) { + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID, + GR_FCHMOD_ACL_MSG); + } else { + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG); + } +} + +__u32 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt, + mode_t mode) +{ + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) { + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID, + GR_CHMOD_ACL_MSG); + } else { + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG); + } +} + +__u32 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG); +} + +__u32 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG); +} + +__u32 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE, + GR_UNIXCONNECT_ACL_MSG); +} + +/* hardlinks require at minimum create permission, + any additional privilege required is based on the + privilege of the file being linked to +*/ +__u32 +gr_acl_handle_link(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const struct dentry * old_dentry, + const struct vfsmount * old_mnt, const char *to) +{ + __u32 mode; + __u32 needmode = GR_CREATE | GR_LINK; + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK; + + mode = + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry, + old_mnt); + + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) { + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to); + return mode; + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { + 
gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to); + return 0; + } else if (unlikely((mode & needmode) != needmode)) + return 0; + + return 1; +} + +__u32 +gr_acl_handle_symlink(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, const char *from) +{ + __u32 needmode = GR_WRITE | GR_CREATE; + __u32 mode; + + mode = + gr_check_create(new_dentry, parent_dentry, parent_mnt, + GR_CREATE | GR_AUDIT_CREATE | + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS); + + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) { + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt); + return mode; + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt); + return 0; + } else if (unlikely((mode & needmode) != needmode)) + return 0; + + return (GR_WRITE | GR_CREATE); +} + +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt) +{ + __u32 mode; + + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); + + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt); + return mode; + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt); + return 0; + } else if (unlikely((mode & (reqmode)) != (reqmode))) + return 0; + + return (reqmode); +} + +__u32 +gr_acl_handle_mknod(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const int mode) +{ + __u32 reqmode = GR_WRITE | GR_CREATE; + if (unlikely(mode & (S_ISUID | S_ISGID))) + reqmode |= GR_SETID; + + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, + reqmode, GR_MKNOD_ACL_MSG); +} + +__u32 +gr_acl_handle_mkdir(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt) +{ + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG); +} + +#define RENAME_CHECK_SUCCESS(old, new) \ + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \ + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ))) + +int +gr_acl_handle_rename(struct dentry *new_dentry, + struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + struct dentry *old_dentry, + struct inode *old_parent_inode, + struct vfsmount *old_mnt, const char *newname) +{ + __u32 comp1, comp2; + int error = 0; + + if (unlikely(!gr_acl_is_enabled())) + return 0; + + if (!new_dentry->d_inode) { + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt, + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ | + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS); + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE | + GR_DELETE | GR_AUDIT_DELETE | + GR_AUDIT_READ | GR_AUDIT_WRITE | + GR_SUPPRESS, old_mnt); + } else { + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE | + GR_CREATE | GR_DELETE | + GR_AUDIT_CREATE | GR_AUDIT_DELETE | + GR_AUDIT_READ | GR_AUDIT_WRITE | + GR_SUPPRESS, parent_mnt); + comp2 = + gr_search_file(old_dentry, + GR_READ | GR_WRITE | GR_AUDIT_READ | + GR_DELETE | GR_AUDIT_DELETE | + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt); + } + + if 
(RENAME_CHECK_SUCCESS(comp1, comp2) && + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS))) + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname); + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS) + && !(comp2 & GR_SUPPRESS)) { + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname); + error = -EACCES; + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2))) + error = -EACCES; + + return error; +} + +void +gr_acl_handle_exit(void) +{ + u16 id; + char *rolename; + struct file *exec_file; + + if (unlikely(current->acl_sp_role && gr_acl_is_enabled())) { + id = current->acl_role_id; + rolename = current->role->rolename; + gr_set_acls(1); + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id); + } + + write_lock(&grsec_exec_file_lock); + exec_file = current->exec_file; + current->exec_file = NULL; + write_unlock(&grsec_exec_file_lock); + + if (exec_file) + fput(exec_file); +} + +int +gr_acl_handle_procpidmem(const struct task_struct *task) +{ + if (unlikely(!gr_acl_is_enabled())) + return 0; + + if (task->acl->mode & GR_PROTPROCFD) + return -EACCES; + + return 0; +} diff -urNp linux-2.6.22.1/grsecurity/gracl_ip.c linux-2.6.22.1/grsecurity/gracl_ip.c --- linux-2.6.22.1/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/gracl_ip.c 2007-08-02 11:56:40.000000000 -0400 @@ -0,0 +1,313 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GR_BIND 0x01 +#define GR_CONNECT 0x02 +#define GR_INVERT 0x04 + +static const char * gr_protocols[256] = { + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt", + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet", + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1", + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp", + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++", + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre", + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile", + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63", + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv", + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp", + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim", + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip", + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp", + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup", + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135", + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143", + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151", + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159", + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167", + "unknown:168", "unknown:169", "unknown:170", 
"unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175", + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183", + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191", + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199", + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207", + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215", + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223", + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231", + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239", + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247", + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255", + }; + +static const char * gr_socktypes[11] = { + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", + "unknown:7", "unknown:8", "unknown:9", "packet" + }; + +const char * +gr_proto_to_name(unsigned char proto) +{ + return gr_protocols[proto]; +} + +const char * +gr_socktype_to_name(unsigned char type) +{ + return gr_socktypes[type]; +} + +int +gr_search_socket(const int domain, const int type, const int protocol) +{ + struct acl_subject_label *curr; + + if (unlikely(!gr_acl_is_enabled())) + goto exit; + + if ((domain < 0) || (type < 0) || (protocol < 0) || (domain != PF_INET) + || (domain >= NPROTO) || (type >= SOCK_MAX) || (protocol > 255)) + goto exit; // let the kernel handle it + + curr = current->acl; + + if (!curr->ips) + goto exit; + + if ((curr->ip_type & (1 << type)) && + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32)))) + goto exit; + + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { + /* we don't place acls on raw sockets , and sometimes + dgram/ip sockets are opened for ioctl and not + bind/connect, so we'll fake a bind learn log */ + if (type == SOCK_RAW || type == SOCK_PACKET) { + __u32 fakeip = 0; + security_learn(GR_IP_LEARN_MSG, current->role->rolename, + current->role->roletype, current->uid, + current->gid, current->exec_file ? + gr_to_filename(current->exec_file->f_dentry, + current->exec_file->f_vfsmnt) : + curr->filename, curr->filename, + NIPQUAD(fakeip), 0, type, + protocol, GR_CONNECT, +NIPQUAD(current->signal->curr_ip)); + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) { + __u32 fakeip = 0; + security_learn(GR_IP_LEARN_MSG, current->role->rolename, + current->role->roletype, current->uid, + current->gid, current->exec_file ? 
+ gr_to_filename(current->exec_file->f_dentry, + current->exec_file->f_vfsmnt) : + curr->filename, curr->filename, + NIPQUAD(fakeip), 0, type, + protocol, GR_BIND, NIPQUAD(current->signal->curr_ip)); + } + /* we'll log when they use connect or bind */ + goto exit; + } + + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, "inet", + gr_socktype_to_name(type), gr_proto_to_name(protocol)); + + return 0; + exit: + return 1; +} + +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask) +{ + if ((ip->mode & mode) && + (ip_port >= ip->low) && + (ip_port <= ip->high) && + ((ntohl(ip_addr) & our_netmask) == + (ntohl(our_addr) & our_netmask)) + && (ip->proto[protocol / 32] & (1 << (protocol % 32))) + && (ip->type & (1 << type))) { + if (ip->mode & GR_INVERT) + return 2; // specifically denied + else + return 1; // allowed + } + + return 0; // not specifically allowed, may continue parsing +} + +static int +gr_search_connectbind(const int mode, const struct sock *sk, + const struct sockaddr_in *addr, const int type) +{ + char iface[IFNAMSIZ] = {0}; + struct acl_subject_label *curr; + struct acl_ip_label *ip; + struct net_device *dev; + struct in_device *idev; + unsigned long i; + int ret; + __u32 ip_addr = 0; + __u32 our_addr; + __u32 our_netmask; + char *p; + __u16 ip_port = 0; + + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET)) + return 1; + + curr = current->acl; + + if (!curr->ips) + return 1; + + ip_addr = addr->sin_addr.s_addr; + ip_port = ntohs(addr->sin_port); + + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { + security_learn(GR_IP_LEARN_MSG, current->role->rolename, + current->role->roletype, current->uid, + current->gid, current->exec_file ? + gr_to_filename(current->exec_file->f_dentry, + current->exec_file->f_vfsmnt) : + curr->filename, curr->filename, + NIPQUAD(ip_addr), ip_port, type, + sk->sk_protocol, mode, NIPQUAD(current->signal->curr_ip)); + return 1; + } + + for (i = 0; i < curr->ip_num; i++) { + ip = *(curr->ips + i); + if (ip->iface != NULL) { + strncpy(iface, ip->iface, IFNAMSIZ - 1); + p = strchr(iface, ':'); + if (p != NULL) + *p = '\0'; + dev = dev_get_by_name(iface); + if (dev == NULL) + continue; + idev = in_dev_get(dev); + if (idev == NULL) { + dev_put(dev); + continue; + } + rcu_read_lock(); + for_ifa(idev) { + if (!strcmp(ip->iface, ifa->ifa_label)) { + our_addr = ifa->ifa_address; + our_netmask = 0xffffffff; + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask); + if (ret == 1) { + rcu_read_unlock(); + in_dev_put(idev); + dev_put(dev); + return 1; + } else if (ret == 2) { + rcu_read_unlock(); + in_dev_put(idev); + dev_put(dev); + goto denied; + } + } + } endfor_ifa(idev); + rcu_read_unlock(); + in_dev_put(idev); + dev_put(dev); + } else { + our_addr = ip->addr; + our_netmask = ip->netmask; + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask); + if (ret == 1) + return 1; + else if (ret == 2) + goto denied; + } + } + +denied: + if (mode == GR_BIND) + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, NIPQUAD(ip_addr), ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol)); + else if (mode == GR_CONNECT) + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, NIPQUAD(ip_addr), ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol)); + + return 0; +} + +int +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr) +{ + return 
gr_search_connectbind(GR_CONNECT, sock->sk, addr, sock->type); +} + +int +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr) +{ + return gr_search_connectbind(GR_BIND, sock->sk, addr, sock->type); +} + +int gr_search_listen(const struct socket *sock) +{ + struct sock *sk = sock->sk; + struct sockaddr_in addr; + + addr.sin_addr.s_addr = inet_sk(sk)->saddr; + addr.sin_port = inet_sk(sk)->sport; + + return gr_search_connectbind(GR_BIND, sock->sk, &addr, sock->type); +} + +int gr_search_accept(const struct socket *sock) +{ + struct sock *sk = sock->sk; + struct sockaddr_in addr; + + addr.sin_addr.s_addr = inet_sk(sk)->saddr; + addr.sin_port = inet_sk(sk)->sport; + + return gr_search_connectbind(GR_BIND, sock->sk, &addr, sock->type); +} + +int +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr) +{ + if (addr) + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM); + else { + struct sockaddr_in sin; + const struct inet_sock *inet = inet_sk(sk); + + sin.sin_addr.s_addr = inet->daddr; + sin.sin_port = inet->dport; + + return gr_search_connectbind(GR_CONNECT, sk, &sin, SOCK_DGRAM); + } +} + +int +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb) +{ + struct sockaddr_in sin; + + if (unlikely(skb->len < sizeof (struct udphdr))) + return 1; // skip this packet + + sin.sin_addr.s_addr = ip_hdr(skb)->saddr; + sin.sin_port = udp_hdr(skb)->source; + + return gr_search_connectbind(GR_CONNECT, sk, &sin, SOCK_DGRAM); +} diff -urNp linux-2.6.22.1/grsecurity/gracl_learn.c linux-2.6.22.1/grsecurity/gracl_learn.c --- linux-2.6.22.1/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/gracl_learn.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,211 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf, + size_t count, loff_t *ppos); +extern int gr_acl_is_enabled(void); + +static DECLARE_WAIT_QUEUE_HEAD(learn_wait); +static int gr_learn_attached; + +/* use a 512k buffer */ +#define LEARN_BUFFER_SIZE (512 * 1024) + +static spinlock_t gr_learn_lock = SPIN_LOCK_UNLOCKED; +static DECLARE_MUTEX(gr_learn_user_sem); + +/* we need to maintain two buffers, so that the kernel context of grlearn + uses a semaphore around the userspace copying, and the other kernel contexts + use a spinlock when copying into the buffer, since they cannot sleep +*/ +static char *learn_buffer; +static char *learn_buffer_user; +static int learn_buffer_len; +static int learn_buffer_user_len; + +static ssize_t +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos) +{ + DECLARE_WAITQUEUE(wait, current); + ssize_t retval = 0; + + add_wait_queue(&learn_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + do { + down(&gr_learn_user_sem); + spin_lock(&gr_learn_lock); + if (learn_buffer_len) + break; + spin_unlock(&gr_learn_lock); + up(&gr_learn_user_sem); + if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto out; + } + if (signal_pending(current)) { + retval = -ERESTARTSYS; + goto out; + } + + schedule(); + } while (1); + + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len); + learn_buffer_user_len = learn_buffer_len; + retval = learn_buffer_len; + learn_buffer_len = 0; + + spin_unlock(&gr_learn_lock); + + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len)) + retval = -EFAULT; + + up(&gr_learn_user_sem); +out: + set_current_state(TASK_RUNNING); + 
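+	/* common exit: both the data-copied and error paths detach from the learn wait queue here */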
remove_wait_queue(&learn_wait, &wait); + return retval; +} + +static unsigned int +poll_learn(struct file * file, poll_table * wait) +{ + poll_wait(file, &learn_wait, wait); + + if (learn_buffer_len) + return (POLLIN | POLLRDNORM); + + return 0; +} + +void +gr_clear_learn_entries(void) +{ + char *tmp; + + down(&gr_learn_user_sem); + if (learn_buffer != NULL) { + spin_lock(&gr_learn_lock); + tmp = learn_buffer; + learn_buffer = NULL; + spin_unlock(&gr_learn_lock); + vfree(learn_buffer); + } + if (learn_buffer_user != NULL) { + vfree(learn_buffer_user); + learn_buffer_user = NULL; + } + learn_buffer_len = 0; + up(&gr_learn_user_sem); + + return; +} + +void +gr_add_learn_entry(const char *fmt, ...) +{ + va_list args; + unsigned int len; + + if (!gr_learn_attached) + return; + + spin_lock(&gr_learn_lock); + + /* leave a gap at the end so we know when it's "full" but don't have to + compute the exact length of the string we're trying to append + */ + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) { + spin_unlock(&gr_learn_lock); + wake_up_interruptible(&learn_wait); + return; + } + if (learn_buffer == NULL) { + spin_unlock(&gr_learn_lock); + return; + } + + va_start(args, fmt); + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args); + va_end(args); + + learn_buffer_len += len + 1; + + spin_unlock(&gr_learn_lock); + wake_up_interruptible(&learn_wait); + + return; +} + +static int +open_learn(struct inode *inode, struct file *file) +{ + if (file->f_mode & FMODE_READ && gr_learn_attached) + return -EBUSY; + if (file->f_mode & FMODE_READ) { + int retval = 0; + down(&gr_learn_user_sem); + if (learn_buffer == NULL) + learn_buffer = vmalloc(LEARN_BUFFER_SIZE); + if (learn_buffer_user == NULL) + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE); + if (learn_buffer == NULL) { + retval = -ENOMEM; + goto out_error; + } + if (learn_buffer_user == NULL) { + retval = -ENOMEM; + goto out_error; + } + learn_buffer_len = 0; + learn_buffer_user_len = 0; + gr_learn_attached = 1; +out_error: + up(&gr_learn_user_sem); + return retval; + } + return 0; +} + +static int +close_learn(struct inode *inode, struct file *file) +{ + char *tmp; + + if (file->f_mode & FMODE_READ) { + down(&gr_learn_user_sem); + if (learn_buffer != NULL) { + spin_lock(&gr_learn_lock); + tmp = learn_buffer; + learn_buffer = NULL; + spin_unlock(&gr_learn_lock); + vfree(tmp); + } + if (learn_buffer_user != NULL) { + vfree(learn_buffer_user); + learn_buffer_user = NULL; + } + learn_buffer_len = 0; + learn_buffer_user_len = 0; + gr_learn_attached = 0; + up(&gr_learn_user_sem); + } + + return 0; +} + +struct file_operations grsec_fops = { + .read = read_learn, + .write = write_grsec_handler, + .open = open_learn, + .release = close_learn, + .poll = poll_learn, +}; diff -urNp linux-2.6.22.1/grsecurity/gracl_res.c linux-2.6.22.1/grsecurity/gracl_res.c --- linux-2.6.22.1/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/gracl_res.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,45 @@ +#include +#include +#include +#include + +static const char *restab_log[] = { + [RLIMIT_CPU] = "RLIMIT_CPU", + [RLIMIT_FSIZE] = "RLIMIT_FSIZE", + [RLIMIT_DATA] = "RLIMIT_DATA", + [RLIMIT_STACK] = "RLIMIT_STACK", + [RLIMIT_CORE] = "RLIMIT_CORE", + [RLIMIT_RSS] = "RLIMIT_RSS", + [RLIMIT_NPROC] = "RLIMIT_NPROC", + [RLIMIT_NOFILE] = "RLIMIT_NOFILE", + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK", + [RLIMIT_AS] = "RLIMIT_AS", + [RLIMIT_LOCKS] = "RLIMIT_LOCKS", + [RLIMIT_LOCKS + 1] = "RLIMIT_CRASH" +}; + 
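+/*
+ * Log a task running into one of its resource limits.  NPROC overruns are
+ * ignored when the task holds CAP_SYS_ADMIN or CAP_SYS_RESOURCE, and MEMLOCK
+ * overruns when it holds CAP_IPC_LOCK, since those capabilities are normally
+ * allowed to exceed the corresponding limit.
+ */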
+void +gr_log_resource(const struct task_struct *task, + const int res, const unsigned long wanted, const int gt) +{ + if (res == RLIMIT_NPROC && + (cap_raised(task->cap_effective, CAP_SYS_ADMIN) || + cap_raised(task->cap_effective, CAP_SYS_RESOURCE))) + return; + else if (res == RLIMIT_MEMLOCK && + cap_raised(task->cap_effective, CAP_IPC_LOCK)) + return; + + if (!gr_acl_is_enabled() && !grsec_resource_logging) + return; + + preempt_disable(); + + if (unlikely(((gt && wanted > task->signal->rlim[res].rlim_cur) || + (!gt && wanted >= task->signal->rlim[res].rlim_cur)) && + task->signal->rlim[res].rlim_cur != RLIM_INFINITY)) + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], task->signal->rlim[res].rlim_cur); + preempt_enable_no_resched(); + + return; +} diff -urNp linux-2.6.22.1/grsecurity/gracl_segv.c linux-2.6.22.1/grsecurity/gracl_segv.c --- linux-2.6.22.1/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/gracl_segv.c 2007-08-02 12:02:19.000000000 -0400 @@ -0,0 +1,301 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct crash_uid *uid_set; +static unsigned short uid_used; +static spinlock_t gr_uid_lock = SPIN_LOCK_UNLOCKED; +extern rwlock_t gr_inode_lock; +extern struct acl_subject_label * + lookup_acl_subj_label(const ino_t inode, const dev_t dev, + struct acl_role_label *role); +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t); + +int +gr_init_uidset(void) +{ + uid_set = + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL); + uid_used = 0; + + return uid_set ? 1 : 0; +} + +void +gr_free_uidset(void) +{ + if (uid_set) + kfree(uid_set); + + return; +} + +int +gr_find_uid(const uid_t uid) +{ + struct crash_uid *tmp = uid_set; + uid_t buid; + int low = 0, high = uid_used - 1, mid; + + while (high >= low) { + mid = (low + high) >> 1; + buid = tmp[mid].uid; + if (buid == uid) + return mid; + if (buid > uid) + high = mid - 1; + if (buid < uid) + low = mid + 1; + } + + return -1; +} + +static __inline__ void +gr_insertsort(void) +{ + unsigned short i, j; + struct crash_uid index; + + for (i = 1; i < uid_used; i++) { + index = uid_set[i]; + j = i; + while ((j > 0) && uid_set[j - 1].uid > index.uid) { + uid_set[j] = uid_set[j - 1]; + j--; + } + uid_set[j] = index; + } + + return; +} + +static __inline__ void +gr_insert_uid(const uid_t uid, const unsigned long expires) +{ + int loc; + + if (uid_used == GR_UIDTABLE_MAX) + return; + + loc = gr_find_uid(uid); + + if (loc >= 0) { + uid_set[loc].expires = expires; + return; + } + + uid_set[uid_used].uid = uid; + uid_set[uid_used].expires = expires; + uid_used++; + + gr_insertsort(); + + return; +} + +void +gr_remove_uid(const unsigned short loc) +{ + unsigned short i; + + for (i = loc + 1; i < uid_used; i++) + uid_set[i - 1] = uid_set[i]; + + uid_used--; + + return; +} + +int +gr_check_crash_uid(const uid_t uid) +{ + int loc; + int ret = 0; + + if (unlikely(!gr_acl_is_enabled())) + return 0; + + spin_lock(&gr_uid_lock); + loc = gr_find_uid(uid); + + if (loc < 0) + goto out_unlock; + + if (time_before_eq(uid_set[loc].expires, get_seconds())) + gr_remove_uid(loc); + else + ret = 1; + +out_unlock: + spin_unlock(&gr_uid_lock); + return ret; +} + +static __inline__ int +proc_is_setxid(const struct task_struct *task) +{ + if (task->uid != task->euid || task->uid != task->suid || + 
task->uid != task->fsuid) + return 1; + if (task->gid != task->egid || task->gid != task->sgid || + task->gid != task->fsgid) + return 1; + + return 0; +} +static __inline__ int +gr_fake_force_sig(int sig, struct task_struct *t) +{ + unsigned long int flags; + int ret, blocked, ignored; + struct k_sigaction *action; + + spin_lock_irqsave(&t->sighand->siglock, flags); + action = &t->sighand->action[sig-1]; + ignored = action->sa.sa_handler == SIG_IGN; + blocked = sigismember(&t->blocked, sig); + if (blocked || ignored) { + action->sa.sa_handler = SIG_DFL; + if (blocked) { + sigdelset(&t->blocked, sig); + recalc_sigpending_and_wake(t); + } + } + ret = specific_send_sig_info(sig, (void*)1L, t); + spin_unlock_irqrestore(&t->sighand->siglock, flags); + + return ret; +} + +void +gr_handle_crash(struct task_struct *task, const int sig) +{ + struct acl_subject_label *curr; + struct acl_subject_label *curr2; + struct task_struct *tsk, *tsk2; + + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL) + return; + + if (unlikely(!gr_acl_is_enabled())) + return; + + curr = task->acl; + + if (!(curr->resmask & (1 << GR_CRASH_RES))) + return; + + if (time_before_eq(curr->expires, get_seconds())) { + curr->expires = 0; + curr->crashes = 0; + } + + curr->crashes++; + + if (!curr->expires) + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max; + + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && + time_after(curr->expires, get_seconds())) { + if (task->uid && proc_is_setxid(task)) { + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max); + spin_lock(&gr_uid_lock); + gr_insert_uid(task->uid, curr->expires); + spin_unlock(&gr_uid_lock); + curr->expires = 0; + curr->crashes = 0; + read_lock(&tasklist_lock); + do_each_thread(tsk2, tsk) { + if (tsk != task && tsk->uid == task->uid) + gr_fake_force_sig(SIGKILL, tsk); + } while_each_thread(tsk2, tsk); + read_unlock(&tasklist_lock); + } else { + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max); + read_lock(&tasklist_lock); + do_each_thread(tsk2, tsk) { + if (likely(tsk != task)) { + curr2 = tsk->acl; + + if (curr2->device == curr->device && + curr2->inode == curr->inode) + gr_fake_force_sig(SIGKILL, tsk); + } + } while_each_thread(tsk2, tsk); + read_unlock(&tasklist_lock); + } + } + + return; +} + +int +gr_check_crash_exec(const struct file *filp) +{ + struct acl_subject_label *curr; + + if (unlikely(!gr_acl_is_enabled())) + return 0; + + read_lock(&gr_inode_lock); + curr = lookup_acl_subj_label(filp->f_dentry->d_inode->i_ino, + filp->f_dentry->d_inode->i_sb->s_dev, + current->role); + read_unlock(&gr_inode_lock); + + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) || + (!curr->crashes && !curr->expires)) + return 0; + + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && + time_after(curr->expires, get_seconds())) + return 1; + else if (time_before_eq(curr->expires, get_seconds())) { + curr->crashes = 0; + curr->expires = 0; + } + + return 0; +} + +void +gr_handle_alertkill(struct task_struct *task) +{ + struct acl_subject_label *curracl; + __u32 curr_ip; + struct task_struct *p, *p2; + + if (unlikely(!gr_acl_is_enabled())) + return; + + curracl = task->acl; + curr_ip = task->signal->curr_ip; + + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) { + read_lock(&tasklist_lock); + do_each_thread(p2, p) { + if (p->signal->curr_ip == curr_ip) + gr_fake_force_sig(SIGKILL, p); + } while_each_thread(p2, p); + read_unlock(&tasklist_lock); + } 
else if (curracl->mode & GR_KILLPROC) + gr_fake_force_sig(SIGKILL, task); + + return; +} diff -urNp linux-2.6.22.1/grsecurity/gracl_shm.c linux-2.6.22.1/grsecurity/gracl_shm.c --- linux-2.6.22.1/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/gracl_shm.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,33 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +int +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const time_t shm_createtime, const uid_t cuid, const int shmid) +{ + struct task_struct *task; + + if (!gr_acl_is_enabled()) + return 1; + + task = find_task_by_pid(shm_cprid); + + if (unlikely(!task)) + task = find_task_by_pid(shm_lapid); + + if (unlikely(task && (time_before((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) || + (task->pid == shm_lapid)) && + (task->acl->mode & GR_PROTSHM) && + (task->acl != current->acl))) { + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid); + return 0; + } + + return 1; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_chdir.c linux-2.6.22.1/grsecurity/grsec_chdir.c --- linux-2.6.22.1/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_chdir.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,19 @@ +#include +#include +#include +#include +#include +#include + +void +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR + if ((grsec_enable_chdir && grsec_enable_group && + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir && + !grsec_enable_group)) { + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt); + } +#endif + return; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_chroot.c linux-2.6.22.1/grsecurity/grsec_chroot.c --- linux-2.6.22.1/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_chroot.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,335 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int +gr_handle_chroot_unix(const pid_t pid) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX + struct pid *spid = NULL; + + if (unlikely(!grsec_enable_chroot_unix)) + return 1; + + if (likely(!proc_is_chrooted(current))) + return 1; + + read_lock(&tasklist_lock); + + spid = find_pid(pid); + if (spid) { + struct task_struct *p; + p = pid_task(spid, PIDTYPE_PID); + task_lock(p); + if (unlikely(!have_same_root(current, p))) { + task_unlock(p); + read_unlock(&tasklist_lock); + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG); + return 0; + } + task_unlock(p); + } + read_unlock(&tasklist_lock); +#endif + return 1; +} + +int +gr_handle_chroot_nice(void) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) { + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG); + return -EPERM; + } +#endif + return 0; +} + +int +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE + if (grsec_enable_chroot_nice && (niceval < task_nice(p)) + && proc_is_chrooted(current)) { + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_chroot_rawio(const struct inode *inode) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + if (grsec_enable_chroot_caps && proc_is_chrooted(current) && + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO)) + return 1; +#endif + return 0; +} + +int 
+gr_pid_is_chrooted(struct task_struct *p) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL) + return 0; + + task_lock(p); + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) || + !have_same_root(current, p)) { + task_unlock(p); + return 1; + } + task_unlock(p); +#endif + return 0; +} + +EXPORT_SYMBOL(gr_pid_is_chrooted); + +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR) +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt) +{ + struct dentry *dentry = (struct dentry *)u_dentry; + struct vfsmount *mnt = (struct vfsmount *)u_mnt; + struct dentry *realroot; + struct vfsmount *realrootmnt; + struct dentry *currentroot; + struct vfsmount *currentmnt; + struct task_struct *reaper = child_reaper(current); + int ret = 1; + + read_lock(&reaper->fs->lock); + realrootmnt = mntget(reaper->fs->rootmnt); + realroot = dget(reaper->fs->root); + read_unlock(&reaper->fs->lock); + + read_lock(&current->fs->lock); + currentmnt = mntget(current->fs->rootmnt); + currentroot = dget(current->fs->root); + read_unlock(&current->fs->lock); + + spin_lock(&dcache_lock); + for (;;) { + if (unlikely((dentry == realroot && mnt == realrootmnt) + || (dentry == currentroot && mnt == currentmnt))) + break; + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) { + if (mnt->mnt_parent == mnt) + break; + dentry = mnt->mnt_mountpoint; + mnt = mnt->mnt_parent; + continue; + } + dentry = dentry->d_parent; + } + spin_unlock(&dcache_lock); + + dput(currentroot); + mntput(currentmnt); + + /* access is outside of chroot */ + if (dentry == realroot && mnt == realrootmnt) + ret = 0; + + dput(realroot); + mntput(realrootmnt); + return ret; +} +#endif + +int +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR + if (!grsec_enable_chroot_fchdir) + return 1; + + if (!proc_is_chrooted(current)) + return 1; + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt); + return 0; + } +#endif + return 1; +} + +int +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const time_t shm_createtime) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT + struct pid *pid = NULL; + time_t starttime; + + if (unlikely(!grsec_enable_chroot_shmat)) + return 1; + + if (likely(!proc_is_chrooted(current))) + return 1; + + read_lock(&tasklist_lock); + + pid = find_pid(shm_cprid); + if (pid) { + struct task_struct *p; + p = pid_task(pid, PIDTYPE_PID); + task_lock(p); + starttime = p->start_time.tv_sec; + if (unlikely(!have_same_root(current, p) && + time_before((unsigned long)starttime, (unsigned long)shm_createtime))) { + task_unlock(p); + read_unlock(&tasklist_lock); + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); + return 0; + } + task_unlock(p); + } else { + pid = find_pid(shm_lapid); + if (pid) { + struct task_struct *p; + p = pid_task(pid, PIDTYPE_PID); + task_lock(p); + if (unlikely(!have_same_root(current, p))) { + task_unlock(p); + read_unlock(&tasklist_lock); + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); + return 0; + } + task_unlock(p); + } + } + + read_unlock(&tasklist_lock); +#endif + return 1; +} + +void +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG + if (grsec_enable_chroot_execlog && proc_is_chrooted(current)) + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt); +#endif + return; +} + +int
+gr_handle_chroot_mknod(const struct dentry *dentry, + const struct vfsmount *mnt, const int mode) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) && + proc_is_chrooted(current)) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt); + return -EPERM; + } +#endif + return 0; +} + +int +gr_handle_chroot_mount(const struct dentry *dentry, + const struct vfsmount *mnt, const char *dev_name) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) { + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name, dentry, mnt); + return -EPERM; + } +#endif + return 0; +} + +int +gr_handle_chroot_pivot(void) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) { + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG); + return -EPERM; + } +#endif + return 0; +} + +int +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE + if (grsec_enable_chroot_double && proc_is_chrooted(current) && + !gr_is_outside_chroot(dentry, mnt)) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt); + return -EPERM; + } +#endif + return 0; +} + +void +gr_handle_chroot_caps(struct task_struct *task) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) { + task->cap_permitted = + cap_drop(task->cap_permitted, GR_CHROOT_CAPS); + task->cap_inheritable = + cap_drop(task->cap_inheritable, GR_CHROOT_CAPS); + task->cap_effective = + cap_drop(task->cap_effective, GR_CHROOT_CAPS); + } +#endif + return; +} + +int +gr_handle_chroot_sysctl(const int op) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current) + && (op & 002)) + return -EACCES; +#endif + return 0; +} + +void +gr_handle_chroot_chdir(struct dentry *dentry, struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR + if (grsec_enable_chroot_chdir) + set_fs_pwd(current->fs, mnt, dentry); +#endif + return; +} + +int +gr_handle_chroot_chmod(const struct dentry *dentry, + const struct vfsmount *mnt, const int mode) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD + if (grsec_enable_chroot_chmod && + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) && + proc_is_chrooted(current)) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt); + return -EPERM; + } +#endif + return 0; +} + +#ifdef CONFIG_SECURITY +EXPORT_SYMBOL(gr_handle_chroot_caps); +#endif diff -urNp linux-2.6.22.1/grsecurity/grsec_disabled.c linux-2.6.22.1/grsecurity/grsec_disabled.c --- linux-2.6.22.1/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_disabled.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,418 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS +void +pax_set_initial_flags(struct linux_binprm *bprm) +{ + return; +} +#endif + +#ifdef CONFIG_SYSCTL +__u32 +gr_handle_sysctl(const struct ctl_table * table, const int op) +{ + return 0; +} +#endif + +int +gr_acl_is_enabled(void) +{ + return 0; +} + +int +gr_handle_rawio(const struct inode *inode) +{ + return 0; +} + +void +gr_acl_handle_psacct(struct task_struct *task, const long code) +{ + return; +} + +int +gr_handle_ptrace(struct task_struct *task, const long request) +{ + return 0; +} + +int +gr_handle_proc_ptrace(struct 
task_struct *task) +{ + return 0; +} + +void +gr_learn_resource(const struct task_struct *task, + const int res, const unsigned long wanted, const int gt) +{ + return; +} + +int +gr_set_acls(const int type) +{ + return 0; +} + +int +gr_check_hidden_task(const struct task_struct *tsk) +{ + return 0; +} + +int +gr_check_protected_task(const struct task_struct *task) +{ + return 0; +} + +void +gr_copy_label(struct task_struct *tsk) +{ + return; +} + +void +gr_set_pax_flags(struct task_struct *task) +{ + return; +} + +int +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return 0; +} + +void +gr_handle_delete(const ino_t ino, const dev_t dev) +{ + return; +} + +void +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return; +} + +void +gr_handle_crash(struct task_struct *task, const int sig) +{ + return; +} + +int +gr_check_crash_exec(const struct file *filp) +{ + return 0; +} + +int +gr_check_crash_uid(const uid_t uid) +{ + return 0; +} + +void +gr_handle_rename(struct inode *old_dir, struct inode *new_dir, + struct dentry *old_dentry, + struct dentry *new_dentry, + struct vfsmount *mnt, const __u8 replace) +{ + return; +} + +int +gr_search_socket(const int family, const int type, const int protocol) +{ + return 1; +} + +int +gr_search_connectbind(const int mode, const struct socket *sock, + const struct sockaddr_in *addr) +{ + return 1; +} + +int +gr_task_is_capable(struct task_struct *task, const int cap) +{ + return 1; +} + +int +gr_is_capable_nolog(const int cap) +{ + return 1; +} + +void +gr_handle_alertkill(struct task_struct *task) +{ + return; +} + +__u32 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_hidden_file(const struct dentry * dentry, + const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, + const int fmode) +{ + return 1; +} + +__u32 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +int +gr_acl_handle_mmap(const struct file *file, const unsigned long prot, + unsigned int *vm_flags) +{ + return 1; +} + +__u32 +gr_acl_handle_truncate(const struct dentry * dentry, + const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_access(const struct dentry * dentry, + const struct vfsmount * mnt, const int fmode) +{ + return 1; +} + +__u32 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt, + mode_t mode) +{ + return 1; +} + +__u32 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt, + mode_t mode) +{ + return 1; +} + +__u32 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +void +grsecurity_init(void) +{ + return; +} + +__u32 +gr_acl_handle_mknod(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const int mode) +{ + return 1; +} + +__u32 +gr_acl_handle_mkdir(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_symlink(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, const 
char *from) +{ + return 1; +} + +__u32 +gr_acl_handle_link(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const struct dentry * old_dentry, + const struct vfsmount * old_mnt, const char *to) +{ + return 1; +} + +int +gr_acl_handle_rename(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const struct dentry *old_dentry, + const struct inode *old_parent_inode, + const struct vfsmount *old_mnt, const char *newname) +{ + return 0; +} + +int +gr_acl_handle_filldir(const struct file *file, const char *name, + const int namelen, const ino_t ino) +{ + return 1; +} + +int +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const time_t shm_createtime, const uid_t cuid, const int shmid) +{ + return 1; +} + +int +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr) +{ + return 1; +} + +int +gr_search_accept(const struct socket *sock) +{ + return 1; +} + +int +gr_search_listen(const struct socket *sock) +{ + return 1; +} + +int +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr) +{ + return 1; +} + +__u32 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_creat(const struct dentry * dentry, + const struct dentry * p_dentry, + const struct vfsmount * p_mnt, const int fmode, + const int imode) +{ + return 1; +} + +void +gr_acl_handle_exit(void) +{ + return; +} + +int +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) +{ + return 1; +} + +void +gr_set_role_label(const uid_t uid, const gid_t gid) +{ + return; +} + +int +gr_acl_handle_procpidmem(const struct task_struct *task) +{ + return 0; +} + +int +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb) +{ + return 1; +} + +int +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr) +{ + return 1; +} + +void +gr_set_kernel_label(struct task_struct *task) +{ + return; +} + +int +gr_check_user_change(int real, int effective, int fs) +{ + return 0; +} + +int +gr_check_group_change(int real, int effective, int fs) +{ + return 0; +} + + +EXPORT_SYMBOL(gr_task_is_capable); +EXPORT_SYMBOL(gr_is_capable_nolog); +EXPORT_SYMBOL(gr_learn_resource); +EXPORT_SYMBOL(gr_set_kernel_label); +#ifdef CONFIG_SECURITY +EXPORT_SYMBOL(gr_check_user_change); +EXPORT_SYMBOL(gr_check_group_change); +#endif diff -urNp linux-2.6.22.1/grsecurity/grsec_exec.c linux-2.6.22.1/grsecurity/grsec_exec.c --- linux-2.6.22.1/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_exec.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,88 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_GRKERNSEC_EXECLOG +static char gr_exec_arg_buf[132]; +static DECLARE_MUTEX(gr_exec_arg_sem); +#endif + +int +gr_handle_nproc(void) +{ +#ifdef CONFIG_GRKERNSEC_EXECVE + if (grsec_enable_execve && current->user && + (atomic_read(¤t->user->processes) > + current->signal->rlim[RLIMIT_NPROC].rlim_cur) && + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) { + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG); + return -EAGAIN; + } +#endif + return 0; +} + +void +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *__user *argv) +{ +#ifdef CONFIG_GRKERNSEC_EXECLOG + char *grarg = gr_exec_arg_buf; + unsigned int i, x, execlen = 0; + char c; + + if (!((grsec_enable_execlog && 
grsec_enable_group && + in_group_p(grsec_audit_gid)) + || (grsec_enable_execlog && !grsec_enable_group))) + return; + + down(&gr_exec_arg_sem); + memset(grarg, 0, sizeof(gr_exec_arg_buf)); + + if (unlikely(argv == NULL)) + goto log; + + for (i = 0; i < bprm->argc && execlen < 128; i++) { + const char __user *p; + unsigned int len; + + if (copy_from_user(&p, argv + i, sizeof(p))) + goto log; + if (!p) + goto log; + len = strnlen_user(p, 128 - execlen); + if (len > 128 - execlen) + len = 128 - execlen; + else if (len > 0) + len--; + if (copy_from_user(grarg + execlen, p, len)) + goto log; + + /* rewrite unprintable characters */ + for (x = 0; x < len; x++) { + c = *(grarg + execlen + x); + if (c < 32 || c > 126) + *(grarg + execlen + x) = ' '; + } + + execlen += len; + *(grarg + execlen) = ' '; + *(grarg + execlen + 1) = '\0'; + execlen++; + } + + log: + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_dentry, + bprm->file->f_vfsmnt, grarg); + up(&gr_exec_arg_sem); +#endif + return; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_fifo.c linux-2.6.22.1/grsecurity/grsec_fifo.c --- linux-2.6.22.1/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_fifo.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,22 @@ +#include +#include +#include +#include +#include + +int +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt, + const struct dentry *dir, const int flag, const int acc_mode) +{ +#ifdef CONFIG_GRKERNSEC_FIFO + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) && + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) && + (dentry->d_inode->i_uid != dir->d_inode->i_uid) && + (current->fsuid != dentry->d_inode->i_uid)) { + if (!generic_permission(dentry->d_inode, acc_mode, NULL)) + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid); + return -EACCES; + } +#endif + return 0; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_fork.c linux-2.6.22.1/grsecurity/grsec_fork.c --- linux-2.6.22.1/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_fork.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,15 @@ +#include +#include +#include +#include +#include + +void +gr_log_forkfail(const int retval) +{ +#ifdef CONFIG_GRKERNSEC_FORKFAIL + if (grsec_enable_forkfail && retval != -ERESTARTNOINTR) + gr_log_int(GR_DONT_AUDIT, GR_FAILFORK_MSG, retval); +#endif + return; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_init.c linux-2.6.22.1/grsecurity/grsec_init.c --- linux-2.6.22.1/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_init.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,230 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +int grsec_enable_shm; +int grsec_enable_link; +int grsec_enable_dmesg; +int grsec_enable_fifo; +int grsec_enable_execve; +int grsec_enable_execlog; +int grsec_enable_signal; +int grsec_enable_forkfail; +int grsec_enable_time; +int grsec_enable_audit_textrel; +int grsec_enable_group; +int grsec_audit_gid; +int grsec_enable_chdir; +int grsec_enable_audit_ipc; +int grsec_enable_mount; +int grsec_enable_chroot_findtask; +int grsec_enable_chroot_mount; +int grsec_enable_chroot_shmat; +int grsec_enable_chroot_fchdir; +int grsec_enable_chroot_double; +int grsec_enable_chroot_pivot; +int grsec_enable_chroot_chdir; +int grsec_enable_chroot_chmod; +int grsec_enable_chroot_mknod; +int grsec_enable_chroot_nice; +int grsec_enable_chroot_execlog; 
+int grsec_enable_chroot_caps; +int grsec_enable_chroot_sysctl; +int grsec_enable_chroot_unix; +int grsec_enable_tpe; +int grsec_tpe_gid; +int grsec_enable_tpe_all; +int grsec_enable_socket_all; +int grsec_socket_all_gid; +int grsec_enable_socket_client; +int grsec_socket_client_gid; +int grsec_enable_socket_server; +int grsec_socket_server_gid; +int grsec_resource_logging; +int grsec_lock; + +spinlock_t grsec_alert_lock = SPIN_LOCK_UNLOCKED; +unsigned long grsec_alert_wtime = 0; +unsigned long grsec_alert_fyet = 0; + +spinlock_t grsec_audit_lock = SPIN_LOCK_UNLOCKED; + +rwlock_t grsec_exec_file_lock = RW_LOCK_UNLOCKED; + +char *gr_shared_page[4]; + +char *gr_alert_log_fmt; +char *gr_audit_log_fmt; +char *gr_alert_log_buf; +char *gr_audit_log_buf; + +extern struct gr_arg *gr_usermode; +extern unsigned char *gr_system_salt; +extern unsigned char *gr_system_sum; + +void +grsecurity_init(void) +{ + int j; + /* create the per-cpu shared pages */ + + for (j = 0; j < 4; j++) { + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE); + if (gr_shared_page[j] == NULL) { + panic("Unable to allocate grsecurity shared page"); + return; + } + } + + /* allocate log buffers */ + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL); + if (!gr_alert_log_fmt) { + panic("Unable to allocate grsecurity alert log format buffer"); + return; + } + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL); + if (!gr_audit_log_fmt) { + panic("Unable to allocate grsecurity audit log format buffer"); + return; + } + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL); + if (!gr_alert_log_buf) { + panic("Unable to allocate grsecurity alert log buffer"); + return; + } + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL); + if (!gr_audit_log_buf) { + panic("Unable to allocate grsecurity audit log buffer"); + return; + } + + /* allocate memory for authentication structure */ + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL); + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL); + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL); + + if (!gr_usermode || !gr_system_salt || !gr_system_sum) { + panic("Unable to allocate grsecurity authentication structure"); + return; + } + +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON) +#ifndef CONFIG_GRKERNSEC_SYSCTL + grsec_lock = 1; +#endif +#ifdef CONFIG_GRKERNSEC_SHM + grsec_enable_shm = 1; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL + grsec_enable_audit_textrel = 1; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP + grsec_enable_group = 1; + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR + grsec_enable_chdir = 1; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + grsec_enable_audit_ipc = 1; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + grsec_enable_mount = 1; +#endif +#ifdef CONFIG_GRKERNSEC_LINK + grsec_enable_link = 1; +#endif +#ifdef CONFIG_GRKERNSEC_DMESG + grsec_enable_dmesg = 1; +#endif +#ifdef CONFIG_GRKERNSEC_FIFO + grsec_enable_fifo = 1; +#endif +#ifdef CONFIG_GRKERNSEC_EXECVE + grsec_enable_execve = 1; +#endif +#ifdef CONFIG_GRKERNSEC_EXECLOG + grsec_enable_execlog = 1; +#endif +#ifdef CONFIG_GRKERNSEC_SIGNAL + grsec_enable_signal = 1; +#endif +#ifdef CONFIG_GRKERNSEC_FORKFAIL + grsec_enable_forkfail = 1; +#endif +#ifdef CONFIG_GRKERNSEC_TIME + grsec_enable_time = 1; +#endif +#ifdef CONFIG_GRKERNSEC_RESLOG + grsec_resource_logging = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK + grsec_enable_chroot_findtask = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX + grsec_enable_chroot_unix = 1; +#endif +#ifdef 
CONFIG_GRKERNSEC_CHROOT_MOUNT + grsec_enable_chroot_mount = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR + grsec_enable_chroot_fchdir = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT + grsec_enable_chroot_shmat = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE + grsec_enable_chroot_double = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT + grsec_enable_chroot_pivot = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR + grsec_enable_chroot_chdir = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD + grsec_enable_chroot_chmod = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD + grsec_enable_chroot_mknod = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE + grsec_enable_chroot_nice = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG + grsec_enable_chroot_execlog = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + grsec_enable_chroot_caps = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL + grsec_enable_chroot_sysctl = 1; +#endif +#ifdef CONFIG_GRKERNSEC_TPE + grsec_enable_tpe = 1; + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID; +#ifdef CONFIG_GRKERNSEC_TPE_ALL + grsec_enable_tpe_all = 1; +#endif +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL + grsec_enable_socket_all = 1; + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID; +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT + grsec_enable_socket_client = 1; + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID; +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER + grsec_enable_socket_server = 1; + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID; +#endif +#endif + + return; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_ipc.c linux-2.6.22.1/grsecurity/grsec_ipc.c --- linux-2.6.22.1/grsecurity/grsec_ipc.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_ipc.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,81 @@ +#include +#include +#include +#include +#include +#include + +void +gr_log_msgget(const int ret, const int msgflg) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + if (((grsec_enable_group && in_group_p(grsec_audit_gid) && + grsec_enable_audit_ipc) || (grsec_enable_audit_ipc && + !grsec_enable_group)) && (ret >= 0) + && (msgflg & IPC_CREAT)) + gr_log_noargs(GR_DO_AUDIT, GR_MSGQ_AUDIT_MSG); +#endif + return; +} + +void +gr_log_msgrm(const uid_t uid, const uid_t cuid) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + if ((grsec_enable_group && in_group_p(grsec_audit_gid) && + grsec_enable_audit_ipc) || + (grsec_enable_audit_ipc && !grsec_enable_group)) + gr_log_int_int(GR_DO_AUDIT, GR_MSGQR_AUDIT_MSG, uid, cuid); +#endif + return; +} + +void +gr_log_semget(const int err, const int semflg) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + if (((grsec_enable_group && in_group_p(grsec_audit_gid) && + grsec_enable_audit_ipc) || (grsec_enable_audit_ipc && + !grsec_enable_group)) && (err >= 0) + && (semflg & IPC_CREAT)) + gr_log_noargs(GR_DO_AUDIT, GR_SEM_AUDIT_MSG); +#endif + return; +} + +void +gr_log_semrm(const uid_t uid, const uid_t cuid) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + if ((grsec_enable_group && in_group_p(grsec_audit_gid) && + grsec_enable_audit_ipc) || + (grsec_enable_audit_ipc && !grsec_enable_group)) + gr_log_int_int(GR_DO_AUDIT, GR_SEMR_AUDIT_MSG, uid, cuid); +#endif + return; +} + +void +gr_log_shmget(const int err, const int shmflg, const size_t size) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + if (((grsec_enable_group && in_group_p(grsec_audit_gid) && + grsec_enable_audit_ipc) || (grsec_enable_audit_ipc && + !grsec_enable_group)) && (err >= 0) + && (shmflg & IPC_CREAT)) + gr_log_int(GR_DO_AUDIT, GR_SHM_AUDIT_MSG, 
size); +#endif + return; +} + +void +gr_log_shmrm(const uid_t uid, const uid_t cuid) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + if ((grsec_enable_group && in_group_p(grsec_audit_gid) && + grsec_enable_audit_ipc) || + (grsec_enable_audit_ipc && !grsec_enable_group)) + gr_log_int_int(GR_DO_AUDIT, GR_SHMR_AUDIT_MSG, uid, cuid); +#endif + return; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_link.c linux-2.6.22.1/grsecurity/grsec_link.c --- linux-2.6.22.1/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_link.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,39 @@ +#include +#include +#include +#include +#include + +int +gr_handle_follow_link(const struct inode *parent, + const struct inode *inode, + const struct dentry *dentry, const struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_LINK + if (grsec_enable_link && S_ISLNK(inode->i_mode) && + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) && + (parent->i_mode & S_IWOTH) && (current->fsuid != inode->i_uid)) { + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_hardlink(const struct dentry *dentry, + const struct vfsmount *mnt, + struct inode *inode, const int mode, const char *to) +{ +#ifdef CONFIG_GRKERNSEC_LINK + if (grsec_enable_link && current->fsuid != inode->i_uid && + (!S_ISREG(mode) || (mode & S_ISUID) || + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) || + (generic_permission(inode, MAY_READ | MAY_WRITE, NULL))) && + !capable(CAP_FOWNER) && current->uid) { + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to); + return -EPERM; + } +#endif + return 0; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_log.c linux-2.6.22.1/grsecurity/grsec_log.c --- linux-2.6.22.1/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_log.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,269 @@ +#include +#include +#include +#include +#include +#include + +#define BEGIN_LOCKS(x) \ + read_lock(&tasklist_lock); \ + read_lock(&grsec_exec_file_lock); \ + if (x != GR_DO_AUDIT) \ + spin_lock(&grsec_alert_lock); \ + else \ + spin_lock(&grsec_audit_lock) + +#define END_LOCKS(x) \ + if (x != GR_DO_AUDIT) \ + spin_unlock(&grsec_alert_lock); \ + else \ + spin_unlock(&grsec_audit_lock); \ + read_unlock(&grsec_exec_file_lock); \ + read_unlock(&tasklist_lock); \ + if (x == GR_DONT_AUDIT) \ + gr_handle_alertkill(current) + +enum { + FLOODING, + NO_FLOODING +}; + +extern char *gr_alert_log_fmt; +extern char *gr_audit_log_fmt; +extern char *gr_alert_log_buf; +extern char *gr_audit_log_buf; + +static int gr_log_start(int audit) +{ + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT; + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt; + char *buf = (audit == GR_DO_AUDIT) ? 
gr_audit_log_buf : gr_alert_log_buf; + + if (audit == GR_DO_AUDIT) + goto set_fmt; + + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) { + grsec_alert_wtime = jiffies; + grsec_alert_fyet = 0; + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) { + grsec_alert_fyet++; + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) { + grsec_alert_wtime = jiffies; + grsec_alert_fyet++; + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME); + return FLOODING; + } else return FLOODING; + +set_fmt: + memset(buf, 0, PAGE_SIZE); + if (current->signal->curr_ip && gr_acl_is_enabled()) { + sprintf(fmt, "%s%s", loglevel, "grsec: From %u.%u.%u.%u: (%.64s:%c:%.950s) "); + snprintf(buf, PAGE_SIZE - 1, fmt, NIPQUAD(current->signal->curr_ip), current->role->rolename, gr_roletype_to_char(), current->acl->filename); + } else if (current->signal->curr_ip) { + sprintf(fmt, "%s%s", loglevel, "grsec: From %u.%u.%u.%u: "); + snprintf(buf, PAGE_SIZE - 1, fmt, NIPQUAD(current->signal->curr_ip)); + } else if (gr_acl_is_enabled()) { + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) "); + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename); + } else { + sprintf(fmt, "%s%s", loglevel, "grsec: "); + strcpy(buf, fmt); + } + + return NO_FLOODING; +} + +static void gr_log_middle(int audit, const char *msg, va_list ap) +{ + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; + unsigned int len = strlen(buf); + + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap); + + return; +} + +static void gr_log_middle_varargs(int audit, const char *msg, ...) +{ + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; + unsigned int len = strlen(buf); + va_list ap; + + va_start(ap, msg); + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap); + va_end(ap); + + return; +} + +static void gr_log_end(int audit) +{ + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; + unsigned int len = strlen(buf); + + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current)); + printk("%s\n", buf); + + return; +} + +void gr_log_varargs(int audit, const char *msg, int argtypes, ...) +{ + int logtype; + char *result = (audit == GR_DO_AUDIT) ? 
"successful" : "denied"; + char *str1, *str2, *str3; + int num1, num2; + unsigned long ulong1, ulong2; + struct dentry *dentry; + struct vfsmount *mnt; + struct file *file; + struct task_struct *task; + va_list ap; + + BEGIN_LOCKS(audit); + logtype = gr_log_start(audit); + if (logtype == FLOODING) { + END_LOCKS(audit); + return; + } + va_start(ap, argtypes); + switch (argtypes) { + case GR_TTYSNIFF: + task = va_arg(ap, struct task_struct *); + gr_log_middle_varargs(audit, msg, NIPQUAD(task->signal->curr_ip), gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->parent->comm, task->parent->pid); + break; + case GR_SYSCTL_HIDDEN: + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, result, str1); + break; + case GR_RBAC: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt)); + break; + case GR_RBAC_STR: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1); + break; + case GR_STR_RBAC: + str1 = va_arg(ap, char *); + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt)); + break; + case GR_RBAC_MODE2: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + str1 = va_arg(ap, char *); + str2 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2); + break; + case GR_RBAC_MODE3: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + str1 = va_arg(ap, char *); + str2 = va_arg(ap, char *); + str3 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3); + break; + case GR_FILENAME: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt)); + break; + case GR_STR_FILENAME: + str1 = va_arg(ap, char *); + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt)); + break; + case GR_FILENAME_STR: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1); + break; + case GR_FILENAME_TWO_INT: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + num1 = va_arg(ap, int); + num2 = va_arg(ap, int); + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2); + break; + case GR_FILENAME_TWO_INT_STR: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + num1 = va_arg(ap, int); + num2 = va_arg(ap, int); + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1); + break; + case GR_TEXTREL: + file = va_arg(ap, struct file *); + ulong1 = va_arg(ap, unsigned long); + ulong2 = va_arg(ap, unsigned long); + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_dentry, file->f_vfsmnt) : "", ulong1, ulong2); + break; + case GR_PTRACE: + task = va_arg(ap, struct task_struct *); + gr_log_middle_varargs(audit, msg, task->exec_file ? 
gr_to_filename(task->exec_file->f_dentry, task->exec_file->f_vfsmnt) : "(none)", task->comm, task->pid); + break; + case GR_RESOURCE: + task = va_arg(ap, struct task_struct *); + ulong1 = va_arg(ap, unsigned long); + str1 = va_arg(ap, char *); + ulong2 = va_arg(ap, unsigned long); + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, task->parent->uid, task->parent->euid, task->parent->gid, task->parent->egid); + break; + case GR_CAP: + task = va_arg(ap, struct task_struct *); + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, task->parent->uid, task->parent->euid, task->parent->gid, task->parent->egid); + break; + case GR_SIG: + task = va_arg(ap, struct task_struct *); + num1 = va_arg(ap, int); + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath0(task), task->parent->comm, task->parent->pid, task->parent->uid, task->parent->euid, task->parent->gid, task->parent->egid); + break; + case GR_CRASH1: + task = va_arg(ap, struct task_struct *); + ulong1 = va_arg(ap, unsigned long); + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, task->parent->uid, task->parent->euid, task->parent->gid, task->parent->egid, task->uid, ulong1); + break; + case GR_CRASH2: + task = va_arg(ap, struct task_struct *); + ulong1 = va_arg(ap, unsigned long); + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, task->parent->uid, task->parent->euid, task->parent->gid, task->parent->egid, ulong1); + break; + case GR_PSACCT: + { + unsigned int wday, cday; + __u8 whr, chr; + __u8 wmin, cmin; + __u8 wsec, csec; + char cur_tty[64] = { 0 }; + char parent_tty[64] = { 0 }; + + task = va_arg(ap, struct task_struct *); + wday = va_arg(ap, unsigned int); + cday = va_arg(ap, unsigned int); + whr = va_arg(ap, int); + chr = va_arg(ap, int); + wmin = va_arg(ap, int); + cmin = va_arg(ap, int); + wsec = va_arg(ap, int); + csec = va_arg(ap, int); + ulong1 = va_arg(ap, unsigned long); + + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, NIPQUAD(task->signal->curr_ip), tty_name(task->signal->tty, cur_tty), task->uid, task->euid, task->gid, task->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? 
"killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, NIPQUAD(task->parent->signal->curr_ip), tty_name(task->parent->signal->tty, parent_tty), task->parent->uid, task->parent->euid, task->parent->gid, task->parent->egid); + } + break; + default: + gr_log_middle(audit, msg, ap); + } + va_end(ap); + gr_log_end(audit); + END_LOCKS(audit); +} diff -urNp linux-2.6.22.1/grsecurity/grsec_mem.c linux-2.6.22.1/grsecurity/grsec_mem.c --- linux-2.6.22.1/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_mem.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,71 @@ +#include +#include +#include +#include +#include + +void +gr_handle_ioperm(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG); + return; +} + +void +gr_handle_iopl(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG); + return; +} + +void +gr_handle_mem_write(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_MEM_WRITE_MSG); + return; +} + +void +gr_handle_kmem_write(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_KMEM_MSG); + return; +} + +void +gr_handle_open_port(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_PORT_OPEN_MSG); + return; +} + +int +gr_handle_mem_mmap(const unsigned long offset, struct vm_area_struct *vma) +{ + unsigned long start, end; + + start = offset; + end = start + vma->vm_end - vma->vm_start; + + if (start > end) { + gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG); + return -EPERM; + } + + /* allowed ranges : ISA I/O BIOS */ + if ((start >= __pa(high_memory)) +#ifdef CONFIG_X86 + || (start >= 0x000a0000 && end <= 0x00100000) + || (start >= 0x00000000 && end <= 0x00001000) +#endif + ) + return 0; + + if (vma->vm_flags & VM_WRITE) { + gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG); + return -EPERM; + } else + vma->vm_flags &= ~VM_MAYWRITE; + + return 0; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_mount.c linux-2.6.22.1/grsecurity/grsec_mount.c --- linux-2.6.22.1/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_mount.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,34 @@ +#include +#include +#include +#include + +void +gr_log_remount(const char *devname, const int retval) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + if (grsec_enable_mount && (retval >= 0)) + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none"); +#endif + return; +} + +void +gr_log_unmount(const char *devname, const int retval) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + if (grsec_enable_mount && (retval >= 0)) + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? 
devname : "none"); +#endif + return; +} + +void +gr_log_mount(const char *from, const char *to, const int retval) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + if (grsec_enable_mount && (retval >= 0)) + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from, to); +#endif + return; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_sig.c linux-2.6.22.1/grsecurity/grsec_sig.c --- linux-2.6.22.1/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_sig.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,59 @@ +#include +#include +#include +#include + +void +gr_log_signal(const int sig, const struct task_struct *t) +{ +#ifdef CONFIG_GRKERNSEC_SIGNAL + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) || + (sig == SIGABRT) || (sig == SIGBUS))) { + if (t->pid == current->pid) { + gr_log_int(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, sig); + } else { + gr_log_sig(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig); + } + } +#endif + return; +} + +int +gr_handle_signal(const struct task_struct *p, const int sig) +{ +#ifdef CONFIG_GRKERNSEC + if (current->pid > 1 && gr_check_protected_task(p)) { + gr_log_sig(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig); + return -EPERM; + } else if (gr_pid_is_chrooted((struct task_struct *)p)) { + return -EPERM; + } +#endif + return 0; +} + +void gr_handle_brute_attach(struct task_struct *p) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + if (p->parent && p->parent->exec_file == p->exec_file) + p->parent->brute = 1; + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); +#endif + return; +} + +void gr_handle_brute_check(void) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE + if (current->brute) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(30 * HZ); + } +#endif + return; +} + diff -urNp linux-2.6.22.1/grsecurity/grsec_sock.c linux-2.6.22.1/grsecurity/grsec_sock.c --- linux-2.6.22.1/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_sock.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,263 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_IP_NF_MATCH_STEALTH_MODULE) +extern struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif); +EXPORT_SYMBOL(udp_v4_lookup); +#endif + +EXPORT_SYMBOL(gr_cap_rtnetlink); + +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb); +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr); + +EXPORT_SYMBOL(gr_search_udp_recvmsg); +EXPORT_SYMBOL(gr_search_udp_sendmsg); + +#ifdef CONFIG_UNIX_MODULE +EXPORT_SYMBOL(gr_acl_handle_unix); +EXPORT_SYMBOL(gr_acl_handle_mknod); +EXPORT_SYMBOL(gr_handle_chroot_unix); +EXPORT_SYMBOL(gr_handle_create); +#endif + +#ifdef CONFIG_GRKERNSEC +#define gr_conn_table_size 32749 +struct conn_table_entry { + struct conn_table_entry *next; + struct signal_struct *sig; +}; + +struct conn_table_entry *gr_conn_table[gr_conn_table_size]; +spinlock_t gr_conn_table_lock = SPIN_LOCK_UNLOCKED; + +extern const char * gr_socktype_to_name(unsigned char type); +extern const char * gr_proto_to_name(unsigned char proto); + +static __inline__ int +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size) +{ + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size); +} + +static __inline__ int +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr, + __u16 sport, __u16 
dport) +{ + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr && + sig->gr_sport == sport && sig->gr_dport == dport)) + return 1; + else + return 0; +} + +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent) +{ + struct conn_table_entry **match; + unsigned int index; + + index = conn_hash(sig->gr_saddr, sig->gr_daddr, + sig->gr_sport, sig->gr_dport, + gr_conn_table_size); + + newent->sig = sig; + + match = &gr_conn_table[index]; + newent->next = *match; + *match = newent; + + return; +} + +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig) +{ + struct conn_table_entry *match, *last = NULL; + unsigned int index; + + index = conn_hash(sig->gr_saddr, sig->gr_daddr, + sig->gr_sport, sig->gr_dport, + gr_conn_table_size); + + match = gr_conn_table[index]; + while (match && !conn_match(match->sig, + sig->gr_saddr, sig->gr_daddr, sig->gr_sport, + sig->gr_dport)) { + last = match; + match = match->next; + } + + if (match) { + if (last) + last->next = match->next; + else + gr_conn_table[index] = match->next; + kfree(match); + } + + return; +} + +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr, + __u16 sport, __u16 dport) +{ + struct conn_table_entry *match; + unsigned int index; + + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size); + + match = gr_conn_table[index]; + while (match && !conn_match(match->sig, saddr, daddr, sport, dport)) + match = match->next; + + if (match) + return match->sig; + else + return NULL; +} + +#endif + +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet) +{ +#ifdef CONFIG_GRKERNSEC + struct signal_struct *sig = task->signal; + struct conn_table_entry *newent; + + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC); + if (newent == NULL) + return; + /* no bh lock needed since we are called with bh disabled */ + spin_lock(&gr_conn_table_lock); + gr_del_task_from_ip_table_nolock(sig); + sig->gr_saddr = inet->rcv_saddr; + sig->gr_daddr = inet->daddr; + sig->gr_sport = inet->sport; + sig->gr_dport = inet->dport; + gr_add_to_task_ip_table_nolock(sig, newent); + spin_unlock(&gr_conn_table_lock); +#endif + return; +} + +void gr_del_task_from_ip_table(struct task_struct *task) +{ +#ifdef CONFIG_GRKERNSEC + spin_lock(&gr_conn_table_lock); + gr_del_task_from_ip_table_nolock(task->signal); + spin_unlock(&gr_conn_table_lock); +#endif + return; +} + +void +gr_attach_curr_ip(const struct sock *sk) +{ +#ifdef CONFIG_GRKERNSEC + struct signal_struct *p, *set; + const struct inet_sock *inet = inet_sk(sk); + + if (unlikely(sk->sk_protocol != IPPROTO_TCP)) + return; + + set = current->signal; + + spin_lock_bh(&gr_conn_table_lock); + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr, + inet->dport, inet->sport); + if (unlikely(p != NULL)) { + set->curr_ip = p->curr_ip; + set->used_accept = 1; + gr_del_task_from_ip_table_nolock(p); + spin_unlock_bh(&gr_conn_table_lock); + return; + } + spin_unlock_bh(&gr_conn_table_lock); + + set->curr_ip = inet->daddr; + set->used_accept = 1; +#endif + return; +} + +int +gr_handle_sock_all(const int family, const int type, const int protocol) +{ +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) && + (family != AF_UNIX) && (family != AF_LOCAL)) { + gr_log_int_str2(GR_DONT_AUDIT, GR_SOCK2_MSG, family, gr_socktype_to_name(type), gr_proto_to_name(protocol)); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_sock_server(const
struct sockaddr *sck) +{ +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER + if (grsec_enable_socket_server && + in_group_p(grsec_socket_server_gid) && + sck && (sck->sa_family != AF_UNIX) && + (sck->sa_family != AF_LOCAL)) { + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_sock_server_other(const struct sock *sck) +{ +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER + if (grsec_enable_socket_server && + in_group_p(grsec_socket_server_gid) && + sck && (sck->sk_family != AF_UNIX) && + (sck->sk_family != AF_LOCAL)) { + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_sock_client(const struct sockaddr *sck) +{ +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) && + sck && (sck->sa_family != AF_UNIX) && + (sck->sa_family != AF_LOCAL)) { + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG); + return -EACCES; + } +#endif + return 0; +} + +__u32 +gr_cap_rtnetlink(void) +{ +#ifdef CONFIG_GRKERNSEC + if (!gr_acl_is_enabled()) + return current->cap_effective; + else if (cap_raised(current->cap_effective, CAP_NET_ADMIN) && + gr_task_is_capable(current, CAP_NET_ADMIN)) + return current->cap_effective; + else + return 0; +#else + return current->cap_effective; +#endif +} diff -urNp linux-2.6.22.1/grsecurity/grsec_sysctl.c linux-2.6.22.1/grsecurity/grsec_sysctl.c --- linux-2.6.22.1/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_sysctl.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,456 @@ +#include +#include +#include +#include +#include + +#ifdef CONFIG_GRKERNSEC_MODSTOP +int grsec_modstop; +#endif + +int +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op) +{ +#ifdef CONFIG_GRKERNSEC_SYSCTL + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & 002)) { + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name); + return -EACCES; + } +#endif +#ifdef CONFIG_GRKERNSEC_MODSTOP + if (!strcmp(dirname, "grsecurity") && !strcmp(name, "disable_modules") && + grsec_modstop && (op & 002)) { + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name); + return -EACCES; + } +#endif + return 0; +} + +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_MODSTOP) +enum {GS_LINK=1, GS_FIFO, GS_EXECVE, GS_EXECLOG, GS_SIGNAL, +GS_FORKFAIL, GS_TIME, GS_CHROOT_SHMAT, GS_CHROOT_UNIX, GS_CHROOT_MNT, +GS_CHROOT_FCHDIR, GS_CHROOT_DBL, GS_CHROOT_PVT, GS_CHROOT_CD, GS_CHROOT_CM, +GS_CHROOT_MK, GS_CHROOT_NI, GS_CHROOT_EXECLOG, GS_CHROOT_CAPS, +GS_CHROOT_SYSCTL, GS_TPE, GS_TPE_GID, GS_TPE_ALL, GS_SIDCAPS, +GS_SOCKET_ALL, GS_SOCKET_ALL_GID, GS_SOCKET_CLIENT, +GS_SOCKET_CLIENT_GID, GS_SOCKET_SERVER, GS_SOCKET_SERVER_GID, +GS_GROUP, GS_GID, GS_ACHDIR, GS_AMOUNT, GS_AIPC, GS_DMSG, +GS_TEXTREL, GS_FINDTASK, GS_SHM, GS_LOCK, GS_MODSTOP, GS_RESLOG}; + + +ctl_table grsecurity_table[] = { +#ifdef CONFIG_GRKERNSEC_SYSCTL +#ifdef CONFIG_GRKERNSEC_LINK + { + .ctl_name = GS_LINK, + .procname = "linking_restrictions", + .data = &grsec_enable_link, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_FIFO + { + .ctl_name = GS_FIFO, + .procname = "fifo_restrictions", + .data = &grsec_enable_fifo, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_EXECVE + { + .ctl_name = GS_EXECVE, + .procname = "execve_limiting", + .data = &grsec_enable_execve, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = 
&proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_EXECLOG + { + .ctl_name = GS_EXECLOG, + .procname = "exec_logging", + .data = &grsec_enable_execlog, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_SIGNAL + { + .ctl_name = GS_SIGNAL, + .procname = "signal_logging", + .data = &grsec_enable_signal, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_FORKFAIL + { + .ctl_name = GS_FORKFAIL, + .procname = "forkfail_logging", + .data = &grsec_enable_forkfail, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_TIME + { + .ctl_name = GS_TIME, + .procname = "timechange_logging", + .data = &grsec_enable_time, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT + { + .ctl_name = GS_CHROOT_SHMAT, + .procname = "chroot_deny_shmat", + .data = &grsec_enable_chroot_shmat, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX + { + .ctl_name = GS_CHROOT_UNIX, + .procname = "chroot_deny_unix", + .data = &grsec_enable_chroot_unix, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT + { + .ctl_name = GS_CHROOT_MNT, + .procname = "chroot_deny_mount", + .data = &grsec_enable_chroot_mount, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR + { + .ctl_name = GS_CHROOT_FCHDIR, + .procname = "chroot_deny_fchdir", + .data = &grsec_enable_chroot_fchdir, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE + { + .ctl_name = GS_CHROOT_DBL, + .procname = "chroot_deny_chroot", + .data = &grsec_enable_chroot_double, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT + { + .ctl_name = GS_CHROOT_PVT, + .procname = "chroot_deny_pivot", + .data = &grsec_enable_chroot_pivot, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR + { + .ctl_name = GS_CHROOT_CD, + .procname = "chroot_enforce_chdir", + .data = &grsec_enable_chroot_chdir, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD + { + .ctl_name = GS_CHROOT_CM, + .procname = "chroot_deny_chmod", + .data = &grsec_enable_chroot_chmod, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD + { + .ctl_name = GS_CHROOT_MK, + .procname = "chroot_deny_mknod", + .data = &grsec_enable_chroot_mknod, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE + { + .ctl_name = GS_CHROOT_NI, + .procname = "chroot_restrict_nice", + .data = &grsec_enable_chroot_nice, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG + { + .ctl_name = GS_CHROOT_EXECLOG, + .procname = "chroot_execlog", + .data = &grsec_enable_chroot_execlog, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + { + .ctl_name = GS_CHROOT_CAPS, + .procname = "chroot_caps", 
+ .data = &grsec_enable_chroot_caps, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL + { + .ctl_name = GS_CHROOT_SYSCTL, + .procname = "chroot_deny_sysctl", + .data = &grsec_enable_chroot_sysctl, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_TPE + { + .ctl_name = GS_TPE, + .procname = "tpe", + .data = &grsec_enable_tpe, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = GS_TPE_GID, + .procname = "tpe_gid", + .data = &grsec_tpe_gid, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_TPE_ALL + { + .ctl_name = GS_TPE_ALL, + .procname = "tpe_restrict_all", + .data = &grsec_enable_tpe_all, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL + { + .ctl_name = GS_SOCKET_ALL, + .procname = "socket_all", + .data = &grsec_enable_socket_all, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = GS_SOCKET_ALL_GID, + .procname = "socket_all_gid", + .data = &grsec_socket_all_gid, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT + { + .ctl_name = GS_SOCKET_CLIENT, + .procname = "socket_client", + .data = &grsec_enable_socket_client, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = GS_SOCKET_CLIENT_GID, + .procname = "socket_client_gid", + .data = &grsec_socket_client_gid, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER + { + .ctl_name = GS_SOCKET_SERVER, + .procname = "socket_server", + .data = &grsec_enable_socket_server, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = GS_SOCKET_SERVER_GID, + .procname = "socket_server_gid", + .data = &grsec_socket_server_gid, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP + { + .ctl_name = GS_GROUP, + .procname = "audit_group", + .data = &grsec_enable_group, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = GS_GID, + .procname = "audit_gid", + .data = &grsec_audit_gid, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR + { + .ctl_name = GS_ACHDIR, + .procname = "audit_chdir", + .data = &grsec_enable_chdir, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + { + .ctl_name = GS_AMOUNT, + .procname = "audit_mount", + .data = &grsec_enable_mount, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + { + .ctl_name = GS_AIPC, + .procname = "audit_ipc", + .data = &grsec_enable_audit_ipc, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL + { + .ctl_name = GS_TEXTREL, + .procname = "audit_textrel", + .data = &grsec_enable_audit_textrel, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_DMESG + { + .ctl_name = GS_DMSG, + .procname = "dmesg", + .data = &grsec_enable_dmesg, + .maxlen = 
sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK + { + .ctl_name = GS_FINDTASK, + .procname = "chroot_findtask", + .data = &grsec_enable_chroot_findtask, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_SHM + { + .ctl_name = GS_SHM, + .procname = "destroy_unused_shm", + .data = &grsec_enable_shm, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_RESLOG + { + .ctl_name = GS_RESLOG, + .procname = "resource_logging", + .data = &grsec_resource_logging, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif + { + .ctl_name = GS_LOCK, + .procname = "grsec_lock", + .data = &grsec_lock, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif +#ifdef CONFIG_GRKERNSEC_MODSTOP + { + .ctl_name = GS_MODSTOP, + .procname = "disable_modules", + .data = &grsec_modstop, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, +#endif + { .ctl_name = 0 } +}; +#endif + +int gr_check_modstop(void) +{ +#ifdef CONFIG_GRKERNSEC_MODSTOP + if (grsec_modstop == 1) { + gr_log_noargs(GR_DONT_AUDIT, GR_STOPMOD_MSG); + return 1; + } +#endif + return 0; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_textrel.c linux-2.6.22.1/grsecurity/grsec_textrel.c --- linux-2.6.22.1/grsecurity/grsec_textrel.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_textrel.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,16 @@ +#include +#include +#include +#include +#include +#include + +void +gr_log_textrel(struct vm_area_struct * vma) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL + if (grsec_enable_audit_textrel) + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff); +#endif + return; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_time.c linux-2.6.22.1/grsecurity/grsec_time.c --- linux-2.6.22.1/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_time.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,13 @@ +#include +#include +#include + +void +gr_log_timechange(void) +{ +#ifdef CONFIG_GRKERNSEC_TIME + if (grsec_enable_time) + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG); +#endif + return; +} diff -urNp linux-2.6.22.1/grsecurity/grsec_tpe.c linux-2.6.22.1/grsecurity/grsec_tpe.c --- linux-2.6.22.1/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsec_tpe.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,37 @@ +#include +#include +#include +#include +#include + +extern int gr_acl_tpe_check(void); + +int +gr_tpe_allow(const struct file *file) +{ +#ifdef CONFIG_GRKERNSEC + struct inode *inode = file->f_dentry->d_parent->d_inode; + + if (current->uid && ((grsec_enable_tpe && +#ifdef CONFIG_GRKERNSEC_TPE_INVERT + !in_group_p(grsec_tpe_gid) +#else + in_group_p(grsec_tpe_gid) +#endif + ) || gr_acl_tpe_check()) && + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) || + (inode->i_mode & S_IWOTH))))) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_dentry, file->f_vfsmnt); + return 0; + } +#ifdef CONFIG_GRKERNSEC_TPE_ALL + if (current->uid && grsec_enable_tpe && grsec_enable_tpe_all && + ((inode->i_uid && (inode->i_uid != current->uid)) || + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_dentry, file->f_vfsmnt); + return 0; 
+ } +#endif +#endif + return 1; +} diff -urNp linux-2.6.22.1/grsecurity/grsum.c linux-2.6.22.1/grsecurity/grsum.c --- linux-2.6.22.1/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/grsum.c 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,58 @@ +#include +#include +#include +#include +#include +#include + + +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE) +#error "crypto and sha256 must be built into the kernel" +#endif + +int +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum) +{ + char *p; + struct crypto_hash *tfm; + struct hash_desc desc; + struct scatterlist sg; + unsigned char temp_sum[GR_SHA_LEN]; + volatile int retval = 0; + volatile int dummy = 0; + unsigned int i; + + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + /* should never happen, since sha256 should be built in */ + return 1; + } + + desc.tfm = tfm; + desc.flags = 0; + + crypto_hash_init(&desc); + + p = salt; + sg_set_buf(&sg, p, GR_SALT_LEN); + crypto_hash_update(&desc, &sg, sg.length); + + p = entry->pw; + sg_set_buf(&sg, p, strlen(p)); + + crypto_hash_update(&desc, &sg, sg.length); + + crypto_hash_final(&desc, temp_sum); + + memset(entry->pw, 0, GR_PW_LEN); + + for (i = 0; i < GR_SHA_LEN; i++) + if (sum[i] != temp_sum[i]) + retval = 1; + else + dummy = 1; // waste a cycle + + crypto_free_hash(tfm); + + return retval; +} diff -urNp linux-2.6.22.1/grsecurity/Kconfig linux-2.6.22.1/grsecurity/Kconfig --- linux-2.6.22.1/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/Kconfig 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,873 @@ +# +# grecurity configuration +# + +menu "Grsecurity" + +config GRKERNSEC + bool "Grsecurity" + select CRYPTO + select CRYPTO_SHA256 + help + If you say Y here, you will be able to configure many features + that will enhance the security of your system. It is highly + recommended that you say Y here and read through the help + for each option so that you fully understand the features and + can evaluate their usefulness for your machine. + +choice + prompt "Security Level" + depends GRKERNSEC + default GRKERNSEC_CUSTOM + +config GRKERNSEC_LOW + bool "Low" + select GRKERNSEC_LINK + select GRKERNSEC_FIFO + select GRKERNSEC_EXECVE + select GRKERNSEC_RANDNET + select GRKERNSEC_DMESG + select GRKERNSEC_CHROOT_CHDIR + select GRKERNSEC_MODSTOP if (MODULES) + + help + If you choose this option, several of the grsecurity options will + be enabled that will give you greater protection against a number + of attacks, while assuring that none of your software will have any + conflicts with the additional security measures. If you run a lot + of unusual software, or you are having problems with the higher + security levels, you should say Y here. 
With this option, the + following features are enabled: + + - Linking restrictions + - FIFO restrictions + - Enforcing RLIMIT_NPROC on execve + - Restricted dmesg + - Enforced chdir("/") on chroot + - Runtime module disabling + +config GRKERNSEC_MEDIUM + bool "Medium" + select PAX + select PAX_EI_PAX + select PAX_PT_PAX_FLAGS + select PAX_HAVE_ACL_FLAGS + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) + select GRKERNSEC_CHROOT_SYSCTL + select GRKERNSEC_LINK + select GRKERNSEC_FIFO + select GRKERNSEC_EXECVE + select GRKERNSEC_DMESG + select GRKERNSEC_RANDNET + select GRKERNSEC_FORKFAIL + select GRKERNSEC_TIME + select GRKERNSEC_SIGNAL + select GRKERNSEC_CHROOT + select GRKERNSEC_CHROOT_UNIX + select GRKERNSEC_CHROOT_MOUNT + select GRKERNSEC_CHROOT_PIVOT + select GRKERNSEC_CHROOT_DOUBLE + select GRKERNSEC_CHROOT_CHDIR + select GRKERNSEC_CHROOT_MKNOD + select GRKERNSEC_PROC + select GRKERNSEC_PROC_USERGROUP + select GRKERNSEC_MODSTOP if (MODULES) + select PAX_RANDUSTACK + select PAX_ASLR + select PAX_RANDMMAP + + help + If you say Y here, several features in addition to those included + in the low additional security level will be enabled. These + features provide even more security to your system, though in rare + cases they may be incompatible with very old or poorly written + software. If you enable this option, make sure that your auth + service (identd) is running as gid 1001. With this option, + the following features (in addition to those provided in the + low additional security level) will be enabled: + + - Randomized TCP source ports + - Failed fork logging + - Time change logging + - Signal logging + - Deny mounts in chroot + - Deny double chrooting + - Deny sysctl writes in chroot + - Deny mknod in chroot + - Deny access to abstract AF_UNIX sockets out of chroot + - Deny pivot_root in chroot + - Denied writes of /dev/kmem, /dev/mem, and /dev/port + - /proc restrictions with special GID set to 10 (usually wheel) + - Address Space Layout Randomization (ASLR) + +config GRKERNSEC_HIGH + bool "High" + select GRKERNSEC_LINK + select GRKERNSEC_FIFO + select GRKERNSEC_EXECVE + select GRKERNSEC_DMESG + select GRKERNSEC_FORKFAIL + select GRKERNSEC_TIME + select GRKERNSEC_SIGNAL + select GRKERNSEC_CHROOT_SHMAT + select GRKERNSEC_CHROOT_UNIX + select GRKERNSEC_CHROOT_MOUNT + select GRKERNSEC_CHROOT_FCHDIR + select GRKERNSEC_CHROOT_PIVOT + select GRKERNSEC_CHROOT_DOUBLE + select GRKERNSEC_CHROOT_CHDIR + select GRKERNSEC_CHROOT_MKNOD + select GRKERNSEC_CHROOT_CAPS + select GRKERNSEC_CHROOT_SYSCTL + select GRKERNSEC_CHROOT_FINDTASK + select GRKERNSEC_PROC + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) + select GRKERNSEC_HIDESYM + select GRKERNSEC_BRUTE + select GRKERNSEC_SHM if (SYSVIPC) + select GRKERNSEC_PROC_USERGROUP + select GRKERNSEC_KMEM + select GRKERNSEC_RESLOG + select GRKERNSEC_RANDNET + select GRKERNSEC_PROC_ADD + select GRKERNSEC_CHROOT_CHMOD + select GRKERNSEC_CHROOT_NICE + select GRKERNSEC_AUDIT_MOUNT + select GRKERNSEC_MODSTOP if (MODULES) + select PAX + select PAX_RANDUSTACK + select PAX_ASLR + select PAX_RANDMMAP + select PAX_NOEXEC + select PAX_MPROTECT + select PAX_EI_PAX + select PAX_PT_PAX_FLAGS + select PAX_HAVE_ACL_FLAGS + select PAX_KERNEXEC if (!X86_64 && !EFI && !COMPAT_VDSO && !PARAVIRT && X86_WP_WORKS_OK) + select PAX_MEMORY_UDEREF if (!X86_64 && !COMPAT_VDSO) + select PAX_RANDKSTACK if (X86_TSC && !X86_64) + select PAX_SEGMEXEC if (X86 && !X86_64) + select PAX_PAGEEXEC if (!X86) + select PAX_EMUPLT if (ALPHA || PARISC || PPC32 || SPARC32 || 
SPARC64) + select PAX_DLRESOLVE if (SPARC32 || SPARC64) + select PAX_SYSCALL if (PPC32) + select PAX_EMUTRAMP if (PARISC) + select PAX_EMUSIGRT if (PARISC) + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC) + help + If you say Y here, many of the features of grsecurity will be + enabled, which will protect you against many kinds of attacks + against your system. The heightened security comes at a cost + of an increased chance of incompatibilities with rare software + on your machine. Since this security level enables PaX, you should + view and read about the PaX + project. While you are there, download chpax and run it on + binaries that cause problems with PaX. Also remember that + since the /proc restrictions are enabled, you must run your + identd as gid 1001. This security level enables the following + features in addition to those listed in the low and medium + security levels: + + - Additional /proc restrictions + - Chmod restrictions in chroot + - No signals, ptrace, or viewing of processes outside of chroot + - Capability restrictions in chroot + - Deny fchdir out of chroot + - Priority restrictions in chroot + - Segmentation-based implementation of PaX + - Mprotect restrictions + - Removal of addresses from /proc//[smaps|maps|stat] + - Kernel stack randomization + - Mount/unmount/remount logging + - Kernel symbol hiding + - Destroy unused shared memory + - Prevention of memory exhaustion-based exploits +config GRKERNSEC_CUSTOM + bool "Custom" + help + If you say Y here, you will be able to configure every grsecurity + option, which allows you to enable many more features that aren't + covered in the basic security levels. These additional features + include TPE, socket restrictions, and the sysctl system for + grsecurity. It is advised that you read through the help for + each option to determine its usefulness in your situation. + +endchoice + +menu "Address Space Protection" +depends on GRKERNSEC + +config GRKERNSEC_KMEM + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port" + help + If you say Y here, /dev/kmem and /dev/mem won't be allowed to + be written to via mmap or otherwise to modify the running kernel. + /dev/port will also not be allowed to be opened. If you have module + support disabled, enabling this will close up four ways that are + currently used to insert malicious code into the running kernel. + Even with all these features enabled, we still highly recommend that + you use the RBAC system, as it is still possible for an attacker to + modify the running kernel through privileged I/O granted by ioperm/iopl. + If you are not using XFree86, you may be able to stop this additional + case by enabling the 'Disable privileged I/O' option. Though nothing + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem, + but only to video memory, which is the only writing we allow in this + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will + not be allowed to mprotect it with PROT_WRITE later. + It is highly recommended that you say Y here if you meet all the + conditions above. + +config GRKERNSEC_IO + bool "Disable privileged I/O" + depends on X86 + select RTC + help + If you say Y here, all ioperm and iopl calls will return an error. + Ioperm and iopl can be used to modify the running kernel. + Unfortunately, some programs need this access to operate properly, + the most notable of which are XFree86 and hwclock. 
hwclock can be + remedied by having RTC support in the kernel, so CONFIG_RTC is + enabled if this option is enabled, to ensure that hwclock operates + correctly. XFree86 still will not operate correctly with this option + enabled, so DO NOT CHOOSE Y IF YOU USE XFree86. If you use XFree86 + and you still want to protect your kernel against modification, + use the RBAC system. + +config GRKERNSEC_PROC_MEMMAP + bool "Remove addresses from /proc//[smaps|maps|stat]" + depends on PAX_NOEXEC || PAX_ASLR + help + If you say Y here, the /proc//maps and /proc//stat files will + give no information about the addresses of its mappings if + PaX features that rely on random addresses are enabled on the task. + If you use PaX it is greatly recommended that you say Y here as it + closes up a hole that makes the full ASLR useless for suid + binaries. + +config GRKERNSEC_BRUTE + bool "Deter exploit bruteforcing" + help + If you say Y here, attempts to bruteforce exploits against forking + daemons such as apache or sshd will be deterred. When a child of a + forking daemon is killed by PaX or crashes due to an illegal + instruction, the parent process will be delayed 30 seconds upon every + subsequent fork until the administrator is able to assess the + situation and restart the daemon. It is recommended that you also + enable signal logging in the auditing section so that logs are + generated when a process performs an illegal instruction. + +config GRKERNSEC_MODSTOP + bool "Runtime module disabling" + depends on MODULES + help + If you say Y here, you will be able to disable the ability to (un)load + modules at runtime. This feature is useful if you need the ability + to load kernel modules at boot time, but do not want to allow an + attacker to load a rootkit kernel module into the system, or to remove + a loaded kernel module important to system functioning. You should + enable the /dev/mem protection feature as well, since rootkits can be + inserted into the kernel via other methods than kernel modules. Since + an untrusted module could still be loaded by modifying init scripts and + rebooting the system, it is also recommended that you enable the RBAC + system. If you enable this option, a sysctl option with name + "disable_modules" will be created. Setting this option to "1" disables + module loading. After this option is set, no further writes to it are + allowed until the system is rebooted. + +config GRKERNSEC_HIDESYM + bool "Hide kernel symbols" + help + If you say Y here, getting information on loaded modules, and + displaying all kernel symbols through a syscall will be restricted + to users with CAP_SYS_MODULE. This option is only effective + provided the following conditions are met: + 1) The kernel using grsecurity is not precompiled by some distribution + 2) You are using the RBAC system and hiding other files such as your + kernel image and System.map + 3) You have the additional /proc restrictions enabled, which removes + /proc/kcore + If the above conditions are met, this option will aid to provide a + useful protection against local and remote kernel exploitation of + overflows and arbitrary read/write vulnerabilities. + +endmenu +menu "Role Based Access Control Options" +depends on GRKERNSEC + +config GRKERNSEC_ACL_HIDEKERN + bool "Hide kernel processes" + help + If you say Y here, all kernel threads will be hidden to all + processes but those whose subject has the "view hidden processes" + flag. 
+ +config GRKERNSEC_ACL_MAXTRIES + int "Maximum tries before password lockout" + default 3 + help + This option enforces the maximum number of times a user can attempt + to authorize themselves with the grsecurity RBAC system before being + denied the ability to attempt authorization again for a specified time. + The lower the number, the harder it will be to brute-force a password. + +config GRKERNSEC_ACL_TIMEOUT + int "Time to wait after max password tries, in seconds" + default 30 + help + This option specifies the time the user must wait after attempting to + authorize to the RBAC system with the maximum number of invalid + passwords. The higher the number, the harder it will be to brute-force + a password. + +endmenu +menu "Filesystem Protections" +depends on GRKERNSEC + +config GRKERNSEC_PROC + bool "Proc restrictions" + help + If you say Y here, the permissions of the /proc filesystem + will be altered to enhance system security and privacy. You MUST + choose either a user only restriction or a user and group restriction. + Depending upon the option you choose, you can either restrict users to + see only the processes they themselves run, or choose a group that can + view all processes and files normally restricted to root if you choose + the "restrict to user only" option. NOTE: If you're running identd as + a non-root user, you will have to run it as the group you specify here. + +config GRKERNSEC_PROC_USER + bool "Restrict /proc to user only" + depends on GRKERNSEC_PROC + help + If you say Y here, non-root users will only be able to view their own + processes, and restricts them from viewing network-related information, + and viewing kernel symbol and module information. + +config GRKERNSEC_PROC_USERGROUP + bool "Allow special group" + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER + help + If you say Y here, you will be able to select a group that will be + able to view all processes, network-related information, and + kernel and symbol information. This option is useful if you want + to run identd as a non-root user. + +config GRKERNSEC_PROC_GID + int "GID for special group" + depends on GRKERNSEC_PROC_USERGROUP + default 1001 + +config GRKERNSEC_PROC_ADD + bool "Additional restrictions" + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP + help + If you say Y here, additional restrictions will be placed on + /proc that keep normal users from viewing device information and + slabinfo information that could be useful for exploits. + +config GRKERNSEC_LINK + bool "Linking restrictions" + help + If you say Y here, /tmp race exploits will be prevented, since users + will no longer be able to follow symlinks owned by other users in + world-writable +t directories (i.e. /tmp), unless the owner of the + symlink is the owner of the directory. users will also not be + able to hardlink to files they do not own. If the sysctl option is + enabled, a sysctl option with name "linking_restrictions" is created. + +config GRKERNSEC_FIFO + bool "FIFO restrictions" + help + If you say Y here, users will not be able to write to FIFOs they don't + own in world-writable +t directories (i.e. /tmp), unless the owner of + the FIFO is the same owner of the directory it's held in. If the sysctl + option is enabled, a sysctl option with name "fifo_restrictions" is + created. + +config GRKERNSEC_CHROOT + bool "Chroot jail restrictions" + help + If you say Y here, you will be able to choose several options that will + make breaking out of a chrooted jail much more difficult. 
If you + encounter no software incompatibilities with the following options, it + is recommended that you enable each one. + +config GRKERNSEC_CHROOT_MOUNT + bool "Deny mounts" + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to + mount or remount filesystems. If the sysctl option is enabled, a + sysctl option with name "chroot_deny_mount" is created. + +config GRKERNSEC_CHROOT_DOUBLE + bool "Deny double-chroots" + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to chroot + again outside the chroot. This is a widely used method of breaking + out of a chroot jail and should not be allowed. If the sysctl + option is enabled, a sysctl option with name + "chroot_deny_chroot" is created. + +config GRKERNSEC_CHROOT_PIVOT + bool "Deny pivot_root in chroot" + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to use + a function called pivot_root() that was introduced in Linux 2.3.41. It + works similar to chroot in that it changes the root filesystem. This + function could be misused in a chrooted process to attempt to break out + of the chroot, and therefore should not be allowed. If the sysctl + option is enabled, a sysctl option with name "chroot_deny_pivot" is + created. + +config GRKERNSEC_CHROOT_CHDIR + bool "Enforce chdir(\"/\") on all chroots" + depends on GRKERNSEC_CHROOT + help + If you say Y here, the current working directory of all newly-chrooted + applications will be set to the the root directory of the chroot. + The man page on chroot(2) states: + Note that this call does not change the current working + directory, so that `.' can be outside the tree rooted at + `/'. In particular, the super-user can escape from a + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'. + + It is recommended that you say Y here, since it's not known to break + any software. If the sysctl option is enabled, a sysctl option with + name "chroot_enforce_chdir" is created. + +config GRKERNSEC_CHROOT_CHMOD + bool "Deny (f)chmod +s" + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to chmod + or fchmod files to make them have suid or sgid bits. This protects + against another published method of breaking a chroot. If the sysctl + option is enabled, a sysctl option with name "chroot_deny_chmod" is + created. + +config GRKERNSEC_CHROOT_FCHDIR + bool "Deny fchdir out of chroot" + depends on GRKERNSEC_CHROOT + help + If you say Y here, a well-known method of breaking chroots by fchdir'ing + to a file descriptor of the chrooting process that points to a directory + outside the filesystem will be stopped. If the sysctl option + is enabled, a sysctl option with name "chroot_deny_fchdir" is created. + +config GRKERNSEC_CHROOT_MKNOD + bool "Deny mknod" + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be allowed to + mknod. The problem with using mknod inside a chroot is that it + would allow an attacker to create a device entry that is the same + as one on the physical root of your system, which could range from + anything from the console device to a device for your harddrive (which + they could then use to wipe the drive or steal data). It is recommended + that you say Y here, unless you run into software incompatibilities. + If the sysctl option is enabled, a sysctl option with name + "chroot_deny_mknod" is created. 
+ +config GRKERNSEC_CHROOT_SHMAT + bool "Deny shmat() out of chroot" + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to attach + to shared memory segments that were created outside of the chroot jail. + It is recommended that you say Y here. If the sysctl option is enabled, + a sysctl option with name "chroot_deny_shmat" is created. + +config GRKERNSEC_CHROOT_UNIX + bool "Deny access to abstract AF_UNIX sockets out of chroot" + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to + connect to abstract (meaning not belonging to a filesystem) Unix + domain sockets that were bound outside of a chroot. It is recommended + that you say Y here. If the sysctl option is enabled, a sysctl option + with name "chroot_deny_unix" is created. + +config GRKERNSEC_CHROOT_FINDTASK + bool "Protect outside processes" + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to + kill, send signals with fcntl, ptrace, capget, getpgid, getsid, + or view any process outside of the chroot. If the sysctl + option is enabled, a sysctl option with name "chroot_findtask" is + created. + +config GRKERNSEC_CHROOT_NICE + bool "Restrict priority changes" + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to raise + the priority of processes in the chroot, or alter the priority of + processes outside the chroot. This provides more security than simply + removing CAP_SYS_NICE from the process' capability set. If the + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice" + is created. + +config GRKERNSEC_CHROOT_SYSCTL + bool "Deny sysctl writes" + depends on GRKERNSEC_CHROOT + help + If you say Y here, an attacker in a chroot will not be able to + write to sysctl entries, either by sysctl(2) or through a /proc + interface. It is strongly recommended that you say Y here. If the + sysctl option is enabled, a sysctl option with name + "chroot_deny_sysctl" is created. + +config GRKERNSEC_CHROOT_CAPS + bool "Capability restrictions" + depends on GRKERNSEC_CHROOT + help + If you say Y here, the capabilities on all root processes within a + chroot jail will be lowered to stop module insertion, raw i/o, + system and net admin tasks, rebooting the system, modifying immutable + files, modifying IPC owned by another, and changing the system time. + This is left an option because it can break some apps. Disable this + if your chrooted apps are having problems performing those kinds of + tasks. If the sysctl option is enabled, a sysctl option with + name "chroot_caps" is created. + +endmenu +menu "Kernel Auditing" +depends on GRKERNSEC + +config GRKERNSEC_AUDIT_GROUP + bool "Single group for auditing" + help + If you say Y here, the exec, chdir, (un)mount, and ipc logging features + will only operate on a group you specify. This option is recommended + if you only want to watch certain users instead of having a large + amount of logs from the entire system. If the sysctl option is enabled, + a sysctl option with name "audit_group" is created. + +config GRKERNSEC_AUDIT_GID + int "GID for auditing" + depends on GRKERNSEC_AUDIT_GROUP + default 1007 + +config GRKERNSEC_EXECLOG + bool "Exec logging" + help + If you say Y here, all execve() calls will be logged (since the + other exec*() calls are frontends to execve(), all execution + will be logged). Useful for shell-servers that like to keep track + of their users. 
If the sysctl option is enabled, a sysctl option with + name "exec_logging" is created. + WARNING: This option when enabled will produce a LOT of logs, especially + on an active system. + +config GRKERNSEC_RESLOG + bool "Resource logging" + help + If you say Y here, all attempts to overstep resource limits will + be logged with the resource name, the requested size, and the current + limit. It is highly recommended that you say Y here. If the sysctl + option is enabled, a sysctl option with name "resource_logging" is + created. If the RBAC system is enabled, the sysctl value is ignored. + +config GRKERNSEC_CHROOT_EXECLOG + bool "Log execs within chroot" + help + If you say Y here, all executions inside a chroot jail will be logged + to syslog. This can cause a large amount of logs if certain + applications (eg. djb's daemontools) are installed on the system, and + is therefore left as an option. If the sysctl option is enabled, a + sysctl option with name "chroot_execlog" is created. + +config GRKERNSEC_AUDIT_CHDIR + bool "Chdir logging" + help + If you say Y here, all chdir() calls will be logged. If the sysctl + option is enabled, a sysctl option with name "audit_chdir" is created. + +config GRKERNSEC_AUDIT_MOUNT + bool "(Un)Mount logging" + help + If you say Y here, all mounts and unmounts will be logged. If the + sysctl option is enabled, a sysctl option with name "audit_mount" is + created. + +config GRKERNSEC_AUDIT_IPC + bool "IPC logging" + help + If you say Y here, creation and removal of message queues, semaphores, + and shared memory will be logged. If the sysctl option is enabled, a + sysctl option with name "audit_ipc" is created. + +config GRKERNSEC_SIGNAL + bool "Signal logging" + help + If you say Y here, certain important signals will be logged, such as + SIGSEGV, which will as a result inform you of when a error in a program + occurred, which in some cases could mean a possible exploit attempt. + If the sysctl option is enabled, a sysctl option with name + "signal_logging" is created. + +config GRKERNSEC_FORKFAIL + bool "Fork failure logging" + help + If you say Y here, all failed fork() attempts will be logged. + This could suggest a fork bomb, or someone attempting to overstep + their process limit. If the sysctl option is enabled, a sysctl option + with name "forkfail_logging" is created. + +config GRKERNSEC_TIME + bool "Time change logging" + help + If you say Y here, any changes of the system clock will be logged. + If the sysctl option is enabled, a sysctl option with name + "timechange_logging" is created. + +config GRKERNSEC_PROC_IPADDR + bool "/proc//ipaddr support" + help + If you say Y here, a new entry will be added to each /proc/ + directory that contains the IP address of the person using the task. + The IP is carried across local TCP and AF_UNIX stream sockets. + This information can be useful for IDS/IPSes to perform remote response + to a local attack. The entry is readable by only the owner of the + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via + the RBAC system), and thus does not create privacy concerns. + +config GRKERNSEC_AUDIT_TEXTREL + bool 'ELF text relocations logging (READ HELP)' + depends on PAX_MPROTECT + help + If you say Y here, text relocations will be logged with the filename + of the offending library or binary. The purpose of the feature is + to help Linux distribution developers get rid of libraries and + binaries that need text relocations which hinder the future progress + of PaX. 
Only Linux distribution developers should say Y here, and + never on a production machine, as this option creates an information + leak that could aid an attacker in defeating the randomization of + a single memory region. If the sysctl option is enabled, a sysctl + option with name "audit_textrel" is created. + +endmenu + +menu "Executable Protections" +depends on GRKERNSEC + +config GRKERNSEC_EXECVE + bool "Enforce RLIMIT_NPROC on execs" + help + If you say Y here, users with a resource limit on processes will + have the value checked during execve() calls. The current system + only checks the system limit during fork() calls. If the sysctl option + is enabled, a sysctl option with name "execve_limiting" is created. + +config GRKERNSEC_SHM + bool "Destroy unused shared memory" + depends on SYSVIPC + help + If you say Y here, shared memory will be destroyed when no one is + attached to it. Otherwise, resources involved with the shared + memory can be used up and not be associated with any process (as the + shared memory still exists, and the creating process has exited). If + the sysctl option is enabled, a sysctl option with name + "destroy_unused_shm" is created. + +config GRKERNSEC_DMESG + bool "Dmesg(8) restriction" + help + If you say Y here, non-root users will not be able to use dmesg(8) + to view up to the last 4kb of messages in the kernel's log buffer. + If the sysctl option is enabled, a sysctl option with name "dmesg" is + created. + +config GRKERNSEC_TPE + bool "Trusted Path Execution (TPE)" + help + If you say Y here, you will be able to choose a gid to add to the + supplementary groups of users you want to mark as "untrusted." + These users will not be able to execute any files that are not in + root-owned directories writable only by root. If the sysctl option + is enabled, a sysctl option with name "tpe" is created. + +config GRKERNSEC_TPE_ALL + bool "Partially restrict non-root users" + depends on GRKERNSEC_TPE + help + If you say Y here, All non-root users other than the ones in the + group specified in the main TPE option will only be allowed to + execute files in directories they own that are not group or + world-writable, or in directories owned by root and writable only by + root. If the sysctl option is enabled, a sysctl option with name + "tpe_restrict_all" is created. + +config GRKERNSEC_TPE_INVERT + bool "Invert GID option" + depends on GRKERNSEC_TPE + help + If you say Y here, the group you specify in the TPE configuration will + decide what group TPE restrictions will be *disabled* for. This + option is useful if you want TPE restrictions to be applied to most + users on the system. + +config GRKERNSEC_TPE_GID + int "GID for untrusted users" + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT + default 1005 + help + If you have selected the "Invert GID option" above, setting this + GID determines what group TPE restrictions will be *disabled* for. + If you have not selected the "Invert GID option" above, setting this + GID determines what group TPE restrictions will be *enabled* for. + If the sysctl option is enabled, a sysctl option with name "tpe_gid" + is created. + +config GRKERNSEC_TPE_GID + int "GID for trusted users" + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT + default 1005 + help + If you have selected the "Invert GID option" above, setting this + GID determines what group TPE restrictions will be *disabled* for. 
+ If you have not selected the "Invert GID option" above, setting this + GID determines what group TPE restrictions will be *enabled* for. + If the sysctl option is enabled, a sysctl option with name "tpe_gid" + is created. + +endmenu +menu "Network Protections" +depends on GRKERNSEC + +config GRKERNSEC_RANDNET + bool "Larger entropy pools" + help + If you say Y here, the entropy pools used for many features of Linux + and grsecurity will be doubled in size. Since several grsecurity + features use additional randomness, it is recommended that you say Y + here. Saying Y here has a similar effect as modifying + /proc/sys/kernel/random/poolsize. + +config GRKERNSEC_SOCKET + bool "Socket restrictions" + help + If you say Y here, you will be able to choose from several options. + If you assign a GID on your system and add it to the supplementary + groups of users you want to restrict socket access to, this patch + will perform up to three things, based on the option(s) you choose. + +config GRKERNSEC_SOCKET_ALL + bool "Deny any sockets to group" + depends on GRKERNSEC_SOCKET + help + If you say Y here, you will be able to choose a GID of whose users will + be unable to connect to other hosts from your machine or run server + applications from your machine. If the sysctl option is enabled, a + sysctl option with name "socket_all" is created. + +config GRKERNSEC_SOCKET_ALL_GID + int "GID to deny all sockets for" + depends on GRKERNSEC_SOCKET_ALL + default 1004 + help + Here you can choose the GID to disable socket access for. Remember to + add the users you want socket access disabled for to the GID + specified here. If the sysctl option is enabled, a sysctl option + with name "socket_all_gid" is created. + +config GRKERNSEC_SOCKET_CLIENT + bool "Deny client sockets to group" + depends on GRKERNSEC_SOCKET + help + If you say Y here, you will be able to choose a GID of whose users will + be unable to connect to other hosts from your machine, but will be + able to run servers. If this option is enabled, all users in the group + you specify will have to use passive mode when initiating ftp transfers + from the shell on your machine. If the sysctl option is enabled, a + sysctl option with name "socket_client" is created. + +config GRKERNSEC_SOCKET_CLIENT_GID + int "GID to deny client sockets for" + depends on GRKERNSEC_SOCKET_CLIENT + default 1003 + help + Here you can choose the GID to disable client socket access for. + Remember to add the users you want client socket access disabled for to + the GID specified here. If the sysctl option is enabled, a sysctl + option with name "socket_client_gid" is created. + +config GRKERNSEC_SOCKET_SERVER + bool "Deny server sockets to group" + depends on GRKERNSEC_SOCKET + help + If you say Y here, you will be able to choose a GID of whose users will + be unable to run server applications from your machine. If the sysctl + option is enabled, a sysctl option with name "socket_server" is created. + +config GRKERNSEC_SOCKET_SERVER_GID + int "GID to deny server sockets for" + depends on GRKERNSEC_SOCKET_SERVER + default 1002 + help + Here you can choose the GID to disable server socket access for. + Remember to add the users you want server socket access disabled for to + the GID specified here. If the sysctl option is enabled, a sysctl + option with name "socket_server_gid" is created. 
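(Annotation, not part of the patch.) A quick way to observe the socket restrictions above from userspace: with GRKERNSEC_SOCKET_ALL (or the client/server variants) enabled and the calling user placed in the configured GID, a plain socket() call is expected to fail with EACCES, matching the -EACCES returned by the gr_handle_sock_* hooks earlier in this patch. The small test program below is an illustrative check only.

/* Illustrative check: a non-AF_UNIX socket() call under the restriction. */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);   /* non-AF_UNIX socket */

	if (fd < 0) {
		/* EACCES expected when the user is in the restricted GID */
		printf("socket() denied: %s\n", strerror(errno));
		return 1;
	}
	printf("socket() allowed (fd=%d)\n", fd);
	close(fd);
	return 0;
}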
+ +endmenu +menu "Sysctl support" +depends on GRKERNSEC && SYSCTL + +config GRKERNSEC_SYSCTL + bool "Sysctl support" + help + If you say Y here, you will be able to change the options that + grsecurity runs with at bootup, without having to recompile your + kernel. You can echo values to files in /proc/sys/kernel/grsecurity + to enable (1) or disable (0) various features. All the sysctl entries + are mutable until the "grsec_lock" entry is set to a non-zero value. + All features enabled in the kernel configuration are disabled at boot + if you do not say Y to the "Turn on features by default" option. + All options should be set at startup, and the grsec_lock entry should + be set to a non-zero value after all the options are set. + *THIS IS EXTREMELY IMPORTANT* + +config GRKERNSEC_SYSCTL_ON + bool "Turn on features by default" + depends on GRKERNSEC_SYSCTL + help + If you say Y here, instead of having all features enabled in the + kernel configuration disabled at boot time, the features will be + enabled at boot time. It is recommended you say Y here unless + there is some reason you would want all sysctl-tunable features to + be disabled by default. As mentioned elsewhere, it is important + to enable the grsec_lock entry once you have finished modifying + the sysctl entries. + +endmenu +menu "Logging Options" +depends on GRKERNSEC + +config GRKERNSEC_FLOODTIME + int "Seconds in between log messages (minimum)" + default 10 + help + This option allows you to enforce the number of seconds between + grsecurity log messages. The default should be suitable for most + people, however, if you choose to change it, choose a value small enough + to allow informative logs to be produced, but large enough to + prevent flooding. + +config GRKERNSEC_FLOODBURST + int "Number of messages in a burst (maximum)" + default 4 + help + This option allows you to choose the maximum number of messages allowed + within the flood time interval you chose in a separate option. The + default should be suitable for most people, however if you find that + many of your logs are being interpreted as flooding, you may want to + raise this value. 
+ +endmenu + +endmenu diff -urNp linux-2.6.22.1/grsecurity/Makefile linux-2.6.22.1/grsecurity/Makefile --- linux-2.6.22.1/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/grsecurity/Makefile 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,20 @@ +# grsecurity's ACL system was originally written in 2001 by Michael Dalton +# during 2001-2005 it has been completely redesigned by Brad Spengler +# into an RBAC system +# +# All code in this directory and various hooks inserted throughout the kernel +# are copyright Brad Spengler, and released under the GPL v2 or higher + +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \ + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \ + grsec_time.o grsec_tpe.o grsec_ipc.o grsec_link.o grsec_textrel.o + +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_ip.o gracl_segv.o \ + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \ + gracl_learn.o grsec_log.o +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o + +ifndef CONFIG_GRKERNSEC +obj-y += grsec_disabled.o +endif + diff -urNp linux-2.6.22.1/include/acpi/acmacros.h linux-2.6.22.1/include/acpi/acmacros.h --- linux-2.6.22.1/include/acpi/acmacros.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/acpi/acmacros.h 2007-08-02 11:38:47.000000000 -0400 @@ -617,7 +617,7 @@ #define ACPI_DUMP_PATHNAME(a,b,c,d) #define ACPI_DUMP_RESOURCE_LIST(a) #define ACPI_DUMP_BUFFER(a,b) -#define ACPI_DEBUG_PRINT(pl) +#define ACPI_DEBUG_PRINT(pl) do {} while (0) #define ACPI_DEBUG_PRINT_RAW(pl) #define return_VOID return diff -urNp linux-2.6.22.1/include/asm-alpha/a.out.h linux-2.6.22.1/include/asm-alpha/a.out.h --- linux-2.6.22.1/include/asm-alpha/a.out.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-alpha/a.out.h 2007-08-02 11:38:47.000000000 -0400 @@ -98,7 +98,7 @@ struct exec set_personality (((BFPM->sh_bang || EX.ah.entry < 0x100000000L \ ? ADDR_LIMIT_32BIT : 0) | PER_OSF4)) -#define STACK_TOP \ +#define __STACK_TOP \ (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) #endif diff -urNp linux-2.6.22.1/include/asm-alpha/elf.h linux-2.6.22.1/include/asm-alpha/elf.h --- linux-2.6.22.1/include/asm-alpha/elf.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-alpha/elf.h 2007-08-02 11:38:47.000000000 -0400 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL) + +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28) +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19) +#endif + /* $0 is set by ld.so to a pointer to a function which might be registered using atexit. 
This provides a mean for the dynamic linker to call DT_FINI functions for shared libraries that have diff -urNp linux-2.6.22.1/include/asm-alpha/kmap_types.h linux-2.6.22.1/include/asm-alpha/kmap_types.h --- linux-2.6.22.1/include/asm-alpha/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-alpha/kmap_types.h 2007-08-02 11:38:47.000000000 -0400 @@ -24,7 +24,8 @@ D(9) KM_IRQ0, D(10) KM_IRQ1, D(11) KM_SOFTIRQ0, D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR +D(13) KM_CLEARPAGE, +D(14) KM_TYPE_NR }; #undef D diff -urNp linux-2.6.22.1/include/asm-alpha/pgtable.h linux-2.6.22.1/include/asm-alpha/pgtable.h --- linux-2.6.22.1/include/asm-alpha/pgtable.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-alpha/pgtable.h 2007-08-02 11:38:47.000000000 -0400 @@ -101,6 +101,17 @@ struct vm_area_struct; #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) + +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE) +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) diff -urNp linux-2.6.22.1/include/asm-arm/a.out.h linux-2.6.22.1/include/asm-arm/a.out.h --- linux-2.6.22.1/include/asm-arm/a.out.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-arm/a.out.h 2007-08-02 11:38:47.000000000 -0400 @@ -28,7 +28,7 @@ struct exec #define M_ARM 103 #ifdef __KERNEL__ -#define STACK_TOP ((current->personality == PER_LINUX_32BIT) ? \ +#define __STACK_TOP ((current->personality == PER_LINUX_32BIT) ? \ TASK_SIZE : TASK_SIZE_26) #endif diff -urNp linux-2.6.22.1/include/asm-arm/elf.h linux-2.6.22.1/include/asm-arm/elf.h --- linux-2.6.22.1/include/asm-arm/elf.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-arm/elf.h 2007-08-02 11:38:48.000000000 -0400 @@ -110,6 +110,13 @@ extern char elf_platform[]; #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x00008000UL + +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) +#endif + /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. A value of 0 means we have no such handler. 
*/ diff -urNp linux-2.6.22.1/include/asm-arm/kmap_types.h linux-2.6.22.1/include/asm-arm/kmap_types.h --- linux-2.6.22.1/include/asm-arm/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-arm/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -18,6 +18,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-arm26/kmap_types.h linux-2.6.22.1/include/asm-arm26/kmap_types.h --- linux-2.6.22.1/include/asm-arm26/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-arm26/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -6,7 +6,8 @@ */ enum km_type { KM_IRQ0, - KM_USER1 + KM_USER1, + KM_CLEARPAGE }; #endif diff -urNp linux-2.6.22.1/include/asm-avr32/a.out.h linux-2.6.22.1/include/asm-avr32/a.out.h --- linux-2.6.22.1/include/asm-avr32/a.out.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-avr32/a.out.h 2007-08-02 11:38:48.000000000 -0400 @@ -19,7 +19,7 @@ struct exec #ifdef __KERNEL__ -#define STACK_TOP TASK_SIZE +#define __STACK_TOP TASK_SIZE #endif diff -urNp linux-2.6.22.1/include/asm-avr32/elf.h linux-2.6.22.1/include/asm-avr32/elf.h --- linux-2.6.22.1/include/asm-avr32/elf.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-avr32/elf.h 2007-08-02 11:38:48.000000000 -0400 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x00001000UL + +#define PAX_DELTA_MMAP_LEN 15 +#define PAX_DELTA_STACK_LEN 15 +#endif /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. 
This could be done in user space, diff -urNp linux-2.6.22.1/include/asm-avr32/kmap_types.h linux-2.6.22.1/include/asm-avr32/kmap_types.h --- linux-2.6.22.1/include/asm-avr32/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-avr32/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -22,7 +22,8 @@ D(10) KM_IRQ0, D(11) KM_IRQ1, D(12) KM_SOFTIRQ0, D(13) KM_SOFTIRQ1, -D(14) KM_TYPE_NR +D(14) KM_CLEARPAGE, +D(15) KM_TYPE_NR }; #undef D diff -urNp linux-2.6.22.1/include/asm-blackfin/kmap_types.h linux-2.6.22.1/include/asm-blackfin/kmap_types.h --- linux-2.6.22.1/include/asm-blackfin/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-blackfin/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -15,6 +15,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-cris/kmap_types.h linux-2.6.22.1/include/asm-cris/kmap_types.h --- linux-2.6.22.1/include/asm-cris/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-cris/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -19,6 +19,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-frv/kmap_types.h linux-2.6.22.1/include/asm-frv/kmap_types.h --- linux-2.6.22.1/include/asm-frv/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-frv/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -23,6 +23,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-generic/futex.h linux-2.6.22.1/include/asm-generic/futex.h --- linux-2.6.22.1/include/asm-generic/futex.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-generic/futex.h 2007-08-02 11:38:48.000000000 -0400 @@ -8,7 +8,7 @@ #include static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -50,7 +50,7 @@ futex_atomic_op_inuser (int encoded_op, } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval) { return -ENOSYS; } diff -urNp linux-2.6.22.1/include/asm-generic/vmlinux.lds.h linux-2.6.22.1/include/asm-generic/vmlinux.lds.h --- linux-2.6.22.1/include/asm-generic/vmlinux.lds.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-generic/vmlinux.lds.h 2007-08-02 11:38:48.000000000 -0400 @@ -19,6 +19,7 @@ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start_rodata) = .; \ *(.rodata) *(.rodata.*) \ + *(.data.read_only) \ *(__vermagic) /* Kernel version magic */ \ } \ \ diff -urNp linux-2.6.22.1/include/asm-h8300/kmap_types.h linux-2.6.22.1/include/asm-h8300/kmap_types.h --- linux-2.6.22.1/include/asm-h8300/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-h8300/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -15,6 +15,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-i386/alternative.h linux-2.6.22.1/include/asm-i386/alternative.h --- linux-2.6.22.1/include/asm-i386/alternative.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/alternative.h 2007-08-02 11:38:48.000000000 -0400 @@ -54,7 +54,7 @@ static inline void alternatives_smp_swit " .byte 662b-661b\n" /* sourcelen 
*/ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ + ".section .altinstr_replacement,\"a\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */\ ".previous" :: "i" (feature) : "memory") @@ -78,7 +78,7 @@ static inline void alternatives_smp_swit " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ + ".section .altinstr_replacement,\"a\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */\ ".previous" :: "i" (feature), ##input) diff -urNp linux-2.6.22.1/include/asm-i386/a.out.h linux-2.6.22.1/include/asm-i386/a.out.h --- linux-2.6.22.1/include/asm-i386/a.out.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/a.out.h 2007-08-02 11:38:48.000000000 -0400 @@ -19,7 +19,11 @@ struct exec #ifdef __KERNEL__ -#define STACK_TOP TASK_SIZE +#ifdef CONFIG_PAX_SEGMEXEC +#define __STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?TASK_SIZE/2:TASK_SIZE) +#else +#define __STACK_TOP TASK_SIZE +#endif #endif diff -urNp linux-2.6.22.1/include/asm-i386/apic.h linux-2.6.22.1/include/asm-i386/apic.h --- linux-2.6.22.1/include/asm-i386/apic.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/apic.h 2007-08-02 11:38:48.000000000 -0400 @@ -8,7 +8,7 @@ #include #include -#define Dprintk(x...) +#define Dprintk(x...) do {} while (0) /* * Debugging macros diff -urNp linux-2.6.22.1/include/asm-i386/cache.h linux-2.6.22.1/include/asm-i386/cache.h --- linux-2.6.22.1/include/asm-i386/cache.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/cache.h 2007-08-02 11:38:48.000000000 -0400 @@ -10,5 +10,6 @@ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_only __attribute__((__section__(".data.read_only"))) #endif diff -urNp linux-2.6.22.1/include/asm-i386/checksum.h linux-2.6.22.1/include/asm-i386/checksum.h --- linux-2.6.22.1/include/asm-i386/checksum.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/checksum.h 2007-08-02 11:38:48.000000000 -0400 @@ -30,6 +30,12 @@ asmlinkage __wsum csum_partial(const voi asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); +asmlinkage __wsum csum_partial_copy_generic_to_user(const unsigned char *src, unsigned char *dst, + int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); + +asmlinkage __wsum csum_partial_copy_generic_from_user(const unsigned char *src, unsigned char *dst, + int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); + /* * Note: when you get a NULL pointer exception here this means someone * passed in an incorrect kernel address to one of these functions. 
@@ -49,7 +55,7 @@ __wsum csum_partial_copy_from_user(const int len, __wsum sum, int *err_ptr) { might_sleep(); - return csum_partial_copy_generic((__force void *)src, dst, + return csum_partial_copy_generic_from_user((__force void *)src, dst, len, sum, err_ptr, NULL); } @@ -180,7 +186,7 @@ static __inline__ __wsum csum_and_copy_t { might_sleep(); if (access_ok(VERIFY_WRITE, dst, len)) - return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr); + return csum_partial_copy_generic_to_user(src, (__force void *)dst, len, sum, NULL, err_ptr); if (len) *err_ptr = -EFAULT; diff -urNp linux-2.6.22.1/include/asm-i386/desc.h linux-2.6.22.1/include/asm-i386/desc.h --- linux-2.6.22.1/include/asm-i386/desc.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/desc.h 2007-08-02 11:38:48.000000000 -0400 @@ -7,26 +7,22 @@ #ifndef __ASSEMBLY__ #include -#include #include +#include #include +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)]; + struct Xgt_desc_struct { unsigned short size; - unsigned long address __attribute__((packed)); + struct desc_struct *address __attribute__((packed)); unsigned short pad; } __attribute__ ((packed)); -struct gdt_page -{ - struct desc_struct gdt[GDT_ENTRIES]; -} __attribute__((aligned(PAGE_SIZE))); -DECLARE_PER_CPU(struct gdt_page, gdt_page); - static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) { - return per_cpu(gdt_page, cpu).gdt; + return cpu_gdt_table[cpu]; } extern struct Xgt_desc_struct idt_descr; @@ -81,8 +77,20 @@ static inline void pack_gate(__u32 *a, _ static inline void write_dt_entry(struct desc_struct *dt, int entry, u32 entry_low, u32 entry_high) { + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; + + pax_open_kernel(cr0); +#endif + dt[entry].a = entry_low; dt[entry].b = entry_high; + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + } static inline void native_set_ldt(const void *addr, unsigned int entries) @@ -139,8 +147,19 @@ static inline void native_load_tls(struc unsigned int i; struct desc_struct *gdt = get_cpu_gdt_table(cpu); +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; + + pax_open_kernel(cr0); +#endif + for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + } static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg) @@ -175,7 +194,7 @@ static inline void __set_tss_desc(unsign ((info)->seg_32bit << 22) | \ ((info)->limit_in_pages << 23) | \ ((info)->useable << 20) | \ - 0x7000) + 0x7100) #define LDT_empty(info) (\ (info)->base_addr == 0 && \ @@ -207,15 +226,25 @@ static inline void load_LDT(mm_context_t preempt_enable(); } -static inline unsigned long get_desc_base(unsigned long *desc) +static inline unsigned long get_desc_base(struct desc_struct *desc) { unsigned long base; - base = ((desc[0] >> 16) & 0x0000ffff) | - ((desc[1] << 16) & 0x00ff0000) | - (desc[1] & 0xff000000); + base = ((desc->a >> 16) & 0x0000ffff) | + ((desc->b << 16) & 0x00ff0000) | + (desc->b & 0xff000000); return base; } +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu) +{ + __u32 a, b; + + if (likely(limit)) + limit = (limit - 1UL) >> PAGE_SHIFT; + pack_descriptor(&a, &b, base, limit, 0xFB, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, a, b); +} + #else /* __ASSEMBLY__ */ /* diff -urNp linux-2.6.22.1/include/asm-i386/elf.h linux-2.6.22.1/include/asm-i386/elf.h --- 
linux-2.6.22.1/include/asm-i386/elf.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/elf.h 2007-08-02 11:38:48.000000000 -0400 @@ -73,7 +73,18 @@ typedef struct user_fxsr_struct elf_fpxr the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ +#ifdef CONFIG_PAX_SEGMEXEC +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2) +#else #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#endif + +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x10000000UL + +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) +#endif /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is now struct_user_regs, they are different) */ @@ -131,7 +142,7 @@ extern int dump_task_extended_fpu (struc #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs) #define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO)) -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) +#define VDSO_CURRENT_BASE (current->mm->context.vdso) #define VDSO_PRELINK 0 #define VDSO_SYM(x) \ diff -urNp linux-2.6.22.1/include/asm-i386/futex.h linux-2.6.22.1/include/asm-i386/futex.h --- linux-2.6.22.1/include/asm-i386/futex.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/futex.h 2007-08-02 11:38:48.000000000 -0400 @@ -11,8 +11,11 @@ #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ __asm__ __volatile ( \ + "movw %w6, %%ds\n"\ "1: " insn "\n" \ -"2: .section .fixup,\"ax\"\n\ +"2: pushl %%ss\n\ + popl %%ds\n\ + .section .fixup,\"ax\"\n\ 3: mov %3, %1\n\ jmp 2b\n\ .previous\n\ @@ -21,16 +24,19 @@ .long 1b,3b\n\ .previous" \ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ - : "i" (-EFAULT), "0" (oparg), "1" (0)) + : "i" (-EFAULT), "0" (oparg), "1" (0), "r" (__USER_DS)) #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ __asm__ __volatile ( \ -"1: movl %2, %0\n\ +" movw %w7, %%es\n\ +1: movl %%es:%2, %0\n\ movl %0, %3\n" \ insn "\n" \ -"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\ +"2: " LOCK_PREFIX "cmpxchgl %3, %%es:%2\n\ jnz 1b\n\ -3: .section .fixup,\"ax\"\n\ +3: pushl %%ss\n\ + popl %%es\n\ + .section .fixup,\"ax\"\n\ 4: mov %5, %1\n\ jmp 3b\n\ .previous\n\ @@ -40,10 +46,10 @@ .previous" \ : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ "=&r" (tem) \ - : "r" (oparg), "i" (-EFAULT), "1" (0)) + : "r" (oparg), "i" (-EFAULT), "1" (0), "r" (__USER_DS)) static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -59,7 +65,7 @@ futex_atomic_op_inuser (int encoded_op, pagefault_disable(); if (op == FUTEX_OP_SET) - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); + __futex_atomic_op1("xchgl %0, %%ds:%2", ret, oldval, uaddr, oparg); else { #ifndef CONFIG_X86_BSWAP if (boot_cpu_data.x86 == 3) @@ -68,7 +74,7 @@ futex_atomic_op_inuser (int encoded_op, #endif switch (op) { case FUTEX_OP_ADD: - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, + __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %%ds:%2", ret, oldval, uaddr, oparg); break; case FUTEX_OP_OR: @@ -105,15 +111,17 @@ futex_atomic_op_inuser (int encoded_op, } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 __user 
*uaddr, int oldval, int newval) { if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; __asm__ __volatile__( - "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" - - "2: .section .fixup, \"ax\" \n" + " movw %w5, %%ds \n" + "1: " LOCK_PREFIX "cmpxchgl %3, %%ds:%1 \n" + "2: pushl %%ss \n" + " popl %%ds \n" + " .section .fixup, \"ax\" \n" "3: mov %2, %0 \n" " jmp 2b \n" " .previous \n" @@ -124,7 +132,7 @@ futex_atomic_cmpxchg_inatomic(int __user " .previous \n" : "=a" (oldval), "+m" (*uaddr) - : "i" (-EFAULT), "r" (newval), "0" (oldval) + : "i" (-EFAULT), "r" (newval), "0" (oldval), "r" (__USER_DS) : "memory" ); diff -urNp linux-2.6.22.1/include/asm-i386/i387.h linux-2.6.22.1/include/asm-i386/i387.h --- linux-2.6.22.1/include/asm-i386/i387.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/i387.h 2007-08-02 11:38:48.000000000 -0400 @@ -40,13 +40,8 @@ extern void kernel_fpu_begin(void); #define kernel_fpu_end() do { stts(); preempt_enable(); } while(0) /* We need a safe address that is cheap to find and that is already - in L1 during context switch. The best choices are unfortunately - different for UP and SMP */ -#ifdef CONFIG_SMP -#define safe_address (__per_cpu_offset[0]) -#else -#define safe_address (kstat_cpu(0).cpustat.user) -#endif + in L1 during context switch. */ +#define safe_address (init_tss[smp_processor_id()].x86_tss.esp0) /* * These must be called with preempt disabled diff -urNp linux-2.6.22.1/include/asm-i386/irqflags.h linux-2.6.22.1/include/asm-i386/irqflags.h --- linux-2.6.22.1/include/asm-i386/irqflags.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/irqflags.h 2007-08-02 11:38:48.000000000 -0400 @@ -108,6 +108,8 @@ static inline unsigned long __raw_local_ #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit #define INTERRUPT_RETURN iret #define GET_CR0_INTO_EAX movl %cr0, %eax +#define GET_CR0_INTO_EDX movl %cr0, %edx +#define SET_CR0_FROM_EDX movl %edx, %cr0 #endif /* __ASSEMBLY__ */ #endif /* CONFIG_PARAVIRT */ diff -urNp linux-2.6.22.1/include/asm-i386/kmap_types.h linux-2.6.22.1/include/asm-i386/kmap_types.h --- linux-2.6.22.1/include/asm-i386/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -22,7 +22,8 @@ D(9) KM_IRQ0, D(10) KM_IRQ1, D(11) KM_SOFTIRQ0, D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR +D(13) KM_CLEARPAGE, +D(14) KM_TYPE_NR }; #undef D diff -urNp linux-2.6.22.1/include/asm-i386/mach-default/apm.h linux-2.6.22.1/include/asm-i386/mach-default/apm.h --- linux-2.6.22.1/include/asm-i386/mach-default/apm.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/mach-default/apm.h 2007-08-02 11:38:48.000000000 -0400 @@ -36,7 +36,7 @@ static inline void apm_bios_call_asm(u32 __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall *%%cs:apm_bios_entry\n\t" + "lcall *%%ss:apm_bios_entry\n\t" "setc %%al\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" @@ -60,7 +60,7 @@ static inline u8 apm_bios_call_simple_as __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall *%%cs:apm_bios_entry\n\t" + "lcall *%%ss:apm_bios_entry\n\t" "setc %%bl\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" diff -urNp linux-2.6.22.1/include/asm-i386/mman.h linux-2.6.22.1/include/asm-i386/mman.h --- linux-2.6.22.1/include/asm-i386/mman.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/mman.h 2007-08-02 11:38:48.000000000 -0400 @@ -14,4 +14,12 @@ #define MCL_CURRENT 1 /* lock all current mappings */ 
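The futex.h hunk just above, like the uaccess.h hunk further down, wraps each userland access between an explicit load of __USER_DS into %ds and a restore of %ds from %ss (which holds the kernel data segment while in kernel mode). A stripped-down sketch of that pattern is shown below; the helper name is hypothetical and the code is an illustration of the mechanism, not a function added by the patch.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/segment.h>

/* Hypothetical helper: read one 32-bit word from userland with an
 * explicit %ds switch, mirroring the futex.h/uaccess.h hunks. */
static inline int example_read_user_u32(const u32 __user *uaddr, u32 *val)
{
	int err = 0;
	u32 tmp;

	__asm__ __volatile__(
		"	movw %w3, %%ds\n"	/* point %ds at the user data segment */
		"1:	movl %%ds:%2, %1\n"	/* the actual userland load */
		"2:	pushl %%ss\n"		/* %ss still holds the kernel data segment */
		"	popl %%ds\n"		/* restore %ds before returning */
		".section .fixup,\"ax\"\n"
		"3:	movl %4, %0\n"		/* faulted: report -EFAULT */
		"	jmp 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r" (err), "=r" (tmp)
		: "m" (*uaddr), "r" (__USER_DS), "i" (-EFAULT), "0" (err));

	*val = tmp;
	return err;
}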
#define MCL_FUTURE 2 /* lock all future mappings */ +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +#define arch_mmap_check i386_mmap_check +int i386_mmap_check(unsigned long addr, unsigned long len, + unsigned long flags); +#endif +#endif + #endif /* __I386_MMAN_H__ */ diff -urNp linux-2.6.22.1/include/asm-i386/mmu_context.h linux-2.6.22.1/include/asm-i386/mmu_context.h --- linux-2.6.22.1/include/asm-i386/mmu_context.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/mmu_context.h 2007-08-02 11:38:48.000000000 -0400 @@ -55,6 +55,22 @@ static inline void switch_mm(struct mm_s */ if (unlikely(prev->context.ldt != next->context.ldt)) load_LDT_nolock(&next->context); + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + if (!nx_enabled) { + smp_mb__before_clear_bit(); + cpu_clear(cpu, prev->context.cpu_user_cs_mask); + smp_mb__after_clear_bit(); + cpu_set(cpu, next->context.cpu_user_cs_mask); + } +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base || + prev->context.user_cs_limit != next->context.user_cs_limit)) + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); +#endif + } #ifdef CONFIG_SMP else { @@ -67,6 +83,19 @@ static inline void switch_mm(struct mm_s */ load_cr3(next->pgd); load_LDT_nolock(&next->context); + +#ifdef CONFIG_PAX_PAGEEXEC + if (!nx_enabled) + cpu_set(cpu, next->context.cpu_user_cs_mask); +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) +#ifdef CONFIG_PAX_PAGEEXEC + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled)) +#endif + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); +#endif + } } #endif diff -urNp linux-2.6.22.1/include/asm-i386/mmu.h linux-2.6.22.1/include/asm-i386/mmu.h --- linux-2.6.22.1/include/asm-i386/mmu.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/mmu.h 2007-08-02 11:38:48.000000000 -0400 @@ -11,8 +11,19 @@ typedef struct { int size; struct semaphore sem; - void *ldt; - void *vdso; + struct desc_struct *ldt; + unsigned long vdso; + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + unsigned long user_cs_base; + unsigned long user_cs_limit; + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + cpumask_t cpu_user_cs_mask; +#endif + +#endif + } mm_context_t; #endif diff -urNp linux-2.6.22.1/include/asm-i386/module.h linux-2.6.22.1/include/asm-i386/module.h --- linux-2.6.22.1/include/asm-i386/module.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/module.h 2007-08-02 11:09:16.000000000 -0400 @@ -70,6 +70,12 @@ struct mod_arch_specific #define MODULE_STACKSIZE "" #endif -#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE +#ifdef CONFIG_GRKERNSEC +#define MODULE_GRSEC "GRSECURTY " +#else +#define MODULE_GRSEC "" +#endif + +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC #endif /* _ASM_I386_MODULE_H */ diff -urNp linux-2.6.22.1/include/asm-i386/page.h linux-2.6.22.1/include/asm-i386/page.h --- linux-2.6.22.1/include/asm-i386/page.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/page.h 2007-08-02 11:38:48.000000000 -0400 @@ -10,6 +10,7 @@ #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) #ifdef __KERNEL__ +#include #ifndef __ASSEMBLY__ #ifdef CONFIG_X86_USE_3DNOW @@ -90,7 +91,6 @@ static inline pte_t native_make_pte(unsi typedef struct { unsigned long pte_low; } pte_t; typedef struct { unsigned long pgd; } pgd_t; typedef 
struct { unsigned long pgprot; } pgprot_t; -#define boot_pte_t pte_t /* or would you rather have a typedef */ static inline unsigned long native_pgd_val(pgd_t pgd) { @@ -175,6 +175,15 @@ extern int page_is_ram(unsigned long pag #define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) #endif +#ifdef CONFIG_PAX_KERNEXEC +#define __KERNEL_TEXT_OFFSET (__PAGE_OFFSET + ((LOAD_PHYSICAL_ADDR + 4*1024*1024 - 1) & ~(4*1024*1024 - 1))) +#ifndef __ASSEMBLY__ +extern unsigned char MODULES_VADDR[]; +extern unsigned char MODULES_END[]; +#endif +#else +#define __KERNEL_TEXT_OFFSET (0) +#endif #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) @@ -197,6 +206,10 @@ extern int page_is_ram(unsigned long pag ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#ifdef CONFIG_PAX_PAGEEXEC +#define CONFIG_ARCH_TRACK_EXEC_LIMIT 1 +#endif + #include #include diff -urNp linux-2.6.22.1/include/asm-i386/paravirt.h linux-2.6.22.1/include/asm-i386/paravirt.h --- linux-2.6.22.1/include/asm-i386/paravirt.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/paravirt.h 2007-08-02 11:38:48.000000000 -0400 @@ -1041,23 +1041,23 @@ static inline unsigned long __raw_local_ #define INTERRUPT_RETURN \ PARA_SITE(PARA_PATCH(PARAVIRT_iret), CLBR_NONE, \ - jmp *%cs:paravirt_ops+PARAVIRT_iret) + jmp *%ss:paravirt_ops+PARAVIRT_iret) #define DISABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(PARAVIRT_irq_disable), clobbers, \ pushl %eax; pushl %ecx; pushl %edx; \ - call *%cs:paravirt_ops+PARAVIRT_irq_disable; \ + call *%ss:paravirt_ops+PARAVIRT_irq_disable; \ popl %edx; popl %ecx; popl %eax) \ #define ENABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable), clobbers, \ pushl %eax; pushl %ecx; pushl %edx; \ - call *%cs:paravirt_ops+PARAVIRT_irq_enable; \ + call *%ss:paravirt_ops+PARAVIRT_irq_enable; \ popl %edx; popl %ecx; popl %eax) #define ENABLE_INTERRUPTS_SYSEXIT \ PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable_sysexit), CLBR_NONE, \ - jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit) + jmp *%ss:paravirt_ops+PARAVIRT_irq_enable_sysexit) #define GET_CR0_INTO_EAX \ push %ecx; push %edx; \ diff -urNp linux-2.6.22.1/include/asm-i386/percpu.h linux-2.6.22.1/include/asm-i386/percpu.h --- linux-2.6.22.1/include/asm-i386/percpu.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/percpu.h 2007-08-02 11:38:48.000000000 -0400 @@ -22,7 +22,7 @@ #define PER_CPU_VAR(var) %fs:per_cpu__##var #else /* ! SMP */ #define PER_CPU(var, reg) \ - movl $per_cpu__##var, reg + movl per_cpu__##var, reg #define PER_CPU_VAR(var) per_cpu__##var #endif /* SMP */ @@ -42,12 +42,12 @@ */ #ifdef CONFIG_SMP /* Same as generic implementation except for optimized local access. */ -#define __GENERIC_PER_CPU /* This is used for other cpus to find our section. */ extern unsigned long __per_cpu_offset[]; +extern void setup_per_cpu_areas(void); -#define per_cpu_offset(x) (__per_cpu_offset[x]) +#define per_cpu_offset(x) (__per_cpu_offset[x] - (unsigned long)__per_cpu_start) /* Separate out the type, so (int[3], foo) works. 
*/ #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name @@ -59,11 +59,11 @@ DECLARE_PER_CPU(unsigned long, this_cpu_ /* var is in discarded region: offset to particular copy we want */ #define per_cpu(var, cpu) (*({ \ - extern int simple_indentifier_##var(void); \ + extern int simple_identifier_##var(void); \ RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); })) #define __raw_get_cpu_var(var) (*({ \ - extern int simple_indentifier_##var(void); \ + extern int simple_identifier_##var(void); \ RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off)); \ })) @@ -74,7 +74,7 @@ DECLARE_PER_CPU(unsigned long, this_cpu_ do { \ unsigned int __i; \ for_each_possible_cpu(__i) \ - memcpy((pcpudst)+__per_cpu_offset[__i], \ + memcpy((pcpudst)+per_cpu_offset(__i), \ (src), (size)); \ } while (0) diff -urNp linux-2.6.22.1/include/asm-i386/pgalloc.h linux-2.6.22.1/include/asm-i386/pgalloc.h --- linux-2.6.22.1/include/asm-i386/pgalloc.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/pgalloc.h 2007-08-02 11:38:48.000000000 -0400 @@ -15,11 +15,19 @@ #define paravirt_release_pd(pfn) do { } while (0) #endif +#ifdef CONFIG_COMPAT_VDSO #define pmd_populate_kernel(mm, pmd, pte) \ do { \ paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT); \ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \ } while (0) +#else +#define pmd_populate_kernel(mm, pmd, pte) \ +do { \ + paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT); \ + set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte))); \ +} while (0) +#endif #define pmd_populate(mm, pmd, pte) \ do { \ diff -urNp linux-2.6.22.1/include/asm-i386/pgtable-2level.h linux-2.6.22.1/include/asm-i386/pgtable-2level.h --- linux-2.6.22.1/include/asm-i386/pgtable-2level.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/pgtable-2level.h 2007-08-02 11:38:48.000000000 -0400 @@ -22,7 +22,19 @@ static inline void native_set_pte_at(str } static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; + + pax_open_kernel(cr0); +#endif + *pmdp = pmd; + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + } #ifndef CONFIG_PARAVIRT #define set_pte(pteptr, pteval) native_set_pte(pteptr, pteval) diff -urNp linux-2.6.22.1/include/asm-i386/pgtable-3level.h linux-2.6.22.1/include/asm-i386/pgtable-3level.h --- linux-2.6.22.1/include/asm-i386/pgtable-3level.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/pgtable-3level.h 2007-08-02 11:38:48.000000000 -0400 @@ -82,11 +82,34 @@ static inline void native_set_pte_atomic } static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; + + pax_open_kernel(cr0); +#endif + set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd)); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + } static inline void native_set_pud(pud_t *pudp, pud_t pud) { + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; + + pax_open_kernel(cr0); +#endif + *pudp = pud; + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + } /* diff -urNp linux-2.6.22/include/asm-i386/pgtable.h linux-2.6.22/include/asm-i386/pgtable.h --- linux-2.6.22/include/asm-i386/pgtable.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22/include/asm-i386/pgtable.h 2007-07-10 14:56:30.000000000 -0400 @@ -34,7 +34,6 @@ struct vm_area_struct; */ #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) extern unsigned long empty_zero_page[1024]; -extern pgd_t swapper_pg_dir[1024]; extern struct kmem_cache *pmd_cache; extern 
spinlock_t pgd_lock; extern struct page *pgd_list; @@ -58,6 +57,11 @@ void paging_init(void); # include #endif +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +#ifdef CONFIG_X86_PAE +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD]; +#endif + #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) @@ -67,9 +71,11 @@ void paging_init(void); #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) +#ifndef CONFIG_X86_PAE #define TWOLEVEL_PGDIR_SHIFT 22 #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT) #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS) +#endif /* Just any arbitrary offset to the start of the vmalloc VM area: the * current 8MB value just means that there will be a 8MB "hole" after the @@ -136,7 +142,7 @@ void paging_init(void); #define PAGE_NONE \ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED \ - __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) + __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_SHARED_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) @@ -202,7 +208,7 @@ extern unsigned long long __PAGE_KERNEL, #undef TEST_ACCESS_OK /* The boot page tables (all created as a single array) */ -extern unsigned long pg0[]; +extern pte_t pg0[]; #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) @@ -225,29 +231,51 @@ static inline int pte_young(pte_t pte) static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; } +#ifdef CONFIG_X86_PAE +# include +#else +# include +#endif + /* * The following only works if pte_present() is not true. */ static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } -static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } + +static inline pte_t pte_exprotect(pte_t pte) +{ +#ifdef CONFIG_X86_PAE + if (__supported_pte_mask & _PAGE_NX) + set_pte(&pte, __pte(pte_val(pte) | _PAGE_NX)); + else +#endif + set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); + return pte; +} + static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; } static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; } static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; } static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } -static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } + +static inline pte_t pte_mkexec(pte_t pte) +{ +#ifdef CONFIG_X86_PAE + if (__supported_pte_mask & _PAGE_NX) + set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); + else +#endif + set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); + return pte; +} + static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } -#ifdef CONFIG_X86_PAE -# include -#else -# include -#endif - #ifndef CONFIG_PARAVIRT /* * Rules for using pte_update - it must be called after any PTE update which @@ -538,6 +566,9 @@ static inline void paravirt_pagetable_se #endif /* 
!__ASSEMBLY__ */ +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + #ifdef CONFIG_FLATMEM #define kern_addr_valid(addr) (1) #endif /* CONFIG_FLATMEM */ diff -urNp linux-2.6.22.1/include/asm-i386/processor.h linux-2.6.22.1/include/asm-i386/processor.h --- linux-2.6.22.1/include/asm-i386/processor.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/processor.h 2007-08-02 11:38:48.000000000 -0400 @@ -100,8 +100,6 @@ struct cpuinfo_x86 { extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 new_cpu_data; -extern struct tss_struct doublefault_tss; -DECLARE_PER_CPU(struct tss_struct, init_tss); #ifdef CONFIG_SMP extern struct cpuinfo_x86 cpu_data[]; @@ -220,11 +218,19 @@ extern int bootloader_type; */ #define TASK_SIZE (PAGE_OFFSET) +#ifdef CONFIG_PAX_SEGMEXEC +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2) +#endif + /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) +#ifdef CONFIG_PAX_SEGMEXEC +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3)) +#endif + #define HAVE_ARCH_PICK_MMAP_LAYOUT /* @@ -345,6 +352,9 @@ struct tss_struct { #define ARCH_MIN_TASKALIGN 16 +extern struct tss_struct doublefault_tss; +extern struct tss_struct init_tss[NR_CPUS]; + struct thread_struct { /* cached TLS descriptors. */ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; @@ -373,7 +383,7 @@ struct thread_struct { }; #define INIT_THREAD { \ - .esp0 = sizeof(init_stack) + (long)&init_stack, \ + .esp0 = sizeof(init_stack) + (long)&init_stack - 8, \ .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ @@ -388,7 +398,7 @@ struct thread_struct { */ #define INIT_TSS { \ .x86_tss = { \ - .esp0 = sizeof(init_stack) + (long)&init_stack, \ + .esp0 = sizeof(init_stack) + (long)&init_stack - 8, \ .ss0 = __KERNEL_DS, \ .ss1 = __KERNEL_CS, \ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ @@ -429,11 +439,7 @@ void show_trace(struct task_struct *task unsigned long get_wchan(struct task_struct *p); #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) -#define KSTK_TOP(info) \ -({ \ - unsigned long *__ptr = (unsigned long *)(info); \ - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ -}) +#define KSTK_TOP(info) ((info)->task.thread.esp0) /* * The below -8 is to reserve 8 bytes on top of the ring0 stack. @@ -448,7 +454,7 @@ unsigned long get_wchan(struct task_stru #define task_pt_regs(task) \ ({ \ struct pt_regs *__regs__; \ - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ + __regs__ = (struct pt_regs *)((task)->thread.esp0); \ __regs__ - 1; \ }) @@ -610,8 +616,8 @@ static inline void cpuid(unsigned int op } /* Some CPUID calls want 'count' to be placed in ecx */ -static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, - int *edx) +static inline void cpuid_count(unsigned int op, unsigned int count, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, + unsigned int *edx) { *eax = op; *ecx = count; diff -urNp linux-2.6.22.1/include/asm-i386/ptrace.h linux-2.6.22.1/include/asm-i386/ptrace.h --- linux-2.6.22.1/include/asm-i386/ptrace.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/ptrace.h 2007-08-02 11:38:48.000000000 -0400 @@ -35,17 +35,18 @@ struct task_struct; extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); /* - * user_mode_vm(regs) determines whether a register set came from user mode. 
+ * user_mode(regs) determines whether a register set came from user mode. * This is true if V8086 mode was enabled OR if the register set was from * protected mode with RPL-3 CS value. This tricky test checks that with * one comparison. Many places in the kernel can bypass this full check - * if they have already ruled out V8086 mode, so user_mode(regs) can be used. + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can + * be used. */ -static inline int user_mode(struct pt_regs *regs) +static inline int user_mode_novm(struct pt_regs *regs) { return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL; } -static inline int user_mode_vm(struct pt_regs *regs) +static inline int user_mode(struct pt_regs *regs) { return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL; } diff -urNp linux-2.6.22.1/include/asm-i386/reboot.h linux-2.6.22.1/include/asm-i386/reboot.h --- linux-2.6.22.1/include/asm-i386/reboot.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/reboot.h 2007-08-02 11:38:48.000000000 -0400 @@ -15,6 +15,6 @@ struct machine_ops extern struct machine_ops machine_ops; -void machine_real_restart(unsigned char *code, int length); +void machine_real_restart(const unsigned char *code, unsigned int length); #endif /* _ASM_REBOOT_H */ diff -urNp linux-2.6.22.1/include/asm-i386/segment.h linux-2.6.22.1/include/asm-i386/segment.h --- linux-2.6.22.1/include/asm-i386/segment.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/segment.h 2007-08-03 14:38:20.000000000 -0400 @@ -81,6 +81,12 @@ #define __KERNEL_PERCPU 0 #endif +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 16) +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8) + +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 17) +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8) + #define GDT_ENTRY_DOUBLEFAULT_TSS 31 /* @@ -140,9 +146,9 @@ #define SEGMENT_IS_KERNEL_CODE(x) (((x) & 0xfc) == GDT_ENTRY_KERNEL_CS * 8) /* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */ -#define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8) +#define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xFFFCU) == __KERNEL_CS || ((x) & 0xFFFCU) == __USER_CS) /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16) #endif diff -urNp linux-2.6.22.1/include/asm-i386/system.h linux-2.6.22.1/include/asm-i386/system.h --- linux-2.6.22.1/include/asm-i386/system.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/system.h 2007-08-02 11:38:48.000000000 -0400 @@ -183,6 +183,21 @@ static inline void native_wbinvd(void) /* Set the 'TS' bit */ #define stts() write_cr0(8 | read_cr0()) +#define pax_open_kernel(cr0) \ +do { \ + typecheck(unsigned long, cr0); \ + preempt_disable(); \ + cr0 = read_cr0(); \ + write_cr0(cr0 & ~X86_CR0_WP); \ +} while (0) + +#define pax_close_kernel(cr0) \ +do { \ + typecheck(unsigned long, cr0); \ + write_cr0(cr0); \ + preempt_enable_no_resched(); \ +} while (0) + #endif /* __KERNEL__ */ static inline unsigned long get_limit(unsigned long segment) @@ -190,7 +205,7 @@ static inline unsigned long get_limit(un unsigned long __limit; __asm__("lsll %1,%0" :"=r" (__limit):"r" (segment)); - return __limit+1; + return __limit; } #define nop() __asm__ __volatile__ ("nop") @@ -319,7 +334,7 @@ static inline void sched_cacheflush(void wbinvd(); } -extern unsigned long 
arch_align_stack(unsigned long sp); +#define arch_align_stack(x) (x) extern void free_init_pages(char *what, unsigned long begin, unsigned long end); void default_idle(void); diff -urNp linux-2.6.22.1/include/asm-i386/uaccess.h linux-2.6.22.1/include/asm-i386/uaccess.h --- linux-2.6.22.1/include/asm-i386/uaccess.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-i386/uaccess.h 2007-08-02 11:38:48.000000000 -0400 @@ -9,6 +9,7 @@ #include #include #include +#include #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -29,7 +30,8 @@ #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) +void __set_fs(mm_segment_t x, int cpu); +void set_fs(mm_segment_t x); #define segment_eq(a,b) ((a).seg == (b).seg) @@ -280,9 +282,12 @@ extern void __put_user_8(void); #define __put_user_u64(x, addr, err) \ __asm__ __volatile__( \ - "1: movl %%eax,0(%2)\n" \ - "2: movl %%edx,4(%2)\n" \ + " movw %w5,%%ds\n" \ + "1: movl %%eax,%%ds:0(%2)\n" \ + "2: movl %%edx,%%ds:4(%2)\n" \ "3:\n" \ + " pushl %%ss\n" \ + " popl %%ds\n" \ ".section .fixup,\"ax\"\n" \ "4: movl %3,%0\n" \ " jmp 3b\n" \ @@ -293,7 +298,8 @@ extern void __put_user_8(void); " .long 2b,4b\n" \ ".previous" \ : "=r"(err) \ - : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) + : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err), \ + "r"(__USER_DS)) #ifdef CONFIG_X86_WP_WORKS_OK @@ -332,8 +338,11 @@ struct __large_struct { unsigned long bu */ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ __asm__ __volatile__( \ - "1: mov"itype" %"rtype"1,%2\n" \ + " movw %w5,%%ds\n" \ + "1: mov"itype" %"rtype"1,%%ds:%2\n" \ "2:\n" \ + " pushl %%ss\n" \ + " popl %%ds\n" \ ".section .fixup,\"ax\"\n" \ "3: movl %3,%0\n" \ " jmp 2b\n" \ @@ -343,7 +352,8 @@ struct __large_struct { unsigned long bu " .long 1b,3b\n" \ ".previous" \ : "=r"(err) \ - : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) + : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err), \ + "r"(__USER_DS)) #define __get_user_nocheck(x,ptr,size) \ @@ -371,8 +381,11 @@ do { \ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ __asm__ __volatile__( \ - "1: mov"itype" %2,%"rtype"1\n" \ + " movw %w5,%%ds\n" \ + "1: mov"itype" %%ds:%2,%"rtype"1\n" \ "2:\n" \ + " pushl %%ss\n" \ + " popl %%ds\n" \ ".section .fixup,\"ax\"\n" \ "3: movl %3,%0\n" \ " xor"itype" %"rtype"1,%"rtype"1\n" \ @@ -383,7 +396,7 @@ do { \ " .long 1b,3b\n" \ ".previous" \ : "=r"(err), ltype (x) \ - : "m"(__m(addr)), "i"(errret), "0"(err)) + : "m"(__m(addr)), "i"(errret), "0"(err), "r"(__USER_DS)) unsigned long __must_check __copy_to_user_ll(void __user *to, diff -urNp linux-2.6.22.1/include/asm-ia64/elf.h linux-2.6.22.1/include/asm-ia64/elf.h --- linux-2.6.22.1/include/asm-ia64/elf.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-ia64/elf.h 2007-08-02 11:38:48.000000000 -0400 @@ -162,7 +162,12 @@ typedef elf_greg_t elf_gregset_t[ELF_NGR typedef struct ia64_fpreg elf_fpreg_t; typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) +#endif struct pt_regs; /* forward declaration... 
*/ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst); diff -urNp linux-2.6.22.1/include/asm-ia64/kmap_types.h linux-2.6.22.1/include/asm-ia64/kmap_types.h --- linux-2.6.22.1/include/asm-ia64/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-ia64/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -22,7 +22,8 @@ D(9) KM_IRQ0, D(10) KM_IRQ1, D(11) KM_SOFTIRQ0, D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR +D(13) KM_CLEARPAGE, +D(14) KM_TYPE_NR }; #undef D diff -urNp linux-2.6.22.1/include/asm-ia64/pgtable.h linux-2.6.22.1/include/asm-ia64/pgtable.h --- linux-2.6.22.1/include/asm-ia64/pgtable.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-ia64/pgtable.h 2007-08-02 11:38:48.000000000 -0400 @@ -143,6 +143,17 @@ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) + +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_READONLY_NOEXEC PAGE_READONLY +# define PAGE_COPY_NOEXEC PAGE_COPY +#endif + #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) diff -urNp linux-2.6.22.1/include/asm-ia64/processor.h linux-2.6.22.1/include/asm-ia64/processor.h --- linux-2.6.22.1/include/asm-ia64/processor.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-ia64/processor.h 2007-08-02 11:38:48.000000000 -0400 @@ -275,7 +275,7 @@ struct thread_struct { .on_ustack = 0, \ .ksp = 0, \ .map_base = DEFAULT_MAP_BASE, \ - .rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \ + .rbs_bot = __STACK_TOP - DEFAULT_USER_STACK_SIZE, \ .task_size = DEFAULT_TASK_SIZE, \ .last_fph_cpu = -1, \ INIT_THREAD_IA32 \ diff -urNp linux-2.6.22.1/include/asm-ia64/ustack.h linux-2.6.22.1/include/asm-ia64/ustack.h --- linux-2.6.22.1/include/asm-ia64/ustack.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-ia64/ustack.h 2007-08-02 11:38:48.000000000 -0400 @@ -10,10 +10,10 @@ /* The absolute hard limit for stack size is 1/2 of the mappable space in the region */ #define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2) -#define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT) +#define __STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT) #endif -/* Make a default stack size of 2GiB */ +/* Make a default stack size of 2GB */ #define DEFAULT_USER_STACK_SIZE (1UL << 31) #endif /* _ASM_IA64_USTACK_H */ diff -urNp linux-2.6.22.1/include/asm-m32r/kmap_types.h linux-2.6.22.1/include/asm-m32r/kmap_types.h --- linux-2.6.22.1/include/asm-m32r/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-m32r/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -21,7 +21,8 @@ D(9) KM_IRQ0, D(10) KM_IRQ1, D(11) KM_SOFTIRQ0, D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR +D(13) KM_CLEARPAGE, +D(14) KM_TYPE_NR }; #undef D diff -urNp linux-2.6.22.1/include/asm-m68k/kmap_types.h linux-2.6.22.1/include/asm-m68k/kmap_types.h --- linux-2.6.22.1/include/asm-m68k/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-m68k/kmap_types.h 2007-08-02 
11:38:48.000000000 -0400 @@ -15,6 +15,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-m68knommu/kmap_types.h linux-2.6.22.1/include/asm-m68knommu/kmap_types.h --- linux-2.6.22.1/include/asm-m68knommu/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-m68knommu/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -15,6 +15,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-mips/a.out.h linux-2.6.22.1/include/asm-mips/a.out.h --- linux-2.6.22.1/include/asm-mips/a.out.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-mips/a.out.h 2007-08-02 11:38:48.000000000 -0400 @@ -35,10 +35,10 @@ struct exec #ifdef __KERNEL__ #ifdef CONFIG_32BIT -#define STACK_TOP TASK_SIZE +#define __STACK_TOP TASK_SIZE #endif #ifdef CONFIG_64BIT -#define STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE) +#define __STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE) #endif #endif diff -urNp linux-2.6.22.1/include/asm-mips/elf.h linux-2.6.22.1/include/asm-mips/elf.h --- linux-2.6.22.1/include/asm-mips/elf.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-mips/elf.h 2007-08-02 11:38:48.000000000 -0400 @@ -371,4 +371,11 @@ extern int dump_task_fpu(struct task_str #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) #endif +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE ((current->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LEN ((current->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LEN ((current->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #endif /* _ASM_ELF_H */ diff -urNp linux-2.6.22.1/include/asm-mips/kmap_types.h linux-2.6.22.1/include/asm-mips/kmap_types.h --- linux-2.6.22.1/include/asm-mips/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-mips/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -22,7 +22,8 @@ D(9) KM_IRQ0, D(10) KM_IRQ1, D(11) KM_SOFTIRQ0, D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR +D(13) KM_CLEARPAGE, +D(14) KM_TYPE_NR }; #undef D diff -urNp linux-2.6.22.1/include/asm-mips/page.h linux-2.6.22.1/include/asm-mips/page.h --- linux-2.6.22.1/include/asm-mips/page.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-mips/page.h 2007-08-02 11:38:48.000000000 -0400 @@ -89,7 +89,7 @@ extern void copy_user_highpage(struct pa #ifdef CONFIG_CPU_MIPS32 typedef struct { unsigned long pte_low, pte_high; } pte_t; #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; }) #else typedef struct { unsigned long long pte; } pte_t; #define pte_val(x) ((x).pte) diff -urNp linux-2.6.22.1/include/asm-parisc/a.out.h linux-2.6.22.1/include/asm-parisc/a.out.h --- linux-2.6.22.1/include/asm-parisc/a.out.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-parisc/a.out.h 2007-08-02 11:38:48.000000000 -0400 @@ -22,7 +22,7 @@ struct exec /* XXX: STACK_TOP actually should be STACK_BOTTOM for parisc. 
* prumpf */ -#define STACK_TOP TASK_SIZE +#define __STACK_TOP TASK_SIZE #endif diff -urNp linux-2.6.22.1/include/asm-parisc/elf.h linux-2.6.22.1/include/asm-parisc/elf.h --- linux-2.6.22.1/include/asm-parisc/elf.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-parisc/elf.h 2007-08-02 11:38:48.000000000 -0400 @@ -337,6 +337,13 @@ struct pt_regs; /* forward declaration.. #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x10000UL + +#define PAX_DELTA_MMAP_LEN 16 +#define PAX_DELTA_STACK_LEN 16 +#endif + /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, but it's not easy, and we've already done it here. */ diff -urNp linux-2.6.22.1/include/asm-parisc/kmap_types.h linux-2.6.22.1/include/asm-parisc/kmap_types.h --- linux-2.6.22.1/include/asm-parisc/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-parisc/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -22,7 +22,8 @@ D(9) KM_IRQ0, D(10) KM_IRQ1, D(11) KM_SOFTIRQ0, D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR +D(13) KM_CLEARPAGE, +D(14) KM_TYPE_NR }; #undef D diff -urNp linux-2.6.22.1/include/asm-parisc/pgtable.h linux-2.6.22.1/include/asm-parisc/pgtable.h --- linux-2.6.22.1/include/asm-parisc/pgtable.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-parisc/pgtable.h 2007-08-02 11:38:48.000000000 -0400 @@ -218,6 +218,17 @@ extern void *vmalloc_start; #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) #define PAGE_COPY PAGE_EXECREAD #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) + +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE) #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) diff -urNp linux-2.6.22.1/include/asm-powerpc/a.out.h linux-2.6.22.1/include/asm-powerpc/a.out.h --- linux-2.6.22.1/include/asm-powerpc/a.out.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-powerpc/a.out.h 2007-08-02 11:38:48.000000000 -0400 @@ -23,12 +23,12 @@ struct exec #define STACK_TOP_USER64 TASK_SIZE_USER64 #define STACK_TOP_USER32 TASK_SIZE_USER32 -#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ +#define __STACK_TOP (test_thread_flag(TIF_32BIT) ? 
\ STACK_TOP_USER32 : STACK_TOP_USER64) #else /* __powerpc64__ */ -#define STACK_TOP TASK_SIZE +#define __STACK_TOP TASK_SIZE #endif /* __powerpc64__ */ #endif /* __KERNEL__ */ diff -urNp linux-2.6.22.1/include/asm-powerpc/elf.h linux-2.6.22.1/include/asm-powerpc/elf.h --- linux-2.6.22.1/include/asm-powerpc/elf.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-powerpc/elf.h 2007-08-02 11:38:48.000000000 -0400 @@ -159,6 +159,18 @@ typedef elf_vrreg_t elf_vrregset_t[ELF_N typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32]; #endif +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (0x10000000UL) + +#ifdef __powerpc64__ +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28) +#else +#define PAX_DELTA_MMAP_LEN 15 +#define PAX_DELTA_STACK_LEN 15 +#endif +#endif + #ifdef __KERNEL__ /* * This is used to ensure we don't load something for the wrong architecture. diff -urNp linux-2.6.22.1/include/asm-powerpc/kmap_types.h linux-2.6.22.1/include/asm-powerpc/kmap_types.h --- linux-2.6.22.1/include/asm-powerpc/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-powerpc/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -26,6 +26,7 @@ enum km_type { KM_SOFTIRQ1, KM_PPC_SYNC_PAGE, KM_PPC_SYNC_ICACHE, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-powerpc/page_64.h linux-2.6.22.1/include/asm-powerpc/page_64.h --- linux-2.6.22.1/include/asm-powerpc/page_64.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-powerpc/page_64.h 2007-08-02 11:38:48.000000000 -0400 @@ -158,15 +158,18 @@ extern int is_hugepage_only_range(struct * stack by default, so in the absense of a PT_GNU_STACK program header * we turn execute permission off. */ -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_STACK_DEFAULT_FLAGS32 \ + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#ifndef CONFIG_PAX_PAGEEXEC #define VM_STACK_DEFAULT_FLAGS \ (test_thread_flag(TIF_32BIT) ? \ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) +#endif #include diff -urNp linux-2.6.22.1/include/asm-powerpc/page.h linux-2.6.22.1/include/asm-powerpc/page.h --- linux-2.6.22.1/include/asm-powerpc/page.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-powerpc/page.h 2007-08-02 11:38:48.000000000 -0400 @@ -71,8 +71,9 @@ * and needs to be executable. This means the whole heap ends * up being executable. */ -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS32 \ + (((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0 ) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) diff -urNp linux-2.6.22.1/include/asm-ppc/mmu_context.h linux-2.6.22.1/include/asm-ppc/mmu_context.h --- linux-2.6.22.1/include/asm-ppc/mmu_context.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-ppc/mmu_context.h 2007-08-02 11:38:48.000000000 -0400 @@ -145,7 +145,8 @@ static inline void get_mmu_context(struc static inline int init_new_context(struct task_struct *t, struct mm_struct *mm) { mm->context.id = NO_CONTEXT; - mm->context.vdso_base = 0; + if (t == current) + mm->context.vdso_base = ~0UL; return 0; } diff -urNp linux-2.6.22.1/include/asm-ppc/pgtable.h linux-2.6.22.1/include/asm-ppc/pgtable.h --- linux-2.6.22.1/include/asm-ppc/pgtable.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-ppc/pgtable.h 2007-08-02 11:38:48.000000000 -0400 @@ -440,11 +440,21 @@ extern unsigned long ioremap_bot, iorema #define PAGE_NONE __pgprot(_PAGE_BASE) #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC) #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) -#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC | _PAGE_HWEXEC) #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC) + +#if defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_40x) && !defined(CONFIG_44x) +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_GUARDED) +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED) +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif #define PAGE_KERNEL __pgprot(_PAGE_RAM) #define PAGE_KERNEL_NOCACHE __pgprot(_PAGE_IO) @@ -456,21 +466,21 @@ extern unsigned long ioremap_bot, iorema * This is the closest we can get.. 
*/ #define __P000 PAGE_NONE -#define __P001 PAGE_READONLY_X -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY_X -#define __P100 PAGE_READONLY +#define __P001 PAGE_READONLY_NOEXEC +#define __P010 PAGE_COPY_NOEXEC +#define __P011 PAGE_COPY_NOEXEC +#define __P100 PAGE_READONLY_X #define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY +#define __P110 PAGE_COPY_X #define __P111 PAGE_COPY_X #define __S000 PAGE_NONE -#define __S001 PAGE_READONLY_X -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED_X -#define __S100 PAGE_READONLY +#define __S001 PAGE_READONLY_NOEXEC +#define __S010 PAGE_SHARED_NOEXEC +#define __S011 PAGE_SHARED_NOEXEC +#define __S100 PAGE_READONLY_X #define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED +#define __S110 PAGE_SHARED_X #define __S111 PAGE_SHARED_X #ifndef __ASSEMBLY__ diff -urNp linux-2.6.22.1/include/asm-s390/kmap_types.h linux-2.6.22.1/include/asm-s390/kmap_types.h --- linux-2.6.22.1/include/asm-s390/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-s390/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -16,6 +16,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-sh/kmap_types.h linux-2.6.22.1/include/asm-sh/kmap_types.h --- linux-2.6.22.1/include/asm-sh/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-sh/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -24,7 +24,8 @@ D(9) KM_IRQ0, D(10) KM_IRQ1, D(11) KM_SOFTIRQ0, D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR +D(13) KM_CLEARPAGE, +D(14) KM_TYPE_NR }; #undef D diff -urNp linux-2.6.22.1/include/asm-sparc/a.out.h linux-2.6.22.1/include/asm-sparc/a.out.h --- linux-2.6.22.1/include/asm-sparc/a.out.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-sparc/a.out.h 2007-08-02 11:38:48.000000000 -0400 @@ -91,7 +91,7 @@ struct relocation_info /* used when head #include -#define STACK_TOP (PAGE_OFFSET - PAGE_SIZE) +#define __STACK_TOP (PAGE_OFFSET - PAGE_SIZE) #endif /* __KERNEL__ */ diff -urNp linux-2.6.22.1/include/asm-sparc/elf.h linux-2.6.22.1/include/asm-sparc/elf.h --- linux-2.6.22.1/include/asm-sparc/elf.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-sparc/elf.h 2007-08-02 11:38:48.000000000 -0400 @@ -143,6 +143,13 @@ do { unsigned long *dest = &(__elf_regs[ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x10000UL + +#define PAX_DELTA_MMAP_LEN 16 +#define PAX_DELTA_STACK_LEN 16 +#endif + /* This yields a mask that user programs can use to figure out what instruction set this cpu supports. This can NOT be done in userspace on Sparc. 
*/ diff -urNp linux-2.6.22.1/include/asm-sparc/kmap_types.h linux-2.6.22.1/include/asm-sparc/kmap_types.h --- linux-2.6.22.1/include/asm-sparc/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-sparc/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -15,6 +15,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-sparc/pgtable.h linux-2.6.22.1/include/asm-sparc/pgtable.h --- linux-2.6.22.1/include/asm-sparc/pgtable.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-sparc/pgtable.h 2007-08-02 11:38:48.000000000 -0400 @@ -49,6 +49,13 @@ BTFIXUPDEF_INT(page_none) BTFIXUPDEF_INT(page_shared) BTFIXUPDEF_INT(page_copy) BTFIXUPDEF_INT(page_readonly) + +#ifdef CONFIG_PAX_PAGEEXEC +BTFIXUPDEF_INT(page_shared_noexec) +BTFIXUPDEF_INT(page_copy_noexec) +BTFIXUPDEF_INT(page_readonly_noexec) +#endif + BTFIXUPDEF_INT(page_kernel) #define PMD_SHIFT SUN4C_PMD_SHIFT @@ -70,6 +77,16 @@ BTFIXUPDEF_INT(page_kernel) #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy)) #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly)) +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(BTFIXUP_INT(page_shared_noexec)) +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec)) +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec)) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + extern unsigned long page_kernel; #ifdef MODULE diff -urNp linux-2.6.22.1/include/asm-sparc/pgtsrmmu.h linux-2.6.22.1/include/asm-sparc/pgtsrmmu.h --- linux-2.6.22.1/include/asm-sparc/pgtsrmmu.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-sparc/pgtsrmmu.h 2007-08-02 11:38:48.000000000 -0400 @@ -115,6 +115,16 @@ SRMMU_EXEC | SRMMU_REF) #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \ SRMMU_EXEC | SRMMU_REF) + +#ifdef CONFIG_PAX_PAGEEXEC +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \ + SRMMU_WRITE | SRMMU_REF) +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \ + SRMMU_REF) +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \ + SRMMU_REF) +#endif + #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \ SRMMU_DIRTY | SRMMU_REF) diff -urNp linux-2.6.22.1/include/asm-sparc/uaccess.h linux-2.6.22.1/include/asm-sparc/uaccess.h --- linux-2.6.22.1/include/asm-sparc/uaccess.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-sparc/uaccess.h 2007-08-02 11:38:48.000000000 -0400 @@ -41,7 +41,7 @@ * No one can read/write anything from userland in the kernel space by setting * large size and address near to PAGE_OFFSET - a fault will break his intentions. 
*/ -#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) +#define __user_ok(addr, size) ({ (void)(size); (addr) < __STACK_TOP; }) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size))) #define access_ok(type, addr, size) \ diff -urNp linux-2.6.22.1/include/asm-sparc64/a.out.h linux-2.6.22.1/include/asm-sparc64/a.out.h --- linux-2.6.22.1/include/asm-sparc64/a.out.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-sparc64/a.out.h 2007-08-02 11:38:48.000000000 -0400 @@ -98,7 +98,7 @@ struct relocation_info /* used when head #define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE) #define STACK_TOP64 (0x0000080000000000UL - (1UL << 32UL)) -#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ +#define __STACK_TOP (test_thread_flag(TIF_32BIT) ? \ STACK_TOP32 : STACK_TOP64) #endif diff -urNp linux-2.6.22.1/include/asm-sparc64/elf.h linux-2.6.22.1/include/asm-sparc64/elf.h --- linux-2.6.22.1/include/asm-sparc64/elf.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-sparc64/elf.h 2007-08-02 11:38:48.000000000 -0400 @@ -142,6 +142,12 @@ typedef struct { #define ELF_ET_DYN_BASE 0x0000010000000000UL #endif +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL) + +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28 ) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29 ) +#endif /* This yields a mask that user programs can use to figure out what instruction set this cpu supports. */ diff -urNp linux-2.6.22.1/include/asm-sparc64/kmap_types.h linux-2.6.22.1/include/asm-sparc64/kmap_types.h --- linux-2.6.22.1/include/asm-sparc64/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-sparc64/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -19,6 +19,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-um/kmap_types.h linux-2.6.22.1/include/asm-um/kmap_types.h --- linux-2.6.22.1/include/asm-um/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-um/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -23,6 +23,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-v850/kmap_types.h linux-2.6.22.1/include/asm-v850/kmap_types.h --- linux-2.6.22.1/include/asm-v850/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-v850/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -13,6 +13,7 @@ enum km_type { KM_PTE1, KM_IRQ0, KM_IRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-x86_64/a.out.h linux-2.6.22.1/include/asm-x86_64/a.out.h --- linux-2.6.22.1/include/asm-x86_64/a.out.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-x86_64/a.out.h 2007-08-02 11:38:48.000000000 -0400 @@ -21,7 +21,7 @@ struct exec #ifdef __KERNEL__ #include -#define STACK_TOP TASK_SIZE +#define __STACK_TOP TASK_SIZE #endif #endif /* __A_OUT_GNU_H__ */ diff -urNp linux-2.6.22.1/include/asm-x86_64/elf.h linux-2.6.22.1/include/asm-x86_64/elf.h --- linux-2.6.22.1/include/asm-x86_64/elf.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-x86_64/elf.h 2007-08-02 11:38:48.000000000 -0400 @@ -92,6 +92,13 @@ typedef struct user_i387_struct elf_fpre #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_IA32) ? 
0x08048000UL : 0x400000UL) + +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_IA32) ? 16 : 32) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_IA32) ? 16 : 32) +#endif + /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is now struct_user_regs, they are different). Assumes current is the process getting dumped. */ diff -urNp linux-2.6.22.1/include/asm-x86_64/futex.h linux-2.6.22.1/include/asm-x86_64/futex.h --- linux-2.6.22.1/include/asm-x86_64/futex.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-x86_64/futex.h 2007-08-02 11:38:48.000000000 -0400 @@ -42,7 +42,7 @@ : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0)) static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -95,7 +95,7 @@ futex_atomic_op_inuser (int encoded_op, } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval) { if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; diff -urNp linux-2.6.22.1/include/asm-x86_64/ia32.h linux-2.6.22.1/include/asm-x86_64/ia32.h --- linux-2.6.22.1/include/asm-x86_64/ia32.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-x86_64/ia32.h 2007-08-02 11:38:48.000000000 -0400 @@ -156,7 +156,13 @@ struct ustat32 { char f_fpack[6]; }; -#define IA32_STACK_TOP IA32_PAGE_OFFSET +#ifdef CONFIG_PAX_RANDUSTACK +#define IA32_DELTA_STACK (current->mm->delta_stack) +#else +#define IA32_DELTA_STACK 0UL +#endif + +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - IA32_DELTA_STACK) #ifdef __KERNEL__ struct user_desc; diff -urNp linux-2.6.22.1/include/asm-x86_64/kmap_types.h linux-2.6.22.1/include/asm-x86_64/kmap_types.h --- linux-2.6.22.1/include/asm-x86_64/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-x86_64/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -13,6 +13,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/asm-x86_64/page.h linux-2.6.22.1/include/asm-x86_64/page.h --- linux-2.6.22.1/include/asm-x86_64/page.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-x86_64/page.h 2007-08-02 11:38:48.000000000 -0400 @@ -93,6 +93,8 @@ extern unsigned long phys_base; #define __START_KERNEL_map _AC(0xffffffff80000000, UL) #define __PAGE_OFFSET _AC(0xffff810000000000, UL) +#define __KERNEL_TEXT_OFFSET (0) + /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) diff -urNp linux-2.6.22.1/include/asm-x86_64/pgalloc.h linux-2.6.22.1/include/asm-x86_64/pgalloc.h --- linux-2.6.22.1/include/asm-x86_64/pgalloc.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-x86_64/pgalloc.h 2007-08-02 11:38:48.000000000 -0400 @@ -6,7 +6,7 @@ #include #define pmd_populate_kernel(mm, pmd, pte) \ - set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte))) + set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(pte))) #define pud_populate(mm, pud, pmd) \ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd))) #define pgd_populate(mm, pgd, pud) \ diff -urNp linux-2.6.22.1/include/asm-x86_64/pgtable.h linux-2.6.22.1/include/asm-x86_64/pgtable.h --- linux-2.6.22.1/include/asm-x86_64/pgtable.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-x86_64/pgtable.h 2007-08-02 11:38:48.000000000 -0400 @@ -179,6 +179,10 @@ static inline pte_t 
ptep_get_and_clear_f #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) + +#define PAGE_READONLY_NOEXEC PAGE_READONLY +#define PAGE_SHARED_NOEXEC PAGE_SHARED + #define __PAGE_KERNEL \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) #define __PAGE_KERNEL_EXEC \ @@ -268,7 +272,15 @@ static inline pte_t pfn_pte(unsigned lon #define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; } static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } -static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); } + +static inline int pte_exec(pte_t pte) +{ + if (__supported_pte_mask & _PAGE_NX) + return !(pte_val(pte) & _PAGE_NX); + else + return (pte_val(pte) & _PAGE_USER); +} + static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } @@ -276,12 +288,30 @@ static inline int pte_file(pte_t pte) { static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; } static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } -static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } + +static inline pte_t pte_exprotect(pte_t pte) +{ + if (__supported_pte_mask & _PAGE_NX) + set_pte(&pte, __pte(pte_val(pte) | _PAGE_NX)); + else + set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); + return pte; +} + static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; } static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } -static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; } + +static inline pte_t pte_mkexec(pte_t pte) +{ + if (__supported_pte_mask & _PAGE_NX) + set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); + else + set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); + return pte; +} + static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } diff -urNp linux-2.6.22.1/include/asm-x86_64/system.h linux-2.6.22.1/include/asm-x86_64/system.h --- linux-2.6.22.1/include/asm-x86_64/system.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-x86_64/system.h 2007-08-02 11:38:48.000000000 -0400 @@ -159,7 +159,7 @@ static inline void sched_cacheflush(void void cpu_idle_wait(void); -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) (x) extern void free_init_pages(char *what, unsigned long begin, unsigned long end); #endif diff -urNp linux-2.6.22.1/include/asm-xtensa/kmap_types.h linux-2.6.22.1/include/asm-xtensa/kmap_types.h --- 
linux-2.6.22.1/include/asm-xtensa/kmap_types.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/asm-xtensa/kmap_types.h 2007-08-02 11:38:48.000000000 -0400 @@ -25,6 +25,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.22.1/include/linux/a.out.h linux-2.6.22.1/include/linux/a.out.h --- linux-2.6.22.1/include/linux/a.out.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/a.out.h 2007-08-02 11:38:48.000000000 -0400 @@ -7,6 +7,16 @@ #include +#ifdef CONFIG_PAX_RANDUSTACK +#define __DELTA_STACK (current->mm->delta_stack) +#else +#define __DELTA_STACK 0UL +#endif + +#ifndef STACK_TOP +#define STACK_TOP (__STACK_TOP - __DELTA_STACK) +#endif + #endif /* __STRUCT_EXEC_OVERRIDE__ */ /* these go in the N_MACHTYPE field */ @@ -37,6 +47,14 @@ enum machine_type { M_MIPS2 = 152 /* MIPS R6000/R4000 binary */ }; +/* Constants for the N_FLAGS field */ +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */ +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */ +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */ +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */ +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ + #if !defined (N_MAGIC) #define N_MAGIC(exec) ((exec).a_info & 0xffff) #endif diff -urNp linux-2.6.22.1/include/linux/binfmts.h linux-2.6.22.1/include/linux/binfmts.h --- linux-2.6.22.1/include/linux/binfmts.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/binfmts.h 2007-08-02 11:38:48.000000000 -0400 @@ -7,10 +7,10 @@ struct pt_regs; /* * MAX_ARG_PAGES defines the number of pages allocated for arguments - * and envelope for the new program. 32 should suffice, this gives - * a maximum env+arg of 128kB w/4KB pages! + * and envelope for the new program. 33 should suffice, this gives + * a maximum env+arg of 132kB w/4KB pages! 
*/ -#define MAX_ARG_PAGES 32 +#define MAX_ARG_PAGES 33 /* sizeof(linux_binprm->buf) */ #define BINPRM_BUF_SIZE 128 @@ -40,6 +40,7 @@ struct linux_binprm{ unsigned interp_flags; unsigned interp_data; unsigned long loader, exec; + int misc; }; #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 @@ -90,5 +91,8 @@ extern void compute_creds(struct linux_b extern int do_coredump(long signr, int exit_code, struct pt_regs * regs); extern int set_binfmt(struct linux_binfmt *new); +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp); +void pax_report_insns(void *pc, void *sp); + #endif /* __KERNEL__ */ #endif /* _LINUX_BINFMTS_H */ diff -urNp linux-2.6.22.1/include/linux/cache.h linux-2.6.22.1/include/linux/cache.h --- linux-2.6.22.1/include/linux/cache.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/cache.h 2007-08-02 11:38:48.000000000 -0400 @@ -16,6 +16,10 @@ #define __read_mostly #endif +#ifndef __read_only +#define __read_only +#endif + #ifndef ____cacheline_aligned #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) #endif diff -urNp linux-2.6.22.1/include/linux/capability.h linux-2.6.22.1/include/linux/capability.h --- linux-2.6.22.1/include/linux/capability.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/capability.h 2007-08-02 11:09:16.000000000 -0400 @@ -366,6 +366,7 @@ static inline kernel_cap_t cap_invert(ke #define cap_is_fs_cap(c) (CAP_TO_MASK(c) & CAP_FS_MASK) int capable(int cap); +int capable_nolog(int cap); int __capable(struct task_struct *t, int cap); #endif /* __KERNEL__ */ diff -urNp linux-2.6.22.1/include/linux/elf.h linux-2.6.22.1/include/linux/elf.h --- linux-2.6.22.1/include/linux/elf.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/elf.h 2007-08-02 11:38:48.000000000 -0400 @@ -8,6 +8,10 @@ struct file; +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) +#undef elf_read_implies_exec +#endif + #ifndef elf_read_implies_exec /* Executables for which elf_read_implies_exec() returns TRUE will have the READ_IMPLIES_EXEC personality flag set automatically. 
@@ -49,6 +53,16 @@ typedef __s64 Elf64_Sxword; #define PT_GNU_STACK (PT_LOOS + 0x474e551) +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580) + +/* Constants for the e_flags field */ +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */ +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */ +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */ +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */ +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ + /* These constants define the different elf file types */ #define ET_NONE 0 #define ET_REL 1 @@ -83,6 +97,8 @@ typedef __s64 Elf64_Sxword; #define DT_DEBUG 21 #define DT_TEXTREL 22 #define DT_JMPREL 23 +#define DT_FLAGS 30 + #define DF_TEXTREL 0x00000004 #define DT_ENCODING 32 #define OLD_DT_LOOS 0x60000000 #define DT_LOOS 0x6000000d @@ -229,6 +245,19 @@ typedef struct elf64_hdr { #define PF_W 0x2 #define PF_X 0x1 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */ +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */ +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */ +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */ +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */ +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */ +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */ +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */ +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */ +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */ +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */ +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */ + typedef struct elf32_phdr{ Elf32_Word p_type; Elf32_Off p_offset; @@ -321,6 +350,8 @@ typedef struct elf64_shdr { #define EI_OSABI 7 #define EI_PAD 8 +#define EI_PAX 14 + #define ELFMAG0 0x7f /* EI_MAG */ #define ELFMAG1 'E' #define ELFMAG2 'L' @@ -378,6 +409,7 @@ extern Elf32_Dyn _DYNAMIC []; #define elf_phdr elf32_phdr #define elf_note elf32_note #define elf_addr_t Elf32_Off +#define elf_dyn Elf32_Dyn #else @@ -386,6 +418,7 @@ extern Elf64_Dyn _DYNAMIC []; #define elf_phdr elf64_phdr #define elf_note elf64_note #define elf_addr_t Elf64_Off +#define elf_dyn Elf64_Dyn #endif diff -urNp linux-2.6.22.1/include/linux/ext4_fs_extents.h linux-2.6.22.1/include/linux/ext4_fs_extents.h --- linux-2.6.22.1/include/linux/ext4_fs_extents.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/ext4_fs_extents.h 2007-08-02 11:38:48.000000000 -0400 @@ -50,7 +50,7 @@ #ifdef EXT_DEBUG #define ext_debug(a...) printk(a) #else -#define ext_debug(a...) +#define ext_debug(a...) 
do {} while (0) #endif /* diff -urNp linux-2.6.22.1/include/linux/gracl.h linux-2.6.22.1/include/linux/gracl.h --- linux-2.6.22.1/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/include/linux/gracl.h 2007-08-02 12:25:41.000000000 -0400 @@ -0,0 +1,316 @@ +#ifndef GR_ACL_H +#define GR_ACL_H + +#include +#include +#include +#include + +/* Major status information */ + +#define GR_VERSION "grsecurity 2.1.11" +#define GRSECURITY_VERSION 0x2111 + +enum { + + SHUTDOWN = 0, + ENABLE = 1, + SPROLE = 2, + RELOAD = 3, + SEGVMOD = 4, + STATUS = 5, + UNSPROLE = 6, + PASSSET = 7, + SPROLEPAM = 8 +}; + +/* Password setup definitions + * kernel/grhash.c */ +enum { + GR_PW_LEN = 128, + GR_SALT_LEN = 16, + GR_SHA_LEN = 32, +}; + +enum { + GR_SPROLE_LEN = 64, +}; + +#define GR_NLIMITS (RLIMIT_LOCKS + 2) + +/* Begin Data Structures */ + +struct sprole_pw { + unsigned char *rolename; + unsigned char salt[GR_SALT_LEN]; + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */ +}; + +struct name_entry { + __u32 key; + ino_t inode; + dev_t device; + char *name; + __u16 len; + struct name_entry *prev; + struct name_entry *next; +}; + +struct inodev_entry { + struct name_entry *nentry; + struct inodev_entry *prev; + struct inodev_entry *next; +}; + +struct acl_role_db { + struct acl_role_label **r_hash; + __u32 r_size; +}; + +struct inodev_db { + struct inodev_entry **i_hash; + __u32 i_size; +}; + +struct name_db { + struct name_entry **n_hash; + __u32 n_size; +}; + +struct crash_uid { + uid_t uid; + unsigned long expires; +}; + +struct gr_hash_struct { + void **table; + void **nametable; + void *first; + __u32 table_size; + __u32 used_size; + int type; +}; + +/* Userspace Grsecurity ACL data structures */ + +struct acl_subject_label { + char *filename; + ino_t inode; + dev_t device; + __u32 mode; + __u32 cap_mask; + __u32 cap_lower; + + struct rlimit res[GR_NLIMITS]; + __u16 resmask; + + __u8 user_trans_type; + __u8 group_trans_type; + uid_t *user_transitions; + gid_t *group_transitions; + __u16 user_trans_num; + __u16 group_trans_num; + + __u32 ip_proto[8]; + __u32 ip_type; + struct acl_ip_label **ips; + __u32 ip_num; + + __u32 crashes; + unsigned long expires; + + struct acl_subject_label *parent_subject; + struct gr_hash_struct *hash; + struct acl_subject_label *prev; + struct acl_subject_label *next; + + struct acl_object_label **obj_hash; + __u32 obj_hash_size; + __u16 pax_flags; +}; + +struct role_allowed_ip { + __u32 addr; + __u32 netmask; + + struct role_allowed_ip *prev; + struct role_allowed_ip *next; +}; + +struct role_transition { + char *rolename; + + struct role_transition *prev; + struct role_transition *next; +}; + +struct acl_role_label { + char *rolename; + uid_t uidgid; + __u16 roletype; + + __u16 auth_attempts; + unsigned long expires; + + struct acl_subject_label *root_label; + struct gr_hash_struct *hash; + + struct acl_role_label *prev; + struct acl_role_label *next; + + struct role_transition *transitions; + struct role_allowed_ip *allowed_ips; + uid_t *domain_children; + __u16 domain_child_num; + + struct acl_subject_label **subj_hash; + __u32 subj_hash_size; +}; + +struct user_acl_role_db { + struct acl_role_label **r_table; + __u32 num_pointers; /* Number of allocations to track */ + __u32 num_roles; /* Number of roles */ + __u32 num_domain_children; /* Number of domain children */ + __u32 num_subjects; /* Number of subjects */ + __u32 num_objects; /* Number of objects */ +}; + +struct acl_object_label { + char *filename; + ino_t inode; + dev_t 
device; + __u32 mode; + + struct acl_subject_label *nested; + struct acl_object_label *globbed; + + /* next two structures not used */ + + struct acl_object_label *prev; + struct acl_object_label *next; +}; + +struct acl_ip_label { + char *iface; + __u32 addr; + __u32 netmask; + __u16 low, high; + __u8 mode; + __u32 type; + __u32 proto[8]; + + /* next two structures not used */ + + struct acl_ip_label *prev; + struct acl_ip_label *next; +}; + +struct gr_arg { + struct user_acl_role_db role_db; + unsigned char pw[GR_PW_LEN]; + unsigned char salt[GR_SALT_LEN]; + unsigned char sum[GR_SHA_LEN]; + unsigned char sp_role[GR_SPROLE_LEN]; + struct sprole_pw *sprole_pws; + dev_t segv_device; + ino_t segv_inode; + uid_t segv_uid; + __u16 num_sprole_pws; + __u16 mode; +}; + +struct gr_arg_wrapper { + struct gr_arg *arg; + __u32 version; + __u32 size; +}; + +struct subject_map { + struct acl_subject_label *user; + struct acl_subject_label *kernel; + struct subject_map *prev; + struct subject_map *next; +}; + +struct acl_subj_map_db { + struct subject_map **s_hash; + __u32 s_size; +}; + +/* End Data Structures Section */ + +/* Hash functions generated by empirical testing by Brad Spengler + Makes good use of the low bits of the inode. Generally 0-1 times + in loop for successful match. 0-3 for unsuccessful match. + Shift/add algorithm with modulus of table size and an XOR*/ + +static __inline__ unsigned int +rhash(const uid_t uid, const __u16 type, const unsigned int sz) +{ + return (((uid << type) + (uid ^ type)) % sz); +} + + static __inline__ unsigned int +shash(const struct acl_subject_label *userp, const unsigned int sz) +{ + return ((const unsigned long)userp % sz); +} + +static __inline__ unsigned int +fhash(const ino_t ino, const dev_t dev, const unsigned int sz) +{ + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz); +} + +static __inline__ unsigned int +nhash(const char *name, const __u16 len, const unsigned int sz) +{ + return full_name_hash(name, len) % sz; +} + +#define FOR_EACH_ROLE_START(role,iter) \ + role = NULL; \ + iter = 0; \ + while (iter < acl_role_set.r_size) { \ + if (role == NULL) \ + role = acl_role_set.r_hash[iter]; \ + if (role == NULL) { \ + iter++; \ + continue; \ + } + +#define FOR_EACH_ROLE_END(role,iter) \ + role = role->next; \ + if (role == NULL) \ + iter++; \ + } + +#define FOR_EACH_SUBJECT_START(role,subj,iter) \ + subj = NULL; \ + iter = 0; \ + while (iter < role->subj_hash_size) { \ + if (subj == NULL) \ + subj = role->subj_hash[iter]; \ + if (subj == NULL) { \ + iter++; \ + continue; \ + } + +#define FOR_EACH_SUBJECT_END(subj,iter) \ + subj = subj->next; \ + if (subj == NULL) \ + iter++; \ + } + + +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \ + subj = role->hash->first; \ + while (subj != NULL) { + +#define FOR_EACH_NESTED_SUBJECT_END(subj) \ + subj = subj->next; \ + } + +#endif + diff -urNp linux-2.6.22.1/include/linux/gralloc.h linux-2.6.22.1/include/linux/gralloc.h --- linux-2.6.22.1/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/include/linux/gralloc.h 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,8 @@ +#ifndef __GRALLOC_H +#define __GRALLOC_H + +void acl_free_all(void); +int acl_alloc_stack_init(unsigned long size); +void *acl_alloc(unsigned long len); + +#endif diff -urNp linux-2.6.22.1/include/linux/grdefs.h linux-2.6.22.1/include/linux/grdefs.h --- linux-2.6.22.1/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/include/linux/grdefs.h 2007-08-02 11:09:16.000000000 -0400 
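/*
 * Illustrative sketch, not part of the patch: the fhash() helper defined in
 * gracl.h above spreads (inode, device) pairs across the obj_hash buckets by
 * mixing the low inode bits with shifted copies of themselves and reducing
 * modulo the table size.  The standalone userspace program below uses
 * simplified types and made-up sample values (all hypothetical) and just
 * prints which bucket a few inodes would land in.
 */
#include <stdio.h>

static unsigned int fhash_demo(unsigned long ino, unsigned int dev,
                               unsigned int sz)
{
        /* same expression as fhash() in gracl.h */
        return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
        const unsigned long inodes[] = { 2, 3, 4, 4096, 4097 };
        const unsigned int table_size = 256;    /* hypothetical obj_hash_size */
        unsigned int i;

        for (i = 0; i < sizeof(inodes) / sizeof(inodes[0]); i++)
                printf("inode %lu on dev 8 -> bucket %u\n",
                       inodes[i], fhash_demo(inodes[i], 8, table_size));
        return 0;
}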
@@ -0,0 +1,131 @@ +#ifndef GRDEFS_H +#define GRDEFS_H + +/* Begin grsecurity status declarations */ + +enum { + GR_READY = 0x01, + GR_STATUS_INIT = 0x00 // disabled state +}; + +/* Begin ACL declarations */ + +/* Role flags */ + +enum { + GR_ROLE_USER = 0x0001, + GR_ROLE_GROUP = 0x0002, + GR_ROLE_DEFAULT = 0x0004, + GR_ROLE_SPECIAL = 0x0008, + GR_ROLE_AUTH = 0x0010, + GR_ROLE_NOPW = 0x0020, + GR_ROLE_GOD = 0x0040, + GR_ROLE_LEARN = 0x0080, + GR_ROLE_TPE = 0x0100, + GR_ROLE_DOMAIN = 0x0200, + GR_ROLE_PAM = 0x0400 +}; + +/* ACL Subject and Object mode flags */ +enum { + GR_DELETED = 0x80000000 +}; + +/* ACL Object-only mode flags */ +enum { + GR_READ = 0x00000001, + GR_APPEND = 0x00000002, + GR_WRITE = 0x00000004, + GR_EXEC = 0x00000008, + GR_FIND = 0x00000010, + GR_INHERIT = 0x00000020, + GR_SETID = 0x00000040, + GR_CREATE = 0x00000080, + GR_DELETE = 0x00000100, + GR_LINK = 0x00000200, + GR_AUDIT_READ = 0x00000400, + GR_AUDIT_APPEND = 0x00000800, + GR_AUDIT_WRITE = 0x00001000, + GR_AUDIT_EXEC = 0x00002000, + GR_AUDIT_FIND = 0x00004000, + GR_AUDIT_INHERIT= 0x00008000, + GR_AUDIT_SETID = 0x00010000, + GR_AUDIT_CREATE = 0x00020000, + GR_AUDIT_DELETE = 0x00040000, + GR_AUDIT_LINK = 0x00080000, + GR_PTRACERD = 0x00100000, + GR_NOPTRACE = 0x00200000, + GR_SUPPRESS = 0x00400000, + GR_NOLEARN = 0x00800000 +}; + +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \ + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \ + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK) + +/* ACL subject-only mode flags */ +enum { + GR_KILL = 0x00000001, + GR_VIEW = 0x00000002, + GR_PROTECTED = 0x00000004, + GR_LEARN = 0x00000008, + GR_OVERRIDE = 0x00000010, + /* just a placeholder, this mode is only used in userspace */ + GR_DUMMY = 0x00000020, + GR_PROTSHM = 0x00000040, + GR_KILLPROC = 0x00000080, + GR_KILLIPPROC = 0x00000100, + /* just a placeholder, this mode is only used in userspace */ + GR_NOTROJAN = 0x00000200, + GR_PROTPROCFD = 0x00000400, + GR_PROCACCT = 0x00000800, + GR_RELAXPTRACE = 0x00001000, + GR_NESTED = 0x00002000, + GR_INHERITLEARN = 0x00004000, + GR_PROCFIND = 0x00008000, + GR_POVERRIDE = 0x00010000, + GR_KERNELAUTH = 0x00020000, +}; + +enum { + GR_PAX_ENABLE_SEGMEXEC = 0x0001, + GR_PAX_ENABLE_PAGEEXEC = 0x0002, + GR_PAX_ENABLE_MPROTECT = 0x0004, + GR_PAX_ENABLE_RANDMMAP = 0x0008, + GR_PAX_ENABLE_EMUTRAMP = 0x0010, + GR_PAX_DISABLE_SEGMEXEC = 0x0100, + GR_PAX_DISABLE_PAGEEXEC = 0x0200, + GR_PAX_DISABLE_MPROTECT = 0x0400, + GR_PAX_DISABLE_RANDMMAP = 0x0800, + GR_PAX_DISABLE_EMUTRAMP = 0x1000, +}; + +enum { + GR_ID_USER = 0x01, + GR_ID_GROUP = 0x02, +}; + +enum { + GR_ID_ALLOW = 0x01, + GR_ID_DENY = 0x02, +}; + +#define GR_CRASH_RES 11 +#define GR_UIDTABLE_MAX 500 + +/* begin resource learning section */ +enum { + GR_RLIM_CPU_BUMP = 60, + GR_RLIM_FSIZE_BUMP = 50000, + GR_RLIM_DATA_BUMP = 10000, + GR_RLIM_STACK_BUMP = 1000, + GR_RLIM_CORE_BUMP = 10000, + GR_RLIM_RSS_BUMP = 500000, + GR_RLIM_NPROC_BUMP = 1, + GR_RLIM_NOFILE_BUMP = 5, + GR_RLIM_MEMLOCK_BUMP = 50000, + GR_RLIM_AS_BUMP = 500000, + GR_RLIM_LOCKS_BUMP = 2 +}; + +#endif diff -urNp linux-2.6.22.1/include/linux/grinternal.h linux-2.6.22.1/include/linux/grinternal.h --- linux-2.6.22.1/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/include/linux/grinternal.h 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,210 @@ +#ifndef __GRINTERNAL_H +#define __GRINTERNAL_H + +#ifdef CONFIG_GRKERNSEC + +#include +#include +#include +#include + +void gr_add_learn_entry(const char *fmt, 
...); +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode, + const struct vfsmount *mnt); +__u32 gr_check_create(const struct dentry *new_dentry, + const struct dentry *parent, + const struct vfsmount *mnt, const __u32 mode); +int gr_check_protected_task(const struct task_struct *task); +__u32 to_gr_audit(const __u32 reqmode); +int gr_set_acls(const int type); + +int gr_acl_is_enabled(void); +char gr_roletype_to_char(void); + +void gr_handle_alertkill(struct task_struct *task); +char *gr_to_filename(const struct dentry *dentry, + const struct vfsmount *mnt); +char *gr_to_filename1(const struct dentry *dentry, + const struct vfsmount *mnt); +char *gr_to_filename2(const struct dentry *dentry, + const struct vfsmount *mnt); +char *gr_to_filename3(const struct dentry *dentry, + const struct vfsmount *mnt); + +extern int grsec_enable_link; +extern int grsec_enable_fifo; +extern int grsec_enable_execve; +extern int grsec_enable_shm; +extern int grsec_enable_execlog; +extern int grsec_enable_signal; +extern int grsec_enable_forkfail; +extern int grsec_enable_time; +extern int grsec_enable_chroot_shmat; +extern int grsec_enable_chroot_findtask; +extern int grsec_enable_chroot_mount; +extern int grsec_enable_chroot_double; +extern int grsec_enable_chroot_pivot; +extern int grsec_enable_chroot_chdir; +extern int grsec_enable_chroot_chmod; +extern int grsec_enable_chroot_mknod; +extern int grsec_enable_chroot_fchdir; +extern int grsec_enable_chroot_nice; +extern int grsec_enable_chroot_execlog; +extern int grsec_enable_chroot_caps; +extern int grsec_enable_chroot_sysctl; +extern int grsec_enable_chroot_unix; +extern int grsec_enable_tpe; +extern int grsec_tpe_gid; +extern int grsec_enable_tpe_all; +extern int grsec_enable_sidcaps; +extern int grsec_enable_socket_all; +extern int grsec_socket_all_gid; +extern int grsec_enable_socket_client; +extern int grsec_socket_client_gid; +extern int grsec_enable_socket_server; +extern int grsec_socket_server_gid; +extern int grsec_audit_gid; +extern int grsec_enable_group; +extern int grsec_enable_audit_ipc; +extern int grsec_enable_audit_textrel; +extern int grsec_enable_mount; +extern int grsec_enable_chdir; +extern int grsec_resource_logging; +extern int grsec_lock; + +extern spinlock_t grsec_alert_lock; +extern unsigned long grsec_alert_wtime; +extern unsigned long grsec_alert_fyet; + +extern spinlock_t grsec_audit_lock; + +extern rwlock_t grsec_exec_file_lock; + +#define gr_task_fullpath(tsk) (tsk->exec_file ? \ + gr_to_filename2(tsk->exec_file->f_dentry, \ + tsk->exec_file->f_vfsmnt) : "/") + +#define gr_parent_task_fullpath(tsk) (tsk->parent->exec_file ? \ + gr_to_filename3(tsk->parent->exec_file->f_dentry, \ + tsk->parent->exec_file->f_vfsmnt) : "/") + +#define gr_task_fullpath0(tsk) (tsk->exec_file ? \ + gr_to_filename(tsk->exec_file->f_dentry, \ + tsk->exec_file->f_vfsmnt) : "/") + +#define gr_parent_task_fullpath0(tsk) (tsk->parent->exec_file ? 
\ + gr_to_filename1(tsk->parent->exec_file->f_dentry, \ + tsk->parent->exec_file->f_vfsmnt) : "/") + +#define proc_is_chrooted(tsk_a) ((tsk_a->pid > 1) && (tsk_a->fs != NULL) && \ + ((tsk_a->fs->root->d_inode->i_sb->s_dev != \ + child_reaper(tsk_a)->fs->root->d_inode->i_sb->s_dev) || \ + (tsk_a->fs->root->d_inode->i_ino != \ + child_reaper(tsk_a)->fs->root->d_inode->i_ino))) + +#define have_same_root(tsk_a,tsk_b) ((tsk_a->fs != NULL) && (tsk_b->fs != NULL) && \ + (tsk_a->fs->root->d_inode->i_sb->s_dev == \ + tsk_b->fs->root->d_inode->i_sb->s_dev) && \ + (tsk_a->fs->root->d_inode->i_ino == \ + tsk_b->fs->root->d_inode->i_ino)) + +#define DEFAULTSECARGS(task) gr_task_fullpath(task), task->comm, \ + task->pid, task->uid, \ + task->euid, task->gid, task->egid, \ + gr_parent_task_fullpath(task), \ + task->parent->comm, task->parent->pid, \ + task->parent->uid, task->parent->euid, \ + task->parent->gid, task->parent->egid + +#define GR_CHROOT_CAPS ( \ + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \ + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \ + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \ + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \ + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \ + CAP_TO_MASK(CAP_IPC_OWNER)) + +#define security_learn(normal_msg,args...) \ +({ \ + read_lock(&grsec_exec_file_lock); \ + gr_add_learn_entry(normal_msg "\n", ## args); \ + read_unlock(&grsec_exec_file_lock); \ +}) + +enum { + GR_DO_AUDIT, + GR_DONT_AUDIT, + GR_DONT_AUDIT_GOOD +}; + +enum { + GR_TTYSNIFF, + GR_RBAC, + GR_RBAC_STR, + GR_STR_RBAC, + GR_RBAC_MODE2, + GR_RBAC_MODE3, + GR_FILENAME, + GR_SYSCTL_HIDDEN, + GR_NOARGS, + GR_ONE_INT, + GR_ONE_INT_TWO_STR, + GR_ONE_STR, + GR_STR_INT, + GR_TWO_INT, + GR_THREE_INT, + GR_FIVE_INT_TWO_STR, + GR_TWO_STR, + GR_THREE_STR, + GR_FOUR_STR, + GR_STR_FILENAME, + GR_FILENAME_STR, + GR_FILENAME_TWO_INT, + GR_FILENAME_TWO_INT_STR, + GR_TEXTREL, + GR_PTRACE, + GR_RESOURCE, + GR_CAP, + GR_SIG, + GR_CRASH1, + GR_CRASH2, + GR_PSACCT +}; + +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str) +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task) +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt) +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str) +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt) +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2) +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3) +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt) +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS) +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num) +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2) +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str) +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num) +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2) +#define gr_log_int3(audit, msg, 
num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3) +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2) +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2) +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3) +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4) +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt) +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str) +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2) +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str) +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2) +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task) +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2) +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str) +#define gr_log_sig(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG, task, num) +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong) +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1) +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) + +void gr_log_varargs(int audit, const char *msg, int argtypes, ...); + +#endif + +#endif diff -urNp linux-2.6.22.1/include/linux/grmsg.h linux-2.6.22.1/include/linux/grmsg.h --- linux-2.6.22.1/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/include/linux/grmsg.h 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1,108 @@ +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " +#define GR_STOPMOD_MSG "denied modification of module state by " +#define GR_IOPERM_MSG "denied use of ioperm() by " +#define GR_IOPL_MSG "denied use of iopl() by " +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by " +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by " +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by " +#define GR_KMEM_MSG "denied write of /dev/kmem by " +#define GR_PORT_OPEN_MSG "denied open of /dev/port by " +#define GR_MEM_WRITE_MSG "denied write of /dev/mem by " +#define GR_MEM_MMAP_MSG "denied mmap write of /dev/[k]mem by " +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by " +#define GR_LEARN_AUDIT_MSG 
"%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%u.%u.%u.%u" +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%u.%u.%u.%u" +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by " +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by " +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by " +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by " +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by " +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by " +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by " +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%u.%u.%u.%u %.480s[%.16s:%d], parent %.480s[%.16s:%d] against " +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by " +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by " +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by " +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by " +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for " +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by " +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by " +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by " +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by " +#define GR_NPROC_MSG "denied overstep of process limit by " +#define GR_EXEC_ACL_MSG "%s execution of %.950s by " +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by " +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds" +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds" +#define GR_MOUNT_CHROOT_MSG "denied mount of %.30s as %.930s from chroot by " +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by " +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by " +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by " +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by " +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by " +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by " +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by " +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by " +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by " +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by " +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by " +#define GR_INITF_ACL_MSG "init_variables() failed %s by " +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. 
To disable acls at startup use gracl=off from your boot loader" +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by " +#define GR_SHUTS_ACL_MSG "shutdown auth success for " +#define GR_SHUTF_ACL_MSG "shutdown auth failure for " +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for " +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for " +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for " +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for " +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by " +#define GR_ENABLEF_ACL_MSG "unable to load %s for " +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system" +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by " +#define GR_RELOADF_ACL_MSG "failed reload of %s for " +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for " +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by " +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by " +#define GR_SPROLEF_ACL_MSG "special role %s failure for " +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for " +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by " +#define GR_UNSPROLEF_ACL_MSG "special role unauth of %s failure for " +#define GR_INVMODE_ACL_MSG "invalid mode %d by " +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by " +#define GR_FAILFORK_MSG "failed fork with errno %d by " +#define GR_NICE_CHROOT_MSG "denied priority change by " +#define GR_UNISIGLOG_MSG "signal %d sent to " +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by " +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by " +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by " +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by " +#define GR_TIME_MSG "time set by " +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by " +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by " +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by " +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by " +#define GR_SOCK2_MSG "denied socket(%d,%.16s,%.16s) by " +#define GR_BIND_MSG "denied bind() by " +#define GR_CONNECT_MSG "denied connect() by " +#define GR_BIND_ACL_MSG "denied bind() to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by " +#define GR_CONNECT_ACL_MSG "denied connect() to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by " +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%u.%u.%u.%u\t%u\t%u\t%u\t%u\t%u.%u.%u.%u" +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process " +#define GR_CAP_ACL_MSG "use of %s denied for " +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for " +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for " +#define GR_REMOUNT_AUDIT_MSG "remount of %.30s by " +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.30s by " +#define GR_MOUNT_AUDIT_MSG "mount of %.30s to %.64s by " +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by " +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by " +#define GR_MSGQ_AUDIT_MSG "message queue created by " +#define GR_MSGQR_AUDIT_MSG "message queue of uid:%u euid:%u removed by " +#define GR_SEM_AUDIT_MSG "semaphore created by " +#define GR_SEMR_AUDIT_MSG "semaphore of uid:%u euid:%u removed by " +#define GR_SHM_AUDIT_MSG "shared memory of size %d created by " 
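/*
 * Illustrative sketch, not part of the patch: the DEFAULTSECMSG format string
 * at the top of grmsg.h pairs one-to-one with the DEFAULTSECARGS() list from
 * grinternal.h (task path, comm, pid, uid/euid, gid/egid, then the same
 * fields for the parent), and the message strings above end in "by " so that
 * those details can apparently follow.  The standalone printf() below uses
 * made-up sample values (all hypothetical) to show the resulting log-line
 * layout.
 */
#include <stdio.h>

#define DEMO_DEFAULTSECMSG \
        "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, " \
        "parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"

int main(void)
{
        /* GR_CHDIR_AUDIT_MSG-style prefix followed by the default details */
        printf("chdir to %.980s by " DEMO_DEFAULTSECMSG "\n",
               "/tmp", "/usr/bin/someprog", "someprog", 1234,
               1000u, 1000u, 1000u, 1000u,
               "/bin/bash", "bash", 1000, 1000u, 1000u, 1000u, 1000u);
        return 0;
}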
+#define GR_SHMR_AUDIT_MSG "shared memory of uid:%u euid:%u removed by " +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for " +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by " diff -urNp linux-2.6.22.1/include/linux/grsecurity.h linux-2.6.22.1/include/linux/grsecurity.h --- linux-2.6.22.1/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/include/linux/grsecurity.h 2007-08-02 12:19:12.000000000 -0400 @@ -0,0 +1,193 @@ +#ifndef GR_SECURITY_H +#define GR_SECURITY_H +#include +#include +#include + +void gr_handle_brute_attach(struct task_struct *p); +void gr_handle_brute_check(void); + +char gr_roletype_to_char(void); + +int gr_check_user_change(int real, int effective, int fs); +int gr_check_group_change(int real, int effective, int fs); + +void gr_del_task_from_ip_table(struct task_struct *p); + +int gr_pid_is_chrooted(struct task_struct *p); +int gr_handle_chroot_nice(void); +int gr_handle_chroot_sysctl(const int op); +int gr_handle_chroot_setpriority(struct task_struct *p, + const int niceval); +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt); +int gr_handle_chroot_chroot(const struct dentry *dentry, + const struct vfsmount *mnt); +void gr_handle_chroot_caps(struct task_struct *task); +void gr_handle_chroot_chdir(struct dentry *dentry, struct vfsmount *mnt); +int gr_handle_chroot_chmod(const struct dentry *dentry, + const struct vfsmount *mnt, const int mode); +int gr_handle_chroot_mknod(const struct dentry *dentry, + const struct vfsmount *mnt, const int mode); +int gr_handle_chroot_mount(const struct dentry *dentry, + const struct vfsmount *mnt, + const char *dev_name); +int gr_handle_chroot_pivot(void); +int gr_handle_chroot_unix(const pid_t pid); + +int gr_handle_rawio(const struct inode *inode); +int gr_handle_nproc(void); + +void gr_handle_ioperm(void); +void gr_handle_iopl(void); + +int gr_tpe_allow(const struct file *file); + +int gr_random_pid(void); + +void gr_log_forkfail(const int retval); +void gr_log_timechange(void); +void gr_log_signal(const int sig, const struct task_struct *t); +void gr_log_chdir(const struct dentry *dentry, + const struct vfsmount *mnt); +void gr_log_chroot_exec(const struct dentry *dentry, + const struct vfsmount *mnt); +void gr_handle_exec_args(struct linux_binprm *bprm, char **argv); +void gr_log_remount(const char *devname, const int retval); +void gr_log_unmount(const char *devname, const int retval); +void gr_log_mount(const char *from, const char *to, const int retval); +void gr_log_msgget(const int ret, const int msgflg); +void gr_log_msgrm(const uid_t uid, const uid_t cuid); +void gr_log_semget(const int err, const int semflg); +void gr_log_semrm(const uid_t uid, const uid_t cuid); +void gr_log_shmget(const int err, const int shmflg, const size_t size); +void gr_log_shmrm(const uid_t uid, const uid_t cuid); +void gr_log_textrel(struct vm_area_struct *vma); + +int gr_handle_follow_link(const struct inode *parent, + const struct inode *inode, + const struct dentry *dentry, + const struct vfsmount *mnt); +int gr_handle_fifo(const struct dentry *dentry, + const struct vfsmount *mnt, + const struct dentry *dir, const int flag, + const int acc_mode); +int gr_handle_hardlink(const struct dentry *dentry, + const struct vfsmount *mnt, + struct inode *inode, + const int mode, const char *to); + +int gr_task_is_capable(struct task_struct *task, const int cap); +int gr_is_capable_nolog(const int cap); +void 
gr_learn_resource(const struct task_struct *task, const int limit, + const unsigned long wanted, const int gt); +void gr_copy_label(struct task_struct *tsk); +void gr_handle_crash(struct task_struct *task, const int sig); +int gr_handle_signal(const struct task_struct *p, const int sig); +int gr_check_crash_uid(const uid_t uid); +int gr_check_protected_task(const struct task_struct *task); +int gr_acl_handle_mmap(const struct file *file, + const unsigned long prot); +int gr_acl_handle_mprotect(const struct file *file, + const unsigned long prot); +int gr_check_hidden_task(const struct task_struct *tsk); +__u32 gr_acl_handle_truncate(const struct dentry *dentry, + const struct vfsmount *mnt); +__u32 gr_acl_handle_utime(const struct dentry *dentry, + const struct vfsmount *mnt); +__u32 gr_acl_handle_access(const struct dentry *dentry, + const struct vfsmount *mnt, const int fmode); +__u32 gr_acl_handle_fchmod(const struct dentry *dentry, + const struct vfsmount *mnt, mode_t mode); +__u32 gr_acl_handle_chmod(const struct dentry *dentry, + const struct vfsmount *mnt, mode_t mode); +__u32 gr_acl_handle_chown(const struct dentry *dentry, + const struct vfsmount *mnt); +int gr_handle_ptrace(struct task_struct *task, const long request); +int gr_handle_proc_ptrace(struct task_struct *task); +__u32 gr_acl_handle_execve(const struct dentry *dentry, + const struct vfsmount *mnt); +int gr_check_crash_exec(const struct file *filp); +int gr_acl_is_enabled(void); +void gr_set_kernel_label(struct task_struct *task); +void gr_set_role_label(struct task_struct *task, const uid_t uid, + const gid_t gid); +int gr_set_proc_label(const struct dentry *dentry, + const struct vfsmount *mnt); +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry, + const struct vfsmount *mnt); +__u32 gr_acl_handle_open(const struct dentry *dentry, + const struct vfsmount *mnt, const int fmode); +__u32 gr_acl_handle_creat(const struct dentry *dentry, + const struct dentry *p_dentry, + const struct vfsmount *p_mnt, const int fmode, + const int imode); +void gr_handle_create(const struct dentry *dentry, + const struct vfsmount *mnt); +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const int mode); +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt); +__u32 gr_acl_handle_rmdir(const struct dentry *dentry, + const struct vfsmount *mnt); +void gr_handle_delete(const ino_t ino, const dev_t dev); +__u32 gr_acl_handle_unlink(const struct dentry *dentry, + const struct vfsmount *mnt); +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const char *from); +__u32 gr_acl_handle_link(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const struct dentry *old_dentry, + const struct vfsmount *old_mnt, const char *to); +int gr_acl_handle_rename(struct dentry *new_dentry, + struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + struct dentry *old_dentry, + struct inode *old_parent_inode, + struct vfsmount *old_mnt, const char *newname); +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir, + struct dentry *old_dentry, + struct dentry *new_dentry, + struct vfsmount *mnt, const __u8 replace); +__u32 gr_check_link(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct 
vfsmount *parent_mnt, + const struct dentry *old_dentry, + const struct vfsmount *old_mnt); +int gr_acl_handle_filldir(const struct file *file, const char *name, + const unsigned int namelen, const ino_t ino); + +__u32 gr_acl_handle_unix(const struct dentry *dentry, + const struct vfsmount *mnt); +void gr_acl_handle_exit(void); +void gr_acl_handle_psacct(struct task_struct *task, const long code); +int gr_acl_handle_procpidmem(const struct task_struct *task); +__u32 gr_cap_rtnetlink(void); + +#ifdef CONFIG_SYSVIPC +void gr_shm_exit(struct task_struct *task); +#else +static inline void gr_shm_exit(struct task_struct *task) +{ + return; +} +#endif + +#ifdef CONFIG_GRKERNSEC +void gr_handle_mem_write(void); +void gr_handle_kmem_write(void); +void gr_handle_open_port(void); +int gr_handle_mem_mmap(const unsigned long offset, + struct vm_area_struct *vma); + +extern int grsec_enable_dmesg; +extern int grsec_enable_randsrc; +extern int grsec_enable_shm; +#endif + +#endif diff -urNp linux-2.6.22.1/include/linux/highmem.h linux-2.6.22.1/include/linux/highmem.h --- linux-2.6.22.1/include/linux/highmem.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/highmem.h 2007-08-02 11:38:48.000000000 -0400 @@ -92,6 +92,13 @@ static inline void clear_highpage(struct kunmap_atomic(kaddr, KM_USER0); } +static inline void sanitize_highpage(struct page *page) +{ + void *kaddr = kmap_atomic(page, KM_CLEARPAGE); + clear_page(kaddr); + kunmap_atomic(kaddr, KM_CLEARPAGE); +} + /* * Same but also flushes aliased cache contents to RAM. * @@ -100,14 +107,14 @@ static inline void clear_highpage(struct */ #define zero_user_page(page, offset, size, km_type) \ do { \ - void *kaddr; \ + void *__kaddr; \ \ BUG_ON((offset) + (size) > PAGE_SIZE); \ \ - kaddr = kmap_atomic(page, km_type); \ - memset((char *)kaddr + (offset), 0, (size)); \ + __kaddr = kmap_atomic(page, km_type); \ + memset((char *)__kaddr + (offset), 0, (size)); \ flush_dcache_page(page); \ - kunmap_atomic(kaddr, (km_type)); \ + kunmap_atomic(__kaddr, (km_type)); \ } while (0) static inline void __deprecated memclear_highpage_flush(struct page *page, diff -urNp linux-2.6.22.1/include/linux/irqflags.h linux-2.6.22.1/include/linux/irqflags.h --- linux-2.6.22.1/include/linux/irqflags.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/irqflags.h 2007-08-02 11:38:48.000000000 -0400 @@ -84,10 +84,10 @@ #define irqs_disabled() \ ({ \ - unsigned long flags; \ + unsigned long __flags; \ \ - raw_local_save_flags(flags); \ - raw_irqs_disabled_flags(flags); \ + raw_local_save_flags(__flags); \ + raw_irqs_disabled_flags(__flags); \ }) #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) diff -urNp linux-2.6.22.1/include/linux/jbd2.h linux-2.6.22.1/include/linux/jbd2.h --- linux-2.6.22.1/include/linux/jbd2.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/jbd2.h 2007-08-02 11:38:48.000000000 -0400 @@ -68,7 +68,7 @@ extern int jbd2_journal_enable_debug; } \ } while (0) #else -#define jbd_debug(f, a...) /**/ +#define jbd_debug(f, a...) do {} while (0) #endif extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry); diff -urNp linux-2.6.22.1/include/linux/jbd.h linux-2.6.22.1/include/linux/jbd.h --- linux-2.6.22.1/include/linux/jbd.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/jbd.h 2007-08-02 11:38:48.000000000 -0400 @@ -68,7 +68,7 @@ extern int journal_enable_debug; } \ } while (0) #else -#define jbd_debug(f, a...) /**/ +#define jbd_debug(f, a...) 
do {} while (0) #endif extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry); diff -urNp linux-2.6.22.1/include/linux/libata.h linux-2.6.22.1/include/linux/libata.h --- linux-2.6.22.1/include/linux/libata.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/libata.h 2007-08-02 11:38:48.000000000 -0400 @@ -63,11 +63,11 @@ #ifdef ATA_VERBOSE_DEBUG #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) #else -#define VPRINTK(fmt, args...) +#define VPRINTK(fmt, args...) do {} while (0) #endif /* ATA_VERBOSE_DEBUG */ #else -#define DPRINTK(fmt, args...) -#define VPRINTK(fmt, args...) +#define DPRINTK(fmt, args...) do {} while (0) +#define VPRINTK(fmt, args...) do {} while (0) #endif /* ATA_DEBUG */ #define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) diff -urNp linux-2.6.22.1/include/linux/mm.h linux-2.6.22.1/include/linux/mm.h --- linux-2.6.22.1/include/linux/mm.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/mm.h 2007-08-02 11:38:48.000000000 -0400 @@ -39,6 +39,7 @@ extern int sysctl_legacy_va_layout; #include #include #include +#include #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) @@ -112,6 +113,8 @@ struct vm_area_struct { #ifdef CONFIG_NUMA struct mempolicy *vm_policy; /* NUMA policy for the VMA */ #endif + + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */ }; extern struct kmem_cache *vm_area_cachep; @@ -170,6 +173,14 @@ extern unsigned int kobjsize(const void #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */ #define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */ +#ifdef CONFIG_PAX_PAGEEXEC +#define VM_PAGEEXEC 0x08000000 /* vma->vm_page_prot needs special handling */ +#endif + +#ifdef CONFIG_PAX_MPROTECT +#define VM_MAYNOTWRITE 0x10000000 /* vma cannot be granted VM_WRITE any more */ +#endif + #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS #endif @@ -790,7 +805,7 @@ static inline int handle_mm_fault(struct extern int make_pages_present(unsigned long addr, unsigned long end); extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); -void install_arg_page(struct vm_area_struct *, struct page *, unsigned long); +int install_arg_page(struct vm_area_struct *, struct page *, unsigned long); int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); @@ -834,6 +849,8 @@ struct shrinker; extern struct shrinker *set_shrinker(int, shrinker_t); extern void remove_shrinker(struct shrinker *shrinker); +pgprot_t vm_get_page_prot(unsigned long vm_flags); + /* * Some shared mappigns will want the pages marked read-only * to track write events. If so, we'll downgrade vm_page_prot @@ -842,10 +859,10 @@ extern void remove_shrinker(struct shrin */ static inline int vma_wants_writenotify(struct vm_area_struct *vma) { - unsigned int vm_flags = vma->vm_flags; + unsigned long vm_flags = vma->vm_flags; /* If it was private or non-writable, the write bit is already clear */ - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED)) return 0; /* The backer wishes to know when pages are first written to? 
*/ @@ -854,8 +871,7 @@ static inline int vma_wants_writenotify( /* The open routine did something to the protections already? */ if (pgprot_val(vma->vm_page_prot) != - pgprot_val(protection_map[vm_flags & - (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)])) + pgprot_val(vm_get_page_prot(vm_flags))) return 0; /* Specialty mapping? */ @@ -1087,6 +1103,7 @@ out: } extern int do_munmap(struct mm_struct *, unsigned long, size_t); +extern int __do_munmap(struct mm_struct *, unsigned long, size_t); extern unsigned long do_brk(unsigned long, unsigned long); @@ -1134,6 +1151,10 @@ extern struct vm_area_struct * find_vma( extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, struct vm_area_struct **pprev); +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma); +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma); +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl); + /* Look up the first VMA which intersects the interval start_addr..end_addr-1, NULL if none. Assume start_addr < end_addr. */ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) @@ -1150,8 +1171,6 @@ static inline unsigned long vma_pages(st return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; } -pgprot_t vm_get_page_prot(unsigned long vm_flags); -struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); struct page *vmalloc_to_page(void *addr); unsigned long vmalloc_to_pfn(void *addr); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, @@ -1210,5 +1229,11 @@ extern int randomize_va_space; __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma); +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot); +#else +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {} +#endif + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff -urNp linux-2.6.22.1/include/linux/module.h linux-2.6.22.1/include/linux/module.h --- linux-2.6.22.1/include/linux/module.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/module.h 2007-08-02 11:38:48.000000000 -0400 @@ -296,16 +296,16 @@ struct module int (*init)(void); /* If this is non-NULL, vfree after init() returns */ - void *module_init; + void *module_init_rx, *module_init_rw; /* Here is the actual code + data, vfree'd on unload. */ - void *module_core; + void *module_core_rx, *module_core_rw; /* Here are the sizes of the init and core sections */ - unsigned long init_size, core_size; + unsigned long init_size_rw, core_size_rw; /* The size of the executable code in each section. */ - unsigned long init_text_size, core_text_size; + unsigned long init_size_rx, core_size_rx; /* The handle returned from unwind_add_table. */ void *unwind_info; diff -urNp linux-2.6.22.1/include/linux/moduleloader.h linux-2.6.22.1/include/linux/moduleloader.h --- linux-2.6.22.1/include/linux/moduleloader.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/moduleloader.h 2007-08-02 11:38:48.000000000 -0400 @@ -17,9 +17,21 @@ int module_frob_arch_sections(Elf_Ehdr * sections. Returns NULL on failure. 
*/ void *module_alloc(unsigned long size); +#ifdef CONFIG_PAX_KERNEXEC +void *module_alloc_exec(unsigned long size); +#else +#define module_alloc_exec(x) module_alloc(x) +#endif + /* Free memory returned from module_alloc. */ void module_free(struct module *mod, void *module_region); +#ifdef CONFIG_PAX_KERNEXEC +void module_free_exec(struct module *mod, void *module_region); +#else +#define module_free_exec(x, y) module_free(x, y) +#endif + /* Apply the given relocation to the (simplified) ELF. Return -error or 0. */ int apply_relocate(Elf_Shdr *sechdrs, diff -urNp linux-2.6.22.1/include/linux/percpu.h linux-2.6.22.1/include/linux/percpu.h --- linux-2.6.22.1/include/linux/percpu.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/percpu.h 2007-08-02 11:38:48.000000000 -0400 @@ -18,7 +18,7 @@ #endif #define PERCPU_ENOUGH_ROOM \ - (__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE) + ((unsigned long)(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)) #endif /* PERCPU_ENOUGH_ROOM */ /* diff -urNp linux-2.6.22.1/include/linux/random.h linux-2.6.22.1/include/linux/random.h --- linux-2.6.22.1/include/linux/random.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/random.h 2007-08-02 11:38:48.000000000 -0400 @@ -72,6 +72,11 @@ unsigned long randomize_range(unsigned l u32 random32(void); void srandom32(u32 seed); +static inline unsigned long pax_get_random_long(void) +{ + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0); +} + #endif /* __KERNEL___ */ #endif /* _LINUX_RANDOM_H */ diff -urNp linux-2.6.22.1/include/linux/sched.h linux-2.6.22.1/include/linux/sched.h --- linux-2.6.22.1/include/linux/sched.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/sched.h 2007-08-02 11:38:48.000000000 -0400 @@ -89,6 +89,7 @@ struct sched_param { struct exec_domain; struct futex_pi_state; struct bio; +struct linux_binprm; /* * List of flags we want to share for kernel threads, @@ -386,6 +387,24 @@ struct mm_struct { /* aio bits */ rwlock_t ioctx_list_lock; struct kioctx *ioctx_list; + +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) + unsigned long pax_flags; +#endif + +#ifdef CONFIG_PAX_DLRESOLVE + unsigned long call_dl_resolve; +#endif + +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT) + unsigned long call_syscall; +#endif + +#ifdef CONFIG_PAX_ASLR + unsigned long delta_mmap; /* randomized offset */ + unsigned long delta_stack; /* randomized offset */ +#endif + }; struct sighand_struct { @@ -506,6 +525,15 @@ struct signal_struct { #ifdef CONFIG_TASKSTATS struct taskstats *stats; #endif + +#ifdef CONFIG_GRKERNSEC + u32 curr_ip; + u32 gr_saddr; + u32 gr_daddr; + u16 gr_sport; + u16 gr_dport; + u8 used_accept:1; +#endif }; /* Context switch must be unlocked if interrupts are to be enabled */ @@ -899,8 +927,8 @@ struct task_struct { struct list_head thread_group; struct completion *vfork_done; /* for vfork() */ - int __user *set_child_tid; /* CLONE_CHILD_SETTID */ - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */ + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ unsigned int rt_priority; cputime_t utime, stime; @@ -1063,6 +1091,17 @@ struct task_struct { struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; +#ifdef CONFIG_GRKERNSEC + /* grsecurity */ + struct acl_subject_label *acl; + struct acl_role_label *role; + struct file *exec_file; + 
u16 acl_role_id; + u8 acl_sp_role:1; + u8 is_writable:1; + u8 brute:1; +#endif + atomic_t fs_excl; /* holding fs exclusive resources */ struct rcu_head rcu; @@ -1078,6 +1117,46 @@ struct task_struct { #endif }; +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */ +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */ +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */ +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */ +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */ +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */ + +#ifdef CONFIG_PAX_SOFTMODE +extern unsigned int pax_softmode; +#endif + +extern int pax_check_flags(unsigned long *); + +/* if tsk != current then task_lock must be held on it */ +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) +static inline unsigned long pax_get_flags(struct task_struct *tsk) +{ + if (likely(tsk->mm)) + return tsk->mm->pax_flags; + else + return 0UL; +} + +/* if tsk != current then task_lock must be held on it */ +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags) +{ + if (likely(tsk->mm)) { + tsk->mm->pax_flags = flags; + return 0; + } + return -EINVAL; +} +#endif + +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS +extern void pax_set_initial_flags(struct linux_binprm *bprm); +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS) +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); +#endif + static inline pid_t process_group(struct task_struct *tsk) { return tsk->signal->pgrp; @@ -1662,6 +1741,12 @@ extern void arch_pick_mmap_layout(struct static inline void arch_pick_mmap_layout(struct mm_struct *mm) { mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } diff -urNp linux-2.6.22/include/linux/screen_info.h linux-2.6.22/include/linux/screen_info.h --- linux-2.6.22/include/linux/screen_info.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22/include/linux/screen_info.h 2007-07-10 14:56:30.000000000 -0400 @@ -42,9 +42,9 @@ struct screen_info { u16 pages; /* 0x32 */ u16 vesa_attributes; /* 0x34 */ u32 capabilities; /* 0x36 */ - /* 0x3a -- 0x3b reserved for future expansion */ - /* 0x3c -- 0x3f micro stack for relocatable kernels */ -}; + u16 vesapm_size; /* 0x3a -- 0x3b reserved for future expansion */ + u8 _reserved[4]; /* 0x3c -- 0x3f micro stack for relocatable kernels */ +} __attribute__((packed)); extern struct screen_info screen_info; diff -urNp linux-2.6.22.1/include/linux/security.h linux-2.6.22.1/include/linux/security.h --- linux-2.6.22.1/include/linux/security.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/security.h 2007-08-02 11:38:48.000000000 -0400 @@ -2779,7 +2779,7 @@ static inline struct dentry *securityfs_ mode_t mode, struct dentry *parent, void *data, - struct file_operations *fops) + const struct file_operations *fops) { return ERR_PTR(-ENODEV); } diff -urNp linux-2.6.22.1/include/linux/shm.h linux-2.6.22.1/include/linux/shm.h --- linux-2.6.22.1/include/linux/shm.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/shm.h 2007-08-02 11:09:16.000000000 -0400 @@ -86,6 +86,10 @@ struct shmid_kernel /* private to the ke pid_t shm_cprid; pid_t shm_lprid; struct user_struct *mlock_user; +#ifdef CONFIG_GRKERNSEC + time_t shm_createtime; + pid_t shm_lapid; +#endif }; /* shm_mode upper byte 
flags */ diff -urNp linux-2.6.22.1/include/linux/skbuff.h linux-2.6.22.1/include/linux/skbuff.h --- linux-2.6.22.1/include/linux/skbuff.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/skbuff.h 2007-08-02 11:38:48.000000000 -0400 @@ -369,7 +369,7 @@ extern void skb_truesize_bug(struc static inline void skb_truesize_check(struct sk_buff *skb) { - if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len)) + if (unlikely(skb->truesize < sizeof(struct sk_buff) + skb->len)) skb_truesize_bug(skb); } diff -urNp linux-2.6.22.1/include/linux/sysctl.h linux-2.6.22.1/include/linux/sysctl.h --- linux-2.6.22.1/include/linux/sysctl.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/sysctl.h 2007-08-02 11:47:35.000000000 -0400 @@ -165,9 +165,21 @@ enum KERN_MAX_LOCK_DEPTH=74, KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */ KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ -}; +#ifdef CONFIG_GRKERNSEC + KERN_GRSECURITY=98, /* grsecurity */ +#endif + +#ifdef CONFIG_PAX_SOFTMODE + KERN_PAX=99, /* PaX control */ +#endif +}; +#ifdef CONFIG_PAX_SOFTMODE +enum { + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */ +}; +#endif /* CTL_VM names: */ enum diff -urNp linux-2.6.22.1/include/linux/uaccess.h linux-2.6.22.1/include/linux/uaccess.h --- linux-2.6.22.1/include/linux/uaccess.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/uaccess.h 2007-08-02 11:38:48.000000000 -0400 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_ long ret; \ mm_segment_t old_fs = get_fs(); \ \ - set_fs(KERNEL_DS); \ pagefault_disable(); \ + set_fs(KERNEL_DS); \ ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \ - pagefault_enable(); \ set_fs(old_fs); \ + pagefault_enable(); \ ret; \ }) diff -urNp linux-2.6.22.1/include/linux/udf_fs.h linux-2.6.22.1/include/linux/udf_fs.h --- linux-2.6.22.1/include/linux/udf_fs.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/linux/udf_fs.h 2007-08-02 11:38:48.000000000 -0400 @@ -45,7 +45,7 @@ printk (f, ##a); \ } #else -#define udf_debug(f, a...) /**/ +#define udf_debug(f, a...) do {} while (0) #endif #define udf_info(f, a...) \ diff -urNp linux-2.6.22.1/include/net/sctp/sctp.h linux-2.6.22.1/include/net/sctp/sctp.h --- linux-2.6.22.1/include/net/sctp/sctp.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/net/sctp/sctp.h 2007-08-02 11:38:48.000000000 -0400 @@ -306,8 +306,8 @@ extern int sctp_debug_flag; #else /* SCTP_DEBUG */ -#define SCTP_DEBUG_PRINTK(whatever...) -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0) +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0) #define SCTP_ENABLE_DEBUG #define SCTP_DISABLE_DEBUG #define SCTP_ASSERT(expr, str, func) diff -urNp linux-2.6.22.1/include/sound/core.h linux-2.6.22.1/include/sound/core.h --- linux-2.6.22.1/include/sound/core.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/include/sound/core.h 2007-08-02 11:38:48.000000000 -0400 @@ -396,9 +396,9 @@ void snd_verbose_printd(const char *file #else /* !CONFIG_SND_DEBUG */ -#define snd_printd(fmt, args...) /* nothing */ +#define snd_printd(fmt, args...) do {} while (0) #define snd_assert(expr, args...) (void)(expr) -#define snd_BUG() /* nothing */ +#define snd_BUG() do {} while (0) #endif /* CONFIG_SND_DEBUG */ @@ -412,7 +412,7 @@ void snd_verbose_printd(const char *file */ #define snd_printdd(format, args...) 
snd_printk(format, ##args) #else -#define snd_printdd(format, args...) /* nothing */ +#define snd_printdd(format, args...) do {} while (0) #endif diff -urNp linux-2.6.22.1/init/do_mounts.c linux-2.6.22.1/init/do_mounts.c --- linux-2.6.22.1/init/do_mounts.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/init/do_mounts.c 2007-08-02 11:38:48.000000000 -0400 @@ -67,11 +67,12 @@ static dev_t try_name(char *name, int pa /* read device number from .../dev */ - sprintf(path, "/sys/block/%s/dev", name); - fd = sys_open(path, 0, 0); + if (sizeof path <= snprintf(path, sizeof path, "/sys/block/%s/dev", name)) + goto fail; + fd = sys_open((char __user *)path, 0, 0); if (fd < 0) goto fail; - len = sys_read(fd, buf, 32); + len = sys_read(fd, (char __user *)buf, 32); sys_close(fd); if (len <= 0 || len == 32 || buf[len - 1] != '\n') goto fail; @@ -97,11 +98,12 @@ static dev_t try_name(char *name, int pa return res; /* otherwise read range from .../range */ - sprintf(path, "/sys/block/%s/range", name); - fd = sys_open(path, 0, 0); + if (sizeof path <= snprintf(path, sizeof path, "/sys/block/%s/range", name)) + goto fail; + fd = sys_open((char __user *)path, 0, 0); if (fd < 0) goto fail; - len = sys_read(fd, buf, 32); + len = sys_read(fd, (char __user *)buf, 32); sys_close(fd); if (len <= 0 || len == 32 || buf[len - 1] != '\n') goto fail; @@ -144,12 +146,12 @@ dev_t name_to_dev_t(char *name) int part, mount_result; #ifdef CONFIG_SYSFS - int mkdir_err = sys_mkdir("/sys", 0700); + int mkdir_err = sys_mkdir((char __user *)"/sys", 0700); /* * When changing resume2 parameter for Software Suspend, sysfs may * already be mounted. */ - mount_result = sys_mount("sysfs", "/sys", "sysfs", 0, NULL); + mount_result = sys_mount((char __user *)"sysfs", (char __user *)"/sys", (char __user *)"sysfs", 0, NULL); if (mount_result < 0 && mount_result != -EBUSY) goto out; #endif @@ -203,10 +205,10 @@ dev_t name_to_dev_t(char *name) done: #ifdef CONFIG_SYSFS if (mount_result >= 0) - sys_umount("/sys", 0); + sys_umount((char __user *)"/sys", 0); out: if (!mkdir_err) - sys_rmdir("/sys"); + sys_rmdir((char __user *)"/sys"); #endif return res; fail: @@ -276,11 +278,11 @@ static void __init get_fs_names(char *pa static int __init do_mount_root(char *name, char *fs, int flags, void *data) { - int err = sys_mount(name, "/root", fs, flags, data); + int err = sys_mount((char __user *)name, (char __user *)"/root", (char __user *)fs, flags, (void __user *)data); if (err) return err; - sys_chdir("/root"); + sys_chdir((char __user *)"/root"); ROOT_DEV = current->fs->pwdmnt->mnt_sb->s_dev; printk("VFS: Mounted root (%s filesystem)%s.\n", current->fs->pwdmnt->mnt_sb->s_type->name, @@ -366,18 +368,18 @@ void __init change_floppy(char *fmt, ... 
va_start(args, fmt); vsprintf(buf, fmt, args); va_end(args); - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0); + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0); if (fd >= 0) { sys_ioctl(fd, FDEJECT, 0); sys_close(fd); } printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf); - fd = sys_open("/dev/console", O_RDWR, 0); + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0); if (fd >= 0) { sys_ioctl(fd, TCGETS, (long)&termios); termios.c_lflag &= ~ICANON; sys_ioctl(fd, TCSETSF, (long)&termios); - sys_read(fd, &c, 1); + sys_read(fd, (char __user *)&c, 1); termios.c_lflag |= ICANON; sys_ioctl(fd, TCSETSF, (long)&termios); sys_close(fd); @@ -469,8 +471,8 @@ void __init prepare_namespace(void) mount_root(); out: - sys_mount(".", "/", NULL, MS_MOVE, NULL); - sys_chroot("."); + sys_mount((char __user *)".", (char __user *)"/", NULL, MS_MOVE, NULL); + sys_chroot((char __user *)"."); security_sb_post_mountroot(); } diff -urNp linux-2.6.22.1/init/do_mounts.h linux-2.6.22.1/init/do_mounts.h --- linux-2.6.22.1/init/do_mounts.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/init/do_mounts.h 2007-08-02 11:38:48.000000000 -0400 @@ -15,15 +15,15 @@ extern char *root_device_name; static inline int create_dev(char *name, dev_t dev) { - sys_unlink(name); - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev)); + sys_unlink((char __user *)name); + return sys_mknod((char __user *)name, S_IFBLK|0600, new_encode_dev(dev)); } #if BITS_PER_LONG == 32 static inline u32 bstat(char *name) { struct stat64 stat; - if (sys_stat64(name, &stat) != 0) + if (sys_stat64((char __user *)name, (struct stat64 __user *)&stat) != 0) return 0; if (!S_ISBLK(stat.st_mode)) return 0; diff -urNp linux-2.6.22.1/init/do_mounts_md.c linux-2.6.22.1/init/do_mounts_md.c --- linux-2.6.22.1/init/do_mounts_md.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/init/do_mounts_md.c 2007-08-02 11:38:48.000000000 -0400 @@ -167,7 +167,7 @@ static void __init md_setup_drive(void) partitioned ? "_d" : "", minor, md_setup_args[ent].device_names); - fd = sys_open(name, 0, 0); + fd = sys_open((char __user *)name, 0, 0); if (fd < 0) { printk(KERN_ERR "md: open failed - cannot start " "array %s\n", name); @@ -230,7 +230,7 @@ static void __init md_setup_drive(void) * array without it */ sys_close(fd); - fd = sys_open(name, 0, 0); + fd = sys_open((char __user *)name, 0, 0); sys_ioctl(fd, BLKRRPART, 0); } sys_close(fd); @@ -271,7 +271,7 @@ void __init md_run_setup(void) if (raid_noautodetect) printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=noautodetect)\n"); else { - int fd = sys_open("/dev/md0", 0, 0); + int fd = sys_open((char __user *)"/dev/md0", 0, 0); if (fd >= 0) { sys_ioctl(fd, RAID_AUTORUN, raid_autopart); sys_close(fd); diff -urNp linux-2.6.22.1/init/initramfs.c linux-2.6.22.1/init/initramfs.c --- linux-2.6.22.1/init/initramfs.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/init/initramfs.c 2007-08-02 11:38:48.000000000 -0400 @@ -240,7 +240,7 @@ static int __init maybe_link(void) if (nlink >= 2) { char *old = find_link(major, minor, ino, mode, collected); if (old) - return (sys_link(old, collected) < 0) ? -1 : 1; + return (sys_link((char __user *)old, (char __user *)collected) < 0) ? 
-1 : 1; } return 0; } @@ -249,11 +249,11 @@ static void __init clean_path(char *path { struct stat st; - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) { + if (!sys_newlstat((char __user *)path, (struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) { if (S_ISDIR(st.st_mode)) - sys_rmdir(path); + sys_rmdir((char __user *)path); else - sys_unlink(path); + sys_unlink((char __user *)path); } } @@ -276,7 +276,7 @@ static int __init do_name(void) int openflags = O_WRONLY|O_CREAT; if (ml != 1) openflags |= O_TRUNC; - wfd = sys_open(collected, openflags, mode); + wfd = sys_open((char __user *)collected, openflags, mode); if (wfd >= 0) { sys_fchown(wfd, uid, gid); @@ -285,15 +285,15 @@ static int __init do_name(void) } } } else if (S_ISDIR(mode)) { - sys_mkdir(collected, mode); - sys_chown(collected, uid, gid); - sys_chmod(collected, mode); + sys_mkdir((char __user *)collected, mode); + sys_chown((char __user *)collected, uid, gid); + sys_chmod((char __user *)collected, mode); } else if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { if (maybe_link() == 0) { - sys_mknod(collected, mode, rdev); - sys_chown(collected, uid, gid); - sys_chmod(collected, mode); + sys_mknod((char __user *)collected, mode, rdev); + sys_chown((char __user *)collected, uid, gid); + sys_chmod((char __user *)collected, mode); } } return 0; @@ -302,13 +302,13 @@ static int __init do_name(void) static int __init do_copy(void) { if (count >= body_len) { - sys_write(wfd, victim, body_len); + sys_write(wfd, (char __user *)victim, body_len); sys_close(wfd); eat(body_len); state = SkipIt; return 0; } else { - sys_write(wfd, victim, count); + sys_write(wfd, (char __user *)victim, count); body_len -= count; eat(count); return 1; @@ -319,8 +319,8 @@ static int __init do_symlink(void) { collected[N_ALIGN(name_len) + body_len] = '\0'; clean_path(collected, 0); - sys_symlink(collected + N_ALIGN(name_len), collected); - sys_lchown(collected, uid, gid); + sys_symlink((char __user *)collected + N_ALIGN(name_len), (char __user *)collected); + sys_lchown((char __user *)collected, uid, gid); state = SkipIt; next_state = Reset; return 0; diff -urNp linux-2.6.22.1/init/Kconfig linux-2.6.22.1/init/Kconfig --- linux-2.6.22.1/init/Kconfig 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/init/Kconfig 2007-08-02 11:09:16.000000000 -0400 @@ -395,6 +395,7 @@ config SYSCTL_SYSCALL config KALLSYMS bool "Load all symbols for debugging/ksymoops" if EMBEDDED default y + depends on !GRKERNSEC_HIDESYM help Say Y here to let the kernel print out symbolic crash information and symbolic stack backtraces. 
This increases the size of the kernel diff -urNp linux-2.6.22.1/init/main.c linux-2.6.22.1/init/main.c --- linux-2.6.22.1/init/main.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/init/main.c 2007-08-02 11:38:48.000000000 -0400 @@ -107,6 +107,7 @@ static inline void mark_rodata_ro(void) #ifdef CONFIG_TC extern void tc_init(void); #endif +extern void grsecurity_init(void); enum system_states system_state; EXPORT_SYMBOL(system_state); @@ -181,6 +182,17 @@ static int __init set_reset_devices(char __setup("reset_devices", set_reset_devices); +#ifdef CONFIG_PAX_SOFTMODE +unsigned int pax_softmode; + +static int __init setup_pax_softmode(char *str) +{ + get_option(&str, &pax_softmode); + return 1; +} +__setup("pax_softmode=", setup_pax_softmode); +#endif + static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, }; char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, }; static const char *panic_later, *panic_param; @@ -828,6 +838,8 @@ static int __init kernel_init(void * unu prepare_namespace(); } + grsecurity_init(); + /* * Ok, we have completed the initial bootup, and * we're essentially up and running. Get rid of the diff -urNp linux-2.6.22.1/init/noinitramfs.c linux-2.6.22.1/init/noinitramfs.c --- linux-2.6.22.1/init/noinitramfs.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/init/noinitramfs.c 2007-08-02 11:38:48.000000000 -0400 @@ -29,7 +29,7 @@ static int __init default_rootfs(void) { int err; - err = sys_mkdir("/dev", 0755); + err = sys_mkdir((const char __user *)"/dev", 0755); if (err < 0) goto out; @@ -39,7 +39,7 @@ static int __init default_rootfs(void) if (err < 0) goto out; - err = sys_mkdir("/root", 0700); + err = sys_mkdir((const char __user *)"/root", 0700); if (err < 0) goto out; diff -urNp linux-2.6.22.1/ipc/msg.c linux-2.6.22.1/ipc/msg.c --- linux-2.6.22.1/ipc/msg.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/ipc/msg.c 2007-08-02 11:09:16.000000000 -0400 @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -288,6 +289,8 @@ asmlinkage long sys_msgget(key_t key, in } mutex_unlock(&msg_ids(ns).mutex); + gr_log_msgget(ret, msgflg); + return ret; } @@ -554,6 +557,7 @@ asmlinkage long sys_msgctl(int msqid, in break; } case IPC_RMID: + gr_log_msgrm(ipcp->uid, ipcp->cuid); freeque(ns, msq, msqid); break; } diff -urNp linux-2.6.22.1/ipc/sem.c linux-2.6.22.1/ipc/sem.c --- linux-2.6.22.1/ipc/sem.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/ipc/sem.c 2007-08-02 11:09:16.000000000 -0400 @@ -82,6 +82,7 @@ #include #include #include +#include #include #include "util.h" @@ -295,6 +296,9 @@ asmlinkage long sys_semget (key_t key, i } mutex_unlock(&sem_ids(ns).mutex); + + gr_log_semget(err, semflg); + return err; } @@ -896,6 +900,7 @@ static int semctl_down(struct ipc_namesp switch(cmd){ case IPC_RMID: + gr_log_semrm(ipcp->uid, ipcp->cuid); freeary(ns, sma, semid); err = 0; break; diff -urNp linux-2.6.22.1/ipc/shm.c linux-2.6.22.1/ipc/shm.c --- linux-2.6.22.1/ipc/shm.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/ipc/shm.c 2007-08-02 11:09:16.000000000 -0400 @@ -38,6 +38,7 @@ #include #include #include +#include #include @@ -77,6 +78,14 @@ static void shm_destroy (struct ipc_name static int sysvipc_shm_proc_show(struct seq_file *s, void *it); #endif +#ifdef CONFIG_GRKERNSEC +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const time_t shm_createtime, const uid_t cuid, + const int shmid); +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const time_t 
shm_createtime); +#endif + static void __ipc_init __shm_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids) { ns->ids[IPC_SHM_IDS] = ids; @@ -89,6 +98,8 @@ static void __ipc_init __shm_init_ns(str static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp) { + gr_log_shmrm(shp->shm_perm.uid, shp->shm_perm.cuid); + if (shp->shm_nattch){ shp->shm_perm.mode |= SHM_DEST; /* Do not find it any more */ @@ -218,6 +229,17 @@ static void shm_close(struct vm_area_str shp->shm_lprid = current->tgid; shp->shm_dtim = get_seconds(); shp->shm_nattch--; +#ifdef CONFIG_GRKERNSEC_SHM + if (grsec_enable_shm) { + if (shp->shm_nattch == 0) { + shp->shm_perm.mode |= SHM_DEST; + shm_destroy(ns, shp); + } else + shm_unlock(shp); + mutex_unlock(&shm_ids(ns).mutex); + return; + } +#endif if(shp->shm_nattch == 0 && shp->shm_perm.mode & SHM_DEST) shm_destroy(ns, shp); @@ -395,6 +417,9 @@ static int newseg (struct ipc_namespace shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; shp->shm_ctim = get_seconds(); +#ifdef CONFIG_GRKERNSEC + shp->shm_createtime = get_seconds(); +#endif shp->shm_segsz = size; shp->shm_nattch = 0; shp->id = shm_buildid(ns, id, shp->shm_perm.seq); @@ -452,6 +477,8 @@ asmlinkage long sys_shmget (key_t key, s } mutex_unlock(&shm_ids(ns).mutex); + gr_log_shmget(err, shmflg, size); + return err; } @@ -905,9 +932,21 @@ long do_shmat(int shmid, char __user *sh if (err) goto out_unlock; +#ifdef CONFIG_GRKERNSEC + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime, + shp->shm_perm.cuid, shmid) || + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) { + err = -EACCES; + goto out_unlock; + } +#endif + path.dentry = dget(shp->shm_file->f_path.dentry); path.mnt = mntget(shp->shm_file->f_path.mnt); shp->shm_nattch++; +#ifdef CONFIG_GRKERNSEC + shp->shm_lapid = current->pid; +#endif size = i_size_read(path.dentry->d_inode); shm_unlock(shp); @@ -1111,3 +1150,27 @@ static int sysvipc_shm_proc_show(struct shp->shm_ctim); } #endif + +void gr_shm_exit(struct task_struct *task) +{ +#ifdef CONFIG_GRKERNSEC_SHM + int i; + struct shmid_kernel *shp; + struct ipc_namespace *ns; + + ns = current->nsproxy->ipc_ns; + + if (!grsec_enable_shm) + return; + + for (i = 0; i <= shm_ids(ns).max_id; i++) { + shp = shm_get(ns, i); + if (shp && (shp->shm_cprid == task->pid) && + (shp->shm_nattch <= 0)) { + shp->shm_perm.mode |= SHM_DEST; + shm_destroy(ns, shp); + } + } +#endif + return; +} diff -urNp linux-2.6.22.1/kernel/acct.c linux-2.6.22.1/kernel/acct.c --- linux-2.6.22.1/kernel/acct.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/acct.c 2007-08-02 11:38:48.000000000 -0400 @@ -511,7 +511,7 @@ static void do_acct_process(struct file */ flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; - file->f_op->write(file, (char *)&ac, + file->f_op->write(file, (char __user *)&ac, sizeof(acct_t), &file->f_pos); current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim; set_fs(fs); diff -urNp linux-2.6.22.1/kernel/capability.c linux-2.6.22.1/kernel/capability.c --- linux-2.6.22.1/kernel/capability.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/capability.c 2007-08-02 11:09:16.000000000 -0400 @@ -12,6 +12,7 @@ #include #include #include +#include #include unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */ @@ -239,14 +240,25 @@ out: return ret; } +extern int gr_task_is_capable(struct task_struct *task, const int cap); +extern int gr_is_capable_nolog(const int cap); 
+ int __capable(struct task_struct *t, int cap) { - if (security_capable(t, cap) == 0) { + if ((security_capable(t, cap) == 0) && gr_task_is_capable(t, cap)) { t->flags |= PF_SUPERPRIV; return 1; } return 0; } +int capable_nolog(int cap) +{ + if ((security_capable(current, cap) == 0) && gr_is_capable_nolog(cap)) { + current->flags |= PF_SUPERPRIV; + return 1; + } + return 0; +} EXPORT_SYMBOL(__capable); #include @@ -258,3 +270,4 @@ int capable(int cap) return __capable(current, cap); } EXPORT_SYMBOL(capable); +EXPORT_SYMBOL(capable_nolog); diff -urNp linux-2.6.22.1/kernel/configs.c linux-2.6.22.1/kernel/configs.c --- linux-2.6.22.1/kernel/configs.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/configs.c 2007-08-02 11:09:16.000000000 -0400 @@ -79,8 +79,16 @@ static int __init ikconfig_init(void) struct proc_dir_entry *entry; /* create the current config file */ +#ifdef CONFIG_GRKERNSEC_PROC_ADD +#ifdef CONFIG_GRKERNSEC_PROC_USER + entry = create_proc_entry("config.gz", S_IFREG | S_IRUSR, &proc_root); +#elif CONFIG_GRKERNSEC_PROC_USERGROUP + entry = create_proc_entry("config.gz", S_IFREG | S_IRUSR | S_IRGRP, &proc_root); +#endif +#else entry = create_proc_entry("config.gz", S_IFREG | S_IRUGO, &proc_root); +#endif if (!entry) return -ENOMEM; diff -urNp linux-2.6.22.1/kernel/exit.c linux-2.6.22.1/kernel/exit.c --- linux-2.6.22.1/kernel/exit.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/exit.c 2007-08-02 11:38:48.000000000 -0400 @@ -44,6 +44,11 @@ #include #include #include +#include + +#ifdef CONFIG_GRKERNSEC +extern rwlock_t grsec_exec_file_lock; +#endif #include #include @@ -130,6 +135,7 @@ static void __exit_signal(struct task_st __unhash_process(tsk); + gr_del_task_from_ip_table(tsk); tsk->signal = NULL; tsk->sighand = NULL; spin_unlock(&sighand->siglock); @@ -282,12 +288,23 @@ static void reparent_to_kthreadd(void) { write_lock_irq(&tasklist_lock); +#ifdef CONFIG_GRKERNSEC + write_lock(&grsec_exec_file_lock); + if (current->exec_file) { + fput(current->exec_file); + current->exec_file = NULL; + } + write_unlock(&grsec_exec_file_lock); +#endif + ptrace_unlink(current); /* Reparent to init */ remove_parent(current); current->real_parent = current->parent = kthreadd_task; add_parent(current); + gr_set_kernel_label(current); + /* Set the exit signal to SIGCHLD so we signal init on exit */ current->exit_signal = SIGCHLD; @@ -382,6 +399,17 @@ void daemonize(const char *name, ...) vsnprintf(current->comm, sizeof(current->comm), name, args); va_end(args); +#ifdef CONFIG_GRKERNSEC + write_lock(&grsec_exec_file_lock); + if (current->exec_file) { + fput(current->exec_file); + current->exec_file = NULL; + } + write_unlock(&grsec_exec_file_lock); +#endif + + gr_set_kernel_label(current); + /* * If we were started as result of loading a module, close all of the * user space pages. 
We don't need them, and if we didn't close them @@ -943,11 +971,15 @@ fastcall NORET_TYPE void do_exit(long co taskstats_exit(tsk, group_dead); + gr_acl_handle_psacct(tsk, code); + gr_acl_handle_exit(); + exit_mm(tsk); if (group_dead) acct_process(); exit_sem(tsk); + gr_shm_exit(tsk); __exit_files(tsk); __exit_fs(tsk); exit_thread(); @@ -1148,7 +1180,7 @@ static int wait_task_zombie(struct task_ pid_t pid = p->pid; uid_t uid = p->uid; int exit_code = p->exit_code; - int why, status; + int why; if (unlikely(p->exit_state != EXIT_ZOMBIE)) return 0; diff -urNp linux-2.6.22.1/kernel/fork.c linux-2.6.22.1/kernel/fork.c --- linux-2.6.22.1/kernel/fork.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/fork.c 2007-08-02 11:38:48.000000000 -0400 @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -180,7 +181,7 @@ static struct task_struct *dup_task_stru setup_thread_stack(tsk, orig); #ifdef CONFIG_CC_STACKPROTECTOR - tsk->stack_canary = get_random_int(); + tsk->stack_canary = pax_get_random_long(); #endif /* One for us, one for whoever does the "release_task()" (usually parent) */ @@ -202,6 +203,10 @@ static inline int dup_mmap(struct mm_str unsigned long charge; struct mempolicy *pol; +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *mpnt_m; +#endif + down_write(&oldmm->mmap_sem); flush_cache_dup_mm(oldmm); /* @@ -212,8 +217,8 @@ static inline int dup_mmap(struct mm_str mm->locked_vm = 0; mm->mmap = NULL; mm->mmap_cache = NULL; - mm->free_area_cache = oldmm->mmap_base; - mm->cached_hole_size = ~0UL; + mm->free_area_cache = oldmm->free_area_cache; + mm->cached_hole_size = oldmm->cached_hole_size; mm->map_count = 0; __set_mm_counter(mm, file_rss, 0); __set_mm_counter(mm, anon_rss, 0); @@ -232,6 +237,7 @@ static inline int dup_mmap(struct mm_str continue; } charge = 0; + if (mpnt->vm_flags & VM_ACCOUNT) { unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; if (security_vm_enough_memory(len)) @@ -250,6 +256,7 @@ static inline int dup_mmap(struct mm_str tmp->vm_flags &= ~VM_LOCKED; tmp->vm_mm = mm; tmp->vm_next = NULL; + tmp->vm_mirror = NULL; anon_vma_link(tmp); file = tmp->vm_file; if (file) { @@ -286,6 +293,29 @@ static inline int dup_mmap(struct mm_str if (retval) goto out; } + +#ifdef CONFIG_PAX_SEGMEXEC + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) { + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) { + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm); + + if (!mpnt->vm_mirror) + continue; + + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) { + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt); + mpnt->vm_mirror = mpnt_m; + } else { + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm); + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror; + mpnt_m->vm_mirror->vm_mirror = mpnt_m; + mpnt->vm_mirror->vm_mirror = mpnt; + } + } + BUG_ON(mpnt_m); + } +#endif + /* a new mm has just been created */ arch_dup_mmap(oldmm, mm); retval = 0; @@ -461,7 +491,7 @@ void mm_release(struct task_struct *tsk, if (tsk->clear_child_tid && !(tsk->flags & PF_SIGNALED) && atomic_read(&mm->mm_users) > 1) { - u32 __user * tidptr = tsk->clear_child_tid; + pid_t __user * tidptr = tsk->clear_child_tid; tsk->clear_child_tid = NULL; /* @@ -480,7 +510,7 @@ void mm_release(struct task_struct *tsk, * not set up a proper pointer then tough luck. 
*/ put_user(0, tidptr); - sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0); + sys_futex((u32 __user *)tidptr, FUTEX_WAKE, 1, NULL, NULL, 0); } } @@ -996,6 +1026,9 @@ static struct task_struct *copy_process( } retval = -EAGAIN; + + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->user->processes), 0); + if (!vx_nproc_avail(1)) goto bad_fork_cleanup_vm; @@ -1131,6 +1164,8 @@ static struct task_struct *copy_process( if (retval) goto bad_fork_cleanup_namespaces; + gr_copy_label(p); + p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; /* * Clear TID on mm_release()? @@ -1309,6 +1344,8 @@ bad_fork_cleanup_count: bad_fork_free: free_task(p); fork_out: + gr_log_forkfail(retval); + return ERR_PTR(retval); } @@ -1382,6 +1419,8 @@ long do_fork(unsigned long clone_flags, if (!IS_ERR(p)) { struct completion vfork; + gr_handle_brute_check(); + if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); diff -urNp linux-2.6.22.1/kernel/futex.c linux-2.6.22.1/kernel/futex.c --- linux-2.6.22.1/kernel/futex.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/futex.c 2007-08-02 11:38:48.000000000 -0400 @@ -168,6 +168,11 @@ int get_futex_key(u32 __user *uaddr, str struct page *page; int err; +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((unsigned long)uaddr >= SEGMEXEC_TASK_SIZE)) + return -EFAULT; +#endif + /* * The futex address must be "naturally" aligned. */ @@ -194,8 +199,8 @@ int get_futex_key(u32 __user *uaddr, str * The futex is hashed differently depending on whether * it's in a shared or private mapping. So check vma first. */ - vma = find_extend_vma(mm, address); - if (unlikely(!vma)) + vma = find_vma(mm, address); + if (unlikely(!vma || address < vma->vm_start)) return -EFAULT; /* @@ -1921,7 +1926,7 @@ retry: */ static inline int fetch_robust_entry(struct robust_list __user **entry, struct robust_list __user * __user *head, - int *pi) + unsigned int *pi) { unsigned long uentry; diff -urNp linux-2.6.22.1/kernel/irq/handle.c linux-2.6.22.1/kernel/irq/handle.c --- linux-2.6.22.1/kernel/irq/handle.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/irq/handle.c 2007-08-02 11:38:48.000000000 -0400 @@ -55,7 +55,8 @@ struct irq_desc irq_desc[NR_IRQS] __cach .depth = 1, .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), #ifdef CONFIG_SMP - .affinity = CPU_MASK_ALL + .affinity = CPU_MASK_ALL, + .cpu = 0, #endif } }; diff -urNp linux-2.6.22.1/kernel/kallsyms.c linux-2.6.22.1/kernel/kallsyms.c --- linux-2.6.22.1/kernel/kallsyms.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/kallsyms.c 2007-08-02 11:38:48.000000000 -0400 @@ -65,6 +65,19 @@ static inline int is_kernel_text(unsigne static inline int is_kernel(unsigned long addr) { + +#ifdef CONFIG_PAX_KERNEXEC + +#ifdef CONFIG_MODULES + if ((unsigned long)MODULES_VADDR <= addr + __KERNEL_TEXT_OFFSET && + addr + __KERNEL_TEXT_OFFSET < (unsigned long)MODULES_END) + return 0; +#endif + + if (is_kernel_inittext(addr)) + return 1; +#endif + if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) return 1; return in_gate_area_no_task(addr); @@ -374,7 +383,6 @@ static unsigned long get_ksymbol_core(st static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) { - iter->name[0] = '\0'; iter->nameoff = get_symbol_offset(new_pos); iter->pos = new_pos; } @@ -458,7 +466,7 @@ static int kallsyms_open(struct inode *i struct kallsym_iter *iter; int ret; - iter = kmalloc(sizeof(*iter), GFP_KERNEL); + iter = kzalloc(sizeof(*iter), GFP_KERNEL); if 
(!iter) return -ENOMEM; reset_iter(iter, 0); @@ -482,7 +490,15 @@ static int __init kallsyms_init(void) { struct proc_dir_entry *entry; +#ifdef CONFIG_GRKERNSEC_PROC_ADD +#ifdef CONFIG_GRKERNSEC_PROC_USER + entry = create_proc_entry("kallsyms", S_IFREG | S_IRUSR, NULL); +#elif CONFIG_GRKERNSEC_PROC_USERGROUP + entry = create_proc_entry("kallsyms", S_IFREG | S_IRUSR | S_IRGRP, NULL); +#endif +#else entry = create_proc_entry("kallsyms", 0444, NULL); +#endif if (entry) entry->proc_fops = &kallsyms_operations; return 0; diff -urNp linux-2.6.22.1/kernel/kprobes.c linux-2.6.22.1/kernel/kprobes.c --- linux-2.6.22.1/kernel/kprobes.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/kprobes.c 2007-08-02 11:38:48.000000000 -0400 @@ -168,7 +168,7 @@ kprobe_opcode_t __kprobes *get_insn_slot * kernel image and loaded module images reside. This is required * so x86_64 can correctly handle the %rip-relative fixups. */ - kip->insns = module_alloc(PAGE_SIZE); + kip->insns = module_alloc_exec(PAGE_SIZE); if (!kip->insns) { kfree(kip); return NULL; @@ -200,7 +200,7 @@ static int __kprobes collect_one_slot(st hlist_add_head(&kip->hlist, &kprobe_insn_pages); } else { - module_free(NULL, kip->insns); + module_free_exec(NULL, kip->insns); kfree(kip); } return 1; diff -urNp linux-2.6.22.1/kernel/module.c linux-2.6.22.1/kernel/module.c --- linux-2.6.22.1/kernel/module.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/module.c 2007-08-02 11:38:48.000000000 -0400 @@ -44,6 +44,11 @@ #include #include #include + +#ifdef CONFIG_PAX_KERNEXEC +#include +#endif + #include extern int module_sysfs_initialized; @@ -70,6 +75,8 @@ static LIST_HEAD(modules); static BLOCKING_NOTIFIER_HEAD(module_notify_list); +extern int gr_check_modstop(void); + int register_module_notifier(struct notifier_block * nb) { return blocking_notifier_chain_register(&module_notify_list, nb); @@ -349,7 +356,7 @@ static void *percpu_modalloc(unsigned lo unsigned int i; void *ptr; - if (align > PAGE_SIZE) { + if (align-1 >= PAGE_SIZE) { printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", name, align, PAGE_SIZE); align = PAGE_SIZE; @@ -663,6 +670,9 @@ sys_delete_module(const char __user *nam char name[MODULE_NAME_LEN]; int ret, forced = 0; + if (gr_check_modstop()) + return -EPERM; + if (!capable(CAP_SYS_MODULE)) return -EPERM; @@ -1216,16 +1226,19 @@ static void free_module(struct module *m module_unload_free(mod); /* This may be NULL, but that's OK */ - module_free(mod, mod->module_init); + module_free(mod, mod->module_init_rw); + module_free_exec(mod, mod->module_init_rx); kfree(mod->args); if (mod->percpu) percpu_modfree(mod->percpu); /* Free lock-classes: */ - lockdep_free_key_range(mod->module_core, mod->core_size); + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx); + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw); /* Finally, free the core (containing the module structure) */ - module_free(mod, mod->module_core); + module_free_exec(mod, mod->module_core_rx); + module_free(mod, mod->module_core_rw); } void *__symbol_get(const char *symbol) @@ -1290,6 +1303,10 @@ static int simplify_symbols(Elf_Shdr *se unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); int ret = 0; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + for (i = 1; i < n; i++) { switch (sym[i].st_shndx) { case SHN_COMMON: @@ -1308,10 +1325,19 @@ static int simplify_symbols(Elf_Shdr *se break; case SHN_UNDEF: + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + sym[i].st_value = 
resolve_symbol(sechdrs, versindex, strtab + sym[i].st_name, mod); +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + /* Ok if resolved. */ if (sym[i].st_value != 0) break; @@ -1326,11 +1352,27 @@ static int simplify_symbols(Elf_Shdr *se default: /* Divert to percpu allocation if a percpu var. */ - if (sym[i].st_shndx == pcpuindex) + if (sym[i].st_shndx == pcpuindex) { + +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) + secbase = (unsigned long)mod->percpu - (unsigned long)__per_cpu_start; +#else secbase = (unsigned long)mod->percpu; - else +#endif + + } else secbase = sechdrs[sym[i].st_shndx].sh_addr; + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + sym[i].st_value += secbase; + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + break; } } @@ -1382,11 +1424,14 @@ static void layout_sections(struct modul || strncmp(secstrings + s->sh_name, ".init", 5) == 0) continue; - s->sh_entsize = get_offset(&mod->core_size, s); + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC)) + s->sh_entsize = get_offset(&mod->core_size_rw, s); + else + s->sh_entsize = get_offset(&mod->core_size_rx, s); DEBUGP("\t%s\n", secstrings + s->sh_name); } if (m == 0) - mod->core_text_size = mod->core_size; + mod->core_size_rx = mod->core_size_rx; } DEBUGP("Init section allocation order:\n"); @@ -1400,12 +1445,15 @@ static void layout_sections(struct modul || strncmp(secstrings + s->sh_name, ".init", 5) != 0) continue; - s->sh_entsize = (get_offset(&mod->init_size, s) - | INIT_OFFSET_MASK); + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC)) + s->sh_entsize = get_offset(&mod->init_size_rw, s); + else + s->sh_entsize = get_offset(&mod->init_size_rx, s); + s->sh_entsize |= INIT_OFFSET_MASK; DEBUGP("\t%s\n", secstrings + s->sh_name); } if (m == 0) - mod->init_text_size = mod->init_size; + mod->init_size_rx = mod->init_size_rx; } } @@ -1532,14 +1580,28 @@ static void add_kallsyms(struct module * { unsigned int i; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + mod->symtab = (void *)sechdrs[symindex].sh_addr; mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym); mod->strtab = (void *)sechdrs[strindex].sh_addr; /* Set types up while we still have access to sections. */ + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + for (i = 0; i < mod->num_symtab; i++) mod->symtab[i].st_info = elf_type(&mod->symtab[i], sechdrs, secstrings, mod); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + } #else static inline void add_kallsyms(struct module *mod, @@ -1587,6 +1649,10 @@ static struct module *load_module(void _ struct exception_table_entry *extable; mm_segment_t old_fs; +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", umod, len, uargs); if (len < sizeof(*hdr)) @@ -1745,21 +1811,57 @@ static struct module *load_module(void _ layout_sections(mod, hdr, sechdrs, secstrings); /* Do the allocs. 
*/ - ptr = module_alloc(mod->core_size); + ptr = module_alloc(mod->core_size_rw); if (!ptr) { err = -ENOMEM; goto free_percpu; } - memset(ptr, 0, mod->core_size); - mod->module_core = ptr; + memset(ptr, 0, mod->core_size_rw); + mod->module_core_rw = ptr; + + ptr = module_alloc(mod->init_size_rw); + if (!ptr && mod->init_size_rw) { + err = -ENOMEM; + goto free_core_rw; + } + memset(ptr, 0, mod->init_size_rw); + mod->module_init_rw = ptr; + + ptr = module_alloc_exec(mod->core_size_rx); + if (!ptr) { + err = -ENOMEM; + goto free_init_rw; + } + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + + memset(ptr, 0, mod->core_size_rx); - ptr = module_alloc(mod->init_size); - if (!ptr && mod->init_size) { +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + + mod->module_core_rx = ptr; + + ptr = module_alloc_exec(mod->init_size_rx); + if (!ptr && mod->init_size_rx) { err = -ENOMEM; - goto free_core; + goto free_core_rx; } - memset(ptr, 0, mod->init_size); - mod->module_init = ptr; + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + + memset(ptr, 0, mod->init_size_rx); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + + mod->module_init_rx = ptr; /* Transfer each section which specifies SHF_ALLOC */ DEBUGP("final section addresses:\n"); @@ -1769,17 +1871,44 @@ static struct module *load_module(void _ if (!(sechdrs[i].sh_flags & SHF_ALLOC)) continue; - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) - dest = mod->module_init - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK); - else - dest = mod->module_core + sechdrs[i].sh_entsize; + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) { + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC)) + dest = mod->module_init_rw + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK); + else + dest = mod->module_init_rx + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK); + } else { + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC)) + dest = mod->module_core_rw + sechdrs[i].sh_entsize; + else + dest = mod->module_core_rx + sechdrs[i].sh_entsize; + } + + if (sechdrs[i].sh_type != SHT_NOBITS) { + +#ifdef CONFIG_PAX_KERNEXEC + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) + pax_open_kernel(cr0); +#endif - if (sechdrs[i].sh_type != SHT_NOBITS) - memcpy(dest, (void *)sechdrs[i].sh_addr, - sechdrs[i].sh_size); + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size); + +#ifdef CONFIG_PAX_KERNEXEC + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) + pax_close_kernel(cr0); +#endif + + } /* Update sh_addr to point to copy in image. */ - sechdrs[i].sh_addr = (unsigned long)dest; + +#ifdef CONFIG_PAX_KERNEXEC + if (sechdrs[i].sh_flags & SHF_EXECINSTR) + sechdrs[i].sh_addr = (unsigned long)dest - __KERNEL_TEXT_OFFSET; + else +#endif + + sechdrs[i].sh_addr = (unsigned long)dest; DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name); } /* Module has been moved. 
*/ @@ -1860,11 +1989,20 @@ static struct module *load_module(void _ if (!(sechdrs[info].sh_flags & SHF_ALLOC)) continue; +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + if (sechdrs[i].sh_type == SHT_REL) err = apply_relocate(sechdrs, strtab, symindex, i,mod); else if (sechdrs[i].sh_type == SHT_RELA) err = apply_relocate_add(sechdrs, strtab, symindex, i, mod); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + if (err < 0) goto cleanup; } @@ -1899,12 +2037,12 @@ static struct module *load_module(void _ * Do it before processing of module parameters, so the module * can provide parameter accessor functions of its own. */ - if (mod->module_init) - flush_icache_range((unsigned long)mod->module_init, - (unsigned long)mod->module_init - + mod->init_size); - flush_icache_range((unsigned long)mod->module_core, - (unsigned long)mod->module_core + mod->core_size); + if (mod->module_init_rx) + flush_icache_range((unsigned long)mod->module_init_rx, + (unsigned long)mod->module_init_rx + + mod->init_size_rx); + flush_icache_range((unsigned long)mod->module_core_rx, + (unsigned long)mod->module_core_rx + mod->core_size_rx); set_fs(old_fs); @@ -1947,9 +2085,13 @@ static struct module *load_module(void _ module_arch_cleanup(mod); cleanup: module_unload_free(mod); - module_free(mod, mod->module_init); - free_core: - module_free(mod, mod->module_core); + module_free_exec(mod, mod->module_init_rx); + free_core_rx: + module_free_exec(mod, mod->module_core_rx); + free_init_rw: + module_free(mod, mod->module_init_rw); + free_core_rw: + module_free(mod, mod->module_core_rw); free_percpu: if (percpu) percpu_modfree(percpu); @@ -1985,6 +2127,9 @@ sys_init_module(void __user *umod, struct module *mod; int ret = 0; + if (gr_check_modstop()) + return -EPERM; + /* Must have permission */ if (!capable(CAP_SYS_MODULE)) return -EPERM; @@ -2036,10 +2181,12 @@ sys_init_module(void __user *umod, /* Drop initial reference. 
*/ module_put(mod); unwind_remove_table(mod->unwind_info, 1); - module_free(mod, mod->module_init); - mod->module_init = NULL; - mod->init_size = 0; - mod->init_text_size = 0; + module_free(mod, mod->module_init_rw); + module_free_exec(mod, mod->module_init_rx); + mod->module_init_rw = NULL; + mod->module_init_rx = NULL; + mod->init_size_rw = 0; + mod->init_size_rx = 0; mutex_unlock(&module_mutex); return 0; @@ -2047,6 +2194,13 @@ sys_init_module(void __user *umod, static inline int within(unsigned long addr, void *start, unsigned long size) { + +#ifdef CONFIG_PAX_KERNEXEC + if (addr + __KERNEL_TEXT_OFFSET >= (unsigned long)start && + addr + __KERNEL_TEXT_OFFSET < (unsigned long)start + size) + return 1; +#endif + return ((void *)addr >= start && (void *)addr < start + size); } @@ -2070,10 +2224,14 @@ static const char *get_ksymbol(struct mo unsigned long nextval; /* At worse, next value is at end of module */ - if (within(addr, mod->module_init, mod->init_size)) - nextval = (unsigned long)mod->module_init+mod->init_text_size; - else - nextval = (unsigned long)mod->module_core+mod->core_text_size; + if (within(addr, mod->module_init_rx, mod->init_size_rx)) + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx; + else if (within(addr, mod->module_init_rw, mod->init_size_rw)) + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw; + else if (within(addr, mod->module_core_rx, mod->core_size_rx)) + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx; + else + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw; /* Scan for closest preceeding symbol, and next symbol. (ELF starts real symbols at 1). */ @@ -2116,8 +2274,10 @@ const char *module_address_lookup(unsign struct module *mod; list_for_each_entry(mod, &modules, list) { - if (within(addr, mod->module_init, mod->init_size) - || within(addr, mod->module_core, mod->core_size)) { + if (within(addr, mod->module_init_rx, mod->init_size_rx) || + within(addr, mod->module_init_rw, mod->init_size_rw) || + within(addr, mod->module_core_rx, mod->core_size_rx) || + within(addr, mod->module_core_rw, mod->core_size_rw)) { if (modname) *modname = mod->name; return get_ksymbol(mod, addr, size, offset); @@ -2132,8 +2292,10 @@ int lookup_module_symbol_name(unsigned l mutex_lock(&module_mutex); list_for_each_entry(mod, &modules, list) { - if (within(addr, mod->module_init, mod->init_size) || - within(addr, mod->module_core, mod->core_size)) { + if (within(addr, mod->module_init_rx, mod->init_size_rx) || + within(addr, mod->module_init_rw, mod->init_size_rw) || + within(addr, mod->module_core_rx, mod->core_size_rx) || + within(addr, mod->module_core_rw, mod->core_size_rw)) { const char *sym; sym = get_ksymbol(mod, addr, NULL, NULL); @@ -2156,8 +2318,10 @@ int lookup_module_symbol_attrs(unsigned mutex_lock(&module_mutex); list_for_each_entry(mod, &modules, list) { - if (within(addr, mod->module_init, mod->init_size) || - within(addr, mod->module_core, mod->core_size)) { + if (within(addr, mod->module_init_rx, mod->init_size_rx) || + within(addr, mod->module_init_rw, mod->init_size_rw) || + within(addr, mod->module_core_rx, mod->core_size_rx) || + within(addr, mod->module_core_rw, mod->core_size_rw)) { const char *sym; sym = get_ksymbol(mod, addr, size, offset); @@ -2290,7 +2454,7 @@ static int m_show(struct seq_file *m, vo char buf[8]; seq_printf(m, "%s %lu", - mod->name, mod->init_size + mod->core_size); + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw); print_unload_info(m, 
mod); /* Informative for users. */ @@ -2299,7 +2463,7 @@ static int m_show(struct seq_file *m, vo mod->state == MODULE_STATE_COMING ? "Loading": "Live"); /* Used by oprofile and other similar tools. */ - seq_printf(m, " 0x%p", mod->module_core); + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw); /* Taints info */ if (mod->taints) @@ -2357,7 +2521,8 @@ int is_module_address(unsigned long addr spin_lock_irqsave(&modlist_lock, flags); list_for_each_entry(mod, &modules, list) { - if (within(addr, mod->module_core, mod->core_size)) { + if (within(addr, mod->module_core_rx, mod->core_size_rx) || + within(addr, mod->module_core_rw, mod->core_size_rw)) { spin_unlock_irqrestore(&modlist_lock, flags); return 1; } @@ -2375,8 +2540,8 @@ struct module *__module_text_address(uns struct module *mod; list_for_each_entry(mod, &modules, list) - if (within(addr, mod->module_init, mod->init_text_size) - || within(addr, mod->module_core, mod->core_text_size)) + if (within(addr, mod->module_init_rx, mod->init_size_rx) + || within(addr, mod->module_core_rx, mod->core_size_rx)) return mod; return NULL; } diff -urNp linux-2.6.22.1/kernel/mutex.c linux-2.6.22.1/kernel/mutex.c --- linux-2.6.22.1/kernel/mutex.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/mutex.c 2007-08-02 11:38:48.000000000 -0400 @@ -81,7 +81,7 @@ __mutex_lock_slowpath(atomic_t *lock_cou * * This function is similar to (but not equivalent to) down(). */ -void inline fastcall __sched mutex_lock(struct mutex *lock) +inline void fastcall __sched mutex_lock(struct mutex *lock) { might_sleep(); /* diff -urNp linux-2.6.22.1/kernel/params.c linux-2.6.22.1/kernel/params.c --- linux-2.6.22.1/kernel/params.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/params.c 2007-08-02 11:38:48.000000000 -0400 @@ -275,7 +275,7 @@ static int param_array(const char *name, unsigned int min, unsigned int max, void *elem, int elemsize, int (*set)(const char *, struct kernel_param *kp), - int *num) + unsigned int *num) { int ret; struct kernel_param kp; diff -urNp linux-2.6.22.1/kernel/pid.c linux-2.6.22.1/kernel/pid.c --- linux-2.6.22.1/kernel/pid.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/pid.c 2007-08-02 11:38:48.000000000 -0400 @@ -29,6 +29,7 @@ #include #include #include +#include #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift) static struct hlist_head *pid_hash; @@ -38,7 +39,7 @@ struct pid init_struct_pid = INIT_STRUCT int pid_max = PID_MAX_DEFAULT; -#define RESERVED_PIDS 300 +#define RESERVED_PIDS 500 int pid_max_min = RESERVED_PIDS + 1; int pid_max_max = PID_MAX_LIMIT; @@ -319,6 +320,10 @@ struct task_struct *find_task_by_pid_typ nr = vx_rmap_pid(nr); task = pid_task(find_pid(nr), type); + + if (gr_pid_is_chrooted(task)) + return NULL; + if (task && (type != PIDTYPE_REALPID) && /* maybe VS_WATCH_P in the future? 
*/ !vx_check(task->xid, VS_WATCH|VS_IDENT)) diff -urNp linux-2.6.22.1/kernel/posix-cpu-timers.c linux-2.6.22.1/kernel/posix-cpu-timers.c --- linux-2.6.22.1/kernel/posix-cpu-timers.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/posix-cpu-timers.c 2007-08-02 11:09:16.000000000 -0400 @@ -6,6 +6,7 @@ #include #include #include +#include static int check_clock(const clockid_t which_clock) { @@ -1144,6 +1145,7 @@ static void check_process_timers(struct __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); return; } + gr_learn_resource(tsk, RLIMIT_CPU, psecs, 1); if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) { /* * At the soft limit, send a SIGXCPU every second. diff -urNp linux-2.6.22.1/kernel/power/poweroff.c linux-2.6.22.1/kernel/power/poweroff.c --- linux-2.6.22.1/kernel/power/poweroff.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/power/poweroff.c 2007-08-02 11:38:48.000000000 -0400 @@ -35,7 +35,7 @@ static struct sysrq_key_op sysrq_powerof .enable_mask = SYSRQ_ENABLE_BOOT, }; -static int pm_sysrq_init(void) +static int __init pm_sysrq_init(void) { register_sysrq_key('o', &sysrq_poweroff_op); return 0; diff -urNp linux-2.6.22.1/kernel/printk.c linux-2.6.22.1/kernel/printk.c --- linux-2.6.22.1/kernel/printk.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/printk.c 2007-08-02 11:09:16.000000000 -0400 @@ -31,6 +31,7 @@ #include #include #include +#include #include @@ -186,6 +187,11 @@ int do_syslog(int type, char __user *buf char c; int error; +#ifdef CONFIG_GRKERNSEC_DMESG + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN)) + return -EPERM; +#endif + error = security_syslog(type); if (error) return error; diff -urNp linux-2.6.22.1/kernel/ptrace.c linux-2.6.22.1/kernel/ptrace.c --- linux-2.6.22.1/kernel/ptrace.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/ptrace.c 2007-08-02 11:11:54.000000000 -0400 @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -139,12 +140,12 @@ static int may_attach(struct task_struct (current->uid != task->uid) || (current->gid != task->egid) || (current->gid != task->sgid) || - (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE)) + (current->gid != task->gid)) && !capable_nolog(CAP_SYS_PTRACE)) return -EPERM; smp_rmb(); if (task->mm) dumpable = task->mm->dumpable; - if (!dumpable && !capable(CAP_SYS_PTRACE)) + if (!dumpable && !capable_nolog(CAP_SYS_PTRACE)) return -EPERM; if (!vx_check(task->xid, VS_ADMIN_P|VS_IDENT)) return -EPERM; @@ -490,6 +491,11 @@ asmlinkage long sys_ptrace(long request, if (ret < 0) goto out_put_task_struct; + if (gr_handle_ptrace(child, request)) { + ret = -EPERM; + goto out_put_task_struct; + } + ret = arch_ptrace(child, request, addr, data); if (ret < 0) goto out_put_task_struct; diff -urNp linux-2.6.22.1/kernel/rcupdate.c linux-2.6.22.1/kernel/rcupdate.c --- linux-2.6.22.1/kernel/rcupdate.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/rcupdate.c 2007-08-02 11:38:48.000000000 -0400 @@ -63,11 +63,11 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk .cpumask = CPU_MASK_NONE, }; -DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L }; -DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L }; +DEFINE_PER_CPU(struct rcu_data, rcu_data); +DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); /* Fake initialization required by compiler */ -static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL}; +static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet); static int blimit = 10; static int qhimark = 10000; static int qlowmark = 100; diff -urNp 
linux-2.6.22.1/kernel/resource.c linux-2.6.22.1/kernel/resource.c --- linux-2.6.22.1/kernel/resource.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/resource.c 2007-08-02 11:09:16.000000000 -0400 @@ -133,10 +133,27 @@ static int __init ioresources_init(void) { struct proc_dir_entry *entry; +#ifdef CONFIG_GRKERNSEC_PROC_ADD +#ifdef CONFIG_GRKERNSEC_PROC_USER + entry = create_proc_entry("ioports", S_IRUSR, NULL); +#elif CONFIG_GRKERNSEC_PROC_USERGROUP + entry = create_proc_entry("ioports", S_IRUSR | S_IRGRP, NULL); +#endif +#else entry = create_proc_entry("ioports", 0, NULL); +#endif if (entry) entry->proc_fops = &proc_ioports_operations; + +#ifdef CONFIG_GRKERNSEC_PROC_ADD +#ifdef CONFIG_GRKERNSEC_PROC_USER + entry = create_proc_entry("iomem", S_IRUSR, NULL); +#elif CONFIG_GRKERNSEC_PROC_USERGROUP + entry = create_proc_entry("iomem", S_IRUSR | S_IRGRP, NULL); +#endif +#else entry = create_proc_entry("iomem", 0, NULL); +#endif if (entry) entry->proc_fops = &proc_iomem_operations; return 0; diff -urNp linux-2.6.22.1/kernel/sched.c linux-2.6.22.1/kernel/sched.c --- linux-2.6.22.1/kernel/sched.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/sched.c 2007-08-02 11:38:48.000000000 -0400 @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -3571,7 +3572,7 @@ asmlinkage void __sched schedule(void) unsigned long long now; unsigned long run_time; int cpu, idx, new_prio; - long *switch_count; + unsigned long *switch_count; struct rq *rq; /* @@ -4262,7 +4263,8 @@ asmlinkage long sys_nice(int increment) if (nice > 19) nice = 19; - if (increment < 0 && !can_nice(current, nice)) + if (increment < 0 && (!can_nice(current, nice) || + gr_handle_chroot_nice())) return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM; retval = security_task_setnice(current, nice); diff -urNp linux-2.6.22.1/kernel/signal.c linux-2.6.22.1/kernel/signal.c --- linux-2.6.22.1/kernel/signal.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/signal.c 2007-08-02 11:13:50.000000000 -0400 @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -538,11 +539,11 @@ static int check_kill_permission(int sig return error; error = -EPERM; - if (((sig != SIGCONT) || + if ((((sig != SIGCONT) || (process_session(current) != process_session(t))) && (current->euid ^ t->suid) && (current->euid ^ t->uid) && (current->uid ^ t->suid) && (current->uid ^ t->uid) - && !capable(CAP_KILL)) + && !capable(CAP_KILL)) || gr_handle_signal(t, sig)) return error; error = -ESRCH; @@ -736,7 +737,7 @@ out_set: (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig))) -static int +int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) { int ret = 0; @@ -773,6 +774,10 @@ force_sig_info(int sig, struct siginfo * } } ret = specific_send_sig_info(sig, info, t); + + gr_log_signal(sig, t); + gr_handle_crash(t, sig); + spin_unlock_irqrestore(&t->sighand->siglock, flags); return ret; diff -urNp linux-2.6.22.1/kernel/softirq.c linux-2.6.22.1/kernel/softirq.c --- linux-2.6.22.1/kernel/softirq.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/softirq.c 2007-08-02 11:38:48.000000000 -0400 @@ -470,9 +470,9 @@ void tasklet_kill(struct tasklet_struct printk("Attempt to kill tasklet from interrupt\n"); while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { - do + do { yield(); - while (test_bit(TASKLET_STATE_SCHED, &t->state)); + } while (test_bit(TASKLET_STATE_SCHED, &t->state)); } tasklet_unlock_wait(t); clear_bit(TASKLET_STATE_SCHED, &t->state); 
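The resource.c hunk above shows a pattern this patch applies to several procfs entries: when CONFIG_GRKERNSEC_PROC_ADD is enabled, ioports and iomem are created with a restricted mode (owner-only under CONFIG_GRKERNSEC_PROC_USER, owner plus a configured group under CONFIG_GRKERNSEC_PROC_USERGROUP) instead of the default world-readable mode. The standalone C sketch below only illustrates how such an #ifdef ladder resolves to a mode value; it is not part of the patch, the helper name grsec_proc_mode is invented for the illustration, and the defined() form is a tidied version of the ladder used in the hunk.

#include <stdio.h>
#include <sys/stat.h>

/* Uncomment to mimic a Kconfig selection (illustrative only). */
/* #define CONFIG_GRKERNSEC_PROC_ADD */
/* #define CONFIG_GRKERNSEC_PROC_USER */
/* #define CONFIG_GRKERNSEC_PROC_USERGROUP */

static mode_t grsec_proc_mode(void)
{
#if defined(CONFIG_GRKERNSEC_PROC_ADD) && defined(CONFIG_GRKERNSEC_PROC_USER)
	return S_IRUSR;			/* 0400: readable by root only */
#elif defined(CONFIG_GRKERNSEC_PROC_ADD) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
	return S_IRUSR | S_IRGRP;	/* 0440: root plus the configured group */
#else
	return 0;			/* 0 lets create_proc_entry() fall back to its default */
#endif
}

int main(void)
{
	printf("ioports/iomem would be registered with mode %04o\n",
	       (unsigned int)grsec_proc_mode());
	return 0;
}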
diff -urNp linux-2.6.22.1/kernel/sys.c linux-2.6.22.1/kernel/sys.c --- linux-2.6.22.1/kernel/sys.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/sys.c 2007-08-02 11:38:48.000000000 -0400 @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -641,6 +642,12 @@ static int set_one_prio(struct task_stru error = -EACCES; goto out; } + + if (gr_handle_chroot_setpriority(p, niceval)) { + error = -EACCES; + goto out; + } + no_nice = security_task_setnice(p, niceval); if (no_nice) { error = no_nice; @@ -697,10 +704,10 @@ asmlinkage long sys_setpriority(int whic !(user = find_user(vx_current_xid(), who))) goto out_unlock; /* No processes for this user */ - do_each_thread(g, p) + do_each_thread(g, p) { if (p->uid == who) error = set_one_prio(p, niceval, error); - while_each_thread(g, p); + } while_each_thread(g, p); if (who != current->uid) free_uid(user); /* For find_user() */ break; @@ -759,13 +766,13 @@ asmlinkage long sys_getpriority(int whic !(user = find_user(vx_current_xid(), who))) goto out_unlock; /* No processes for this user */ - do_each_thread(g, p) + do_each_thread(g, p) { if (p->uid == who) { niceval = 20 - task_nice(p); if (niceval > retval) retval = niceval; } - while_each_thread(g, p); + } while_each_thread(g, p); if (who != current->uid) free_uid(user); /* for find_user() */ break; @@ -1031,6 +1038,9 @@ asmlinkage long sys_setregid(gid_t rgid, if (rgid != (gid_t) -1 || (egid != (gid_t) -1 && egid != old_rgid)) current->sgid = new_egid; + + gr_set_role_label(current, current->uid, new_rgid); + current->fsgid = new_egid; current->egid = new_egid; current->gid = new_rgid; @@ -1058,6 +1068,9 @@ asmlinkage long sys_setgid(gid_t gid) current->mm->dumpable = suid_dumpable; smp_wmb(); } + + gr_set_role_label(current, current->uid, gid); + current->gid = current->egid = current->sgid = current->fsgid = gid; } else if ((gid == current->gid) || (gid == current->sgid)) { if (old_egid != gid) { @@ -1095,6 +1108,9 @@ static int set_user(uid_t new_ruid, int current->mm->dumpable = suid_dumpable; smp_wmb(); } + + gr_set_role_label(current, new_ruid, current->gid); + current->uid = new_ruid; return 0; } @@ -1197,6 +1213,9 @@ asmlinkage long sys_setuid(uid_t uid) } else if ((uid != current->uid) && (uid != new_suid)) return -EPERM; + if (gr_check_crash_uid(uid)) + return -EPERM; + if (old_euid != uid) { current->mm->dumpable = suid_dumpable; smp_wmb(); @@ -1299,8 +1318,10 @@ asmlinkage long sys_setresgid(gid_t rgid current->egid = egid; } current->fsgid = current->egid; - if (rgid != (gid_t) -1) + if (rgid != (gid_t) -1) { + gr_set_role_label(current, current->uid, rgid); current->gid = rgid; + } if (sgid != (gid_t) -1) current->sgid = sgid; @@ -1448,7 +1469,10 @@ asmlinkage long sys_setpgid(pid_t pid, p write_lock_irq(&tasklist_lock); err = -ESRCH; - p = find_task_by_pid(pid); + /* grsec: replaced find_task_by_pid with equivalent call + which lacks the chroot restriction + */ + p = pid_task(find_pid(pid), PIDTYPE_PID); if (!p) goto out; @@ -2168,7 +2192,7 @@ asmlinkage long sys_prctl(int option, un error = current->mm->dumpable; break; case PR_SET_DUMPABLE: - if (arg2 < 0 || arg2 > 1) { + if (arg2 > 1) { error = -EINVAL; break; } diff -urNp linux-2.6.22.1/kernel/sysctl.c linux-2.6.22.1/kernel/sysctl.c --- linux-2.6.22.1/kernel/sysctl.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/sysctl.c 2007-08-02 11:38:48.000000000 -0400 @@ -58,6 +58,13 @@ extern int proc_nr_files(ctl_table *tabl #endif #if defined(CONFIG_SYSCTL) +#include +#include + +extern 
__u32 gr_handle_sysctl(const ctl_table *table, const int op); +extern int gr_handle_sysctl_mod(const char *dirname, const char *name, + const int op); +extern int gr_handle_chroot_sysctl(const int op); /* External variables not in a header file. */ extern int C_A_D; @@ -141,7 +148,7 @@ static int proc_dointvec_taint(ctl_table static ctl_table root_table[]; static struct ctl_table_header root_table_header = - { root_table, LIST_HEAD_INIT(root_table_header.ctl_entry) }; + { root_table, LIST_HEAD_INIT(root_table_header.ctl_entry), 0, NULL }; static ctl_table kern_table[]; static ctl_table vm_table[]; @@ -155,11 +162,26 @@ extern ctl_table pty_table[]; #ifdef CONFIG_INOTIFY_USER extern ctl_table inotify_table[]; #endif +extern ctl_table grsecurity_table[]; #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT int sysctl_legacy_va_layout; #endif +#ifdef CONFIG_PAX_SOFTMODE +static ctl_table pax_table[] = { + { + .ctl_name = PAX_SOFTMODE, + .procname = "softmode", + .data = &pax_softmode, + .maxlen = sizeof(unsigned int), + .mode = 0600, + .proc_handler = &proc_dointvec, + }, + + { .ctl_name = 0 } +}; +#endif /* The default sysctl tables: */ @@ -202,7 +224,6 @@ static ctl_table root_table[] = { .mode = 0555, .child = dev_table, }, - { .ctl_name = 0 } }; @@ -616,6 +637,24 @@ static ctl_table kern_table[] = { }, #endif +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_MODSTOP) + { + .ctl_name = KERN_GRSECURITY, + .procname = "grsecurity", + .mode = 0500, + .child = grsecurity_table, + }, +#endif + +#ifdef CONFIG_PAX_SOFTMODE + { + .ctl_name = KERN_PAX, + .procname = "pax", + .mode = 0500, + .child = pax_table, + }, +#endif + { .ctl_name = 0 } }; @@ -1172,6 +1211,25 @@ static int test_perm(int mode, int op) int sysctl_perm(ctl_table *table, int op) { int error; + if (table->parent != NULL && table->parent->procname != NULL && + table->procname != NULL && + gr_handle_sysctl_mod(table->parent->procname, table->procname, op)) + return -EACCES; + if (gr_handle_chroot_sysctl(op)) + return -EACCES; + error = gr_handle_sysctl(table, op); + if (error) + return error; + error = security_sysctl(table, op); + if (error) + return error; + return test_perm(table->mode, op); +} + +int sysctl_perm_nochk(ctl_table *table, int op) +{ + int error; + error = security_sysctl(table, op); if (error) return error; @@ -1196,13 +1254,14 @@ repeat: if (n == table->ctl_name) { int error; if (table->child) { - if (sysctl_perm(table, 001)) + if (sysctl_perm_nochk(table, 001)) return -EPERM; name++; nlen--; table = table->child; goto repeat; } + error = do_sysctl_strategy(table, name, nlen, oldval, oldlenp, newval, newlen); diff -urNp linux-2.6.22.1/kernel/time.c linux-2.6.22.1/kernel/time.c --- linux-2.6.22.1/kernel/time.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/kernel/time.c 2007-08-02 11:38:48.000000000 -0400 @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -92,6 +93,9 @@ asmlinkage long sys_stime(time_t __user return err; vx_settimeofday(&tv); + + gr_log_timechange(); + return 0; } @@ -198,6 +202,8 @@ asmlinkage long sys_settimeofday(struct return -EFAULT; } + gr_log_timechange(); + return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? 
&new_tz : NULL); } @@ -252,7 +258,7 @@ EXPORT_SYMBOL(current_fs_time); * Avoid unnecessary multiplications/divisions in the * two most common HZ cases: */ -unsigned int inline jiffies_to_msecs(const unsigned long j) +inline unsigned int jiffies_to_msecs(const unsigned long j) { #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) return (MSEC_PER_SEC / HZ) * j; @@ -264,7 +270,7 @@ unsigned int inline jiffies_to_msecs(con } EXPORT_SYMBOL(jiffies_to_msecs); -unsigned int inline jiffies_to_usecs(const unsigned long j) +inline unsigned int jiffies_to_usecs(const unsigned long j) { #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) return (USEC_PER_SEC / HZ) * j; diff -urNp linux-2.6.22.1/lib/extable.c linux-2.6.22.1/lib/extable.c --- linux-2.6.22.1/lib/extable.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/lib/extable.c 2007-08-02 11:38:48.000000000 -0400 @@ -36,8 +36,20 @@ static int cmp_ex(const void *a, const v void sort_extable(struct exception_table_entry *start, struct exception_table_entry *finish) { + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; + + pax_open_kernel(cr0); +#endif + sort(start, finish - start, sizeof(struct exception_table_entry), cmp_ex, NULL); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + } #endif diff -urNp linux-2.6.22.1/lib/radix-tree.c linux-2.6.22.1/lib/radix-tree.c --- linux-2.6.22.1/lib/radix-tree.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/lib/radix-tree.c 2007-08-02 11:38:48.000000000 -0400 @@ -76,7 +76,7 @@ struct radix_tree_preload { int nr; struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH]; }; -DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; +DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, {NULL} }; static inline gfp_t root_gfp_mask(struct radix_tree_root *root) { diff -urNp linux-2.6.22.1/localversion-grsec linux-2.6.22.1/localversion-grsec --- linux-2.6.22.1/localversion-grsec 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/localversion-grsec 2007-08-02 11:09:16.000000000 -0400 @@ -0,0 +1 @@ +-grsec diff -urNp linux-2.6.22.1/Makefile linux-2.6.22.1/Makefile --- linux-2.6.22.1/Makefile 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/Makefile 2007-08-02 11:38:45.000000000 -0400 @@ -312,7 +312,7 @@ LINUXINCLUDE := -Iinclude \ CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE) -CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ +CFLAGS := -Wall -W -Wno-unused -Wno-sign-compare -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common AFLAGS := -D__ASSEMBLY__ @@ -553,7 +553,7 @@ export mod_strip_cmd ifeq ($(KBUILD_EXTMOD),) -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ diff -urNp linux-2.6.22.1/mm/filemap.c linux-2.6.22.1/mm/filemap.c --- linux-2.6.22.1/mm/filemap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/filemap.c 2007-08-02 11:38:48.000000000 -0400 @@ -30,6 +30,7 @@ #include #include #include +#include #include "filemap.h" #include "internal.h" @@ -1704,7 +1705,7 @@ int generic_file_mmap(struct file * file struct address_space *mapping = file->f_mapping; if (!mapping->a_ops->readpage) - return -ENOEXEC; + return -ENODEV; file_accessed(file); vma->vm_ops = &generic_file_vm_ops; return 0; @@ -1968,6 +1969,7 @@ inline int generic_write_checks(struct f *pos = i_size_read(inode); if (limit != RLIM_INFINITY) { + gr_learn_resource(current, 
RLIMIT_FSIZE,*pos, 0); if (*pos >= limit) { send_sig(SIGXFSZ, current, 0); return -EFBIG; diff -urNp linux-2.6.22.1/mm/fremap.c linux-2.6.22.1/mm/fremap.c --- linux-2.6.22.1/mm/fremap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/fremap.c 2007-08-02 11:38:48.000000000 -0400 @@ -84,6 +84,11 @@ int install_page(struct mm_struct *mm, s page_add_file_rmap(page); update_mmu_cache(vma, addr, pte_val); lazy_mmu_prot_update(pte_val); + +#ifdef CONFIG_PAX_SEGMEXEC + pax_mirror_file_pte(vma, addr, page, ptl); +#endif + err = 0; unlock: pte_unmap_unlock(pte, ptl); @@ -177,6 +182,13 @@ asmlinkage long sys_remap_file_pages(uns retry: vma = find_vma(mm, start); +#ifdef CONFIG_PAX_SEGMEXEC + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC)) { + up_read(&mm->mmap_sem); + return err; + } +#endif + /* * Make sure the vma is shared, that it supports prefaulting, * and that the remapped range is valid and fully within diff -urNp linux-2.6.22.1/mm/hugetlb.c linux-2.6.22.1/mm/hugetlb.c --- linux-2.6.22.1/mm/hugetlb.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/hugetlb.c 2007-08-02 11:38:48.000000000 -0400 @@ -433,6 +433,26 @@ void unmap_hugepage_range(struct vm_area } } +#ifdef CONFIG_PAX_SEGMEXEC +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m) +{ + struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *vma_m; + unsigned long address_m; + pte_t *ptep_m; + + vma_m = pax_find_mirror_vma(vma); + if (!vma_m) + return; + + BUG_ON(address >= SEGMEXEC_TASK_SIZE); + address_m = address + SEGMEXEC_TASK_SIZE; + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK); + get_page(page_m); + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0)); +} +#endif + static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t pte) { @@ -466,6 +486,11 @@ static int hugetlb_cow(struct mm_struct /* Break COW */ set_huge_pte_at(mm, address, ptep, make_huge_pte(vma, new_page, 1)); + +#ifdef CONFIG_PAX_SEGMEXEC + pax_mirror_huge_pte(vma, address, new_page); +#endif + /* Make the old page be freed below */ new_page = old_page; } @@ -474,7 +499,7 @@ static int hugetlb_cow(struct mm_struct return VM_FAULT_MINOR; } -int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, +static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, int write_access) { int ret = VM_FAULT_SIGBUS; @@ -536,6 +561,10 @@ retry: && (vma->vm_flags & VM_SHARED))); set_huge_pte_at(mm, address, ptep, new_pte); +#ifdef CONFIG_PAX_SEGMEXEC + pax_mirror_huge_pte(vma, address, page); +#endif + if (write_access && !(vma->vm_flags & VM_SHARED)) { /* Optimization, do the COW without a second fault */ ret = hugetlb_cow(mm, vma, address, ptep, new_pte); @@ -562,6 +591,27 @@ int hugetlb_fault(struct mm_struct *mm, int ret; static DEFINE_MUTEX(hugetlb_instantiation_mutex); +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *vma_m; + + vma_m = pax_find_mirror_vma(vma); + if (vma_m) { + unsigned long address_m; + + if (vma->vm_start > vma_m->vm_start) { + address_m = address; + address -= SEGMEXEC_TASK_SIZE; + vma = vma_m; + } else + address_m = address + SEGMEXEC_TASK_SIZE; + + if (!huge_pte_alloc(mm, address_m)) + return VM_FAULT_OOM; + address_m &= HPAGE_MASK; + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE); + } +#endif + ptep = huge_pte_alloc(mm, address); if (!ptep) return VM_FAULT_OOM; diff -urNp 
linux-2.6.22.1/mm/madvise.c linux-2.6.22.1/mm/madvise.c --- linux-2.6.22.1/mm/madvise.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/madvise.c 2007-08-02 11:38:48.000000000 -0400 @@ -43,6 +43,10 @@ static long madvise_behavior(struct vm_a pgoff_t pgoff; int new_flags = vma->vm_flags; +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *vma_m; +#endif + switch (behavior) { case MADV_NORMAL: new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; @@ -92,6 +96,13 @@ success: /* * vm_flags is protected by the mmap_sem held in write mode. */ + +#ifdef CONFIG_PAX_SEGMEXEC + vma_m = pax_find_mirror_vma(vma); + if (vma_m) + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT); +#endif + vma->vm_flags = new_flags; out: @@ -236,6 +247,17 @@ madvise_vma(struct vm_area_struct *vma, case MADV_DONTNEED: error = madvise_dontneed(vma, prev, start, end); + +#ifdef CONFIG_PAX_SEGMEXEC + if (!error) { + struct vm_area_struct *vma_m, *prev_m; + + vma_m = pax_find_mirror_vma(vma); + if (vma_m) + error = madvise_dontneed(vma_m, &prev_m, start + SEGMEXEC_TASK_SIZE, end + SEGMEXEC_TASK_SIZE); + } +#endif + break; default: @@ -306,6 +328,16 @@ asmlinkage long sys_madvise(unsigned lon if (end < start) goto out; +#ifdef CONFIG_PAX_SEGMEXEC + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { + if (end > SEGMEXEC_TASK_SIZE) + goto out; + } else +#endif + + if (end > TASK_SIZE) + goto out; + error = 0; if (end == start) goto out; diff -urNp linux-2.6.22.1/mm/memory.c linux-2.6.22.1/mm/memory.c --- linux-2.6.22.1/mm/memory.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/memory.c 2007-08-02 11:38:48.000000000 -0400 @@ -50,6 +50,7 @@ #include #include #include +#include #include #include @@ -322,6 +323,11 @@ int __pte_alloc(struct mm_struct *mm, pm int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) { + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long cr0; +#endif + pte_t *new = pte_alloc_one_kernel(&init_mm, address); if (!new) return -ENOMEM; @@ -329,8 +335,19 @@ int __pte_alloc_kernel(pmd_t *pmd, unsig spin_lock(&init_mm.page_table_lock); if (pmd_present(*pmd)) /* Another has populated it */ pte_free_kernel(new); - else + else { + +#ifdef CONFIG_PAX_KERNEXEC + pax_open_kernel(cr0); +#endif + pmd_populate_kernel(&init_mm, pmd, new); + +#ifdef CONFIG_PAX_KERNEXEC + pax_close_kernel(cr0); +#endif + + } spin_unlock(&init_mm.page_table_lock); return 0; } @@ -995,7 +1012,7 @@ int get_user_pages(struct task_struct *t struct vm_area_struct *vma; unsigned int foll_flags; - vma = find_extend_vma(mm, start); + vma = find_vma(mm, start); if (!vma && in_gate_area(tsk, start)) { unsigned long pg = start & PAGE_MASK; struct vm_area_struct *gate_vma = get_gate_vma(tsk); @@ -1035,7 +1052,7 @@ int get_user_pages(struct task_struct *t continue; } - if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP)) + if (!vma || start < vma->vm_start || (vma->vm_flags & (VM_IO | VM_PFNMAP)) || !(vm_flags & vma->vm_flags)) return i ? 
: -EFAULT; @@ -1608,6 +1625,193 @@ static inline void cow_user_page(struct copy_user_highpage(dst, src, va, vma); } +#ifdef CONFIG_PAX_SEGMEXEC +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) +{ + struct mm_struct *mm = vma->vm_mm; + spinlock_t *ptl; + pte_t *pte, entry; + + pte = pte_offset_map_lock(mm, pmd, address, &ptl); + entry = *pte; + if (!pte_present(entry)) { + if (!pte_none(entry)) { + BUG_ON(pte_file(entry)); + free_swap_and_cache(pte_to_swp_entry(entry)); + pte_clear_not_present_full(mm, address, pte, 0); + } + } else { + struct page *page; + + page = vm_normal_page(vma, address, entry); + if (page) { + flush_cache_page(vma, address, pte_pfn(entry)); + flush_icache_page(vma, page); + } + ptep_clear_flush(vma, address, pte); + BUG_ON(pte_dirty(entry)); + if (page) { + update_hiwater_rss(mm); + if (PageAnon(page)) + dec_mm_counter(mm, anon_rss); + else + dec_mm_counter(mm, file_rss); + page_remove_rmap(page, vma); + page_cache_release(page); + } + } + pte_unmap_unlock(pte, ptl); +} + +/* PaX: if vma is mirrored, synchronize the mirror's PTE + * + * the ptl of the lower mapped page is held on entry and is not released on exit + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc) + */ +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long address_m; + spinlock_t *ptl_m; + struct vm_area_struct *vma_m; + pmd_t *pmd_m; + pte_t *pte_m, entry_m; + + BUG_ON(!page_m || !PageAnon(page_m)); + + vma_m = pax_find_mirror_vma(vma); + if (!vma_m) + return; + + BUG_ON(!PageLocked(page_m)); + BUG_ON(address >= SEGMEXEC_TASK_SIZE); + address_m = address + SEGMEXEC_TASK_SIZE; + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); + pte_m = pte_offset_map_nested(pmd_m, address_m); + ptl_m = pte_lockptr(mm, pmd_m); + if (ptl != ptl_m) { + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); + if (!pte_none(*pte_m)) { + spin_unlock(ptl_m); + pte_unmap_nested(pte_m); + unlock_page(page_m); + return; + } + } + + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot); + page_cache_get(page_m); + page_add_anon_rmap(page_m, vma_m, address_m); + inc_mm_counter(mm, anon_rss); + set_pte_at(mm, address_m, pte_m, entry_m); + update_mmu_cache(vma_m, address_m, entry_m); + lazy_mmu_prot_update(entry_m); + if (ptl != ptl_m) + spin_unlock(ptl_m); + pte_unmap_nested(pte_m); + unlock_page(page_m); +} + +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long address_m, pfn_m; + spinlock_t *ptl_m; + struct vm_area_struct *vma_m; + pmd_t *pmd_m; + pte_t *pte_m, entry_m; + + BUG_ON(!page_m || PageAnon(page_m)); + + vma_m = pax_find_mirror_vma(vma); + if (!vma_m) + return; + + BUG_ON(address >= SEGMEXEC_TASK_SIZE); + address_m = address + SEGMEXEC_TASK_SIZE; + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); + pte_m = pte_offset_map_nested(pmd_m, address_m); + ptl_m = pte_lockptr(mm, pmd_m); + if (ptl != ptl_m) { + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); + if (!pte_none(*pte_m)) { + spin_unlock(ptl_m); + pte_unmap_nested(pte_m); + return; + } + } + + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot); + page_cache_get(page_m); + page_add_file_rmap(page_m); + inc_mm_counter(mm, file_rss); + set_pte_at(mm, address_m, pte_m, entry_m); 
+ update_mmu_cache(vma_m, address_m, entry_m); + lazy_mmu_prot_update(entry_m); + if (ptl != ptl_m) + spin_unlock(ptl_m); + pte_unmap_nested(pte_m); +} + +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long address_m; + spinlock_t *ptl_m; + struct vm_area_struct *vma_m; + pmd_t *pmd_m; + pte_t *pte_m, entry_m; + + vma_m = pax_find_mirror_vma(vma); + if (!vma_m) + return; + + BUG_ON(address >= SEGMEXEC_TASK_SIZE); + address_m = address + SEGMEXEC_TASK_SIZE; + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); + pte_m = pte_offset_map_nested(pmd_m, address_m); + ptl_m = pte_lockptr(mm, pmd_m); + if (ptl != ptl_m) { + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); + if (!pte_none(*pte_m)) { + spin_unlock(ptl_m); + pte_unmap_nested(pte_m); + return; + } + } + + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot); + set_pte_at(mm, address_m, pte_m, entry_m); + if (ptl != ptl_m) + spin_unlock(ptl_m); + pte_unmap_nested(pte_m); +} + +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, spinlock_t *ptl) +{ + struct page *page_m; + pte_t entry; + + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC)) + return; + + entry = *pte; + page_m = vm_normal_page(vma, address, entry); + if (!page_m) + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl); + else if (PageAnon(page_m)) { + spin_unlock(ptl); + lock_page(page_m); + spin_lock(ptl); + if (pte_same(entry, *pte)) + pax_mirror_anon_pte(vma, address, page_m, ptl); + else + unlock_page(page_m); + } else + pax_mirror_file_pte(vma, address, page_m, ptl); +} +#endif + /* * This routine handles present pages, when users try to write * to a shared page. It is done by copying the page to a new address @@ -1724,6 +1928,12 @@ gotten: */ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (likely(pte_same(*page_table, orig_pte))) { + +#ifdef CONFIG_PAX_SEGMEXEC + if (pax_find_mirror_vma(vma)) + BUG_ON(TestSetPageLocked(new_page)); +#endif + if (old_page) { page_remove_rmap(old_page, vma); if (!PageAnon(old_page)) { @@ -1748,6 +1957,10 @@ gotten: lru_cache_add_active(new_page); page_add_new_anon_rmap(new_page, vma, address); +#ifdef CONFIG_PAX_SEGMEXEC + pax_mirror_anon_pte(vma, address, new_page, ptl); +#endif + /* Free the old page..
*/ new_page = old_page; ret |= VM_FAULT_WRITE; @@ -2008,6 +2221,7 @@ int vmtruncate(struct inode * inode, lof do_expand: limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; + gr_learn_resource(current, RLIMIT_FSIZE, offset, 1); if (limit != RLIM_INFINITY && offset > limit) goto out_sig; if (offset > inode->i_sb->s_maxbytes) @@ -2189,6 +2403,11 @@ static int do_swap_page(struct mm_struct swap_free(entry); if (vm_swap_full()) remove_exclusive_swap_page(page); + +#ifdef CONFIG_PAX_SEGMEXEC + if (write_access || !pax_find_mirror_vma(vma)) +#endif + unlock_page(page); if (write_access) { @@ -2201,6 +2420,11 @@ static int do_swap_page(struct mm_struct /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, pte); lazy_mmu_prot_update(pte); + +#ifdef CONFIG_PAX_SEGMEXEC + pax_mirror_anon_pte(vma, address, page, ptl); +#endif + unlock: pte_unmap_unlock(page_table, ptl); out: @@ -2241,6 +2465,12 @@ static int do_anonymous_page(struct mm_s page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (!pte_none(*page_table)) goto release; + +#ifdef CONFIG_PAX_SEGMEXEC + if (pax_find_mirror_vma(vma)) + BUG_ON(TestSetPageLocked(page)); +#endif + inc_mm_counter(mm, anon_rss); lru_cache_add_active(page); page_add_new_anon_rmap(page, vma, address); @@ -2263,6 +2493,14 @@ static int do_anonymous_page(struct mm_s /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, entry); lazy_mmu_prot_update(entry); + +#ifdef CONFIG_PAX_SEGMEXEC + if (write_access) + pax_mirror_anon_pte(vma, address, page, ptl); + else + pax_mirror_file_pte(vma, address, page, ptl); +#endif + unlock: pte_unmap_unlock(page_table, ptl); return VM_FAULT_MINOR; @@ -2382,6 +2620,12 @@ retry: */ /* Only go through if we didn't race with anybody else... 
*/ if (pte_none(*page_table)) { + +#ifdef CONFIG_PAX_SEGMEXEC + if (anon && pax_find_mirror_vma(vma)) + BUG_ON(TestSetPageLocked(new_page)); +#endif + flush_icache_page(vma, new_page); entry = mk_pte(new_page, vma->vm_page_prot); if (write_access) @@ -2408,6 +2651,14 @@ retry: /* no need to invalidate: a not-present page shouldn't be cached */ update_mmu_cache(vma, address, entry); lazy_mmu_prot_update(entry); + +#ifdef CONFIG_PAX_SEGMEXEC + if (anon) + pax_mirror_anon_pte(vma, address, new_page, ptl); + else + pax_mirror_file_pte(vma, address, new_page, ptl); +#endif + unlock: pte_unmap_unlock(page_table, ptl); if (dirty_page) { @@ -2465,6 +2716,11 @@ static noinline int do_no_pfn(struct mm_ if (write_access) entry = maybe_mkwrite(pte_mkdirty(entry), vma); set_pte_at(mm, address, page_table, entry); + +#ifdef CONFIG_PAX_SEGMEXEC + pax_mirror_pfn_pte(vma, address, pfn, ptl); +#endif + } pte_unmap_unlock(page_table, ptl); return ret; @@ -2574,6 +2830,11 @@ static inline int handle_pte_fault(struc if (write_access) flush_tlb_page(vma, address); } + +#ifdef CONFIG_PAX_SEGMEXEC + pax_mirror_pte(vma, address, pte, ptl); +#endif + unlock: pte_unmap_unlock(pte, ptl); return VM_FAULT_MINOR; @@ -2590,6 +2851,10 @@ int __handle_mm_fault(struct mm_struct * pmd_t *pmd; pte_t *pte; +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *vma_m; +#endif + __set_current_state(TASK_RUNNING); count_vm_event(PGFAULT); @@ -2597,6 +2862,34 @@ int __handle_mm_fault(struct mm_struct * if (unlikely(is_vm_hugetlb_page(vma))) return hugetlb_fault(mm, vma, address, write_access); +#ifdef CONFIG_PAX_SEGMEXEC + vma_m = pax_find_mirror_vma(vma); + if (vma_m) { + unsigned long address_m; + pgd_t *pgd_m; + pud_t *pud_m; + pmd_t *pmd_m; + + if (vma->vm_start > vma_m->vm_start) { + address_m = address; + address -= SEGMEXEC_TASK_SIZE; + vma = vma_m; + } else + address_m = address + SEGMEXEC_TASK_SIZE; + + pgd_m = pgd_offset(mm, address_m); + pud_m = pud_alloc(mm, pgd_m, address_m); + if (!pud_m) + return VM_FAULT_OOM; + pmd_m = pmd_alloc(mm, pud_m, address_m); + if (!pmd_m) + return VM_FAULT_OOM; + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m)) + return VM_FAULT_OOM; + pax_unmap_mirror_pte(vma_m, address_m, pmd_m); + } +#endif + pgd = pgd_offset(mm, address); pud = pud_alloc(mm, pgd, address); if (!pud) @@ -2732,7 +3025,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; - gate_vma.vm_page_prot = __P101; + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); /* * Make sure the vDSO gets into every core dump. 
* Dumping its contents makes post-mortem fully interpretable later diff -urNp linux-2.6.22.1/mm/mempolicy.c linux-2.6.22.1/mm/mempolicy.c --- linux-2.6.22.1/mm/mempolicy.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/mempolicy.c 2007-08-02 11:38:48.000000000 -0400 @@ -401,6 +401,10 @@ static int mbind_range(struct vm_area_st struct vm_area_struct *next; int err; +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *vma_m; +#endif + err = 0; for (; vma && vma->vm_start < end; vma = next) { next = vma->vm_next; @@ -412,6 +416,16 @@ static int mbind_range(struct vm_area_st err = policy_vma(vma, new); if (err) break; + +#ifdef CONFIG_PAX_SEGMEXEC + vma_m = pax_find_mirror_vma(vma); + if (vma_m) { + err = policy_vma(vma_m, new); + if (err) + break; + } +#endif + } return err; } @@ -731,7 +745,7 @@ static struct page *new_vma_page(struct } #endif -long do_mbind(unsigned long start, unsigned long len, +static long do_mbind(unsigned long start, unsigned long len, unsigned long mode, nodemask_t *nmask, unsigned long flags) { struct vm_area_struct *vma; @@ -759,6 +773,17 @@ long do_mbind(unsigned long start, unsig if (end < start) return -EINVAL; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) { + if (end > SEGMEXEC_TASK_SIZE) + return -EINVAL; + } else +#endif + + if (end > TASK_SIZE) + return -EINVAL; + if (end == start) return 0; diff -urNp linux-2.6.22.1/mm/mlock.c linux-2.6.22.1/mm/mlock.c --- linux-2.6.22.1/mm/mlock.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/mlock.c 2007-08-02 11:38:48.000000000 -0400 @@ -13,6 +13,7 @@ #include #include #include +#include int can_do_mlock(void) { @@ -95,6 +96,17 @@ static int do_mlock(unsigned long start, return -EINVAL; if (end == start) return 0; + +#ifdef CONFIG_PAX_SEGMEXEC + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { + if (end > SEGMEXEC_TASK_SIZE) + return -EINVAL; + } else +#endif + + if (end > TASK_SIZE) + return -EINVAL; + vma = find_vma_prev(current->mm, start, &prev); if (!vma || vma->vm_start > start) return -ENOMEM; @@ -155,6 +183,7 @@ asmlinkage long sys_mlock(unsigned long lock_limit >>= PAGE_SHIFT; /* check against resource limits */ + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1); if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = do_mlock(start, len, 1); out: @@ -173,10 +202,10 @@ asmlinkage long sys_munlock(unsigned lon static int do_mlockall(int flags) { struct vm_area_struct * vma, * prev = NULL; - unsigned int def_flags = 0; + unsigned int def_flags = current->mm->def_flags & ~VM_LOCKED; if (flags & MCL_FUTURE) - def_flags = VM_LOCKED; + def_flags |= VM_LOCKED; current->mm->def_flags = def_flags; if (flags == MCL_FUTURE) goto out; @@ -184,6 +197,12 @@ static int do_mlockall(int flags) for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { unsigned int newflags; +#ifdef CONFIG_PAX_SEGMEXEC + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) + break; +#endif + + BUG_ON(vma->vm_end > TASK_SIZE); newflags = vma->vm_flags | VM_LOCKED; if (!(flags & MCL_CURRENT)) newflags &= ~VM_LOCKED; @@ -213,6 +243,7 @@ asmlinkage long sys_mlockall(int flags) lock_limit >>= PAGE_SHIFT; ret = -ENOMEM; + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1); if (!vx_vmlocked_avail(current->mm, current->mm->total_vm)) goto out; if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || diff -urNp linux-2.6.22.1/mm/mmap.c linux-2.6.22.1/mm/mmap.c --- linux-2.6.22.1/mm/mmap.c 
2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/mmap.c 2007-08-02 11:38:48.000000000 -0400 @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -60,15 +61,23 @@ static void unmap_region(struct mm_struc * x: (no) no x: (no) yes x: (no) yes x: (yes) yes * */ -pgprot_t protection_map[16] = { +pgprot_t protection_map[16] __read_only = { __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 }; pgprot_t vm_get_page_prot(unsigned long vm_flags) { - return protection_map[vm_flags & - (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]; + pgprot_t prot = protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]; + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) + if (!nx_enabled && + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC && + (vm_flags & (VM_READ | VM_WRITE))) + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot))))); +#endif + + return prot; } EXPORT_SYMBOL(vm_get_page_prot); @@ -225,6 +234,7 @@ static struct vm_area_struct *remove_vma struct vm_area_struct *next = vma->vm_next; might_sleep(); + BUG_ON(vma->vm_mirror); if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (vma->vm_file) @@ -252,6 +262,7 @@ asmlinkage unsigned long sys_brk(unsigne * not page aligned -Ram Gupta */ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; + gr_learn_resource(current, RLIMIT_DATA, brk - mm->start_data, 1); if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) goto out; @@ -352,8 +363,12 @@ find_vma_prepare(struct mm_struct *mm, u if (vma_tmp->vm_end > addr) { vma = vma_tmp; - if (vma_tmp->vm_start <= addr) - return vma; + if (vma_tmp->vm_start <= addr) { +//printk("PAX: prep: %08lx-%08lx %08lx pr:%p l:%p pa:%p ", +//vma->vm_start, vma->vm_end, addr, *pprev, *rb_link, *rb_parent); +//__print_symbol("%s\n", __builtin_extract_return_addr(__builtin_return_address(0))); + break; + } __rb_link = &__rb_parent->rb_left; } else { rb_prev = __rb_parent; @@ -677,6 +692,12 @@ static int can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) { + +#ifdef CONFIG_PAX_SEGMEXEC + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE) + return 0; +#endif + if (is_mergeable_vma(vma, file, vm_flags) && is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { if (vma->vm_pgoff == vm_pgoff) @@ -696,6 +717,12 @@ static int can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) { + +#ifdef CONFIG_PAX_SEGMEXEC + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE) + return 0; +#endif + if (is_mergeable_vma(vma, file, vm_flags) && is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { pgoff_t vm_pglen; @@ -738,12 +765,19 @@ can_vma_merge_after(struct vm_area_struc struct vm_area_struct *vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, - struct anon_vma *anon_vma, struct file *file, + struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy) { pgoff_t pglen = (end - addr) >> PAGE_SHIFT; struct vm_area_struct *area, *next; +#ifdef CONFIG_PAX_SEGMEXEC + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE; + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL; + + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end); 
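+ /* addr_m/end_m shadow the merge window in the SEGMEXEC mirror half, one SEGMEXEC_TASK_SIZE above; each vma_adjust() on a lower vma below is repeated on its *_m counterpart so the two halves stay congruent */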
+#endif + /* * We later require that vma->vm_flags == vm_flags, * so this tests vma->vm_flags & VM_SPECIAL, too. @@ -759,6 +793,15 @@ struct vm_area_struct *vma_merge(struct if (next && next->vm_end == end) /* cases 6, 7, 8 */ next = next->vm_next; +#ifdef CONFIG_PAX_SEGMEXEC + if (prev) + prev_m = pax_find_mirror_vma(prev); + if (area) + area_m = pax_find_mirror_vma(area); + if (next) + next_m = pax_find_mirror_vma(next); +#endif + /* * Can it merge with the predecessor? */ @@ -778,9 +825,24 @@ struct vm_area_struct *vma_merge(struct /* cases 1, 6 */ vma_adjust(prev, prev->vm_start, next->vm_end, prev->vm_pgoff, NULL); - } else /* cases 2, 5, 7 */ + +#ifdef CONFIG_PAX_SEGMEXEC + if (prev_m) + vma_adjust(prev_m, prev_m->vm_start, + next_m->vm_end, prev_m->vm_pgoff, NULL); +#endif + + } else { /* cases 2, 5, 7 */ vma_adjust(prev, prev->vm_start, end, prev->vm_pgoff, NULL); + +#ifdef CONFIG_PAX_SEGMEXEC + if (prev_m) + vma_adjust(prev_m, prev_m->vm_start, + end_m, prev_m->vm_pgoff, NULL); +#endif + + } return prev; } @@ -791,12 +853,27 @@ struct vm_area_struct *vma_merge(struct mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen)) { - if (prev && addr < prev->vm_end) /* case 4 */ + if (prev && addr < prev->vm_end) { /* case 4 */ vma_adjust(prev, prev->vm_start, addr, prev->vm_pgoff, NULL); - else /* cases 3, 8 */ + +#ifdef CONFIG_PAX_SEGMEXEC + if (prev_m) + vma_adjust(prev_m, prev_m->vm_start, + addr_m, prev_m->vm_pgoff, NULL); +#endif + + } else { /* cases 3, 8 */ vma_adjust(area, addr, next->vm_end, next->vm_pgoff - pglen, NULL); + +#ifdef CONFIG_PAX_SEGMEXEC + if (area_m) + vma_adjust(area_m, addr_m, next_m->vm_end, + next_m->vm_pgoff - pglen, NULL); +#endif + + } return area; } @@ -871,14 +948,11 @@ none: void vm_stat_account(struct mm_struct *mm, unsigned long flags, struct file *file, long pages) { - const unsigned long stack_flags - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN); - if (file) { mm->shared_vm += pages; if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) mm->exec_vm += pages; - } else if (flags & stack_flags) + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN)) mm->stack_vm += pages; if (flags & (VM_RESERVED|VM_IO)) mm->reserved_vm += pages; @@ -903,28 +977,32 @@ unsigned long do_mmap_pgoff(struct file int accountable = 1; unsigned long charged = 0, reqprot = prot; +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *vma_m = NULL, *prev_m; +#endif + /* * Does the application expect PROT_READ to imply PROT_EXEC? * * (the exception is when the underlying filesystem is noexec * mounted, in which case we dont add PROT_EXEC.) */ - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC)) if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC))) prot |= PROT_EXEC; if (!len) return -EINVAL; - error = arch_mmap_check(addr, len, flags); - if (error) - return error; - /* Careful about overflows.. */ len = PAGE_ALIGN(len); if (!len || len > TASK_SIZE) return -ENOMEM; + error = arch_mmap_check(addr, len, flags); + if (error) + return error; + /* offset overflow? */ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) return -EOVERFLOW; @@ -936,7 +1015,7 @@ unsigned long do_mmap_pgoff(struct file /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. 
*/ - addr = get_unmapped_area(file, addr, len, pgoff, flags); + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0)); if (addr & ~PAGE_MASK) return addr; @@ -947,6 +1026,26 @@ unsigned long do_mmap_pgoff(struct file vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { + +#ifdef CONFIG_PAX_MPROTECT + if (mm->pax_flags & MF_PAX_MPROTECT) { + if ((prot & (PROT_WRITE | PROT_EXEC)) != PROT_EXEC) + vm_flags &= ~(VM_EXEC | VM_MAYEXEC); + else + vm_flags &= ~(VM_WRITE | VM_MAYWRITE); + } +#endif + + } +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file) + vm_flags &= ~VM_PAGEEXEC; +#endif + if (flags & MAP_LOCKED) { if (!can_do_mlock()) return -EPERM; @@ -959,6 +1058,7 @@ unsigned long do_mmap_pgoff(struct file locked += mm->locked_vm; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; lock_limit >>= PAGE_SHIFT; + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; } @@ -1027,14 +1127,17 @@ unsigned long do_mmap_pgoff(struct file if (error) return error; + if (!gr_acl_handle_mmap(file, prot)) + return -EACCES; + /* Clear old maps */ error = -ENOMEM; -munmap_back: vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); if (vma && vma->vm_start < addr + len) { if (do_munmap(mm, addr, len)) return -ENOMEM; - goto munmap_back; + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); + BUG_ON(vma && vma->vm_start < addr + len); } /* Check against address space limit. 
*/ @@ -1078,12 +1181,22 @@ munmap_back: goto unacct_error; } +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) { + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + if (!vma_m) { + kmem_cache_free(vm_area_cachep, vma); + error = -ENOMEM; + goto unacct_error; + } + } +#endif + vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags; - vma->vm_page_prot = protection_map[vm_flags & - (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]; + vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; if (file) { @@ -1101,6 +1214,14 @@ munmap_back: error = file->f_op->mmap(file, vma); if (error) goto unmap_and_free_vma; + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) { + vma->vm_flags |= VM_PAGEEXEC; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + } +#endif + } else if (vm_flags & VM_SHARED) { error = shmem_zero_setup(vma); if (error) @@ -1125,13 +1246,18 @@ munmap_back: vm_flags = vma->vm_flags; if (vma_wants_writenotify(vma)) - vma->vm_page_prot = - protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)]; + vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED); if (!file || !vma_merge(mm, prev, addr, vma->vm_end, vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) { file = vma->vm_file; vma_link(mm, vma, prev, rb_link, rb_parent); + +#ifdef CONFIG_PAX_SEGMEXEC + if (vma_m) + pax_mirror_vma(vma_m, vma); +#endif + if (correct_wcount) atomic_inc(&inode->i_writecount); } else { @@ -1142,10 +1268,12 @@ munmap_back: } mpol_free(vma_policy(vma)); kmem_cache_free(vm_area_cachep, vma); + vma = NULL; } out: vx_vmpages_add(mm, len >> PAGE_SHIFT); vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); + track_exec_limit(mm, addr, addr + len, vm_flags); if (vm_flags & VM_LOCKED) { vx_vmlocked_add(mm, len >> PAGE_SHIFT); make_pages_present(addr, addr + len); @@ -1168,6 +1296,12 @@ unmap_and_free_vma: unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); charged = 0; free_vma: + +#ifdef CONFIG_PAX_SEGMEXEC + if (vma_m) + kmem_cache_free(vm_area_cachep, vma_m); +#endif + kmem_cache_free(vm_area_cachep, vma); unacct_error: if (charged) @@ -1203,6 +1337,10 @@ arch_get_unmapped_area(struct file *filp if (flags & MAP_FIXED) return addr; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); @@ -1211,10 +1349,10 @@ arch_get_unmapped_area(struct file *filp return addr; } if (len > mm->cached_hole_size) { - start_addr = addr = mm->free_area_cache; + start_addr = addr = mm->free_area_cache; } else { - start_addr = addr = TASK_UNMAPPED_BASE; - mm->cached_hole_size = 0; + start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; } full_search: @@ -1225,9 +1363,8 @@ full_search: * Start a new search - just in case we missed * some holes. */ - if (start_addr != TASK_UNMAPPED_BASE) { - addr = TASK_UNMAPPED_BASE; - start_addr = addr; + if (start_addr != mm->mmap_base) { + start_addr = addr = mm->mmap_base; mm->cached_hole_size = 0; goto full_search; } @@ -1249,10 +1386,16 @@ full_search: void arch_unmap_area(struct mm_struct *mm, unsigned long addr) { + +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr) + return; +#endif + /* * Is this a new hole at the lowest possible address? 
*/ - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) { + if (addr >= mm->mmap_base && addr < mm->free_area_cache) { mm->free_area_cache = addr; mm->cached_hole_size = ~0UL; } @@ -1270,7 +1413,7 @@ arch_get_unmapped_area_topdown(struct fi { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; - unsigned long addr = addr0; + unsigned long base = mm->mmap_base, addr = addr0; /* requested length too big for entire address space */ if (len > TASK_SIZE) @@ -1279,6 +1422,10 @@ arch_get_unmapped_area_topdown(struct fi if (flags & MAP_FIXED) return addr; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); @@ -1336,13 +1483,21 @@ bottomup: * can happen with large stack limits and large mmap() * allocations. */ + mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + + mm->free_area_cache = mm->mmap_base; mm->cached_hole_size = ~0UL; - mm->free_area_cache = TASK_UNMAPPED_BASE; addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); /* * Restore the topdown base: */ - mm->free_area_cache = mm->mmap_base; + mm->mmap_base = base; + mm->free_area_cache = base; mm->cached_hole_size = ~0UL; return addr; @@ -1351,6 +1506,12 @@ bottomup: void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) { + +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr) + return; +#endif + /* * Is this a new hole at the highest possible address? */ @@ -1358,8 +1519,10 @@ void arch_unmap_area_topdown(struct mm_s mm->free_area_cache = addr; /* dont allow allocations above current base */ - if (mm->free_area_cache > mm->mmap_base) + if (mm->free_area_cache > mm->mmap_base) { mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; + } } unsigned long @@ -1459,6 +1622,32 @@ out: return prev ? prev->vm_next : vma; } +#ifdef CONFIG_PAX_SEGMEXEC +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma) +{ + struct vm_area_struct *vma_m; + + BUG_ON(!vma || vma->vm_start >= vma->vm_end); + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) { + BUG_ON(vma->vm_mirror); + return NULL; + } + BUG_ON(vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < vma->vm_start - SEGMEXEC_TASK_SIZE - 1); + vma_m = vma->vm_mirror; + BUG_ON(!vma_m || vma_m->vm_mirror != vma); + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start); + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma); + +#ifdef CONFIG_PAX_MPROTECT + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_MAYNOTWRITE)); +#else + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED)); +#endif + + return vma_m; +} +#endif + /* * Verify that the stack growth is acceptable and * update accounting. 
This is shared with both the @@ -1475,6 +1658,7 @@ static int acct_stack_growth(struct vm_a return -ENOMEM; /* Stack limit test */ + gr_learn_resource(current, RLIMIT_STACK, size, 1); if (size > rlim[RLIMIT_STACK].rlim_cur) return -ENOMEM; @@ -1484,6 +1668,7 @@ static int acct_stack_growth(struct vm_a unsigned long limit; locked = mm->locked_vm + grow; limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); if (locked > limit && !capable(CAP_IPC_LOCK)) return -ENOMEM; } @@ -1567,23 +1752,6 @@ int expand_stack(struct vm_area_struct * { return expand_upwards(vma, address); } - -struct vm_area_struct * -find_extend_vma(struct mm_struct *mm, unsigned long addr) -{ - struct vm_area_struct *vma, *prev; - - addr &= PAGE_MASK; - vma = find_vma_prev(mm, addr, &prev); - if (vma && (vma->vm_start <= addr)) - return vma; - if (!prev || expand_stack(prev, addr)) - return NULL; - if (prev->vm_flags & VM_LOCKED) { - make_pages_present(addr, prev->vm_end); - } - return prev; -} #else /* * vma is the first one with address < vma->vm_start. Have to extend vma. @@ -1612,6 +1780,12 @@ int expand_stack(struct vm_area_struct * if (address < vma->vm_start) { unsigned long size, grow; +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *vma_m; + + vma_m = pax_find_mirror_vma(vma); +#endif + size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; @@ -1619,34 +1794,20 @@ int expand_stack(struct vm_area_struct * if (!error) { vma->vm_start = address; vma->vm_pgoff -= grow; + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags); + +#ifdef CONFIG_PAX_SEGMEXEC + if (vma_m) { + vma_m->vm_start -= grow << PAGE_SHIFT; + vma_m->vm_pgoff -= grow; + } +#endif + } } anon_vma_unlock(vma); return error; } - -struct vm_area_struct * -find_extend_vma(struct mm_struct * mm, unsigned long addr) -{ - struct vm_area_struct * vma; - unsigned long start; - - addr &= PAGE_MASK; - vma = find_vma(mm,addr); - if (!vma) - return NULL; - if (vma->vm_start <= addr) - return vma; - if (!(vma->vm_flags & VM_GROWSDOWN)) - return NULL; - start = vma->vm_start; - if (expand_stack(vma, addr)) - return NULL; - if (vma->vm_flags & VM_LOCKED) { - make_pages_present(addr, start); - } - return vma; -} #endif /* @@ -1662,6 +1827,10 @@ static void remove_vma_list(struct mm_st do { long nrpages = vma_pages(vma); +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_end <= SEGMEXEC_TASK_SIZE)) +#endif + vx_vmpages_sub(mm, nrpages); if (vma->vm_flags & VM_LOCKED) vx_vmlocked_sub(mm, nrpages); @@ -1708,6 +1869,16 @@ detach_vmas_to_be_unmapped(struct mm_str insertion_point = (prev ? &prev->vm_next : &mm->mmap); do { + +#ifdef CONFIG_PAX_SEGMEXEC + if (vma->vm_mirror) { + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma); + vma->vm_mirror->vm_mirror = NULL; + vma->vm_mirror->vm_flags &= ~VM_EXEC; + vma->vm_mirror = NULL; + } +#endif + rb_erase(&vma->vm_rb, &mm->mm_rb); mm->map_count--; tail_vma = vma; @@ -1727,6 +1897,112 @@ detach_vmas_to_be_unmapped(struct mm_str * Split a vma into two pieces at address 'addr', a new vma is allocated * either for the first part or the tail. 
*/ + +#ifdef CONFIG_PAX_SEGMEXEC +int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + unsigned long addr, int new_below) +{ + struct mempolicy *pol, *pol_m; + struct vm_area_struct *new, *vma_m, *new_m = NULL; + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE; + + if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK)) + return -EINVAL; + + vma_m = pax_find_mirror_vma(vma); + if (vma_m) { + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE); + if (mm->map_count >= sysctl_max_map_count-1) + return -ENOMEM; + } else if (mm->map_count >= sysctl_max_map_count) + return -ENOMEM; + + new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + if (!new) + return -ENOMEM; + + if (vma_m) { + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + if (!new_m) { + kmem_cache_free(vm_area_cachep, new); + return -ENOMEM; + } + } + + /* most fields are the same, copy all, and then fixup */ + *new = *vma; + + if (new_below) + new->vm_end = addr; + else { + new->vm_start = addr; + new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); + } + + if (vma_m) { + *new_m = *vma_m; + new_m->vm_mirror = new; + new->vm_mirror = new_m; + + if (new_below) + new_m->vm_end = addr_m; + else { + new_m->vm_start = addr_m; + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT); + } + } + + pol = mpol_copy(vma_policy(vma)); + if (IS_ERR(pol)) { + if (new_m) + kmem_cache_free(vm_area_cachep, new_m); + kmem_cache_free(vm_area_cachep, new); + return PTR_ERR(pol); + } + + if (vma_m) { + pol_m = mpol_copy(vma_policy(vma_m)); + if (IS_ERR(pol_m)) { + mpol_free(pol); + kmem_cache_free(vm_area_cachep, new_m); + kmem_cache_free(vm_area_cachep, new); + return PTR_ERR(pol); + } + } + + vma_set_policy(new, pol); + + if (new->vm_file) + get_file(new->vm_file); + + if (new->vm_ops && new->vm_ops->open) + new->vm_ops->open(new); + + if (new_below) + vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + + ((addr - new->vm_start) >> PAGE_SHIFT), new); + else + vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); + + if (vma_m) { + vma_set_policy(new_m, pol_m); + + if (new_m->vm_file) + get_file(new_m->vm_file); + + if (new_m->vm_ops && new_m->vm_ops->open) + new_m->vm_ops->open(new_m); + + if (new_below) + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff + + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m); + else + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m); + } + + return 0; +} +#else int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long addr, int new_below) { @@ -1774,14 +2055,28 @@ int split_vma(struct mm_struct * mm, str return 0; } +#endif /* Munmap is split into 2 main parts -- this part which finds * what needs doing, and the areas themselves, which do the * work. This now handles partial unmappings. 
* Jeremy Fitzhardinge */ +#ifdef CONFIG_PAX_SEGMEXEC int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) { + int ret = __do_munmap(mm, start, len); + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC)) + return ret; + + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len); +} + +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len) +#else +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) +#endif +{ unsigned long end; struct vm_area_struct *vma, *prev, *last; @@ -1834,6 +2124,8 @@ int do_munmap(struct mm_struct *mm, unsi /* Fix up all other VM information */ remove_vma_list(mm, vma); + track_exec_limit(mm, start, end, 0UL); + return 0; } @@ -1846,6 +2138,12 @@ asmlinkage long sys_munmap(unsigned long profile_munmap(addr); +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len)) + return -EINVAL; +#endif + down_write(&mm->mmap_sem); ret = do_munmap(mm, addr, len); up_write(&mm->mmap_sem); @@ -1875,6 +2173,11 @@ unsigned long do_brk(unsigned long addr, struct rb_node ** rb_link, * rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; + unsigned long charged; + +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *vma_m = NULL; +#endif len = PAGE_ALIGN(len); if (!len) @@ -1888,19 +2191,34 @@ unsigned long do_brk(unsigned long addr, flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { + flags &= ~VM_EXEC; + +#ifdef CONFIG_PAX_MPROTECT + if (mm->pax_flags & MF_PAX_MPROTECT) + flags &= ~VM_MAYEXEC; +#endif + + } +#endif + error = arch_mmap_check(addr, len, flags); if (error) return error; + charged = len >> PAGE_SHIFT; + /* * mlock MCL_FUTURE? */ if (mm->def_flags & VM_LOCKED) { unsigned long locked, lock_limit; - locked = len >> PAGE_SHIFT; + locked = charged; locked += mm->locked_vm; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; lock_limit >>= PAGE_SHIFT; + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; if (!vx_vmlocked_avail(mm, len >> PAGE_SHIFT)) @@ -1916,23 +2234,23 @@ unsigned long do_brk(unsigned long addr, /* * Clear old maps. this also does some error checking for us */ - munmap_back: vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); if (vma && vma->vm_start < addr + len) { if (do_munmap(mm, addr, len)) return -ENOMEM; - goto munmap_back; + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); + BUG_ON(vma && vma->vm_start < addr + len); } /* Check against address space limits *after* clearing old maps... */ - if (!may_expand_vm(mm, len >> PAGE_SHIFT)) + if (!may_expand_vm(mm, charged)) return -ENOMEM; if (mm->map_count > sysctl_max_map_count) return -ENOMEM; - if (security_vm_enough_memory(len >> PAGE_SHIFT) || - !vx_vmpages_avail(mm, len >> PAGE_SHIFT)) + if (security_vm_enough_memory(charged) || + !vx_vmpages_avail(mm, charged)) return -ENOMEM; /* Can we just expand an old private anonymous mapping? 
*/ @@ -1942,24 +2260,41 @@ unsigned long do_brk(unsigned long addr, */ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { - vm_unacct_memory(len >> PAGE_SHIFT); + vm_unacct_memory(charged); return -ENOMEM; } +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (flags & VM_EXEC)) { + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + if (!vma_m) { + kmem_cache_free(vm_area_cachep, vma); + vm_unacct_memory(charged); + return -ENOMEM; + } + } +#endif + vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_pgoff = pgoff; vma->vm_flags = flags; - vma->vm_page_prot = protection_map[flags & - (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]; + vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); + +#ifdef CONFIG_PAX_SEGMEXEC + if (vma_m) + pax_mirror_vma(vma_m, vma); +#endif + out: - vx_vmpages_add(mm, len >> PAGE_SHIFT); + vx_vmpages_add(mm, charged); if (flags & VM_LOCKED) { - vx_vmlocked_add(mm, len >> PAGE_SHIFT); + vx_vmlocked_add(mm, charged); make_pages_present(addr, addr + len); } + track_exec_limit(mm, addr, addr + len, flags); return addr; } @@ -1990,8 +2325,10 @@ void exit_mmap(struct mm_struct *mm) * Walk the list again, actually closing and freeing it, * with preemption enabled, without holding any MM locks. */ - while (vma) + while (vma) { + vma->vm_mirror = NULL; vma = remove_vma(vma); + } BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); } @@ -2005,6 +2342,10 @@ int insert_vm_struct(struct mm_struct * struct vm_area_struct * __vma, * prev; struct rb_node ** rb_link, * rb_parent; +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *vma_m = NULL; +#endif + /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index @@ -2036,7 +2377,22 @@ int insert_vm_struct(struct mm_struct * (security_vm_enough_memory(vma_pages(vma)) || !vx_vmpages_avail(mm, vma_pages(vma)))) return -ENOMEM; + +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) { + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + if (!vma_m) + return -ENOMEM; + } +#endif + vma_link(mm, vma, prev, rb_link, rb_parent); + +#ifdef CONFIG_PAX_SEGMEXEC + if (vma_m) + pax_mirror_vma(vma_m, vma); +#endif + return 0; } @@ -2094,6 +2450,30 @@ struct vm_area_struct *copy_vma(struct v return new_vma; } +#ifdef CONFIG_PAX_SEGMEXEC +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma) +{ + struct vm_area_struct *prev_m; + struct rb_node **rb_link_m, *rb_parent_m; + + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)); + BUG_ON(vma->vm_mirror || vma_m->vm_mirror || vma_policy(vma)); + *vma_m = *vma; + vma_m->vm_start += SEGMEXEC_TASK_SIZE; + vma_m->vm_end += SEGMEXEC_TASK_SIZE; + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED); + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags); + if (vma_m->vm_file) + get_file(vma_m->vm_file); + if (vma_m->vm_ops && vma_m->vm_ops->open) + vma_m->vm_ops->open(vma_m); + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m); + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m); + vma_m->vm_mirror = vma; + vma->vm_mirror = vma_m; +} +#endif + /* * Return true if the calling process may expand its vm space by the passed * number of pages @@ -2104,7 +2484,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long lim; lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT; - + 
gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1); if (cur + npages > lim) return 0; if (!vx_vmpages_avail(mm, npages)) @@ -2118,7 +2498,7 @@ @@ static struct page *special_mapping_nopa { struct page **pages; - BUG_ON(address < vma->vm_start || address >= vma->vm_end); + BUG_ON(address < vma->vm_start || address >= vma->vm_end || (address & ~PAGE_MASK)); address -= vma->vm_start; for (pages = vma->vm_private_data; address > 0 && *pages; ++pages) @@ -2168,8 +2548,17 @@ int install_special_mapping(struct mm_st vma->vm_start = addr; vma->vm_end = addr + len; +#ifdef CONFIG_PAX_MPROTECT + if (mm->pax_flags & MF_PAX_MPROTECT) { + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC) + vm_flags &= ~(VM_EXEC | VM_MAYEXEC); + else + vm_flags &= ~(VM_WRITE | VM_MAYWRITE); + } +#endif + vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND; - vma->vm_page_prot = protection_map[vma->vm_flags & 7]; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); vma->vm_ops = &special_mapping_vmops; vma->vm_private_data = pages; diff -urNp linux-2.6.22.1/mm/mprotect.c linux-2.6.22.1/mm/mprotect.c --- linux-2.6.22.1/mm/mprotect.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/mprotect.c 2007-08-02 11:41:53.000000000 -0400 @@ -21,10 +21,17 @@ #include #include #include +#include + +#ifdef CONFIG_PAX_MPROTECT +#include +#endif + #include #include #include #include +#include static void change_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot, @@ -128,6 +135,49 @@ static void change_protection(struct vm_ flush_tlb_range(vma, start, end); } +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT +/* called while holding the mmap semaphor for writing */ +static inline void establish_user_cs_limit(struct mm_struct *mm, unsigned long start, unsigned long end) +{ + struct vm_area_struct *vma = find_vma(mm, start); + + for (; vma && vma->vm_start < end; vma = vma->vm_next) + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma)); +} + +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) +{ + unsigned long oldlimit, newlimit = 0UL; + + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled) + return; + + spin_lock(&mm->page_table_lock); + oldlimit = mm->context.user_cs_limit; + if ((prot & VM_EXEC) && oldlimit < end) + /* USER_CS limit moved up */ + newlimit = end; + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end) + /* USER_CS limit moved down */ + newlimit = start; + + if (newlimit) { + mm->context.user_cs_limit = newlimit; + +#ifdef CONFIG_SMP + wmb(); + cpus_clear(mm->context.cpu_user_cs_mask); + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask); +#endif + + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id()); + } + spin_unlock(&mm->page_table_lock); + if (newlimit == end) + establish_user_cs_limit(mm, oldlimit, end); +} +#endif + static int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags) @@ -140,11 +190,39 @@ mprotect_fixup(struct vm_area_struct *vm int error; int dirty_accountable = 0; +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *vma_m = NULL; + unsigned long start_m, end_m; + + start_m = start + SEGMEXEC_TASK_SIZE; + end_m = end + SEGMEXEC_TASK_SIZE; +#endif + if (newflags == oldflags) { *pprev = vma; return 0; } +#ifdef CONFIG_PAX_SEGMEXEC + if (pax_find_mirror_vma(vma) && !(newflags & VM_EXEC)) { + if 
(start != vma->vm_start) { + error = split_vma(mm, vma, start, 1); + if (error) + return -ENOMEM; + } + + if (end != vma->vm_end) { + error = split_vma(mm, vma, end, 0); + if (error) + return -ENOMEM; + } + + error = __do_munmap(mm, start_m, end_m - start_m); + if (error) + return -ENOMEM; + } +#endif + /* * If we make a private mapping writable we increase our commit; * but (without finer accounting) cannot reduce our commit if we @@ -187,17 +265,25 @@ mprotect_fixup(struct vm_area_struct *vm goto fail; } +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(oldflags & VM_EXEC) && (newflags & VM_EXEC)) { + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + if (!vma_m) { + error = -ENOMEM; + goto fail; + } + } +#endif + success: /* * vm_flags and vm_page_prot are protected by the mmap_sem * held in write mode. */ vma->vm_flags = newflags; - vma->vm_page_prot = protection_map[newflags & - (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]; + vma->vm_page_prot = vm_get_page_prot(newflags); if (vma_wants_writenotify(vma)) { - vma->vm_page_prot = protection_map[newflags & - (VM_READ|VM_WRITE|VM_EXEC)]; + vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED); dirty_accountable = 1; } @@ -205,6 +291,12 @@ success: hugetlb_change_protection(vma, start, end, vma->vm_page_prot); else change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable); + +#ifdef CONFIG_PAX_SEGMEXEC + if (vma_m) + pax_mirror_vma(vma_m, vma); +#endif + vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); vm_stat_account(mm, newflags, vma->vm_file, nrpages); return 0; @@ -214,6 +306,70 @@ fail: return error; } +#ifdef CONFIG_PAX_MPROTECT +/* PaX: non-PIC ELF libraries need relocations on their executable segments + * therefore we'll grant them VM_MAYWRITE once during their life. + * + * The checks favour ld-linux.so behaviour which operates on a per ELF segment + * basis because we want to allow the common case and not the special ones. 
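The dynamic-section check described here can be reproduced from userspace. A rough sketch for a native-endian 64-bit ELF, using only standard <elf.h> definitions (the file handling is illustrative, not part of the patch): it walks the program headers for PT_DYNAMIC and reports DT_TEXTREL, or DT_FLAGS with DF_TEXTREL, which is the condition the routine below keys on before granting VM_MAYWRITE once.

#include <elf.h>
#include <stdio.h>
#include <string.h>

/* Sketch: report whether an ELF64 object needs text relocations,
 * i.e. the dynamic-section condition checked before VM_MAYWRITE
 * is granted.  Native-endian ELF64 only. */
int main(int argc, char **argv)
{
    Elf64_Ehdr eh;
    Elf64_Phdr ph;
    Elf64_Dyn dyn;
    FILE *f;
    int i, textrel = 0;

    if (argc != 2 || !(f = fopen(argv[1], "rb")))
        return 1;
    if (fread(&eh, sizeof(eh), 1, f) != 1 ||
        memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0)
        return 1;

    for (i = 0; i < eh.e_phnum; i++) {
        fseek(f, eh.e_phoff + (long)i * eh.e_phentsize, SEEK_SET);
        if (fread(&ph, sizeof(ph), 1, f) != 1)
            return 1;
        if (ph.p_type != PT_DYNAMIC)
            continue;
        /* scan the dynamic entries for DT_TEXTREL / DF_TEXTREL */
        fseek(f, ph.p_offset, SEEK_SET);
        while (fread(&dyn, sizeof(dyn), 1, f) == 1 &&
               dyn.d_tag != DT_NULL) {
            if (dyn.d_tag == DT_TEXTREL ||
                (dyn.d_tag == DT_FLAGS &&
                 (dyn.d_un.d_val & DF_TEXTREL)))
                textrel = 1;
        }
    }
    printf("%s: %s\n", argv[1],
           textrel ? "DT_TEXTREL present" : "no text relocations");
    fclose(f);
    return 0;
}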
+ */ +static inline void pax_handle_maywrite(struct vm_area_struct *vma, unsigned long start) +{ + struct elfhdr elf_h; + struct elf_phdr elf_p; + elf_addr_t dyn_offset = 0UL; + elf_dyn dyn; + unsigned long i, j = 65536UL / sizeof(struct elf_phdr); + +#ifndef CONFIG_PAX_NOELFRELOCS + if ((vma->vm_start != start) || + !vma->vm_file || + !(vma->vm_flags & VM_MAYEXEC) || + (vma->vm_flags & VM_MAYNOTWRITE)) +#endif + + return; + + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) || + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) || + +#ifdef CONFIG_PAX_ETEXECRELOCS + (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || +#else + elf_h.e_type != ET_DYN || +#endif + + !elf_check_arch(&elf_h) || + elf_h.e_phentsize != sizeof(struct elf_phdr) || + elf_h.e_phnum > j) + return; + + for (i = 0UL; i < elf_h.e_phnum; i++) { + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p))) + return; + if (elf_p.p_type == PT_DYNAMIC) { + dyn_offset = elf_p.p_offset; + j = i; + } + } + if (elf_h.e_phnum <= j) + return; + + i = 0UL; + do { + if (sizeof(dyn) != kernel_read(vma->vm_file, dyn_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn))) + return; + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) { + vma->vm_flags |= VM_MAYWRITE | VM_MAYNOTWRITE; + gr_log_textrel(vma); + return; + } + i++; + } while (dyn.d_tag != DT_NULL); + return; +} +#endif + asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot) { @@ -233,6 +389,17 @@ sys_mprotect(unsigned long start, size_t end = start + len; if (end <= start) return -ENOMEM; + +#ifdef CONFIG_PAX_SEGMEXEC + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { + if (end > SEGMEXEC_TASK_SIZE) + return -EINVAL; + } else +#endif + + if (end > TASK_SIZE) + return -EINVAL; + if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) return -EINVAL; @@ -240,7 +407,7 @@ sys_mprotect(unsigned long start, size_t /* * Does the application expect PROT_READ to imply PROT_EXEC: */ - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC)) prot |= PROT_EXEC; vm_flags = calc_vm_prot_bits(prot); @@ -272,6 +439,16 @@ sys_mprotect(unsigned long start, size_t if (start > vma->vm_start) prev = vma; + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) { + error = -EACCES; + goto out; + } + +#ifdef CONFIG_PAX_MPROTECT + if ((vma->vm_mm->pax_flags & MF_PAX_MPROTECT) && (prot & PROT_WRITE)) + pax_handle_maywrite(vma, start); +#endif + for (nstart = start ; ; ) { unsigned long newflags; @@ -285,6 +462,12 @@ sys_mprotect(unsigned long start, size_t goto out; } +#ifdef CONFIG_PAX_MPROTECT + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... 
*/ + if ((vma->vm_mm->pax_flags & MF_PAX_MPROTECT) && !(prot & PROT_WRITE) && (vma->vm_flags & VM_MAYNOTWRITE)) + newflags &= ~VM_MAYWRITE; +#endif + error = security_file_mprotect(vma, reqprot, prot); if (error) goto out; @@ -295,6 +478,9 @@ sys_mprotect(unsigned long start, size_t error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); if (error) goto out; + + track_exec_limit(current->mm, nstart, tmp, vm_flags); + nstart = tmp; if (nstart < prev->vm_end) diff -urNp linux-2.6.22.1/mm/mremap.c linux-2.6.22.1/mm/mremap.c --- linux-2.6.22.1/mm/mremap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/mremap.c 2007-08-02 11:38:48.000000000 -0400 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_str continue; pte = ptep_clear_flush(vma, old_addr, old_pte); pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); + +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC) + pte = pte_exprotect(pte); +#endif + set_pte_at(mm, new_addr, new_pte, pte); } @@ -254,6 +260,7 @@ unsigned long do_mremap(unsigned long ad struct vm_area_struct *vma; unsigned long ret = -EINVAL; unsigned long charged = 0; + unsigned long task_size = TASK_SIZE; if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) goto out; @@ -272,6 +279,15 @@ unsigned long do_mremap(unsigned long ad if (!new_len) goto out; +#ifdef CONFIG_PAX_SEGMEXEC + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) + task_size = SEGMEXEC_TASK_SIZE; +#endif + + if (new_len > task_size || addr > task_size-new_len || + old_len > task_size || addr > task_size-old_len) + goto out; + /* new_addr is only valid if MREMAP_FIXED is specified */ if (flags & MREMAP_FIXED) { if (new_addr & ~PAGE_MASK) @@ -279,16 +295,13 @@ unsigned long do_mremap(unsigned long ad if (!(flags & MREMAP_MAYMOVE)) goto out; - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) + if (new_addr > task_size - new_len) goto out; /* Check if the location we're moving into overlaps the * old location at all, and fail if it does. */ - if ((new_addr <= addr) && (new_addr+new_len) > addr) - goto out; - - if ((addr <= new_addr) && (addr+old_len) > new_addr) + if (addr + old_len > new_addr && new_addr + new_len > addr) goto out; ret = do_munmap(mm, new_addr, new_len); @@ -322,6 +335,14 @@ unsigned long do_mremap(unsigned long ad ret = -EINVAL; goto out; } + +#ifdef CONFIG_PAX_SEGMEXEC + if (pax_find_mirror_vma(vma)) { + ret = -EINVAL; + goto out; + } +#endif + /* We can't remap across vm area boundaries */ if (old_len > vma->vm_end - addr) goto out; @@ -355,7 +376,7 @@ unsigned long do_mremap(unsigned long ad if (old_len == vma->vm_end - addr && !((flags & MREMAP_FIXED) && (addr != new_addr)) && (old_len != new_len || !(flags & MREMAP_MAYMOVE))) { - unsigned long max_addr = TASK_SIZE; + unsigned long max_addr = task_size; if (vma->vm_next) max_addr = vma->vm_next->vm_start; /* can we just expand the current mapping? 
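The rewritten MREMAP_FIXED overlap test in the mremap.c hunk above folds the two one-sided checks into the standard symmetric interval-overlap condition; for non-zero lengths both forms accept and reject exactly the same ranges. A small self-check illustrating the equivalence (the sample values are arbitrary):

#include <assert.h>
#include <stdio.h>

/* old form: two one-sided checks */
static int overlaps_old(unsigned long addr, unsigned long old_len,
                        unsigned long new_addr, unsigned long new_len)
{
    if (new_addr <= addr && new_addr + new_len > addr)
        return 1;
    if (addr <= new_addr && addr + old_len > new_addr)
        return 1;
    return 0;
}

/* new form: single symmetric interval-overlap test */
static int overlaps_new(unsigned long addr, unsigned long old_len,
                        unsigned long new_addr, unsigned long new_len)
{
    return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
    unsigned long a, n;

    /* brute-force a small grid of toy values with non-zero lengths */
    for (a = 0; a < 16; a++)
        for (n = 0; n < 16; n++)
            assert(overlaps_old(a, 4, n, 4) == overlaps_new(a, 4, n, 4));

    puts("old and new overlap checks agree");
    return 0;
}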
*/ @@ -373,6 +394,7 @@ unsigned long do_mremap(unsigned long ad addr + new_len); } ret = addr; + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags); goto out; } } @@ -383,8 +405,8 @@ unsigned long do_mremap(unsigned long ad */ ret = -ENOMEM; if (flags & MREMAP_MAYMOVE) { + unsigned long map_flags = 0; if (!(flags & MREMAP_FIXED)) { - unsigned long map_flags = 0; if (vma->vm_flags & VM_MAYSHARE) map_flags |= MAP_SHARED; @@ -394,7 +416,12 @@ unsigned long do_mremap(unsigned long ad if (new_addr & ~PAGE_MASK) goto out; } + map_flags = vma->vm_flags; ret = move_vma(vma, addr, old_len, new_len, new_addr); + if (!(ret & ~PAGE_MASK)) { + track_exec_limit(current->mm, addr, addr + old_len, 0UL); + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags); + } } out: if (ret & ~PAGE_MASK) diff -urNp linux-2.6.22.1/mm/nommu.c linux-2.6.22.1/mm/nommu.c --- linux-2.6.22.1/mm/nommu.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/nommu.c 2007-08-02 11:38:48.000000000 -0400 @@ -359,15 +359,6 @@ struct vm_area_struct *find_vma(struct m EXPORT_SYMBOL(find_vma); /* - * find a VMA - * - we don't extend stack VMAs under NOMMU conditions - */ -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) -{ - return find_vma(mm, addr); -} - -/* * look up the first VMA exactly that exactly matches addr * - should be called with mm->mmap_sem at least held readlocked */ diff -urNp linux-2.6.22.1/mm/page_alloc.c linux-2.6.22.1/mm/page_alloc.c --- linux-2.6.22.1/mm/page_alloc.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/page_alloc.c 2007-08-02 11:38:48.000000000 -0400 @@ -393,7 +393,7 @@ static inline int page_is_buddy(struct p static inline void __free_one_page(struct page *page, struct zone *zone, unsigned int order) { - unsigned long page_idx; + unsigned long page_idx, index; int order_size = 1 << order; if (unlikely(PageCompound(page))) @@ -404,6 +404,11 @@ static inline void __free_one_page(struc VM_BUG_ON(page_idx & (order_size - 1)); VM_BUG_ON(bad_range(zone, page)); +#ifdef CONFIG_PAX_MEMORY_SANITIZE + for (index = order_size; index; --index) + sanitize_highpage(page + index - 1); +#endif + __mod_zone_page_state(zone, NR_FREE_PAGES, order_size); while (order < MAX_ORDER-1) { unsigned long combined_idx; diff -urNp linux-2.6.22.1/mm/rmap.c linux-2.6.22.1/mm/rmap.c --- linux-2.6.22.1/mm/rmap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/rmap.c 2007-08-02 11:38:48.000000000 -0400 @@ -63,6 +63,10 @@ int anon_vma_prepare(struct vm_area_stru struct mm_struct *mm = vma->vm_mm; struct anon_vma *allocated, *locked; +#ifdef CONFIG_PAX_SEGMEXEC + struct vm_area_struct *vma_m; +#endif + anon_vma = find_mergeable_anon_vma(vma); if (anon_vma) { allocated = NULL; @@ -79,6 +83,15 @@ int anon_vma_prepare(struct vm_area_stru /* page_table_lock to protect against threads */ spin_lock(&mm->page_table_lock); if (likely(!vma->anon_vma)) { + +#ifdef CONFIG_PAX_SEGMEXEC + vma_m = pax_find_mirror_vma(vma); + if (vma_m) { + vma_m->anon_vma = anon_vma; + __anon_vma_link(vma_m); + } +#endif + vma->anon_vma = anon_vma; list_add_tail(&vma->anon_vma_node, &anon_vma->head); allocated = NULL; diff -urNp linux-2.6.22.1/mm/shmem.c linux-2.6.22.1/mm/shmem.c --- linux-2.6.22.1/mm/shmem.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/shmem.c 2007-08-02 11:09:17.000000000 -0400 @@ -2484,7 +2484,7 @@ static struct file_system_type tmpfs_fs_ .get_sb = shmem_get_sb, .kill_sb = kill_litter_super, }; -static struct vfsmount *shm_mnt; +struct 
vfsmount *shm_mnt; static int __init init_tmpfs(void) { diff -urNp linux-2.6.22.1/mm/slab.c linux-2.6.22.1/mm/slab.c --- linux-2.6.22.1/mm/slab.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/slab.c 2007-08-02 11:38:48.000000000 -0400 @@ -306,7 +306,7 @@ struct kmem_list3 { * Need this for bootstrapping a per node allocator. */ #define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1) -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS]; #define CACHE_CACHE 0 #define SIZE_AC 1 #define SIZE_L3 (1 + MAX_NUMNODES) @@ -655,14 +655,14 @@ struct cache_names { static struct cache_names __initdata cache_names[] = { #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, #include - {NULL,} + {NULL, NULL} #undef CACHE }; static struct arraycache_init initarray_cache __initdata = - { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; + { {0, BOOT_CPUCACHE_ENTRIES, 1, 0}, {NULL} }; static struct arraycache_init initarray_generic = - { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; + { {0, BOOT_CPUCACHE_ENTRIES, 1, 0}, {NULL} }; /* internal cache of cache description objs */ static struct kmem_cache cache_cache = { @@ -2977,7 +2977,7 @@ retry: * there must be at least one object available for * allocation. */ - BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num); + BUG_ON(slabp->inuse >= cachep->num); while (slabp->inuse < cachep->num && batchcount--) { STATS_INC_ALLOCED(cachep); diff -urNp linux-2.6.22.1/mm/slub.c linux-2.6.22.1/mm/slub.c --- linux-2.6.22.1/mm/slub.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/slub.c 2007-08-03 12:34:39.000000000 -0400 @@ -1480,7 +1480,7 @@ debug: * * Otherwise we can simply pick the next object from the lockless free list. */ -static void __always_inline *slab_alloc(struct kmem_cache *s, +static __always_inline void *slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, void *addr) { struct page *page; @@ -1585,7 +1585,7 @@ debug: * If fastpath is not possible then fall back to __slab_free where we deal * with all sorts of special processing. 
*/ -static void __always_inline slab_free(struct kmem_cache *s, +static __always_inline void slab_free(struct kmem_cache *s, struct page *page, void *x, void *addr) { void **object = (void *)x; diff -urNp linux-2.6.22.1/mm/swap.c linux-2.6.22.1/mm/swap.c --- linux-2.6.22.1/mm/swap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/swap.c 2007-08-02 11:38:48.000000000 -0400 @@ -174,8 +174,8 @@ EXPORT_SYMBOL(mark_page_accessed); * lru_cache_add: add a page to the page lists * @page: the page to add */ -static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, }; -static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, }; +static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, 0, {NULL} }; +static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, 0, {NULL} }; void fastcall lru_cache_add(struct page *page) { diff -urNp linux-2.6.22.1/mm/tiny-shmem.c linux-2.6.22.1/mm/tiny-shmem.c --- linux-2.6.22.1/mm/tiny-shmem.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/tiny-shmem.c 2007-08-02 11:09:17.000000000 -0400 @@ -26,7 +26,7 @@ static struct file_system_type tmpfs_fs_ .kill_sb = kill_litter_super, }; -static struct vfsmount *shm_mnt; +struct vfsmount *shm_mnt; static int __init init_tmpfs(void) { diff -urNp linux-2.6.22.1/mm/vmalloc.c linux-2.6.22.1/mm/vmalloc.c --- linux-2.6.22.1/mm/vmalloc.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/mm/vmalloc.c 2007-08-02 11:38:48.000000000 -0400 @@ -195,6 +195,8 @@ static struct vm_struct *__get_vm_area_n write_lock(&vmlist_lock); for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) { + if (addr > end - size) + goto out; if ((unsigned long)tmp->addr < addr) { if((unsigned long)tmp->addr + tmp->size >= addr) addr = ALIGN(tmp->size + @@ -206,8 +208,6 @@ static struct vm_struct *__get_vm_area_n if (size + addr <= (unsigned long)tmp->addr) goto found; addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align); - if (addr > end - size) - goto out; } found: diff -urNp linux-2.6.22.1/net/core/flow.c linux-2.6.22.1/net/core/flow.c --- linux-2.6.22.1/net/core/flow.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/core/flow.c 2007-08-02 11:38:48.000000000 -0400 @@ -40,7 +40,7 @@ atomic_t flow_cache_genid = ATOMIC_INIT( static u32 flow_hash_shift; #define flow_hash_size (1 << flow_hash_shift) -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL }; +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables); #define flow_table(cpu) (per_cpu(flow_tables, cpu)) @@ -53,7 +53,7 @@ struct flow_percpu_info { u32 hash_rnd; int count; } ____cacheline_aligned; -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 }; +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info); #define flow_hash_rnd_recalc(cpu) \ (per_cpu(flow_hash_info, cpu).hash_rnd_recalc) @@ -70,7 +70,7 @@ struct flow_flush_info { atomic_t cpuleft; struct completion completion; }; -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL }; +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets); #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu)) diff -urNp linux-2.6.22.1/net/dccp/ccids/ccid3.c linux-2.6.22.1/net/dccp/ccids/ccid3.c --- linux-2.6.22.1/net/dccp/ccids/ccid3.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/dccp/ccids/ccid3.c 2007-08-02 11:38:48.000000000 -0400 @@ -44,7 +44,7 @@ static int ccid3_debug; #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) #else -#define ccid3_pr_debug(format, a...) 
+#define ccid3_pr_debug(format, a...) do {} while (0) #endif static struct dccp_tx_hist *ccid3_tx_hist; @@ -829,7 +829,7 @@ static u32 ccid3_hc_rx_calc_first_li(str struct dccp_rx_hist_entry *entry, *next, *tail = NULL; u32 x_recv, p; suseconds_t rtt, delta; - struct timeval tstamp = { 0, }; + struct timeval tstamp = { 0, 0 }; int interval = 0; int win_count = 0; int step = 0; diff -urNp linux-2.6.22.1/net/dccp/dccp.h linux-2.6.22.1/net/dccp/dccp.h --- linux-2.6.22.1/net/dccp/dccp.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/dccp/dccp.h 2007-08-02 11:38:48.000000000 -0400 @@ -42,8 +42,8 @@ extern int dccp_debug; #define dccp_pr_debug(format, a...) DCCP_PR_DEBUG(dccp_debug, format, ##a) #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) #else -#define dccp_pr_debug(format, a...) -#define dccp_pr_debug_cat(format, a...) +#define dccp_pr_debug(format, a...) do {} while (0) +#define dccp_pr_debug_cat(format, a...) do {} while (0) #endif extern struct inet_hashinfo dccp_hashinfo; diff -urNp linux-2.6.22.1/net/ipv4/inet_connection_sock.c linux-2.6.22.1/net/ipv4/inet_connection_sock.c --- linux-2.6.22.1/net/ipv4/inet_connection_sock.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/ipv4/inet_connection_sock.c 2007-08-02 11:09:17.000000000 -0400 @@ -15,6 +15,7 @@ #include #include +#include #include #include diff -urNp linux-2.6.22.1/net/ipv4/inet_hashtables.c linux-2.6.22.1/net/ipv4/inet_hashtables.c --- linux-2.6.22.1/net/ipv4/inet_hashtables.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/ipv4/inet_hashtables.c 2007-08-02 11:09:17.000000000 -0400 @@ -18,11 +18,14 @@ #include #include #include +#include #include #include #include +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet); + /* * Allocate and initialize a new local port bind bucket. * The bindhash mutex for snum's hash chain must be held here. @@ -338,6 +341,8 @@ ok: } spin_unlock(&head->lock); + gr_update_task_in_ip_table(current, inet_sk(sk)); + if (tw) { inet_twsk_deschedule(tw, death_row); inet_twsk_put(tw); diff -urNp linux-2.6.22.1/net/ipv4/netfilter/ipt_stealth.c linux-2.6.22.1/net/ipv4/netfilter/ipt_stealth.c --- linux-2.6.22.1/net/ipv4/netfilter/ipt_stealth.c 1969-12-31 19:00:00.000000000 -0500 +++ linux-2.6.22.1/net/ipv4/netfilter/ipt_stealth.c 2007-08-02 11:09:17.000000000 -0400 @@ -0,0 +1,114 @@ +/* Kernel module to add stealth support. 
+ * + * Copyright (C) 2002-2006 Brad Spengler + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +MODULE_LICENSE("GPL"); + +extern struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif); + +static int +match(const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const struct xt_match *match, + const void *matchinfo, + int offset, + unsigned int protoff, + int *hotdrop) +{ + struct iphdr *ip = ip_hdr(skb); + struct tcphdr th; + struct udphdr uh; + struct sock *sk = NULL; + + if (!ip || offset) return 0; + + switch(ip->protocol) { + case IPPROTO_TCP: + if (skb_copy_bits(skb, (ip_hdr(skb))->ihl*4, &th, sizeof(th)) < 0) { + *hotdrop = 1; + return 0; + } + if (!(th.syn && !th.ack)) return 0; + sk = inet_lookup_listener(&tcp_hashinfo, ip->daddr, th.dest, inet_iif(skb)); + break; + case IPPROTO_UDP: + if (skb_copy_bits(skb, (ip_hdr(skb))->ihl*4, &uh, sizeof(uh)) < 0) { + *hotdrop = 1; + return 0; + } + sk = udp_v4_lookup(ip->saddr, uh.source, ip->daddr, uh.dest, skb->dev->ifindex); + break; + default: + return 0; + } + + if(!sk) // port is being listened on, match this + return 1; + else { + sock_put(sk); + return 0; + } +} + +/* Called when user tries to insert an entry of this type. */ +static int +checkentry(const char *tablename, + const void *nip, + const struct xt_match *match, + void *matchinfo, + unsigned int hook_mask) +{ + const struct ipt_ip *ip = (const struct ipt_ip *)nip; + + if(((ip->proto == IPPROTO_TCP && !(ip->invflags & IPT_INV_PROTO)) || + ((ip->proto == IPPROTO_UDP) && !(ip->invflags & IPT_INV_PROTO))) + && (hook_mask & (1 << NF_IP_LOCAL_IN))) + return 1; + + printk("stealth: Only works on TCP and UDP for the INPUT chain.\n"); + + return 0; +} + + +static struct xt_match stealth_match = { + .name = "stealth", + .family = AF_INET, + .match = match, + .checkentry = checkentry, + .destroy = NULL, + .me = THIS_MODULE +}; + +static int __init init(void) +{ + return xt_register_match(&stealth_match); +} + +static void __exit fini(void) +{ + xt_unregister_match(&stealth_match); +} + +module_init(init); +module_exit(fini); diff -urNp linux-2.6.22.1/net/ipv4/netfilter/Kconfig linux-2.6.22.1/net/ipv4/netfilter/Kconfig --- linux-2.6.22.1/net/ipv4/netfilter/Kconfig 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/ipv4/netfilter/Kconfig 2007-08-02 11:09:17.000000000 -0400 @@ -130,6 +130,21 @@ config IP_NF_MATCH_ADDRTYPE If you want to compile it as a module, say M here and read . If unsure, say `N'. +config IP_NF_MATCH_STEALTH + tristate "stealth match support" + depends on IP_NF_IPTABLES + help + Enabling this option will drop all syn packets coming to unserved tcp + ports as well as all packets coming to unserved udp ports. If you + are using your system to route any type of packets (ie. via NAT) + you should put this module at the end of your ruleset, since it will + drop packets that aren't going to ports that are listening on your + machine itself, it doesn't take into account that the packet might be + destined for someone on your internal network if you're using NAT for + instance. + + To compile it as a module, choose M here. If unsure, say N. + # `filter', generic and specific targets config IP_NF_FILTER tristate "Packet filtering" @@ -403,4 +418,3 @@ config IP_NF_ARP_MANGLE hardware and network addresses. 
endmenu - diff -urNp linux-2.6.22.1/net/ipv4/netfilter/Makefile linux-2.6.22.1/net/ipv4/netfilter/Makefile --- linux-2.6.22.1/net/ipv4/netfilter/Makefile 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/ipv4/netfilter/Makefile 2007-08-02 11:09:17.000000000 -0400 @@ -49,6 +49,7 @@ obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o +obj-$(CONFIG_IP_NF_MATCH_STEALTH) += ipt_stealth.o # targets obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o diff -urNp linux-2.6.22.1/net/ipv4/tcp_ipv4.c linux-2.6.22.1/net/ipv4/tcp_ipv4.c --- linux-2.6.22.1/net/ipv4/tcp_ipv4.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/ipv4/tcp_ipv4.c 2007-08-02 11:09:17.000000000 -0400 @@ -61,6 +61,7 @@ #include #include #include +#include #include #include diff -urNp linux-2.6.22.1/net/ipv4/udp.c linux-2.6.22.1/net/ipv4/udp.c --- linux-2.6.22.1/net/ipv4/udp.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/ipv4/udp.c 2007-08-02 11:28:52.000000000 -0400 @@ -97,12 +97,19 @@ #include #include #include +#include #include #include #include #include #include "udp_impl.h" +extern int gr_search_udp_recvmsg(const struct sock *sk, + const struct sk_buff *skb); +extern int gr_search_udp_sendmsg(const struct sock *sk, + const struct sockaddr_in *addr); + + /* * Snmp MIB for the UDP layer */ @@ -286,6 +293,13 @@ static struct sock *__udp4_lib_lookup(__ return result; } +struct sock *udp_v4_lookup(__be32 saddr, __be16 sport, + __be32 daddr, __be16 dport, int dif) +{ + return __udp4_lib_lookup(saddr, sport, daddr, dport, dif, udp_hash); +} + + static inline struct sock *udp_v4_mcast_next(struct sock *sk, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, @@ -569,9 +583,16 @@ int udp_sendmsg(struct kiocb *iocb, stru dport = usin->sin_port; if (dport == 0) return -EINVAL; + + if (!gr_search_udp_sendmsg(sk, usin)) + return -EPERM; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; + + if (!gr_search_udp_sendmsg(sk, NULL)) + return -EPERM; + daddr = inet->daddr; dport = inet->dport; /* Open fast path for connected socket. 
@@ -833,6 +854,11 @@ try_again: if (!skb) goto out; + if (!gr_search_udp_recvmsg(sk, skb)) { + err = -EPERM; + goto out_free; + } + ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) diff -urNp linux-2.6.22.1/net/ipv6/exthdrs.c linux-2.6.22.1/net/ipv6/exthdrs.c --- linux-2.6.22.1/net/ipv6/exthdrs.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/ipv6/exthdrs.c 2007-08-02 11:38:48.000000000 -0400 @@ -737,7 +737,7 @@ static struct tlvtype_proc tlvprochopopt .type = IPV6_TLV_JUMBO, .func = ipv6_hop_jumbo, }, - { -1, } + { -1, NULL } }; int ipv6_parse_hopopts(struct sk_buff **skbp) diff -urNp linux-2.6.22.1/net/ipv6/raw.c linux-2.6.22.1/net/ipv6/raw.c --- linux-2.6.22.1/net/ipv6/raw.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/ipv6/raw.c 2007-08-02 11:38:48.000000000 -0400 @@ -549,7 +549,7 @@ out: return err; } -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length, struct flowi *fl, struct rt6_info *rt, unsigned int flags) { diff -urNp linux-2.6.22.1/net/irda/ircomm/ircomm_tty.c linux-2.6.22.1/net/irda/ircomm/ircomm_tty.c --- linux-2.6.22.1/net/irda/ircomm/ircomm_tty.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/irda/ircomm/ircomm_tty.c 2007-08-02 11:38:48.000000000 -0400 @@ -371,7 +371,7 @@ static int ircomm_tty_open(struct tty_st IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); line = tty->index; - if ((line < 0) || (line >= IRCOMM_TTY_PORTS)) { + if (line >= IRCOMM_TTY_PORTS) { return -ENODEV; } diff -urNp linux-2.6.22.1/net/mac80211/ieee80211.c linux-2.6.22.1/net/mac80211/ieee80211.c --- linux-2.6.22.1/net/mac80211/ieee80211.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/mac80211/ieee80211.c 2007-08-02 11:38:48.000000000 -0400 @@ -1118,7 +1118,7 @@ ieee80211_tx_h_ps_buf(struct ieee80211_t } -static void inline +static inline void __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, struct sk_buff *skb, struct net_device *dev, @@ -1164,7 +1164,7 @@ __ieee80211_tx_prepare(struct ieee80211_ } -static int inline is_ieee80211_device(struct net_device *dev, +static inline int is_ieee80211_device(struct net_device *dev, struct net_device *master) { return (wdev_priv(dev->ieee80211_ptr) == @@ -1173,7 +1173,7 @@ static int inline is_ieee80211_device(st /* Device in tx->dev has a reference added; use dev_put(tx->dev) when * finished with it. 
*/ -static int inline ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, +static inline int ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, struct sk_buff *skb, struct net_device *mdev, struct ieee80211_tx_control *control) diff -urNp linux-2.6.22.1/net/mac80211/ieee80211_ioctl.c linux-2.6.22.1/net/mac80211/ieee80211_ioctl.c --- linux-2.6.22.1/net/mac80211/ieee80211_ioctl.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/mac80211/ieee80211_ioctl.c 2007-08-02 11:38:48.000000000 -0400 @@ -399,14 +399,14 @@ static const struct ieee80211_channel_ra { 5180, 5240, 17, 6 } /* IEEE 802.11a, channels 36..48 */, { 5260, 5320, 23, 6 } /* IEEE 802.11a, channels 52..64 */, { 5745, 5825, 30, 6 } /* IEEE 802.11a, channels 149..165, outdoor */, - { 0 } + { 0, 0, 0, 0 } }; static const struct ieee80211_channel_range ieee80211_mkk_channels[] = { { 2412, 2472, 20, 6 } /* IEEE 802.11b/g, channels 1..13 */, { 5170, 5240, 20, 6 } /* IEEE 802.11a, channels 34..48 */, { 5260, 5320, 20, 6 } /* IEEE 802.11a, channels 52..64 */, - { 0 } + { 0, 0, 0, 0 } }; diff -urNp linux-2.6.22.1/net/sctp/socket.c linux-2.6.22.1/net/sctp/socket.c --- linux-2.6.22.1/net/sctp/socket.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/sctp/socket.c 2007-08-02 11:38:48.000000000 -0400 @@ -1391,7 +1391,7 @@ SCTP_STATIC int sctp_sendmsg(struct kioc struct sctp_sndrcvinfo *sinfo; struct sctp_initmsg *sinit; sctp_assoc_t associd = 0; - sctp_cmsgs_t cmsgs = { NULL }; + sctp_cmsgs_t cmsgs = { NULL, NULL }; int err; sctp_scope_t scope; long timeo; diff -urNp linux-2.6.22.1/net/socket.c linux-2.6.22.1/net/socket.c --- linux-2.6.22.1/net/socket.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/socket.c 2007-08-02 12:03:21.000000000 -0400 @@ -84,6 +84,7 @@ #include #include #include +#include #include #include @@ -93,6 +94,21 @@ #include #include +extern void gr_attach_curr_ip(const struct sock *sk); +extern int gr_handle_sock_all(const int family, const int type, + const int protocol); +extern int gr_handle_sock_server(const struct sockaddr *sck); +extern int gr_handle_sock_server_other(const struct socket *sck); +extern int gr_handle_sock_client(const struct sockaddr *sck); +extern int gr_search_connect(const struct socket * sock, + const struct sockaddr_in * addr); +extern int gr_search_bind(const struct socket * sock, + const struct sockaddr_in * addr); +extern int gr_search_listen(const struct socket * sock); +extern int gr_search_accept(const struct socket * sock); +extern int gr_search_socket(const int domain, const int type, + const int protocol); + static int sock_no_open(struct inode *irrelevant, struct file *dontcare); static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); @@ -293,7 +309,7 @@ static int sockfs_get_sb(struct file_sys mnt); } -static struct vfsmount *sock_mnt __read_mostly; +struct vfsmount *sock_mnt __read_mostly; static struct file_system_type sock_fs_type = { .name = "sockfs", @@ -1203,6 +1219,16 @@ asmlinkage long sys_socket(int family, i int retval; struct socket *sock; + if(!gr_search_socket(family, type, protocol)) { + retval = -EACCES; + goto out; + } + + if (gr_handle_sock_all(family, type, protocol)) { + retval = -EACCES; + goto out; + } + retval = sock_create(family, type, protocol, &sock); if (retval < 0) goto out; @@ -1330,6 +1356,12 @@ asmlinkage long sys_bind(int fd, struct if (sock) { err = move_addr_to_kernel(umyaddr, addrlen, address); if (err >= 0) { + if (!gr_search_bind(sock, (struct sockaddr_in *)address) 
|| + gr_handle_sock_server((struct sockaddr *)address)) { + err = -EACCES; + goto error; + } + err = security_socket_bind(sock, (struct sockaddr *)address, addrlen); @@ -1338,6 +1370,7 @@ asmlinkage long sys_bind(int fd, struct (struct sockaddr *) address, addrlen); } +error: fput_light(sock->file, fput_needed); } return err; @@ -1361,10 +1394,17 @@ asmlinkage long sys_listen(int fd, int b if ((unsigned)backlog > sysctl_somaxconn) backlog = sysctl_somaxconn; + if (gr_handle_sock_server_other(sock) || + !gr_search_listen(sock)) { + err = -EPERM; + goto error; + } + err = security_socket_listen(sock, backlog); if (!err) err = sock->ops->listen(sock, backlog); +error: fput_light(sock->file, fput_needed); } return err; @@ -1401,6 +1441,13 @@ asmlinkage long sys_accept(int fd, struc newsock->type = sock->type; newsock->ops = sock->ops; + if (gr_handle_sock_server_other(sock) || + !gr_search_accept(sock)) { + err = -EPERM; + sock_release(newsock); + goto out_put; + } + /* * We don't need try_module_get here, as the listening socket (sock) * has the protocol module (sock->ops->owner) held. @@ -1444,6 +1491,7 @@ asmlinkage long sys_accept(int fd, struc err = newfd; security_socket_post_accept(sock, newsock); + gr_attach_curr_ip(newsock->sk); out_put: fput_light(sock->file, fput_needed); @@ -1477,6 +1525,7 @@ asmlinkage long sys_connect(int fd, stru { struct socket *sock; char address[MAX_SOCK_ADDR]; + struct sockaddr *sck; int err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); @@ -1486,6 +1535,13 @@ asmlinkage long sys_connect(int fd, stru if (err < 0) goto out_put; + sck = (struct sockaddr *)address; + if (!gr_search_connect(sock, (struct sockaddr_in *)sck) || + gr_handle_sock_client(sck)) { + err = -EACCES; + goto out_put; + } + err = security_socket_connect(sock, (struct sockaddr *)address, addrlen); if (err) @@ -1763,6 +1819,7 @@ asmlinkage long sys_shutdown(int fd, int err = sock->ops->shutdown(sock, how); fput_light(sock->file, fput_needed); } + return err; } diff -urNp linux-2.6.22.1/net/unix/af_unix.c linux-2.6.22.1/net/unix/af_unix.c --- linux-2.6.22.1/net/unix/af_unix.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/net/unix/af_unix.c 2007-08-02 11:09:17.000000000 -0400 @@ -115,6 +115,7 @@ #include #include #include +#include int sysctl_unix_max_dgram_qlen __read_mostly = 10; @@ -706,6 +707,11 @@ static struct sock *unix_find_other(stru if (err) goto put_fail; + if (!gr_acl_handle_unix(nd.dentry, nd.mnt)) { + err = -EACCES; + goto put_fail; + } + err = -ECONNREFUSED; if (!S_ISSOCK(nd.dentry->d_inode->i_mode)) goto put_fail; @@ -729,6 +735,13 @@ static struct sock *unix_find_other(stru if (u) { struct dentry *dentry; dentry = unix_sk(u)->dentry; + + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) { + err = -EPERM; + sock_put(u); + goto fail; + } + if (dentry) touch_atime(unix_sk(u)->mnt, dentry); } else @@ -807,9 +820,18 @@ static int unix_bind(struct socket *sock */ mode = S_IFSOCK | (SOCK_INODE(sock)->i_mode & ~current->fs->umask); + + if (!gr_acl_handle_mknod(dentry, nd.dentry, nd.mnt, mode)) { + err = -EACCES; + goto out_mknod_dput; + } + err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0, NULL); if (err) goto out_mknod_dput; + + gr_handle_create(dentry, nd.mnt); + mutex_unlock(&nd.dentry->d_inode->i_mutex); dput(nd.dentry); nd.dentry = dentry; @@ -827,6 +849,10 @@ static int unix_bind(struct socket *sock goto out_unlock; } +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX + sk->sk_peercred.pid = current->pid; +#endif + list = &unix_socket_table[addr->hash]; } else { 
list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)]; diff -urNp linux-2.6.22.1/scripts/pnmtologo.c linux-2.6.22.1/scripts/pnmtologo.c --- linux-2.6.22.1/scripts/pnmtologo.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/scripts/pnmtologo.c 2007-08-02 11:38:48.000000000 -0400 @@ -237,14 +237,14 @@ static void write_header(void) fprintf(out, " * Linux logo %s\n", logoname); fputs(" */\n\n", out); fputs("#include \n\n", out); - fprintf(out, "static unsigned char %s_data[] __initdata = {\n", + fprintf(out, "static unsigned char %s_data[] = {\n", logoname); } static void write_footer(void) { fputs("\n};\n\n", out); - fprintf(out, "struct linux_logo %s __initdata = {\n", logoname); + fprintf(out, "struct linux_logo %s = {\n", logoname); fprintf(out, " .type\t= %s,\n", logo_types[logo_type]); fprintf(out, " .width\t= %d,\n", logo_width); fprintf(out, " .height\t= %d,\n", logo_height); @@ -374,7 +374,7 @@ static void write_logo_clut224(void) fputs("\n};\n\n", out); /* write logo clut */ - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n", + fprintf(out, "static unsigned char %s_clut[] = {\n", logoname); write_hex_cnt = 0; for (i = 0; i < logo_clutsize; i++) { diff -urNp linux-2.6.22.1/security/commoncap.c linux-2.6.22.1/security/commoncap.c --- linux-2.6.22.1/security/commoncap.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/security/commoncap.c 2007-08-02 11:09:17.000000000 -0400 @@ -22,10 +22,11 @@ #include #include #include +#include int cap_netlink_send(struct sock *sk, struct sk_buff *skb) { - cap_t(NETLINK_CB(skb).eff_cap) = vx_mbcap(cap_effective); + cap_t(NETLINK_CB(skb).eff_cap) = gr_cap_rtnetlink(); return 0; } @@ -43,7 +44,15 @@ EXPORT_SYMBOL(cap_netlink_recv); int cap_capable (struct task_struct *tsk, int cap) { /* Derived from include/linux/sched.h:capable. */ - if (vx_cap_raised(tsk->vx_info, tsk->cap_effective, cap)) + if (vx_cap_raised(tsk->vx_info, tsk->cap_effective, cap)) + return 0; + return -EPERM; +} + +int cap_capable_nolog (struct task_struct *tsk, int cap) +{ + /* tsk = current for all callers */ + if (vx_cap_raised(tsk->vx_info, tsk->cap_effective, cap) && gr_is_capable_nolog(cap)) return 0; return -EPERM; } @@ -164,8 +173,11 @@ void cap_bprm_apply_creds (struct linux_ } } - current->suid = current->euid = current->fsuid = bprm->e_uid; - current->sgid = current->egid = current->fsgid = bprm->e_gid; + if (!gr_check_user_change(-1, bprm->e_uid, bprm->e_uid)) + current->suid = current->euid = current->fsuid = bprm->e_uid; + + if (!gr_check_group_change(-1, bprm->e_gid, bprm->e_gid)) + current->sgid = current->egid = current->fsgid = bprm->e_gid; /* For init, we want to retain the capabilities set * in the init_task struct. 
Thus we skip the usual @@ -176,6 +188,8 @@ void cap_bprm_apply_creds (struct linux_ cap_intersect (new_permitted, bprm->cap_effective); } + gr_handle_chroot_caps(current); + /* AUD: Audit candidate if current->cap_effective is set */ current->keep_capabilities = 0; @@ -322,12 +336,13 @@ int cap_vm_enough_memory(long pages) { int cap_sys_admin = 0; - if (cap_capable(current, CAP_SYS_ADMIN) == 0) + if (cap_capable_nolog(current, CAP_SYS_ADMIN) == 0) cap_sys_admin = 1; return __vm_enough_memory(pages, cap_sys_admin); } EXPORT_SYMBOL(cap_capable); +EXPORT_SYMBOL(cap_capable_nolog); EXPORT_SYMBOL(cap_settime); EXPORT_SYMBOL(cap_ptrace); EXPORT_SYMBOL(cap_capget); diff -urNp linux-2.6.22.1/security/dummy.c linux-2.6.22.1/security/dummy.c --- linux-2.6.22.1/security/dummy.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/security/dummy.c 2007-08-02 11:09:17.000000000 -0400 @@ -28,6 +28,7 @@ #include #include #include +#include static int dummy_ptrace (struct task_struct *parent, struct task_struct *child) { @@ -138,8 +139,11 @@ static void dummy_bprm_apply_creds (stru } } - current->suid = current->euid = current->fsuid = bprm->e_uid; - current->sgid = current->egid = current->fsgid = bprm->e_gid; + if (!gr_check_user_change(-1, bprm->e_uid, bprm->e_uid)) + current->suid = current->euid = current->fsuid = bprm->e_uid; + + if (!gr_check_group_change(-1, bprm->e_gid, bprm->e_gid)) + current->sgid = current->egid = current->fsgid = bprm->e_gid; dummy_capget(current, ¤t->cap_effective, ¤t->cap_inheritable, ¤t->cap_permitted); } diff -urNp linux-2.6.22.1/security/Kconfig linux-2.6.22.1/security/Kconfig --- linux-2.6.22.1/security/Kconfig 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/security/Kconfig 2007-08-03 12:37:41.000000000 -0400 @@ -4,6 +4,429 @@ menu "Security options" +source grsecurity/Kconfig + +menu "PaX" + +config PAX + bool "Enable various PaX features" + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC32 || PPC64 || SPARC32 || SPARC64 || X86 || X86_64) + help + This allows you to enable various PaX features. PaX adds + intrusion prevention mechanisms to the kernel that reduce + the risks posed by exploitable memory corruption bugs. + +menu "PaX Control" + depends on PAX + +config PAX_SOFTMODE + bool 'Support soft mode' + help + Enabling this option will allow you to run PaX in soft mode, that + is, PaX features will not be enforced by default, only on executables + marked explicitly. You must also enable PT_PAX_FLAGS support as it + is the only way to mark executables for soft mode use. + + Soft mode can be activated by using the "pax_softmode=1" kernel command + line option on boot. Furthermore you can control various PaX features + at runtime via the entries in /proc/sys/kernel/pax. + +config PAX_EI_PAX + bool 'Use legacy ELF header marking' + help + Enabling this option will allow you to control PaX features on + a per executable basis via the 'chpax' utility available at + http://pax.grsecurity.net/. The control flags will be read from + an otherwise reserved part of the ELF header. This marking has + numerous drawbacks (no support for soft-mode, toolchain does not + know about the non-standard use of the ELF header) therefore it + has been deprecated in favour of PT_PAX_FLAGS support. + + If you have applications not marked by the PT_PAX_FLAGS ELF + program header then you MUST enable this option otherwise they + will not get any protection. 
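The two marking schemes store the control flags in different places in the ELF file: EI_PAX uses otherwise reserved bytes of e_ident, while PT_PAX_FLAGS uses a dedicated program header written by paxctl. A rough userspace sketch of the PT_PAX_FLAGS side is below; the 0x65041580 header type value comes from the PaX userland tools and patches and is an assumption here, since this help text does not spell it out:

#include <elf.h>
#include <stdio.h>
#include <string.h>

/* Assumed constant: the PaX program header type used by paxctl.
 * Defined by the PaX patches, not by this Kconfig section. */
#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS 0x65041580
#endif

/* Sketch: print p_flags of a PT_PAX_FLAGS header, if present,
 * in a native-endian ELF64 file. */
int main(int argc, char **argv)
{
    Elf64_Ehdr eh;
    Elf64_Phdr ph;
    FILE *f;
    int i;

    if (argc != 2 || !(f = fopen(argv[1], "rb")))
        return 1;
    if (fread(&eh, sizeof(eh), 1, f) != 1 ||
        memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0)
        return 1;

    for (i = 0; i < eh.e_phnum; i++) {
        fseek(f, eh.e_phoff + (long)i * eh.e_phentsize, SEEK_SET);
        if (fread(&ph, sizeof(ph), 1, f) != 1)
            return 1;
        if (ph.p_type == PT_PAX_FLAGS) {
            printf("PT_PAX_FLAGS p_flags = %#x\n", (unsigned)ph.p_flags);
            fclose(f);
            return 0;
        }
    }
    printf("no PT_PAX_FLAGS header (legacy EI_PAX marking or unmarked)\n");
    fclose(f);
    return 0;
}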
+ + Note that if you enable PT_PAX_FLAGS marking support as well, + the PT_PAX_FLAG marks will override the legacy EI_PAX marks. + +config PAX_PT_PAX_FLAGS + bool 'Use ELF program header marking' + help + Enabling this option will allow you to control PaX features on + a per executable basis via the 'paxctl' utility available at + http://pax.grsecurity.net/. The control flags will be read from + a PaX specific ELF program header (PT_PAX_FLAGS). This marking + has the benefits of supporting both soft mode and being fully + integrated into the toolchain (the binutils patch is available + from http://pax.grsecurity.net). + + If you have applications not marked by the PT_PAX_FLAGS ELF + program header then you MUST enable the EI_PAX marking support + otherwise they will not get any protection. + + Note that if you enable the legacy EI_PAX marking support as well, + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks. + +choice + prompt 'MAC system integration' + default PAX_HAVE_ACL_FLAGS + help + Mandatory Access Control systems have the option of controlling + PaX flags on a per executable basis, choose the method supported + by your particular system. + + - "none": if your MAC system does not interact with PaX, + - "direct": if your MAC system defines pax_set_initial_flags() itself, + - "hook": if your MAC system uses the pax_set_initial_flags_func callback. + + NOTE: this option is for developers/integrators only. + +config PAX_NO_ACL_FLAGS + bool 'none' + +config PAX_HAVE_ACL_FLAGS + bool 'direct' + +config PAX_HOOK_ACL_FLAGS + bool 'hook' +endchoice + +endmenu + +menu "Non-executable pages" + depends on PAX + +config PAX_NOEXEC + bool "Enforce non-executable pages" + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || IA64 || MIPS32 || MIPS64 || PARISC || PPC32 || PPC64 || SPARC32 || SPARC64 || X86 || X86_64) + help + By design some architectures do not allow for protecting memory + pages against execution or even if they do, Linux does not make + use of this feature. In practice this means that if a page is + readable (such as the stack or heap) it is also executable. + + There is a well known exploit technique that makes use of this + fact and a common programming mistake where an attacker can + introduce code of his choice somewhere in the attacked program's + memory (typically the stack or the heap) and then execute it. + + If the attacked program was running with different (typically + higher) privileges than that of the attacker, then he can elevate + his own privilege level (e.g. get a root shell, write to files for + which he does not have write access to, etc). + + Enabling this option will let you choose from various features + that prevent the injection and execution of 'foreign' code in + a program. + + This will also break programs that rely on the old behaviour and + expect that dynamically allocated memory via the malloc() family + of functions is executable (which it is not). Notable examples + are the XFree86 4.x server, the java runtime and wine. + +config PAX_PAGEEXEC + bool "Paging based non-executable pages" + depends on !COMPAT_VDSO && PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2) + help + This implementation is based on the paging feature of the CPU. 
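A concrete illustration of the behaviour the NOEXEC help above describes, and of what breaks for programs that expect malloc()ed memory to be executable. The snippet is x86-specific (0xc3 is the "ret" opcode) and deliberately faults where non-executable pages are enforced:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* x86-only illustration of what PAGEEXEC/SEGMEXEC (or hardware NX)
 * prevent: treating plain readable/writable heap memory as code.
 * On an enforcing kernel the indirect call below is expected to be
 * killed with SIGSEGV; on a legacy i386 kernel without non-executable
 * page support it simply returns. */
int main(void)
{
    unsigned char *buf = malloc(16);
    void (*fn)(void);

    if (!buf)
        return 1;
    buf[0] = 0xc3;                    /* ret */
    memcpy(&fn, &buf, sizeof(fn));    /* avoid a direct data/function cast */
    puts("calling code placed in heap memory...");
    fn();                             /* dies here when NOEXEC is enforced */
    puts("heap was executable (no PAGEEXEC/SEGMEXEC/NX in effect)");
    free(buf);
    return 0;
}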
+ On i386 without hardware non-executable bit support there is a + variable but usually low performance impact, however on Intel's + P4 core based CPUs it is very high so you should not enable this + for kernels meant to be used on such CPUs. + + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386 + with hardware non-executable bit support there is no performance + impact, on ppc the impact is negligible. + + Note that several architectures require various emulations due to + badly designed userland ABIs, this will cause a performance impact + but will disappear as soon as userland is fixed (e.g., ppc users + can make use of the secure-plt feature found in binutils). + +config PAX_SEGMEXEC + bool "Segmentation based non-executable pages" + depends on !COMPAT_VDSO && PAX_NOEXEC && X86_32 + help + This implementation is based on the segmentation feature of the + CPU and has a very small performance impact, however applications + will be limited to a 1.5 GB address space instead of the normal + 3 GB. + +config PAX_EMUTRAMP + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || PPC32 || X86_32) + default y if PARISC || PPC32 + help + There are some programs and libraries that for one reason or + another attempt to execute special small code snippets from + non-executable memory pages. Most notable examples are the + signal handler return code generated by the kernel itself and + the GCC trampolines. + + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then + such programs will no longer work under your kernel. + + As a remedy you can say Y here and use the 'chpax' or 'paxctl' + utilities to enable trampoline emulation for the affected programs + yet still have the protection provided by the non-executable pages. + + On parisc and ppc you MUST enable this option and EMUSIGRT as + well, otherwise your system will not even boot. + + Alternatively you can say N here and use the 'chpax' or 'paxctl' + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC + for the affected files. + + NOTE: enabling this feature *may* open up a loophole in the + protection provided by non-executable pages that an attacker + could abuse. Therefore the best solution is to not have any + files on your system that would require this option. This can + be achieved by not using libc5 (which relies on the kernel + signal handler return code) and not using or rewriting programs + that make use of the nested function implementation of GCC. + Skilled users can just fix GCC itself so that it implements + nested function calls in a way that does not interfere with PaX. + +config PAX_EMUSIGRT + bool "Automatically emulate sigreturn trampolines" + depends on PAX_EMUTRAMP && (PARISC || PPC32) + default y + help + Enabling this option will have the kernel automatically detect + and emulate signal return trampolines executing on the stack + that would otherwise lead to task termination. + + This solution is intended as a temporary one for users with + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17, + Modula-3 runtime, etc) or executables linked to such, basically + everything that does not specify its own SA_RESTORER function in + normal executable memory like glibc 2.1+ does. + + On parisc and ppc you MUST enable this option, otherwise your + system will not even boot. 
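For the GCC trampolines mentioned in the EMUTRAMP help above, a minimal GNU C example: taking the address of a nested function that uses a local of the enclosing function makes gcc build a small code stub on the stack, which is exactly the kind of code EMUTRAMP has to recognise and emulate once the stack is non-executable. gcc also marks such objects as wanting an executable stack, which is why PaX handles this per file via chpax/paxctl rather than with a blanket exception.

#include <stdio.h>

/* Nested functions are a GNU C extension; passing "add" to apply()
 * takes its address, so gcc emits a stack trampoline that carries the
 * pointer to the enclosing frame (needed to reach "base"). */
static int apply(int (*fn)(int), int x)
{
    return fn(x);
}

int main(void)
{
    int base = 10;

    int add(int x)          /* nested function, gcc only */
    {
        return base + x;
    }

    printf("%d\n", apply(add, 32));   /* call goes through the trampoline */
    return 0;
}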
+
+	  NOTE: this feature cannot be disabled on a per executable basis,
+	  and since it *does* open up a loophole in the protection provided
+	  by non-executable pages, the best solution is to not have any
+	  files on your system that would require this option.
+
+config PAX_MPROTECT
+	bool "Restrict mprotect()"
+	depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) && !PPC64
+	help
+	  Enabling this option will prevent programs from
+	   - changing the executable status of memory pages that were
+	     not originally created as executable,
+	   - making read-only executable pages writable again,
+	   - creating executable pages from anonymous memory.
+
+	  You should say Y here to complete the protection provided by
+	  the enforcement of non-executable pages.
+
+	  NOTE: you can use the 'chpax' or 'paxctl' utilities to control
+	  this feature on a per file basis.
+
+config PAX_NOELFRELOCS
+	bool "Disallow ELF text relocations"
+	depends on PAX_MPROTECT && !PAX_ETEXECRELOCS && (IA64 || X86 || X86_64)
+	help
+	  Non-executable pages and mprotect() restrictions are effective
+	  in preventing the introduction of new executable code into an
+	  attacked task's address space. There remain only two avenues
+	  for this kind of attack: if the attacker can execute already
+	  existing code in the attacked task, then he can either have it
+	  create and mmap() a file containing his code, or have it mmap()
+	  an already existing ELF library that does not contain position
+	  independent code and use mprotect() on it to make it writable
+	  and copy his code there. While protecting against the former
+	  approach is beyond PaX, the latter can be prevented by having
+	  only PIC ELF libraries on one's system (which do not need to
+	  relocate their code). If you are sure this is your case, then
+	  enable this option; otherwise be careful, as you may not even
+	  be able to boot or log on to your system (for example, some PAM
+	  modules are erroneously compiled as non-PIC by default).
+
+	  NOTE: if you are using dynamic ELF executables (as suggested
+	  when using ASLR) then you must have made sure that you linked
+	  your files using the PIC version of crt1 (the et_dyn.tar.gz package
+	  referenced there has already been updated to support this).
+
+config PAX_ETEXECRELOCS
+	bool "Allow ELF ET_EXEC text relocations"
+	depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
+	default y
+	help
+	  On some architectures there are incorrectly created applications
+	  that require text relocations and would not work without enabling
+	  this option. If you are an alpha, ia64 or parisc user, you should
+	  enable this option and disable it once you have made sure that
+	  none of your applications need it.
+
+config PAX_EMUPLT
+	bool "Automatically emulate ELF PLT"
+	depends on PAX_MPROTECT && (ALPHA || PARISC || PPC32 || SPARC32 || SPARC64)
+	default y
+	help
+	  Enabling this option will have the kernel automatically detect
+	  and emulate the Procedure Linkage Table entries in ELF files.
+	  On some architectures such entries reside in writable memory and
+	  therefore become non-executable, which would lead to task
+	  termination when they are executed. Therefore it is mandatory
+	  that you enable this option on alpha, parisc, ppc (if secure-plt
+	  is not used throughout in userland), sparc and sparc64; otherwise
+	  your system would not even boot.
+
+	  NOTE: this feature *does* open up a loophole in the protection
+	  provided by the non-executable pages; therefore the proper
+	  solution is to modify the toolchain to produce a PLT that does
+	  not need to be writable.
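The mprotect() restriction described by PAX_MPROTECT above is easiest to see from userland. The following sketch is illustrative only and not part of the patch; it assumes a kernel with PAX_MPROTECT active and a test binary that has not been exempted via 'chpax'/'paxctl'. The second mprotect() call asks for PROT_EXEC on an anonymous mapping that was never created executable, which is exactly the kind of transition the option forbids, so on such a kernel it is expected to fail with EPERM.

/* pax_mprotect_demo.c - hypothetical file name, for illustration only */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;

	/* anonymous mapping created read/write, deliberately without PROT_EXEC */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* try to turn the non-executable mapping into an executable one */
	if (mprotect(p, len, PROT_READ | PROT_EXEC) == -1)
		printf("mprotect(PROT_EXEC) denied: %s\n", strerror(errno));
	else
		printf("mprotect(PROT_EXEC) allowed\n");

	munmap(p, len);
	return 0;
}

On a kernel without the restriction (or with the binary exempted on a per file basis) the same call succeeds, which is why the help text above recommends saying Y to complete the non-executable page protection.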
+
+config PAX_DLRESOLVE
+	bool
+	depends on PAX_EMUPLT && (SPARC32 || SPARC64)
+	default y
+
+config PAX_SYSCALL
+	bool
+	depends on PAX_PAGEEXEC && PPC32
+	default y
+
+config PAX_KERNEXEC
+	bool "Enforce non-executable kernel pages"
+	depends on PAX_NOEXEC && X86_32 && !EFI && !COMPAT_VDSO && X86_WP_WORKS_OK && !PARAVIRT
+	help
+	  This is the kernel-land equivalent of PAGEEXEC and MPROTECT;
+	  that is, enabling this option will make it harder to inject
+	  and execute 'foreign' code in kernel memory itself.
+
+endmenu
+
+menu "Address Space Layout Randomization"
+	depends on PAX
+
+config PAX_ASLR
+	bool "Address Space Layout Randomization"
+	depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
+	help
+	  Many if not most exploit techniques rely on the knowledge of
+	  certain addresses in the attacked program. The following options
+	  will allow the kernel to apply a certain amount of randomization
+	  to specific parts of the program, thereby forcing an attacker to
+	  guess them in most cases. Any failed guess will most likely crash
+	  the attacked program, which allows the kernel to detect such
+	  attempts and react to them. PaX itself provides no reaction
+	  mechanisms; instead, it is strongly encouraged that you make use
+	  of Nergal's segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/)
+	  or grsecurity's (http://www.grsecurity.net/) built-in crash
+	  detection features, or develop one yourself.
+
+	  By saying Y here you can choose to randomize the following areas:
+	   - top of the task's kernel stack
+	   - top of the task's userland stack
+	   - base address for mmap() requests that do not specify one
+	     (this includes all libraries)
+	   - base address of the main executable
+
+	  It is strongly recommended to say Y here, as address space layout
+	  randomization has negligible impact on performance yet provides
+	  very effective protection.
+
+	  NOTE: you can use the 'chpax' or 'paxctl' utilities to control
+	  this feature on a per file basis.
+
+config PAX_RANDKSTACK
+	bool "Randomize kernel stack base"
+	depends on PAX_ASLR && X86_TSC && X86_32
+	help
+	  By saying Y here the kernel will randomize every task's kernel
+	  stack on every system call. This will not only force an attacker
+	  to guess it but also prevent him from making use of possibly
+	  leaked information about it.
+
+	  Since the kernel stack is a rather scarce resource, randomization
+	  may cause unexpected stack overflows; therefore you should test
+	  your system very carefully. Note that once enabled in the kernel
+	  configuration, this feature cannot be disabled on a per file basis.
+
+config PAX_RANDUSTACK
+	bool "Randomize user stack base"
+	depends on PAX_ASLR
+	help
+	  By saying Y here the kernel will randomize every task's userland
+	  stack. The randomization is done in two steps, where the second
+	  one may shift the top of the stack by a large amount and thereby
+	  cause problems for programs that want to use lots of memory (more
+	  than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
+	  For this reason the second step can be controlled by 'chpax' or
+	  'paxctl' on a per file basis.
+
+config PAX_RANDMMAP
+	bool "Randomize mmap() base"
+	depends on PAX_ASLR
+	help
+	  By saying Y here the kernel will use a randomized base address for
+	  mmap() requests that do not specify one themselves. As a result
+	  all dynamically loaded libraries will appear at random addresses
+	  and therefore be harder to exploit by a technique where an attacker
+	  attempts to execute library code for his purposes (e.g. spawn a
+	  shell from an exploited program that is running at an elevated
+	  privilege level).
+
+	  Furthermore, if a program is relinked as a dynamic ELF file, its
+	  base address will be randomized as well, completing the full
+	  randomization of the address space layout. Attacking such programs
+	  becomes a guessing game. You can find an example of doing this at
+	  http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
+	  http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
+
+	  NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
+	  feature on a per file basis.
+
+endmenu
+
+menu "Miscellaneous hardening features"
+
+config PAX_MEMORY_SANITIZE
+	bool "Sanitize all freed memory"
+	help
+	  By saying Y here the kernel will erase memory pages as soon as they
+	  are freed. This in turn reduces the lifetime of data stored in the
+	  pages, making it less likely that sensitive information such as
+	  passwords, cryptographic secrets, etc. stays in memory for too long.
+
+	  This is especially useful for programs whose runtime is short;
+	  long-lived processes and the kernel itself also benefit from it,
+	  as long as they operate on whole memory pages and ensure timely
+	  freeing of pages that may hold sensitive information.
+
+	  The tradeoff is a performance impact: on a single-CPU system,
+	  kernel compilation sees a 3% slowdown; other systems and workloads
+	  may vary, and you are advised to test this feature on your expected
+	  workload before deploying it.
+
+	  Note that this feature does not protect data stored in live pages,
+	  e.g., process memory swapped to disk may stay there for a long time.
+
+config PAX_MEMORY_UDEREF
+	bool "Prevent invalid userland pointer dereference"
+	depends on X86_32 && !COMPAT_VDSO
+	help
+	  By saying Y here the kernel will be prevented from dereferencing
+	  userland pointers in contexts where the kernel expects only kernel
+	  pointers. This is both a useful runtime debugging feature and a
+	  security measure that prevents exploiting a class of kernel bugs.
+
+	  The tradeoff is that some virtualization solutions may experience
+	  a huge slowdown, and therefore you should not enable this feature
+	  for kernels meant to run in such environments. Whether a given VM
+	  solution is affected or not is best determined by simply trying it
+	  out; the performance impact will be obvious right from boot, as
+	  this mechanism engages very early on. A good rule of thumb is that
+	  VMs running on CPUs without hardware virtualization support (i.e.,
+	  the majority of IA-32 CPUs) will likely experience the slowdown.
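The mmap() base randomization described by PAX_RANDMMAP above can be observed with another small userland sketch (again illustrative only, not part of the patch, and assuming the test binary has not been exempted via 'chpax'/'paxctl'). Run the program several times and compare the printed addresses; with the feature active, hint-less anonymous mappings are placed relative to the randomized mmap base and land at a different address on each run.

/* pax_randmmap_demo.c - hypothetical file name, for illustration only */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* no address hint, so placement is up to the kernel's mmap base */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* with PAX_RANDMMAP active this address varies between runs */
	printf("anonymous mapping at %p\n", p);

	munmap(p, 4096);
	return 0;
}

Comparing shared library load addresses across runs (for example by reading /proc/self/maps) shows the same effect for dynamically loaded libraries, which is what the help text above refers to.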
+ +endmenu + +endmenu + config KEYS bool "Enable access key retention support" depends on !VSERVER_SECURITY diff -urNp linux-2.6.22.1/sound/core/oss/pcm_oss.c linux-2.6.22.1/sound/core/oss/pcm_oss.c --- linux-2.6.22.1/sound/core/oss/pcm_oss.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/sound/core/oss/pcm_oss.c 2007-08-02 11:38:48.000000000 -0400 @@ -2880,8 +2880,8 @@ static void snd_pcm_oss_proc_done(struct } } #else /* !CONFIG_SND_VERBOSE_PROCFS */ -#define snd_pcm_oss_proc_init(pcm) -#define snd_pcm_oss_proc_done(pcm) +#define snd_pcm_oss_proc_init(pcm) do {} while (0) +#define snd_pcm_oss_proc_done(pcm) do {} while (0) #endif /* CONFIG_SND_VERBOSE_PROCFS */ /* diff -urNp linux-2.6.22.1/sound/core/seq/seq_lock.h linux-2.6.22.1/sound/core/seq/seq_lock.h --- linux-2.6.22.1/sound/core/seq/seq_lock.h 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/sound/core/seq/seq_lock.h 2007-08-02 11:38:48.000000000 -0400 @@ -23,10 +23,10 @@ void snd_use_lock_sync_helper(snd_use_lo #else /* SMP || CONFIG_SND_DEBUG */ typedef spinlock_t snd_use_lock_t; /* dummy */ -#define snd_use_lock_init(lockp) /**/ -#define snd_use_lock_use(lockp) /**/ -#define snd_use_lock_free(lockp) /**/ -#define snd_use_lock_sync(lockp) /**/ +#define snd_use_lock_init(lockp) do {} while (0) +#define snd_use_lock_use(lockp) do {} while (0) +#define snd_use_lock_free(lockp) do {} while (0) +#define snd_use_lock_sync(lockp) do {} while (0) #endif /* SMP || CONFIG_SND_DEBUG */ diff -urNp linux-2.6.22.1/sound/pci/ac97/ac97_patch.c linux-2.6.22.1/sound/pci/ac97/ac97_patch.c --- linux-2.6.22.1/sound/pci/ac97/ac97_patch.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/sound/pci/ac97/ac97_patch.c 2007-08-02 11:38:48.000000000 -0400 @@ -1415,7 +1415,7 @@ static const struct snd_ac97_res_table a { AC97_VIDEO, 0x9f1f }, { AC97_AUX, 0x9f1f }, { AC97_PCM, 0x9f1f }, - { } /* terminator */ + { 0, 0 } /* terminator */ }; static int patch_ad1819(struct snd_ac97 * ac97) @@ -3489,7 +3489,7 @@ static struct snd_ac97_res_table lm4550_ { AC97_AUX, 0x1f1f }, { AC97_PCM, 0x1f1f }, { AC97_REC_GAIN, 0x0f0f }, - { } /* terminator */ + { 0, 0 } /* terminator */ }; static int patch_lm4550(struct snd_ac97 *ac97) diff -urNp linux-2.6.22.1/sound/pci/ens1370.c linux-2.6.22.1/sound/pci/ens1370.c --- linux-2.6.22.1/sound/pci/ens1370.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/sound/pci/ens1370.c 2007-08-02 11:38:48.000000000 -0400 @@ -453,7 +453,7 @@ static struct pci_device_id snd_audiopci { 0x1274, 0x5880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* ES1373 - CT5880 */ { 0x1102, 0x8938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* Ectiva EV1938 */ #endif - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, snd_audiopci_ids); diff -urNp linux-2.6.22.1/sound/pci/intel8x0.c linux-2.6.22.1/sound/pci/intel8x0.c --- linux-2.6.22.1/sound/pci/intel8x0.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/sound/pci/intel8x0.c 2007-08-02 11:38:48.000000000 -0400 @@ -436,7 +436,7 @@ static struct pci_device_id snd_intel8x0 { 0x1022, 0x746d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD8111 */ { 0x1022, 0x7445, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD768 */ { 0x10b9, 0x5455, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALI }, /* Ali5455 */ - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, snd_intel8x0_ids); @@ -2044,7 +2044,7 @@ static struct ac97_quirk ac97_quirks[] _ .type = AC97_TUNE_HP_ONLY }, #endif - { } /* terminator */ + { 0, 0, 0, 0, NULL, 0 } /* terminator */ }; static int __devinit 
snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock, diff -urNp linux-2.6.22.1/sound/pci/intel8x0m.c linux-2.6.22.1/sound/pci/intel8x0m.c --- linux-2.6.22.1/sound/pci/intel8x0m.c 2007-07-10 14:56:30.000000000 -0400 +++ linux-2.6.22.1/sound/pci/intel8x0m.c 2007-08-02 11:38:48.000000000 -0400 @@ -240,7 +240,7 @@ static struct pci_device_id snd_intel8x0 { 0x1022, 0x746d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD8111 */ { 0x10b9, 0x5455, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALI }, /* Ali5455 */ #endif - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, snd_intel8x0m_ids); @@ -1261,7 +1261,7 @@ static struct shortname_table { { 0x5455, "ALi M5455" }, { 0x746d, "AMD AMD8111" }, #endif - { 0 }, + { 0, NULL }, }; static int __devinit snd_intel8x0m_probe(struct pci_dev *pci, --- linux-2.6.22./fs/ext3/balloc.c 2007-08-09 00:16:48.425144000 +0200 +++ linux-2.6.22/fs/ext3/balloc.c 2007-08-09 20:38:20.862277750 +0200 @@ -1373,14 +1373,14 @@ static int ext3_has_free_blocks(struct s DLIMIT_ADJUST_BLOCK(sb, dx_current_tag(), &free_blocks, &root_blocks); cond = (free_blocks < root_blocks + 1 && - !capable(CAP_SYS_RESOURCE) && + !capable_nolog(CAP_SYS_RESOURCE) && sbi->s_resuid != current->fsuid && (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))); vxdprintk(VXD_CBIT(dlim, 3), "ext3_has_free_blocks(%p): %llu<%llu+1, %c, %u!=%u r=%d", sb, free_blocks, root_blocks, - !capable(CAP_SYS_RESOURCE)?'1':'0', + !capable_nolog(CAP_SYS_RESOURCE)?'1':'0', sbi->s_resuid, current->fsuid, cond?0:1); return (cond ? 0 : 1); diff -urNp linux-2.6.22/fs/ext3/namei.c linux-2.6.22/fs/ext3/namei.c --- linux-2.6.22/fs/ext3/namei.c 2007-09-00 00:00:00.000000000 -0400 +++ linux-2.6.22/fs/ext3/namei.c 2007-09-00 00:00:00.000000000 -0400 @@ -1178,9 +1178,9 @@ static struct ext3_dir_entry_2 *do_split u32 hash2; struct dx_map_entry *map; char *data1 = (*bh)->b_data, *data2; - unsigned split, move, size, i; + unsigned split, move, size; struct ext3_dir_entry_2 *de = NULL, *de2; - int err = 0; + int i, err = 0; bh2 = ext3_append (handle, dir, &newblock, &err); if (!(bh2)) { diff -urNp linux-2.6.22/fs/ext3/xattr.c linux-2.6.22/fs/ext3/xattr.c --- linux-2.6.22/fs/ext3/xattr.c 2007-09-00 00:00:00.000000000 -0400 +++ linux-2.6.22/fs/ext3/xattr.c 2007-09-00 00:00:00.000000000 -0400 @@ -89,8 +89,8 @@ printk("\n"); \ } while (0) #else -# define ea_idebug(f...) -# define ea_bdebug(f...) +# define ea_idebug(f...) do {} while (0) +# define ea_bdebug(f...) do {} while (0) #endif static void ext3_xattr_cache_insert(struct buffer_head *); diff -urNp linux-2.6.22./fs/ext4/balloc.c linux-2.6.22/fs/ext4/balloc.c --- linux-2.6.22./fs/ext4/balloc.c 2007-08-09 00:16:48.441145000 +0200 +++ linux-2.6.22/fs/ext4/balloc.c 2007-08-09 20:40:25.878090750 +0200 @@ -1390,14 +1390,14 @@ static int ext4_has_free_blocks(struct s DLIMIT_ADJUST_BLOCK(sb, dx_current_tag(), &free_blocks, &root_blocks); cond = (free_blocks < root_blocks + 1 && - !capable(CAP_SYS_RESOURCE) && + !capable_nolog(CAP_SYS_RESOURCE) && sbi->s_resuid != current->fsuid && (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))); vxdprintk(VXD_CBIT(dlim, 3), "ext4_has_free_blocks(%p): %llu<%llu+1, %c, %u!=%u r=%d", sb, free_blocks, root_blocks, - !capable(CAP_SYS_RESOURCE)?'1':'0', + !capable_nolog(CAP_SYS_RESOURCE)?'1':'0', sbi->s_resuid, current->fsuid, cond?0:1); return (cond ? 
0 : 1); diff -urNp linux-2.6.22/fs/ext4/namei.c linux-2.6.22/fs/ext4/namei.c --- linux-2.6.22/fs/ext4/namei.c 2007-09-00 00:00:00.000000000 -0400 +++ linux-2.6.22/fs/ext4/namei.c 2007-09-00 00:00:00.000000000 -0400 @@ -1176,9 +1176,9 @@ static struct ext4_dir_entry_2 *do_split u32 hash2; struct dx_map_entry *map; char *data1 = (*bh)->b_data, *data2; - unsigned split, move, size, i; + unsigned split, move, size; struct ext4_dir_entry_2 *de = NULL, *de2; - int err = 0; + int i, err = 0; bh2 = ext4_append (handle, dir, &newblock, &err); if (!(bh2)) {